Completed
Pull Request — master (#946), created by Vincent at 01:22

blocks.bricks.ConvolutionalTransposeActivation — rating A

Complexity
    Total Complexity     3

Size/Duplication
    Total Lines          29
    Duplicated Lines     0 %

    Metric   Value
    dl       0
    loc      29
    rs       10
    wmc      3

3 Methods

    Rating   Name                          Duplication   Size   Complexity
    A        _push_allocation_config()     0             3      1
    A        __init__()                    0             7      1
    A        _build_convolution()          0             2      1

from theano import tensor
[Issue] Unused Code: Unused tensor imported from theano.
from theano.tensor.nnet import conv2d
from theano.tensor.nnet.abstract_conv import (AbstractConv2d_gradInputs,
                                              get_conv_output_shape)
from theano.tensor.signal.pool import pool_2d, Pool

from blocks.bricks import Initializable, Feedforward, Sequence
from blocks.bricks.base import application, Brick, lazy
from blocks.roles import add_role, FILTER, BIAS
from blocks.utils import shared_floatx_nans


class Convolutional(Initializable):
    """Performs a 2D convolution.

    Parameters
    ----------
    filter_size : tuple
        The height and width of the filter (also called *kernel*).
    num_filters : int
        Number of filters per channel.
    num_channels : int
        Number of input channels in the image. For the first layer this is
        normally 1 for grayscale images and 3 for color (RGB) images. For
        subsequent layers this is equal to the number of filters output by
        the previous convolutional layer. The filters are pooled over the
        channels.
    batch_size : int, optional
        Number of examples per batch. If given, this will be passed to
        the Theano convolution operator, possibly resulting in faster
        execution.
    image_size : tuple, optional
        The height and width of the input (image or feature map). If given,
        this will be passed to the Theano convolution operator, resulting
        in possibly faster execution times.
    step : tuple, optional
        The step (or stride) with which to slide the filters over the
        image. Defaults to (1, 1).
    border_mode : {'valid', 'full'}, optional
        The border mode to use, see :func:`scipy.signal.convolve2d` for
        details. Defaults to 'valid'.
    tied_biases : bool
        If ``True``, it indicates that the biases of every filter in this
        layer should be shared amongst all applications of that filter.
        Setting this to ``False`` will untie the biases, yielding a
        separate bias for every location at which the filter is applied.
        Defaults to ``False``.

    """
    # Make it possible to override the implementation of conv2d that gets
    # used, i.e. to use theano.sandbox.cuda.dnn.dnn_conv directly in order
    # to leverage features not yet available in Theano's standard conv2d.
    # The function you override with here should accept at least the
    # input and the kernels as positionals, and the keyword arguments
    # input_shape, subsample, border_mode, and filter_shape. If some of
    # these are unsupported they should still be accepted and ignored,
    # e.g. with a wrapper function that swallows **kwargs.
    conv2d_impl = staticmethod(conv2d)

    # Used to override the output shape computation for a given value of
    # conv2d_impl. Should accept 4 positional arguments: the shape of an
    # image minibatch (with 4 elements: batch size, number of channels,
    # height, and width), the shape of the filter bank (number of filters,
    # number of output channels, filter height, filter width), the border
    # mode, and the step (vertical and horizontal strides). It is expected
    # to return a 4-tuple of (batch size, number of channels, output
    # height, output width). The first element of this tuple is not used
    # for anything by this brick.
    get_output_shape = staticmethod(get_conv_output_shape)

    @lazy(allocation=['filter_size', 'num_filters', 'num_channels'])
    def __init__(self, filter_size, num_filters, num_channels, batch_size=None,
                 image_size=(None, None), step=(1, 1), border_mode='valid',
                 tied_biases=False, **kwargs):
        super(Convolutional, self).__init__(**kwargs)

        self.filter_size = filter_size
        self.num_filters = num_filters
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.step = step
        self.border_mode = border_mode
        self.tied_biases = tied_biases

    def _allocate(self):
        W = shared_floatx_nans((self.num_filters, self.num_channels) +
                               self.filter_size, name='W')
        add_role(W, FILTER)
        self.parameters.append(W)
        self.add_auxiliary_variable(W.norm(2), name='W_norm')
        if self.use_bias:
            if self.tied_biases:
                b = shared_floatx_nans((self.num_filters,), name='b')
            else:
                # this error is raised here instead of during initialization
                # because ConvolutionalSequence may specify the image size
                if self.image_size == (None, None) and not self.tied_biases:
                    raise ValueError('Cannot infer bias size without '
                                     'image_size specified. If you use '
                                     'variable image_size, you should use '
                                     'tied_biases=True.')

                b = shared_floatx_nans(self.get_dim('output'), name='b')
            add_role(b, BIAS)

            self.parameters.append(b)
            self.add_auxiliary_variable(b.norm(2), name='b_norm')

    def _initialize(self):
        if self.use_bias:
            W, b = self.parameters
[Issue] Bug: The tuple unpacking with the sequence defined at line 610 of blocks.bricks.base seems to be unbalanced; 2 value(s) for 0 label(s).
            self.biases_init.initialize(b, self.rng)
        else:
            W, = self.parameters
[Issue] Bug: The tuple unpacking with the sequence defined at line 610 of blocks.bricks.base seems to be unbalanced; 1 value(s) for 0 label(s).
        self.weights_init.initialize(W, self.rng)

    @application(inputs=['input_'], outputs=['output'])
    def apply(self, input_):
        """Perform the convolution.

        Parameters
        ----------
        input_ : :class:`~tensor.TensorVariable`
            A 4D tensor with the axes representing batch size, number of
            channels, image height, and image width.

        Returns
        -------
        output : :class:`~tensor.TensorVariable`
            A 4D tensor of filtered images (feature maps) with dimensions
            representing batch size, number of filters, feature map height,
            and feature map width.

            The height and width of the feature map depend on the border
            mode. For 'valid' it is ``image_size - filter_size + 1`` while
            for 'full' it is ``image_size + filter_size - 1``.

        """
        if self.use_bias:
            W, b = self.parameters
[Issue] Bug: The tuple unpacking with the sequence defined at line 610 of blocks.bricks.base seems to be unbalanced; 2 value(s) for 0 label(s).
        else:
            W, = self.parameters
[Issue] Bug: The tuple unpacking with the sequence defined at line 610 of blocks.bricks.base seems to be unbalanced; 1 value(s) for 0 label(s).

        if self.image_size == (None, None):
            input_shape = None
        else:
            input_shape = (self.batch_size, self.num_channels)
            input_shape += self.image_size

        output = self.conv2d_impl(
            input_, W,
            input_shape=input_shape,
            subsample=self.step,
            border_mode=self.border_mode,
            filter_shape=((self.num_filters, self.num_channels) +
                          self.filter_size))
        if self.use_bias:
            if self.tied_biases:
                output += b.dimshuffle('x', 0, 'x', 'x')
            else:
                output += b.dimshuffle('x', 0, 1, 2)
        return output

    def get_dim(self, name):
        if name == 'input_':
            return (self.num_channels,) + self.image_size
        if name == 'output':
            input_shape = (None, self.num_channels) + self.image_size
            kernel_shape = ((self.num_filters, self.num_channels) +
                            self.filter_size)
            out_shape = self.get_output_shape(input_shape, kernel_shape,
                                              self.border_mode, self.step)
            assert len(out_shape) == 4
            return out_shape[1:]
        return super(Convolutional, self).get_dim(name)

    @property
    def num_output_channels(self):
        return self.num_filters


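# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module under review): how a
# Convolutional brick might be constructed and applied. The sizes and the
# initialization schemes below are assumptions chosen for the example.
from blocks.initialization import Constant, IsotropicGaussian

example_conv = Convolutional(filter_size=(3, 3), num_filters=16,
                             num_channels=3, image_size=(28, 28),
                             weights_init=IsotropicGaussian(0.01),
                             biases_init=Constant(0))
example_conv.initialize()
example_output = example_conv.apply(tensor.tensor4('images'))
# With the default border_mode='valid', a 28x28 input yields 16 feature maps
# of size 26x26 (28 - 3 + 1), i.e. get_dim('output') == (16, 26, 26).
# ---------------------------------------------------------------------------

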
class ConvolutionalTranspose(Convolutional):
    """Performs the transpose of a 2D convolution.

    Parameters
    ----------
    original_image_size : tuple
        The height and width of the image that forms the output of
        the transpose operation, which is the input of the original
        (non-transposed) convolution.
    num_filters : int
        Number of filters at the *output* of the transposed convolution,
        i.e. the number of channels in the corresponding convolution.
    num_channels : int
        Number of channels at the *input* of the transposed convolution,
        i.e. the number of output filters in the corresponding
        convolution.
    step : tuple, optional
        The step (or stride) of the corresponding *convolution*.
        Defaults to (1, 1).
    image_size : tuple, optional
        Image size of the input to the *transposed* convolution, i.e.
        the output of the corresponding convolution. Required for tied
        biases. Defaults to ``None``.

    See Also
    --------
    :class:`Convolutional` : For the documentation of other parameters.

    """
    @lazy(allocation=['original_image_size', 'filter_size', 'num_filters',
                      'num_channels'])
    def __init__(self, original_image_size, filter_size, num_filters,
                 num_channels, **kwargs):
        super(ConvolutionalTranspose, self).__init__(
            filter_size, num_filters, num_channels, **kwargs)
        self.original_image_size = original_image_size

    def conv2d_impl(self, input_, W, input_shape, subsample, border_mode,
                    filter_shape):
[Issue] Unused Code: The argument input_shape seems to be unused.
        # The AbstractConv2d_gradInputs op takes a kernel that was used for the
        # **convolution**. We therefore have to invert num_channels and
        # num_filters for W.
        W = W.transpose(1, 0, 2, 3)
        imshp = (None,) + self.get_dim('output')
        kshp = (filter_shape[1], filter_shape[0]) + filter_shape[2:]
        return AbstractConv2d_gradInputs(
            imshp=imshp, kshp=kshp, border_mode=border_mode,
            subsample=subsample)(W, input_, self.get_dim('output')[1:])

    def get_dim(self, name):
        if name == 'output':
            return (self.num_filters,) + self.original_image_size
        return super(ConvolutionalTranspose, self).get_dim(name)


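# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module under review): a
# transposed convolution that maps 26x26 feature maps back to the 28x28
# input size of the corresponding 'valid' 3x3 convolution. All sizes and
# initialization schemes are assumptions for the example.
from blocks.initialization import Constant, IsotropicGaussian

example_conv_transpose = ConvolutionalTranspose(
    original_image_size=(28, 28), filter_size=(3, 3), num_filters=3,
    num_channels=16, image_size=(26, 26),
    weights_init=IsotropicGaussian(0.01), biases_init=Constant(0))
example_conv_transpose.initialize()
example_upsampled = example_conv_transpose.apply(
    tensor.tensor4('feature_maps'))
# get_dim('output') is (num_filters,) + original_image_size == (3, 28, 28).
# ---------------------------------------------------------------------------

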
class Pooling(Initializable, Feedforward):
    """Base Brick for pooling operations.

    This should generally not be instantiated directly; see
    :class:`MaxPooling`.

    """
    @lazy(allocation=['mode', 'pooling_size'])
    def __init__(self, mode, pooling_size, step, input_dim, ignore_border,
                 padding, **kwargs):
        super(Pooling, self).__init__(**kwargs)
        self.pooling_size = pooling_size
        self.mode = mode
        self.step = step
        self.input_dim = input_dim if input_dim is not None else (None,) * 3
        self.ignore_border = ignore_border
        self.padding = padding

    @property
    def image_size(self):
        return self.input_dim[-2:]

    @image_size.setter
    def image_size(self, value):
        self.input_dim = self.input_dim[:-2] + value

    @property
    def num_channels(self):
        return self.input_dim[0]

    @num_channels.setter
    def num_channels(self, value):
        self.input_dim = (value,) + self.input_dim[1:]

    @application(inputs=['input_'], outputs=['output'])
    def apply(self, input_):
        """Apply the pooling (subsampling) transformation.

        Parameters
        ----------
        input_ : :class:`~tensor.TensorVariable`
            A tensor with dimension greater than or equal to 2. The last two
            dimensions will be downsampled. For example, with images this
            means that the last two dimensions should represent the height
            and width of your image.

        Returns
        -------
        output : :class:`~tensor.TensorVariable`
            A tensor with the same number of dimensions as `input_`, but
            with the last two dimensions downsampled.

        """
        output = pool_2d(input_, self.pooling_size, st=self.step,
                         mode=self.mode, padding=self.padding,
                         ignore_border=self.ignore_border)
        return output

    def get_dim(self, name):
        if name == 'input_':
            return self.input_dim
        if name == 'output':
            return tuple(Pool.out_shape(
                self.input_dim, self.pooling_size, st=self.step,
                ignore_border=self.ignore_border, padding=self.padding))

    @property
    def num_output_channels(self):
        return self.input_dim[0]


class MaxPooling(Pooling):
    """Max pooling layer.

    Parameters
    ----------
    pooling_size : tuple
        The height and width of the pooling region, i.e. this is the factor
        by which your input's last two dimensions will be downscaled.
    step : tuple, optional
        The vertical and horizontal shift (stride) between pooling regions.
        By default this is equal to `pooling_size`. Setting this to a lower
        number results in overlapping pooling regions.
    input_dim : tuple, optional
        A tuple of integers representing the shape of the input. The last
        two dimensions will be used to calculate the output dimension.
    padding : tuple, optional
        A tuple of integers representing the vertical and horizontal
        zero-padding to be applied to each of the top and bottom
        (vertical) and left and right (horizontal) edges. For example,
        an argument of (4, 3) will apply 4 pixels of padding to the
        top edge, 4 pixels of padding to the bottom edge, and 3 pixels
        each for the left and right edge. By default, no padding is
        performed.
    ignore_border : bool, optional
        Whether or not to do partial downsampling based on borders where
        the extent of the pooling region reaches beyond the edge of the
        image. If `True`, a (5, 5) image with (2, 2) pooling regions
        and (2, 2) step will be downsampled to shape (2, 2), otherwise
        it will be downsampled to (3, 3). `True` by default.

    Notes
    -----
    .. warning::
        As of this writing, setting `ignore_border` to `False` with a step
        not equal to the pooling size will force Theano to perform pooling
        computations on CPU rather than GPU, even if you have specified
        a GPU as your computation device. Additionally, Theano will only
        use [cuDNN]_ (if available) for pooling computations with
        `ignore_border` set to `True`. You can ensure that the entire
        input is captured by at least one pool by using the `padding`
        argument to add zero padding prior to pooling being performed.

    .. [cuDNN]: `NVIDIA cuDNN <https://developer.nvidia.com/cudnn>`_.

    """
    @lazy(allocation=['pooling_size'])
    def __init__(self, pooling_size, step=None, input_dim=None,
                 ignore_border=True, padding=(0, 0),
                 **kwargs):
        super(MaxPooling, self).__init__('max', pooling_size,
                                         step=step, input_dim=input_dim,
                                         ignore_border=ignore_border,
                                         padding=padding, **kwargs)

    def __setstate__(self, state):
        self.__dict__.update(state)
        # Fix objects created before pull request #899.
        self.mode = getattr(self, 'mode', 'max')
        self.padding = getattr(self, 'padding', (0, 0))
        self.ignore_border = getattr(self, 'ignore_border', False)


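# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module under review): max
# pooling over 2x2 regions. The input_dim used here is an assumption.
example_pool = MaxPooling((2, 2), input_dim=(16, 5, 5))
example_pooled = example_pool.apply(tensor.tensor4('feature_maps'))
# With the default ignore_border=True and the step defaulting to the pooling
# size, get_dim('output') is (16, 2, 2); with ignore_border=False it would
# be (16, 3, 3), as described in the docstring above.
# ---------------------------------------------------------------------------

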
class AveragePooling(Pooling):
    """Average pooling layer.

    Parameters
    ----------
    include_padding : bool, optional
        When calculating an average, include zeros that are the
        result of zero padding added by the `padding` argument.
        A value of `True` is only accepted if `ignore_border`
        is also `True`. `False` by default.

    Notes
    -----
    For documentation on the remainder of the arguments to this
    class, see :class:`MaxPooling`.

    """
    @lazy(allocation=['pooling_size'])
    def __init__(self, pooling_size, step=None, input_dim=None,
                 ignore_border=True, padding=(0, 0),
                 include_padding=False, **kwargs):
        mode = 'average_inc_pad' if include_padding else 'average_exc_pad'
        super(AveragePooling, self).__init__(mode, pooling_size,
                                             step=step, input_dim=input_dim,
                                             ignore_border=ignore_border,
                                             padding=padding, **kwargs)


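# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module under review): average
# pooling with zero padding, where the padded zeros are excluded from the
# mean (mode 'average_exc_pad') because include_padding is left at its
# default. The input_dim and padding are example assumptions.
example_avg_pool = AveragePooling((2, 2), padding=(1, 1),
                                  input_dim=(16, 6, 6))
example_averaged = example_avg_pool.apply(tensor.tensor4('feature_maps'))
# Passing include_padding=True would switch the mode to 'average_inc_pad';
# it is only accepted together with ignore_border=True.
# ---------------------------------------------------------------------------

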
class _AllocationMixin(object):
    def _push_allocation_config(self):
        for attr in ['filter_size', 'num_filters', 'border_mode',
                     'batch_size', 'num_channels', 'image_size',
                     'tied_biases', 'use_bias']:
            setattr(self.convolution, attr, getattr(self, attr))

    @property
    def num_output_channels(self):
        # Assumes an elementwise activation function. Would need to
        # change to support e.g. maxout, but that would also require
        # a way of querying the activation function for this kind of
        # information.
        return self.num_filters


class ConvolutionalActivation(_AllocationMixin, Sequence, Initializable):
    """A convolution followed by an activation function.

    Parameters
    ----------
    activation : :class:`.BoundApplication`
        The application method to apply after convolution (i.e.
        the nonlinear activation function).

    See Also
    --------
    :class:`Convolutional` : For the documentation of other parameters.

    """
    @lazy(allocation=['filter_size', 'num_filters', 'num_channels'])
    def __init__(self, activation, filter_size, num_filters, num_channels,
                 batch_size=None, image_size=None, step=(1, 1),
                 border_mode='valid', tied_biases=False, **kwargs):
        self._build_convolution()

        self.filter_size = filter_size
        self.num_filters = num_filters
        self.num_channels = num_channels
        self.batch_size = batch_size
        self.image_size = image_size
        self.step = step
        self.border_mode = border_mode
        self.tied_biases = tied_biases

        super(ConvolutionalActivation, self).__init__(
            application_methods=[self.convolution.apply, activation],
            **kwargs)

    def _build_convolution(self):
        self.convolution = Convolutional()

    def get_dim(self, name):
        # TODO The name of the activation output doesn't need to be `output`
        return self.convolution.get_dim(name)

    def _push_allocation_config(self):
        super(ConvolutionalActivation, self)._push_allocation_config()
        self.convolution.step = self.step


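# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module under review): a
# convolution followed by a rectified-linear activation, wired up as a
# single brick. Sizes and initialization schemes are example assumptions.
from blocks.bricks import Rectifier
from blocks.initialization import Constant, IsotropicGaussian

example_conv_relu = ConvolutionalActivation(
    Rectifier().apply, filter_size=(3, 3), num_filters=16, num_channels=3,
    image_size=(28, 28), weights_init=IsotropicGaussian(0.01),
    biases_init=Constant(0))
example_conv_relu.initialize()
example_activations = example_conv_relu.apply(tensor.tensor4('images'))
# ---------------------------------------------------------------------------

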
class ConvolutionalTransposeActivation(ConvolutionalActivation):
    """A transposed convolution followed by an activation function.

    Parameters
    ----------
    activation : :class:`.BoundApplication`
        The application method to apply after convolution (i.e.
        the nonlinear activation function).

    See Also
    --------
    :class:`ConvolutionalTranspose` : For the documentation of other
    parameters.

    """
    @lazy(allocation=['original_image_size', 'filter_size', 'num_filters',
                      'num_channels'])
    def __init__(self, activation, original_image_size, filter_size,
                 num_filters, num_channels, **kwargs):
        super(ConvolutionalTransposeActivation, self).__init__(
            activation, filter_size, num_filters, num_channels, **kwargs)
        self.original_image_size = original_image_size

    def _build_convolution(self):
        self.convolution = ConvolutionalTranspose()

    def _push_allocation_config(self):
        super(ConvolutionalTransposeActivation, self)._push_allocation_config()
        self.convolution.original_image_size = self.original_image_size


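# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module under review): the brick
# summarized at the top of this report, a transposed convolution followed by
# an activation, e.g. as a decoder layer. Sizes and initialization schemes
# are example assumptions.
from blocks.bricks import Rectifier
from blocks.initialization import Constant, IsotropicGaussian

example_deconv_relu = ConvolutionalTransposeActivation(
    Rectifier().apply, original_image_size=(28, 28), filter_size=(3, 3),
    num_filters=1, num_channels=16, image_size=(26, 26),
    weights_init=IsotropicGaussian(0.01), biases_init=Constant(0))
example_deconv_relu.initialize()
example_reconstruction = example_deconv_relu.apply(
    tensor.tensor4('feature_maps'))
# ---------------------------------------------------------------------------

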
class ConvolutionalSequence(Sequence, Initializable, Feedforward):
    """A sequence of convolutional (or pooling) operations.

    Parameters
    ----------
    layers : list
        List of convolutional bricks (i.e. :class:`Convolutional`,
        :class:`ConvolutionalActivation`, or :class:`Pooling` bricks).
    num_channels : int
        Number of input channels in the image. For the first layer this is
        normally 1 for grayscale images and 3 for color (RGB) images. For
        subsequent layers this is equal to the number of filters output by
        the previous convolutional layer.
    batch_size : int, optional
        Number of images in a batch. If given, will be passed to
        Theano's convolution operator, resulting in possibly faster
        execution.
    image_size : tuple, optional
        Width and height of the input (image/feature map). If given,
        will be passed to Theano's convolution operator, resulting in
        possibly faster execution.
    border_mode : 'valid', 'full' or None, optional
        The border mode to use, see :func:`scipy.signal.convolve2d` for
        details. Unlike with :class:`Convolutional`, this defaults to
        None, in which case no default value is pushed down to child
        bricks at allocation time. Child bricks will in this case
        need to rely on either a default border mode (usually valid)
        or one provided at construction and/or after construction
        (but before allocation).

    Notes
    -----
    The passed convolutional operators should be 'lazily' constructed, that
    is, without specifying the batch_size, num_channels and image_size. The
    main feature of :class:`ConvolutionalSequence` is that it will set the
    input dimensions of a layer to the output dimensions of the previous
    layer by the :meth:`~.Brick.push_allocation_config` method.

    The reason the `border_mode` parameter behaves the way it does is that
    pushing a single default `border_mode` makes it very difficult to
    have child bricks with different border modes. Normally, such things
    would be overridden after `push_allocation_config()`, but this is
    a particular hassle as the border mode affects the allocation
    parameters of every subsequent child brick in the sequence. Thus, only
    an explicitly specified border mode will be pushed down the hierarchy.

    """
    @lazy(allocation=['num_channels'])
    def __init__(self, layers, num_channels, batch_size=None, image_size=None,
                 border_mode=None, tied_biases=False, **kwargs):
        self.layers = layers
        self.image_size = image_size
        self.num_channels = num_channels
        self.batch_size = batch_size
        self.border_mode = border_mode
        self.tied_biases = tied_biases

        application_methods = [brick.apply for brick in layers]
        super(ConvolutionalSequence, self).__init__(
            application_methods=application_methods, **kwargs)

    def get_dim(self, name):
        if name == 'input_':
            return ((self.num_channels,) + self.image_size)
[Issue] Unused Code / Coding Style: There is an unnecessary parenthesis after return.
        if name == 'output':
            return self.layers[-1].get_dim(name)
        return super(ConvolutionalSequence, self).get_dim(name)

    def _push_allocation_config(self):
        num_channels = self.num_channels
        image_size = self.image_size
        for layer in self.layers:
            if self.border_mode is not None:
                layer.border_mode = self.border_mode
            layer.tied_biases = self.tied_biases
            layer.image_size = image_size
            layer.num_channels = num_channels
            layer.batch_size = self.batch_size
            layer.use_bias = self.use_bias

            # Push input dimensions to children
            layer._push_allocation_config()
[Issue] Coding Style Best Practice: It seems like _push_allocation_config was declared protected and should not be accessed from this context.

            # Retrieve output dimensions
            # and set them for the next layer
            if layer.image_size is not None:
                output_shape = layer.get_dim('output')
                image_size = output_shape[1:]
            num_channels = layer.num_output_channels


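# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module under review): a small
# convolutional stack whose layers are lazily constructed, as the Notes
# section above recommends; num_channels and image sizes are pushed down by
# the sequence at allocation time. All sizes and initialization schemes are
# example assumptions.
from blocks.bricks import Rectifier
from blocks.initialization import Constant, IsotropicGaussian

example_sequence = ConvolutionalSequence(
    [ConvolutionalActivation(Rectifier().apply, (5, 5), 32),
     MaxPooling((2, 2)),
     ConvolutionalActivation(Rectifier().apply, (5, 5), 64),
     MaxPooling((2, 2))],
    num_channels=3, image_size=(32, 32),
    weights_init=IsotropicGaussian(0.01), biases_init=Constant(0))
example_sequence.initialize()
example_features = example_sequence.apply(tensor.tensor4('images'))
# Shapes pushed down: 3x32x32 -> 32x28x28 -> 32x14x14 -> 64x10x10 -> 64x5x5.
# ---------------------------------------------------------------------------

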
class Flattener(Brick):
    """Flattens the input.

    It may be used to pass multidimensional objects like images or feature
    maps of convolutional bricks into bricks which allow only
    two-dimensional input (batch, features), such as an MLP.

    """
    @application(inputs=['input_'], outputs=['output'])
    def apply(self, input_):
        return input_.flatten(ndim=2)
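

# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the module under review): flattening
# 4D feature maps into a (batch, features) matrix so they can feed a fully
# connected brick such as MLP. The feature-map shape and MLP dimensions are
# example assumptions.
from blocks.bricks import MLP, Rectifier, Softmax
from blocks.initialization import Constant, IsotropicGaussian

example_maps = tensor.tensor4('feature_maps')   # e.g. (batch, 64, 5, 5)
example_flat = Flattener().apply(example_maps)  # becomes (batch, 64 * 5 * 5)
example_mlp = MLP(activations=[Rectifier(), Softmax()],
                  dims=[64 * 5 * 5, 100, 10],
                  weights_init=IsotropicGaussian(0.01),
                  biases_init=Constant(0))
example_mlp.initialize()
example_probabilities = example_mlp.apply(example_flat)
# ---------------------------------------------------------------------------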