Pull Request — master (#968), created by David at 01:31. Status: Completed.

blocks.bricks.ConvolutionalSequence (rating: A)

Complexity

    Total Complexity: 13

Size/Duplication

    Total Lines: 103
    Duplicated Lines: 0%

    Metric   Value
    dl       0
    loc      103
    rs       10
    wmc      13

3 Methods

    Rating   Name                         Duplication   Size   Complexity
    B        get_dim()                    0             16     6
    B        _push_allocation_config()    0             25     5
    A        __init__()                   0             13     2
from theano.tensor.nnet import conv2d
from theano.tensor.nnet.abstract_conv import (AbstractConv2d_gradInputs,
                                              get_conv_output_shape)
from theano.tensor.signal.pool import pool_2d, Pool

from blocks.bricks import Initializable, Feedforward, Sequence, Activation
from blocks.bricks.base import application, Brick, lazy
from blocks.roles import add_role, FILTER, BIAS
from blocks.utils import shared_floatx_nans


class Convolutional(Initializable):
    """Performs a 2D convolution.

    Parameters
    ----------
    filter_size : tuple
        The height and width of the filter (also called *kernels*).
    num_filters : int
        Number of filters per channel.
    num_channels : int
        Number of input channels in the image. For the first layer this is
        normally 1 for grayscale images and 3 for color (RGB) images. For
        subsequent layers this is equal to the number of filters output by
        the previous convolutional layer. The filters are pooled over the
        channels.
    batch_size : int, optional
        Number of examples per batch. If given, this will be passed to the
        Theano convolution operator, possibly resulting in faster
        execution.
    image_size : tuple, optional
        The height and width of the input (image or feature map). If
        given, this will be passed to the Theano convolution operator,
        possibly resulting in faster execution times.
    step : tuple, optional
        The step (or stride) with which to slide the filters over the
        image. Defaults to (1, 1).
    border_mode : {'valid', 'full'}, optional
        The border mode to use; see :func:`scipy.signal.convolve2d` for
        details. Defaults to 'valid'.
    tied_biases : bool
        If ``True``, the biases of every filter in this layer are shared
        amongst all applications of that filter. Setting this to ``False``
        unties the biases, yielding a separate bias for every location at
        which the filter is applied. Defaults to ``False``.

    """
    # Make it possible to override the implementation of conv2d that gets
    # used, i.e. to use theano.sandbox.cuda.dnn.dnn_conv directly in order
    # to leverage features not yet available in Theano's standard conv2d.
    # The function you override with here should accept at least the
    # input and the kernels as positionals, and the keyword arguments
    # input_shape, subsample, border_mode, and filter_shape. If some of
    # these are unsupported they should still be accepted and ignored,
    # e.g. with a wrapper function that swallows **kwargs.
    conv2d_impl = staticmethod(conv2d)

    # Used to override the output shape computation for a given value of
    # conv2d_impl. Should accept 4 positional arguments: the shape of an
    # image minibatch (with 4 elements: batch size, number of channels,
    # height, and width), the shape of the filter bank (number of filters,
    # number of output channels, filter height, filter width), the border
    # mode, and the step (vertical and horizontal strides). It is expected
    # to return a 4-tuple of (batch size, number of channels, output
    # height, output width). The first element of this tuple is not used
    # for anything by this brick.
    get_output_shape = staticmethod(get_conv_output_shape)

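    # As an illustration of such an override (a hedged sketch, not part of
    # this diff; it assumes a GPU-enabled Theano where
    # theano.sandbox.cuda.dnn.dnn_conv is available, and the wrapper name
    # _dnn_conv_wrapper is hypothetical):
    #
    #     from theano.sandbox.cuda import dnn
    #
    #     def _dnn_conv_wrapper(input_, kernels, input_shape=None,
    #                           filter_shape=None, subsample=(1, 1),
    #                           border_mode='valid'):
    #         # input_shape and filter_shape are accepted but ignored,
    #         # as the comment above suggests.
    #         return dnn.dnn_conv(input_, kernels, border_mode=border_mode,
    #                             subsample=subsample)
    #
    #     Convolutional.conv2d_impl = staticmethod(_dnn_conv_wrapper)
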
    @lazy(allocation=['filter_size', 'num_filters', 'num_channels'])
    def __init__(self, filter_size, num_filters, num_channels, batch_size=None,
                 image_size=(None, None), step=(1, 1), border_mode='valid',
                 tied_biases=False, **kwargs):
        super(Convolutional, self).__init__(**kwargs)

        self.filter_size = filter_size
        self.num_filters = num_filters
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.step = step
        self.border_mode = border_mode
        self.tied_biases = tied_biases

    def _allocate(self):
        W = shared_floatx_nans((self.num_filters, self.num_channels) +
                               self.filter_size, name='W')
        add_role(W, FILTER)
        self.parameters.append(W)
        self.add_auxiliary_variable(W.norm(2), name='W_norm')
        if self.use_bias:
            if self.tied_biases:
                b = shared_floatx_nans((self.num_filters,), name='b')
            else:
                # this error is raised here instead of during initialization
                # because ConvolutionalSequence may specify the image size
                if self.image_size == (None, None) and not self.tied_biases:
                    raise ValueError('Cannot infer bias size without '
                                     'image_size specified. If you use '
                                     'variable image_size, you should use '
                                     'tied_biases=True.')

                b = shared_floatx_nans(self.get_dim('output'), name='b')
            add_role(b, BIAS)

            self.parameters.append(b)
            self.add_auxiliary_variable(b.norm(2), name='b_norm')

    def _initialize(self):
        if self.use_bias:
            W, b = self.parameters

[Issue: Bug] The tuple unpacking with sequence defined at line 610 of
blocks.bricks.base seems to be unbalanced; 2 value(s) for 0 label(s). This
is reported when the number of values does not equal the number of labels,
e.g. ``a, b = ("a", "b", "c")`` has only 2 labels for 3 values.

            self.biases_init.initialize(b, self.rng)
        else:
            W, = self.parameters

[Issue: Bug] The tuple unpacking with sequence defined at line 610 of
blocks.bricks.base seems to be unbalanced; 1 value(s) for 0 label(s) (same
class of finding as above).

        self.weights_init.initialize(W, self.rng)

    @application(inputs=['input_'], outputs=['output'])
    def apply(self, input_):
        """Perform the convolution.

        Parameters
        ----------
        input_ : :class:`~tensor.TensorVariable`
            A 4D tensor with the axes representing batch size, number of
            channels, image height, and image width.

        Returns
        -------
        output : :class:`~tensor.TensorVariable`
            A 4D tensor of filtered images (feature maps) with dimensions
            representing batch size, number of filters, feature map height,
            and feature map width.

            The height and width of the feature map depend on the border
            mode. For 'valid' it is ``image_size - filter_size + 1`` while
            for 'full' it is ``image_size + filter_size - 1``.

        """
        if self.use_bias:
            W, b = self.parameters

[Issue: Bug] Unbalanced tuple unpacking; 2 value(s) for 0 label(s) (same
finding as in ``_initialize``).

        else:
            W, = self.parameters

[Issue: Bug] Unbalanced tuple unpacking; 1 value(s) for 0 label(s) (same
finding as in ``_initialize``).

        if self.image_size == (None, None):
            input_shape = None
        else:
            input_shape = (self.batch_size, self.num_channels)
            input_shape += self.image_size

        output = self.conv2d_impl(
            input_, W,
            input_shape=input_shape,
            subsample=self.step,
            border_mode=self.border_mode,
            filter_shape=((self.num_filters, self.num_channels) +
                          self.filter_size))
        if self.use_bias:
            if self.tied_biases:
                output += b.dimshuffle('x', 0, 'x', 'x')
            else:
                output += b.dimshuffle('x', 0, 1, 2)
        return output

    def get_dim(self, name):
        if name == 'input_':
            return (self.num_channels,) + self.image_size
        if name == 'output':
            input_shape = (None, self.num_channels) + self.image_size
            kernel_shape = ((self.num_filters, self.num_channels) +
                            self.filter_size)
            out_shape = self.get_output_shape(input_shape, kernel_shape,
                                              self.border_mode, self.step)
            assert len(out_shape) == 4
            return out_shape[1:]
        return super(Convolutional, self).get_dim(name)

    @property
    def num_output_channels(self):
        return self.num_filters

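A minimal usage sketch for this brick (not part of the diff; it assumes the
standard ``blocks.initialization`` schemes)::

    from theano import tensor
    from blocks.initialization import Constant, IsotropicGaussian

    x = tensor.tensor4('features')
    conv = Convolutional(filter_size=(3, 3), num_filters=16, num_channels=3,
                         image_size=(32, 32),
                         weights_init=IsotropicGaussian(0.01),
                         biases_init=Constant(0))
    conv.initialize()
    # With the default 'valid' border mode the output shape is
    # (batch, 16, 32 - 3 + 1, 32 - 3 + 1) = (batch, 16, 30, 30).
    y = conv.apply(x)
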
class ConvolutionalTranspose(Convolutional):
    """Performs the transpose of a 2D convolution.

    Parameters
    ----------
    original_image_size : tuple
        The height and width of the image that forms the output of
        the transpose operation, which is the input of the original
        (non-transposed) convolution.
    num_filters : int
        Number of filters at the *output* of the transposed convolution,
        i.e. the number of channels in the corresponding convolution.
    num_channels : int
        Number of channels at the *input* of the transposed convolution,
        i.e. the number of output filters in the corresponding
        convolution.
    step : tuple, optional
        The step (or stride) of the corresponding *convolution*.
        Defaults to (1, 1).
    image_size : tuple, optional
        Image size of the input to the *transposed* convolution, i.e.
        the output of the corresponding convolution. Required for untied
        biases. Defaults to ``None``.

    See Also
    --------
    :class:`Convolutional` : For the documentation of other parameters.

    """
    @lazy(allocation=['original_image_size', 'filter_size', 'num_filters',
                      'num_channels'])
    def __init__(self, original_image_size, filter_size, num_filters,
                 num_channels, **kwargs):
        super(ConvolutionalTranspose, self).__init__(
            filter_size, num_filters, num_channels, **kwargs)
        self.original_image_size = original_image_size

    def conv2d_impl(self, input_, W, input_shape, subsample, border_mode,
                    filter_shape):

[Issue: Unused Code] The argument ``input_shape`` seems to be unused.

        # The AbstractConv2d_gradInputs op takes a kernel that was used for
        # the **convolution**. We therefore have to invert num_channels and
        # num_filters for W.
        W = W.transpose(1, 0, 2, 3)
        imshp = (None,) + self.get_dim('output')
        kshp = (filter_shape[1], filter_shape[0]) + filter_shape[2:]
        return AbstractConv2d_gradInputs(
            imshp=imshp, kshp=kshp, border_mode=border_mode,
            subsample=subsample)(W, input_, self.get_dim('output')[1:])

    def get_dim(self, name):
        if name == 'output':
            return (self.num_filters,) + self.original_image_size
        return super(ConvolutionalTranspose, self).get_dim(name)

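A hedged usage sketch (not part of the diff): the corresponding forward
convolution maps 32x32 to 16x16 with a (2, 2) filter and a (2, 2) step, so
its transpose upsamples 16x16 feature maps back to 32x32::

    from theano import tensor
    from blocks.initialization import Constant, IsotropicGaussian

    h = tensor.tensor4('feature_maps')
    conv_transpose = ConvolutionalTranspose(
        original_image_size=(32, 32), filter_size=(2, 2), num_filters=3,
        num_channels=16, step=(2, 2), image_size=(16, 16), tied_biases=True,
        weights_init=IsotropicGaussian(0.01), biases_init=Constant(0))
    conv_transpose.initialize()
    upsampled = conv_transpose.apply(h)  # (batch, 3, 32, 32)
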
class Pooling(Initializable, Feedforward):
    """Base Brick for pooling operations.

    This should generally not be instantiated directly; see
    :class:`MaxPooling`.

    """
    @lazy(allocation=['mode', 'pooling_size'])
    def __init__(self, mode, pooling_size, step, input_dim, ignore_border,
                 padding, **kwargs):
        super(Pooling, self).__init__(**kwargs)
        self.pooling_size = pooling_size
        self.mode = mode
        self.step = step
        self.input_dim = input_dim if input_dim is not None else (None,) * 3
        self.ignore_border = ignore_border
        self.padding = padding

    @property
    def image_size(self):
        return self.input_dim[-2:]

    @image_size.setter
    def image_size(self, value):
        self.input_dim = self.input_dim[:-2] + value

    @property
    def num_channels(self):
        return self.input_dim[0]

    @num_channels.setter
    def num_channels(self, value):
        self.input_dim = (value,) + self.input_dim[1:]

    @application(inputs=['input_'], outputs=['output'])
    def apply(self, input_):
        """Apply the pooling (subsampling) transformation.

        Parameters
        ----------
        input_ : :class:`~tensor.TensorVariable`
            A tensor with dimension greater or equal to 2. The last two
            dimensions will be downsampled. For example, with images this
            means that the last two dimensions should represent the height
            and width of your image.

        Returns
        -------
        output : :class:`~tensor.TensorVariable`
            A tensor with the same number of dimensions as `input_`, but
            with the last two dimensions downsampled.

        """
        output = pool_2d(input_, self.pooling_size, st=self.step,
                         mode=self.mode, padding=self.padding,
                         ignore_border=self.ignore_border)
        return output

    def get_dim(self, name):
        if name == 'input_':
            return self.input_dim
        if name == 'output':
            return tuple(Pool.out_shape(
                self.input_dim, self.pooling_size, st=self.step,
                ignore_border=self.ignore_border, padding=self.padding))

    @property
    def num_output_channels(self):
        return self.input_dim[0]


class MaxPooling(Pooling):
    """Max pooling layer.

    Parameters
    ----------
    pooling_size : tuple
        The height and width of the pooling region, i.e. this is the
        factor by which your input's last two dimensions will be
        downscaled.
    step : tuple, optional
        The vertical and horizontal shift (stride) between pooling
        regions. By default this is equal to `pooling_size`. Setting
        this to a lower number results in overlapping pooling regions.
    input_dim : tuple, optional
        A tuple of integers representing the shape of the input. The last
        two dimensions will be used to calculate the output dimension.
    padding : tuple, optional
        A tuple of integers representing the vertical and horizontal
        zero-padding to be applied to each of the top and bottom
        (vertical) and left and right (horizontal) edges. For example,
        an argument of (4, 3) will apply 4 pixels of padding to the
        top edge, 4 pixels of padding to the bottom edge, and 3 pixels
        each for the left and right edge. By default, no padding is
        performed.
    ignore_border : bool, optional
        Whether or not to do partial downsampling based on borders where
        the extent of the pooling region reaches beyond the edge of the
        image. If `True`, a (5, 5) image with (2, 2) pooling regions
        and (2, 2) step will be downsampled to shape (2, 2), otherwise
        it will be downsampled to (3, 3). `True` by default.

    Notes
    -----
    .. warning::
        As of this writing, setting `ignore_border` to `False` with a step
        not equal to the pooling size will force Theano to perform pooling
        computations on the CPU rather than the GPU, even if you have
        specified a GPU as your computation device. Additionally, Theano
        will only use [cuDNN]_ (if available) for pooling computations
        with `ignore_border` set to `True`. You can ensure that the
        entire input is captured by at least one pool by using the
        `padding` argument to add zero padding prior to pooling being
        performed.

    .. [cuDNN] `NVIDIA cuDNN <https://developer.nvidia.com/cudnn>`_.

    """
    @lazy(allocation=['pooling_size'])
    def __init__(self, pooling_size, step=None, input_dim=None,
                 ignore_border=True, padding=(0, 0),
                 **kwargs):
        super(MaxPooling, self).__init__('max', pooling_size,
                                         step=step, input_dim=input_dim,
                                         ignore_border=ignore_border,
                                         padding=padding, **kwargs)

    def __setstate__(self, state):
        self.__dict__.update(state)
        # Fix objects created before pull request #899.
        self.mode = getattr(self, 'mode', 'max')
        self.padding = getattr(self, 'padding', (0, 0))
        self.ignore_border = getattr(self, 'ignore_border', False)

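A quick usage sketch (illustrative only)::

    from theano import tensor

    x = tensor.tensor4('features')
    pool = MaxPooling((2, 2))
    # With ignore_border=True (the default), the last two dimensions are
    # halved, e.g. (batch, 16, 30, 30) -> (batch, 16, 15, 15).
    pooled = pool.apply(x)
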
class AveragePooling(Pooling):
    """Average pooling layer.

    Parameters
    ----------
    include_padding : bool, optional
        When calculating an average, include zeros that are the
        result of zero padding added by the `padding` argument.
        A value of `True` is only accepted if `ignore_border`
        is also `True`. `False` by default.

    Notes
    -----
    For documentation on the remainder of the arguments to this
    class, see :class:`MaxPooling`.

    """
    @lazy(allocation=['pooling_size'])
    def __init__(self, pooling_size, step=None, input_dim=None,
                 ignore_border=True, padding=(0, 0),
                 include_padding=False, **kwargs):
        mode = 'average_inc_pad' if include_padding else 'average_exc_pad'
        super(AveragePooling, self).__init__(mode, pooling_size,
                                             step=step, input_dim=input_dim,
                                             ignore_border=ignore_border,
                                             padding=padding, **kwargs)

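A short sketch contrasting the two averaging modes (illustrative only)::

    avg_exc = AveragePooling((2, 2), padding=(1, 1))
    # include_padding=True counts the padded zeros in each mean; it is only
    # accepted together with ignore_border=True (the default here).
    avg_inc = AveragePooling((2, 2), padding=(1, 1), include_padding=True)
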
class _AllocationMixin(object):
    def _push_allocation_config(self):
        for attr in ['filter_size', 'num_filters', 'border_mode',
                     'batch_size', 'num_channels', 'image_size',
                     'tied_biases', 'use_bias']:
            setattr(self.convolution, attr, getattr(self, attr))

    @property
    def num_output_channels(self):
        # Assumes an elementwise activation function. Would need to
        # change to support e.g. maxout, but that would also require
        # a way of querying the activation function for this kind of
        # information.
        return self.num_filters


class ConvolutionalActivation(_AllocationMixin, Sequence, Initializable):
    """A convolution followed by an activation function.

    Parameters
    ----------
    activation : :class:`.BoundApplication`
        The application method to apply after convolution (i.e.
        the nonlinear activation function).

    See Also
    --------
    :class:`Convolutional` : For the documentation of other parameters.

    """
    @lazy(allocation=['filter_size', 'num_filters', 'num_channels'])
    def __init__(self, activation, filter_size, num_filters, num_channels,
                 batch_size=None, image_size=None, step=(1, 1),
                 border_mode='valid', tied_biases=False, **kwargs):
        self._build_convolution()

        self.filter_size = filter_size
        self.num_filters = num_filters
        self.num_channels = num_channels
        self.batch_size = batch_size
        self.image_size = image_size
        self.step = step
        self.border_mode = border_mode
        self.tied_biases = tied_biases

        super(ConvolutionalActivation, self).__init__(
            application_methods=[self.convolution.apply, activation],
            **kwargs)

    def _build_convolution(self):
        self.convolution = Convolutional()

    def get_dim(self, name):
        # TODO The name of the activation output doesn't need to be `output`
        return self.convolution.get_dim(name)

    def _push_allocation_config(self):
        super(ConvolutionalActivation, self)._push_allocation_config()
        self.convolution.step = self.step

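A usage sketch (not part of the diff): the ``activation`` argument is a
bound application method, e.g. ``Rectifier().apply``::

    from blocks.bricks import Rectifier
    from blocks.initialization import Constant, IsotropicGaussian

    conv_relu = ConvolutionalActivation(
        Rectifier().apply, filter_size=(3, 3), num_filters=32,
        num_channels=3, image_size=(32, 32),
        weights_init=IsotropicGaussian(0.01), biases_init=Constant(0))
    conv_relu.initialize()
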
class ConvolutionalTransposeActivation(ConvolutionalActivation):
    """A transposed convolution followed by an activation function.

    Parameters
    ----------
    activation : :class:`.BoundApplication`
        The application method to apply after convolution (i.e.
        the nonlinear activation function).

    See Also
    --------
    :class:`ConvolutionalTranspose` : For the documentation of other
        parameters.

    """
    @lazy(allocation=['original_image_size', 'filter_size', 'num_filters',
                      'num_channels'])
    def __init__(self, activation, original_image_size, filter_size,
                 num_filters, num_channels, **kwargs):
        super(ConvolutionalTransposeActivation, self).__init__(
            activation, filter_size, num_filters, num_channels, **kwargs)
        self.original_image_size = original_image_size

    def _build_convolution(self):
        self.convolution = ConvolutionalTranspose()

    def _push_allocation_config(self):
        super(ConvolutionalTransposeActivation, self)._push_allocation_config()
        self.convolution.original_image_size = self.original_image_size


class ConvolutionalSequence(Sequence, Initializable, Feedforward):
    """A sequence of convolutional (or pooling) operations.

    Parameters
    ----------
    layers : list
        List of convolutional bricks (i.e. :class:`Convolutional`,
        :class:`ConvolutionalActivation`, or :class:`Pooling` bricks).
    num_channels : int
        Number of input channels in the image. For the first layer this is
        normally 1 for grayscale images and 3 for color (RGB) images. For
        subsequent layers this is equal to the number of filters output by
        the previous convolutional layer.
    batch_size : int, optional
        Number of images in a batch. If given, this will be passed to
        Theano's convolution operator, possibly resulting in faster
        execution.
    image_size : tuple, optional
        Width and height of the input (image/featuremap). If given, this
        will be passed to Theano's convolution operator, possibly
        resulting in faster execution.
    border_mode : 'valid', 'full' or None, optional
        The border mode to use; see :func:`scipy.signal.convolve2d` for
        details. Unlike with :class:`Convolutional`, this defaults to
        None, in which case no default value is pushed down to child
        bricks at allocation time. Child bricks will in this case
        need to rely on either a default border mode (usually 'valid')
        or one provided at construction and/or after construction
        (but before allocation).

    Notes
    -----
    The passed convolutional operators should be 'lazy' constructed, that
    is, without specifying the batch_size, num_channels and image_size. The
    main feature of :class:`ConvolutionalSequence` is that it will set the
    input dimensions of a layer to the output dimensions of the previous
    layer by the :meth:`~.Brick.push_allocation_config` method.

    The reason the `border_mode` parameter behaves the way it does is that
    pushing a single default `border_mode` makes it very difficult to
    have child bricks with different border modes. Normally, such things
    would be overridden after `push_allocation_config()`, but this is
    a particular hassle as the border mode affects the allocation
    parameters of every subsequent child brick in the sequence. Thus, only
    an explicitly specified border mode will be pushed down the hierarchy.

    """
    @lazy(allocation=['num_channels'])
    def __init__(self, layers, num_channels, batch_size=None, image_size=None,
                 border_mode=None, tied_biases=False, **kwargs):
        self.layers = layers
        self.image_size = image_size
        self.num_channels = num_channels
        self.batch_size = batch_size
        self.border_mode = border_mode
        self.tied_biases = tied_biases

        application_methods = [brick.apply for brick in layers]
        super(ConvolutionalSequence, self).__init__(
            application_methods=application_methods, **kwargs)

    def get_dim(self, name):
        if name == 'input_':
            return ((self.num_channels,) + self.image_size)

[Issue: Coding Style] There is an unnecessary parenthesis after ``return``.

        if name == 'output':
            last = len(self.layers) - 1
            while last > 0:
                try:
                    return self.layers[last].get_dim(name)
                except ValueError:
                    last -= 1
            else:
                # Executed if the while condition becomes False. The
                # output shape of an empty ConvolutionalSequence or one
                # consisting only of Activations is the input shape.

[Issue: Bug] The ``else`` clause is not necessary, as the loop does not
contain a ``break`` statement. If a loop cannot exit early through
``break``, the ``else`` part always executes, so it can simply be left off.

                return self.get_dim('input_')
        return super(ConvolutionalSequence, self).get_dim(name)

    def _push_allocation_config(self):
        num_channels = self.num_channels
        image_size = self.image_size
        for layer in self.layers:
            if isinstance(layer, Activation):
                # Activations operate elementwise; nothing to set.
                layer._push_allocation_config()

[Issue: Coding Style Best Practice] ``_push_allocation_config`` is
declared protected (a leading underscore conventionally marks protected
visibility) and should only be accessed from the owning class or a
subclass, not from this context.

                continue
            if self.border_mode is not None:
                layer.border_mode = self.border_mode
            layer.tied_biases = self.tied_biases
            layer.image_size = image_size
            layer.num_channels = num_channels
            layer.batch_size = self.batch_size
            layer.use_bias = self.use_bias

            # Push input dimensions to children
            layer._push_allocation_config()

[Issue: Coding Style Best Practice] Protected member
``_push_allocation_config`` accessed from outside the owning class (same
finding as above).

            # Retrieve output dimensions
            # and set them for the next layer
            if layer.image_size is not None:
                output_shape = layer.get_dim('output')
                image_size = output_shape[1:]
            num_channels = layer.num_output_channels


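A usage sketch tying the pieces together (illustrative only; note that
``_push_allocation_config`` above handles plain :class:`Activation` bricks
such as ``Rectifier`` appearing directly in `layers`, and that the
convolutional layers are lazily constructed, as the docstring requires)::

    from theano import tensor
    from blocks.bricks import Rectifier
    from blocks.initialization import Constant, IsotropicGaussian

    x = tensor.tensor4('features')
    conv_sequence = ConvolutionalSequence(
        [Convolutional(filter_size=(3, 3), num_filters=16),
         Rectifier(),
         MaxPooling((2, 2)),
         Convolutional(filter_size=(3, 3), num_filters=32),
         Rectifier()],
        num_channels=3, image_size=(64, 64),
        weights_init=IsotropicGaussian(0.01), biases_init=Constant(0))
    conv_sequence.initialize()
    y = conv_sequence.apply(x)
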
class Flattener(Brick):
    """Flattens the input.

    It may be used to pass multidimensional objects like images or feature
    maps of convolutional bricks into bricks which allow only
    two-dimensional input (batch, features), such as an MLP.

    """
    @application(inputs=['input_'], outputs=['output'])
    def apply(self, input_):
        return input_.flatten(ndim=2)
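
A usage sketch (illustrative only): flattening convolutional feature maps
before feeding them to an MLP::

    from theano import tensor

    feature_maps = tensor.tensor4('feature_maps')
    flattener = Flattener()
    # (batch, channels, height, width) -> (batch, channels * height * width)
    flat = flattener.apply(feature_maps)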