Completed
Pull Request — master (#946)
by Vincent, created 02:09

blocks.bricks.ConvolutionalTranspose — Rating: A

Complexity

Total Complexity: 4

Size/Duplication

Total Lines: 51
Duplicated Lines: 0%

Metric   Value
wmc      4
dl       0
loc      51
rs       10

3 Methods

Rating   Name            Duplication   Size   Complexity
A        conv2d_impl()   0             11     1
A        get_dim()       0             4      2
A        __init__()      0             7      1
from theano import tensor

Unused Code introduced by: Unused tensor imported from theano

from theano.tensor.nnet import conv2d
from theano.tensor.nnet.abstract_conv import (AbstractConv2d_gradInputs,
                                              get_conv_output_shape)
from theano.tensor.signal.pool import pool_2d, Pool

from blocks.bricks import Initializable, Feedforward, Sequence
from blocks.bricks.base import application, Brick, lazy
from blocks.roles import add_role, FILTER, BIAS
from blocks.utils import shared_floatx_nans


class Convolutional(Initializable):
    """Performs a 2D convolution.

    Parameters
    ----------
    filter_size : tuple
        The height and width of the filters (also called *kernels*).
    num_filters : int
        Number of filters per channel.
    num_channels : int
        Number of input channels in the image. For the first layer this is
        normally 1 for grayscale images and 3 for color (RGB) images. For
        subsequent layers this is equal to the number of filters output by
        the previous convolutional layer. The filters are pooled over the
        channels.
    batch_size : int, optional
        Number of examples per batch. If given, this will be passed to the
        Theano convolution operator, possibly resulting in faster
        execution.
    image_size : tuple, optional
        The height and width of the input (image or feature map). If
        given, this will be passed to the Theano convolution operator,
        possibly resulting in faster execution.
    step : tuple, optional
        The step (or stride) with which to slide the filters over the
        image. Defaults to (1, 1).
    border_mode : {'valid', 'full'}, optional
        The border mode to use; see :func:`scipy.signal.convolve2d` for
        details. Defaults to 'valid'.
    tied_biases : bool
        If ``True``, the biases of every filter in this layer are shared
        amongst all applications of that filter. Setting this to
        ``False`` unties the biases, yielding a separate bias for every
        location at which the filter is applied. Defaults to ``False``.

    """
    # Make it possible to override the implementation of conv2d that gets
    # used, i.e. to use theano.sandbox.cuda.dnn.dnn_conv directly in order
    # to leverage features not yet available in Theano's standard conv2d.
    # The function you override with here should accept at least the
    # input and the kernels as positionals, and the keyword arguments
    # image_shape, subsample, border_mode, and filter_shape. If some of
    # these are unsupported they should still be accepted and ignored,
    # e.g. with a wrapper function that swallows **kwargs.
    conv2d_impl = staticmethod(conv2d)

    # Used to override the output shape computation for a given value of
    # conv2d_impl. Should accept 4 positional arguments: the shape of an
    # image minibatch (with 4 elements: batch size, number of channels,
    # height, and width), the shape of the filter bank (number of filters,
    # number of output channels, filter height, filter width), the border
    # mode, and the step (vertical and horizontal strides). It is expected
    # to return a 4-tuple of (batch size, number of channels, output
    # height, output width). The first element of this tuple is not used
    # for anything by this brick.
    get_output_shape = staticmethod(get_conv_output_shape)
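
As an illustration of the two override hooks above, a minimal sketch of swapping in dnn_conv through a wrapper that swallows the unsupported keyword arguments (the names _dnn_conv_wrapper and DnnConvolutional are illustrative, not part of this file, and the old CUDA sandbox backend must be available):

    def _dnn_conv_wrapper(input_, kernels, border_mode='valid',
                          subsample=(1, 1), **kwargs):
        # dnn_conv does not take image_shape or filter_shape; accept and
        # ignore them by swallowing **kwargs, as the comment above suggests.
        from theano.sandbox.cuda import dnn
        return dnn.dnn_conv(input_, kernels, border_mode=border_mode,
                            subsample=subsample)

    class DnnConvolutional(Convolutional):
        conv2d_impl = staticmethod(_dnn_conv_wrapper)
        # get_conv_output_shape already matches the required four-argument
        # signature, so get_output_shape is inherited unchanged.
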
    @lazy(allocation=['filter_size', 'num_filters', 'num_channels'])
    def __init__(self, filter_size, num_filters, num_channels, batch_size=None,
                 image_size=(None, None), step=(1, 1), border_mode='valid',
                 tied_biases=False, **kwargs):
        super(Convolutional, self).__init__(**kwargs)

        self.filter_size = filter_size
        self.num_filters = num_filters
        self.batch_size = batch_size
        self.num_channels = num_channels
        self.image_size = image_size
        self.step = step
        self.border_mode = border_mode
        self.tied_biases = tied_biases

    def _allocate(self):
        W = shared_floatx_nans((self.num_filters, self.num_channels) +
                               self.filter_size, name='W')
        add_role(W, FILTER)
        self.parameters.append(W)
        self.add_auxiliary_variable(W.norm(2), name='W_norm')
        if self.use_bias:
            if self.tied_biases:
                b = shared_floatx_nans((self.num_filters,), name='b')
            else:
                # this error is raised here instead of during initialization
                # because ConvolutionalSequence may specify the image size
                if self.image_size == (None, None) and not self.tied_biases:
                    raise ValueError('Cannot infer bias size without '
                                     'image_size specified. If you use '
                                     'variable image_size, you should use '
                                     'tied_biases=True.')

                b = shared_floatx_nans(self.get_dim('output'), name='b')
            add_role(b, BIAS)

            self.parameters.append(b)
            self.add_auxiliary_variable(b.norm(2), name='b_norm')

    def _initialize(self):
        if self.use_bias:
            W, b = self.parameters

Bug introduced by: The tuple unpacking with sequence defined at line 610 of blocks.bricks.base seems to be unbalanced; 2 value(s) for 0 label(s). (This is flagged when the number of values does not match the number of labels, e.g. a, b = ("a", "b", "c") has only 2 labels for 3 values.)

            self.biases_init.initialize(b, self.rng)
        else:
            W, = self.parameters

Bug introduced by: The tuple unpacking with sequence defined at line 610 of blocks.bricks.base seems to be unbalanced; 1 value(s) for 0 label(s).

        self.weights_init.initialize(W, self.rng)

    @application(inputs=['input_'], outputs=['output'])
    def apply(self, input_):
        """Perform the convolution.

        Parameters
        ----------
        input_ : :class:`~tensor.TensorVariable`
            A 4D tensor with the axes representing batch size, number of
            channels, image height, and image width.

        Returns
        -------
        output : :class:`~tensor.TensorVariable`
            A 4D tensor of filtered images (feature maps) with dimensions
            representing batch size, number of filters, feature map height,
            and feature map width.

            The height and width of the feature map depend on the border
            mode. For 'valid' it is ``image_size - filter_size + 1`` while
            for 'full' it is ``image_size + filter_size - 1``.

        """
        if self.use_bias:
            W, b = self.parameters

Bug introduced by: The tuple unpacking with sequence defined at line 610 of blocks.bricks.base seems to be unbalanced; 2 value(s) for 0 label(s).

        else:
            W, = self.parameters

Bug introduced by: The tuple unpacking with sequence defined at line 610 of blocks.bricks.base seems to be unbalanced; 1 value(s) for 0 label(s).

        if self.image_size == (None, None):
            image_shape = None
        else:
            image_shape = (self.batch_size, self.num_channels)
            image_shape += self.image_size

        output = self.conv2d_impl(
            input_, W,
            image_shape=image_shape,
            subsample=self.step,
            border_mode=self.border_mode,
            filter_shape=((self.num_filters, self.num_channels) +
                          self.filter_size))
        if self.use_bias:
            if self.tied_biases:
                output += b.dimshuffle('x', 0, 'x', 'x')
            else:
                output += b.dimshuffle('x', 0, 1, 2)
        return output

    def get_dim(self, name):
        if name == 'input_':
            return (self.num_channels,) + self.image_size
        if name == 'output':
            image_shape = (None, self.num_channels) + self.image_size
            kernel_shape = ((self.num_filters, self.num_channels) +
                            self.filter_size)
            out_shape = self.get_output_shape(image_shape, kernel_shape,
                                              self.border_mode, self.step)
            assert len(out_shape) == 4
            return out_shape[1:]
        return super(Convolutional, self).get_dim(name)

    @property
    def num_output_channels(self):
        return self.num_filters

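A minimal usage sketch of this brick (the Constant initializers and the sizes are illustrative; the output size follows the 'valid' formula from the apply docstring):

    from theano import tensor
    from blocks.initialization import Constant

    conv = Convolutional(filter_size=(3, 3), num_filters=16, num_channels=3,
                         image_size=(32, 32), weights_init=Constant(0.1),
                         biases_init=Constant(0.))
    conv.initialize()
    y = conv.apply(tensor.tensor4('x'))
    print(conv.get_dim('output'))  # (16, 30, 30), since 32 - 3 + 1 = 30
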
class ConvolutionalTranspose(Convolutional):
    """Performs the transpose of a 2D convolution.

    Parameters
    ----------
    image_size : tuple, optional
        Required for tied biases. Defaults to ``None``.
    num_filters : int
        Number of filters at the *output* of the transposed convolution,
        i.e. the number of channels in the corresponding convolution.
    num_channels : int
        Number of channels at the *input* of the transposed convolution,
        i.e. the number of output filters in the corresponding
        convolution.
    original_image_size : tuple
        The height and width of the image that forms the output of
        the transpose operation, which is the input of the original
        (non-transposed) convolution.
    step : tuple, optional
        The step (or stride) of the corresponding *convolution*.
        Defaults to (1, 1).

    See Also
    --------
    :class:`Convolutional` : For the documentation of other parameters.

    """
    @lazy(allocation=['filter_size', 'num_filters', 'num_channels',
                      'original_image_size'])
    def __init__(self, filter_size, num_filters, num_channels,
                 original_image_size, **kwargs):
        super(ConvolutionalTranspose, self).__init__(
            filter_size, num_filters, num_channels, **kwargs)
        self.original_image_size = original_image_size

    def conv2d_impl(self, input_, W, image_shape, subsample, border_mode,
                    filter_shape):

Unused Code introduced by: The argument image_shape seems to be unused.

        # The AbstractConv2d_gradInputs op takes a kernel that was used for
        # the **convolution**. We therefore have to invert num_channels and
        # num_filters for W.
        W = W.transpose(1, 0, 2, 3)
        imshp = (None,) + self.get_dim('output')
        kshp = (filter_shape[1], filter_shape[0]) + filter_shape[2:]
        return AbstractConv2d_gradInputs(
            imshp=imshp, kshp=kshp, border_mode=border_mode,
            subsample=subsample)(W, input_, self.get_dim('output')[1:])

    def get_dim(self, name):
        if name == 'output':
            return (self.num_filters,) + self.original_image_size
        return super(ConvolutionalTranspose, self).get_dim(name)

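A matching sketch for the transpose, sized so that it inverts the 32 -> 30 'valid' convolution from the previous example (initializers again illustrative):

    from theano import tensor
    from blocks.initialization import Constant

    conv_t = ConvolutionalTranspose(
        filter_size=(3, 3), num_filters=3, num_channels=16,
        original_image_size=(32, 32), image_size=(30, 30),
        weights_init=Constant(0.1), biases_init=Constant(0.))
    conv_t.initialize()
    x_hat = conv_t.apply(tensor.tensor4('h'))
    print(conv_t.get_dim('output'))  # (3, 32, 32)
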
class Pooling(Initializable, Feedforward):
    """Base Brick for pooling operations.

    This should generally not be instantiated directly; see
    :class:`MaxPooling`.

    """
    @lazy(allocation=['mode', 'pooling_size'])
    def __init__(self, mode, pooling_size, step, input_dim, ignore_border,
                 padding, **kwargs):
        super(Pooling, self).__init__(**kwargs)
        self.pooling_size = pooling_size
        self.mode = mode
        self.step = step
        self.input_dim = input_dim if input_dim is not None else (None,) * 3
        self.ignore_border = ignore_border
        self.padding = padding

    @property
    def image_size(self):
        return self.input_dim[-2:]

    @image_size.setter
    def image_size(self, value):
        self.input_dim = self.input_dim[:-2] + value

    @property
    def num_channels(self):
        return self.input_dim[0]

    @num_channels.setter
    def num_channels(self, value):
        self.input_dim = (value,) + self.input_dim[1:]

    @application(inputs=['input_'], outputs=['output'])
    def apply(self, input_):
        """Apply the pooling (subsampling) transformation.

        Parameters
        ----------
        input_ : :class:`~tensor.TensorVariable`
            A tensor with dimension greater than or equal to 2. The last
            two dimensions will be downsampled. For example, with images
            this means that the last two dimensions should represent the
            height and width of your image.

        Returns
        -------
        output : :class:`~tensor.TensorVariable`
            A tensor with the same number of dimensions as `input_`, but
            with the last two dimensions downsampled.

        """
        output = pool_2d(input_, self.pooling_size, st=self.step,
                         mode=self.mode, padding=self.padding,
                         ignore_border=self.ignore_border)
        return output

    def get_dim(self, name):
        if name == 'input_':
            return self.input_dim
        if name == 'output':
            return tuple(Pool.out_shape(
                self.input_dim, self.pooling_size, st=self.step,
                ignore_border=self.ignore_border, padding=self.padding))

    @property
    def num_output_channels(self):
        return self.input_dim[0]


class MaxPooling(Pooling):
    """Max pooling layer.

    Parameters
    ----------
    pooling_size : tuple
        The height and width of the pooling region, i.e. this is the
        factor by which your input's last two dimensions will be
        downscaled.
    step : tuple, optional
        The vertical and horizontal shift (stride) between pooling
        regions. By default this is equal to `pooling_size`. Setting
        this to a lower number results in overlapping pooling regions.
    input_dim : tuple, optional
        A tuple of integers representing the shape of the input. The last
        two dimensions will be used to calculate the output dimension.
    padding : tuple, optional
        A tuple of integers representing the vertical and horizontal
        zero-padding to be applied to each of the top and bottom
        (vertical) and left and right (horizontal) edges. For example,
        an argument of (4, 3) will apply 4 pixels of padding to the
        top edge, 4 pixels of padding to the bottom edge, and 3 pixels
        each for the left and right edge. By default, no padding is
        performed.
    ignore_border : bool, optional
        Whether or not to do partial downsampling based on borders where
        the extent of the pooling region reaches beyond the edge of the
        image. If `True`, a (5, 5) image with (2, 2) pooling regions
        and (2, 2) step will be downsampled to shape (2, 2), otherwise
        it will be downsampled to (3, 3). `True` by default.

    Notes
    -----
    .. warning::
        As of this writing, setting `ignore_border` to `False` with a step
        not equal to the pooling size will force Theano to perform pooling
        computations on CPU rather than GPU, even if you have specified
        a GPU as your computation device. Additionally, Theano will only
        use [cuDNN]_ (if available) for pooling computations with
        `ignore_border` set to `True`. You can ensure that the entire
        input is captured by at least one pool by using the `padding`
        argument to add zero padding prior to pooling being performed.

    .. [cuDNN] `NVIDIA cuDNN <https://developer.nvidia.com/cudnn>`_.

    """
    @lazy(allocation=['pooling_size'])
    def __init__(self, pooling_size, step=None, input_dim=None,
                 ignore_border=True, padding=(0, 0),
                 **kwargs):
        super(MaxPooling, self).__init__('max', pooling_size,
                                         step=step, input_dim=input_dim,
                                         ignore_border=ignore_border,
                                         padding=padding, **kwargs)

    def __setstate__(self, state):
        self.__dict__.update(state)
        # Fix objects created before pull request #899.
        self.mode = getattr(self, 'mode', 'max')
        self.padding = getattr(self, 'padding', (0, 0))
        self.ignore_border = getattr(self, 'ignore_border', False)

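A quick sketch of the ignore_border behaviour described in the docstring; the shapes come straight from get_dim, and no parameters need initializing:

    pool = MaxPooling(pooling_size=(2, 2), input_dim=(16, 5, 5))
    print(pool.get_dim('output'))       # (16, 2, 2), ignore_border=True
    pool_keep = MaxPooling(pooling_size=(2, 2), input_dim=(16, 5, 5),
                           ignore_border=False)
    print(pool_keep.get_dim('output'))  # (16, 3, 3), partial pools kept
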
class AveragePooling(Pooling):
    """Average pooling layer.

    Parameters
    ----------
    include_padding : bool, optional
        When calculating an average, include zeros that are the
        result of zero padding added by the `padding` argument.
        A value of `True` is only accepted if `ignore_border`
        is also `True`. `False` by default.

    Notes
    -----
    For documentation on the remainder of the arguments to this
    class, see :class:`MaxPooling`.

    """
    @lazy(allocation=['pooling_size'])
    def __init__(self, pooling_size, step=None, input_dim=None,
                 ignore_border=True, padding=(0, 0),
                 include_padding=False, **kwargs):
        mode = 'average_inc_pad' if include_padding else 'average_exc_pad'
        super(AveragePooling, self).__init__(mode, pooling_size,
                                             step=step, input_dim=input_dim,
                                             ignore_border=ignore_border,
                                             padding=padding, **kwargs)

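As the constructor shows, include_padding only selects between Theano's two average-pooling modes:

    avg = AveragePooling(pooling_size=(2, 2), padding=(1, 1),
                         include_padding=True)
    print(avg.mode)  # 'average_inc_pad'; include_padding=False
                     # would give 'average_exc_pad'
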
class _AllocationMixin(object):
    def _push_allocation_config(self):
        for attr in ['filter_size', 'num_filters', 'border_mode',
                     'batch_size', 'num_channels', 'image_size',
                     'tied_biases', 'use_bias']:
            setattr(self.convolution, attr, getattr(self, attr))

    @property
    def num_output_channels(self):
        # Assumes an elementwise activation function. Would need to
        # change to support e.g. maxout, but that would also require
        # a way of querying the activation function for this kind of
        # information.
        return self.num_filters


class ConvolutionalActivation(_AllocationMixin, Sequence, Initializable):
    """A convolution followed by an activation function.

    Parameters
    ----------
    activation : :class:`.BoundApplication`
        The application method to apply after convolution (i.e.
        the nonlinear activation function).

    See Also
    --------
    :class:`Convolutional` : For the documentation of other parameters.

    """
    @lazy(allocation=['filter_size', 'num_filters', 'num_channels'])
    def __init__(self, activation, filter_size, num_filters, num_channels,
                 batch_size=None, image_size=None, step=(1, 1),
                 border_mode='valid', tied_biases=False, **kwargs):
        self.convolution = Convolutional()

        self.filter_size = filter_size
        self.num_filters = num_filters
        self.num_channels = num_channels
        self.batch_size = batch_size
        self.image_size = image_size
        self.step = step
        self.border_mode = border_mode
        self.tied_biases = tied_biases

        super(ConvolutionalActivation, self).__init__(
            application_methods=[self.convolution.apply, activation],
            **kwargs)

    def get_dim(self, name):
        # TODO The name of the activation output doesn't need to be `output`
        return self.convolution.get_dim(name)

    def _push_allocation_config(self):
        super(ConvolutionalActivation, self)._push_allocation_config()
        self.convolution.step = self.step

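A usage sketch (Rectifier is the ReLU brick from blocks.bricks; as the docstring says, the activation argument is a bound application method; initializers illustrative):

    from theano import tensor
    from blocks.bricks import Rectifier
    from blocks.initialization import Constant

    conv_relu = ConvolutionalActivation(
        Rectifier().apply, filter_size=(3, 3), num_filters=16,
        num_channels=3, image_size=(32, 32),
        weights_init=Constant(0.1), biases_init=Constant(0.))
    conv_relu.initialize()
    y = conv_relu.apply(tensor.tensor4('x'))
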
class ConvolutionalTransposeActivation(_AllocationMixin, Sequence,
                                       Initializable):
    """A transposed convolution followed by an activation function.

    Parameters
    ----------
    activation : :class:`.BoundApplication`
        The application method to apply after convolution (i.e.
        the nonlinear activation function).

    See Also
    --------
    :class:`ConvolutionalTranspose` : For the documentation of other
        parameters.

    """
    @lazy(allocation=['filter_size', 'num_filters', 'num_channels',
                      'original_image_size'])
    def __init__(self, activation, filter_size, num_filters, num_channels,
                 original_image_size, batch_size=None, image_size=None,
                 step=(1, 1), border_mode='valid', tied_biases=False,
                 **kwargs):
        self.convolution = ConvolutionalTranspose()

        self.filter_size = filter_size
        self.num_filters = num_filters
        self.num_channels = num_channels
        self.batch_size = batch_size
        self.image_size = image_size
        self.original_image_size = original_image_size
        self.step = step
        self.border_mode = border_mode
        self.tied_biases = tied_biases

        super(ConvolutionalTransposeActivation, self).__init__(
            application_methods=[self.convolution.apply, activation],
            **kwargs)

    def get_dim(self, name):
        # TODO The name of the activation output doesn't need to be `output`
        return self.convolution.get_dim(name)

    def _push_allocation_config(self):
        super(ConvolutionalTransposeActivation, self)._push_allocation_config()
        self.convolution.step = self.step
        self.convolution.original_image_size = self.original_image_size


class ConvolutionalSequence(Sequence, Initializable, Feedforward):
    """A sequence of convolutional (or pooling) operations.

    Parameters
    ----------
    layers : list
        List of convolutional bricks (i.e. :class:`Convolutional`,
        :class:`ConvolutionalActivation`, or :class:`Pooling` bricks).
    num_channels : int
        Number of input channels in the image. For the first layer this is
        normally 1 for grayscale images and 3 for color (RGB) images. For
        subsequent layers this is equal to the number of filters output by
        the previous convolutional layer.
    batch_size : int, optional
        Number of images in batch. If given, this will be passed to
        Theano's convolution operator, possibly resulting in faster
        execution.
    image_size : tuple, optional
        Height and width of the input (image or feature map). If given,
        this will be passed to Theano's convolution operator, possibly
        resulting in faster execution.
    border_mode : 'valid', 'full' or None, optional
        The border mode to use; see :func:`scipy.signal.convolve2d` for
        details. Unlike with :class:`Convolutional`, this defaults to
        None, in which case no default value is pushed down to child
        bricks at allocation time. Child bricks will in this case
        need to rely on either a default border mode (usually 'valid')
        or one provided at construction and/or after construction
        (but before allocation).

    Notes
    -----
    The passed convolutional operators should be constructed lazily, that
    is, without specifying the batch_size, num_channels and image_size. The
    main feature of :class:`ConvolutionalSequence` is that it will set the
    input dimensions of a layer to the output dimensions of the previous
    layer by the :meth:`~.Brick.push_allocation_config` method.

    The reason the `border_mode` parameter behaves the way it does is that
    pushing a single default `border_mode` makes it very difficult to
    have child bricks with different border modes. Normally, such things
    would be overridden after `push_allocation_config()`, but this is
    a particular hassle as the border mode affects the allocation
    parameters of every subsequent child brick in the sequence. Thus, only
    an explicitly specified border mode will be pushed down the hierarchy.

    """
    @lazy(allocation=['num_channels'])
    def __init__(self, layers, num_channels, batch_size=None, image_size=None,
                 border_mode=None, tied_biases=False, **kwargs):
        self.layers = layers
        self.image_size = image_size
        self.num_channels = num_channels
        self.batch_size = batch_size
        self.border_mode = border_mode
        self.tied_biases = tied_biases

        application_methods = [brick.apply for brick in layers]
        super(ConvolutionalSequence, self).__init__(
            application_methods=application_methods, **kwargs)

    def get_dim(self, name):
        if name == 'input_':
            return ((self.num_channels,) + self.image_size)

Coding Style introduced by: There is an unnecessary parenthesis after return.

        if name == 'output':
            return self.layers[-1].get_dim(name)
        return super(ConvolutionalSequence, self).get_dim(name)

    def _push_allocation_config(self):
        num_channels = self.num_channels
        image_size = self.image_size
        for layer in self.layers:
            if self.border_mode is not None:
                layer.border_mode = self.border_mode
            layer.tied_biases = self.tied_biases
            layer.image_size = image_size
            layer.num_channels = num_channels
            layer.batch_size = self.batch_size
            layer.use_bias = self.use_bias

            # Push input dimensions to children
            layer._push_allocation_config()

Coding Style Best Practice introduced by: It seems like _push_allocation_config was declared protected and should not be accessed from this context. Prefixing a member with _ marks it as protected by convention; such members should only be accessed from the same class or a subclass.

            # Retrieve output dimensions
            # and set them for the next layer
            if layer.image_size is not None:
                output_shape = layer.get_dim('output')
                image_size = output_shape[1:]
            num_channels = layer.num_output_channels

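A sketch of the lazy-construction pattern described in the Notes above: the child bricks omit num_channels and image_size, and the sequence pushes those down at allocation time (sizes and initializers are illustrative):

    from blocks.bricks import Rectifier
    from blocks.initialization import Constant

    seq = ConvolutionalSequence(
        [ConvolutionalActivation(Rectifier().apply, filter_size=(5, 5),
                                 num_filters=32),
         MaxPooling(pooling_size=(2, 2)),
         Convolutional(filter_size=(5, 5), num_filters=64)],
        num_channels=3, image_size=(32, 32),
        weights_init=Constant(0.1), biases_init=Constant(0.))
    seq.initialize()
    # 'valid' convolutions: 32 -> 28, pooling -> 14, conv -> 10
    print(seq.get_dim('output'))  # (64, 10, 10)
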
class Flattener(Brick):
    """Flattens the input.

    It may be used to pass multidimensional objects, like images or the
    feature maps of convolutional bricks, into bricks that only allow
    two-dimensional input of shape (batch, features), such as :class:`MLP`.

    """
    @application(inputs=['input_'], outputs=['output'])
    def apply(self, input_):
        return input_.flatten(ndim=2)
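
For instance, flattening a 4D image batch into the (batch, features) layout an MLP expects:

    from theano import tensor

    flat = Flattener()
    features = flat.apply(tensor.tensor4('images'))
    # (batch, channels, height, width) -> (batch, channels*height*width)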