Code Duplication    Length = 51-52 lines in 3 locations

savu/plugins/driver/cpu_iterative_plugin.py 1 location

@@ 248-299 (lines=52) @@
        '''
        self._ip_complete = True

    def __finalise_datasets(self):
        '''
        Inspect the two Data objects that are used to contain the input and
        output data for iterations over the course of the iterative processing
        (input/output depending on which particular iteration was being done).

        Mark one of them as the "final dataset" to be added to the output
        NeXuS file, and mark the other as "obsolete/to be removed".

        The decision between which one is kept and which one is removed
        depends on which Data object contains the OUTPUT of the very last
        iteration.

        For an odd number of iterations, this is the "original" Data object.
        For an even number of iteration, this is the "clone" Data object.
        '''
        for s1, s2 in self._ip_data_dict['iterating'].items():
            name = s1.get_name()
            name = name if 'itr_clone' not in name else s2.get_name()
            final_dataset = s1 if s1 in self.parameters['out_datasets'] else s2
            obsolete = s1 if s1 is not final_dataset else s2
            obsolete.remove = True

            # switch names if necessary
            if final_dataset.get_name() != name:
                # If this is true, then the output dataset of the last
                # iteration is the clone Data object (hence, the mismatched
                # names).
                #
                # So then:
                # - obsolete = original
                # - final_dataset = clone
                #
                # which means that the CLONED dataset needs to be set in the
                # Experiment object (self.exp) as the "out data", but under
                # the name of the ORIGINAL dataset.
                # And also, the ORIGINAL dataset is set in the Experiment
                # object, but under the name of the CLONED/OBSOLETE dataset
                temp = obsolete
                self.exp.index['out_data'][name] = final_dataset
                self.exp.index['out_data'][s2.get_name()] = temp
                # One last thing to do in this case is to set the "name"
                # inside the Data object that final_result is set to.
                #
                # This is because, in this case, the CLONED dataset is in
                # final_result, and the "name" within the Data object will
                # be some value like "itr_0".
                #
                # However, the name within the Data object needs to be the
                # name of the ORIGINAL Data object in order for the creation
                # of the output NeXuS file to work.
                final_dataset._set_name(name)

    def __set_original_datasets(self):
        '''
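
The odd/even rule stated in the docstring above follows from the way the input and output roles ping-pong between the original dataset and its clone from one iteration to the next. As a rough sketch (hypothetical dataset names, and assuming the first iteration writes into the original out dataset, which is what the stated parity implies):

    # Minimal sketch, not Savu API: trace which dataset receives the output
    # of the last iteration when input/output alternate each iteration.
    original, clone = 'tomo', 'tomo_itr_clone'

    def last_output(n_iterations):
        out = original  # assumed: iteration 0 writes into the original
        for _ in range(1, n_iterations):
            out = clone if out == original else original
        return out

    for n in (1, 2, 3, 4):
        print(n, '->', last_output(n))  # 1/3 -> original, 2/4 -> clone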

savu/plugins/driver/iterative_plugin.py 1 location

@@ 166-216 (lines=51) @@
    def get_iteration(self):
        return self._ip_iteration

    def __finalise_datasets(self):
        '''
        Inspect the two Data objects that are used to contain the input and
        output data for iterations over the course of the iterative processing
        (input/output depending on which particular iteration was being done).

        Mark one of them as the "final dataset" to be added to the output
        NeXuS file, and mark the other as "to be removed".

        The decision between which one is kept and which one is removed
        depends on which Data object contains the OUTPUT of the very last
        iteration.

        For an odd number of iterations, this is the "original" Data object.
        For an even number of iteration, this is the "clone" Data object.
        '''
        for s1, s2 in self._ip_data_dict['iterating'].items():
            name = s1.get_name()
            name = name if 'itr_clone' not in name else s2.get_name()
            final_dataset = s1 if s1 in self.parameters['out_datasets'] else s2
            obsolete = s1 if s1 is not final_dataset else s2
            obsolete.remove = True
            # switch names if necessary
            if final_dataset.get_name() != name:
                # If this is true, then the output dataset of the last
                # iteration is the clone Data object (hence, the mismatched
                # names).
                #
                # So then:
                # - obsolete = original
                # - final_dataset = clone
                #
                # which means that the CLONED dataset needs to be set in the
                # Experiment object (self.exp) as the "out data", but under
                # the name of the ORIGINAL dataset.
                # And also, the ORIGINAL dataset is set in the Experiment
                # object, but under the name of the CLONED/OBSOLETE dataset
                temp = obsolete
                self.exp.index['out_data'][name] = final_dataset
                self.exp.index['out_data'][s2.get_name()] = temp
                # One last thing to do in this case is to set the "name"
                # inside the Data object that final_result is set to.
                #
                # This is because, in this case, the CLONED dataset is in
                # final_result, and the "name" within the Data object will
                # be some value like "itr_0".
                #
                # However, the name within the Data object needs to be the
                # name of the ORIGINAL Data object in order for the creation
                # of the output NeXuS file to work.
                final_dataset._set_name(name)

    def set_processing_complete(self):
        self._ip_complete = True

savu/core/iterative_plugin_runner.py 1 location

@@ 399-450 (lines=52) @@
        self.end_plugin._finalise_datasets()
        self.end_plugin._finalise_plugin_datasets()

    def _finalise_iterated_datasets(self, exp):
        '''
        Inspect the two Data objects that are used to contain the input and
        output data for iterations over the course of the iterative processing
        (input/output depending on which particular iteration was being done).

        Mark one of them as the "final dataset" to be added to the output
        NeXuS file, and mark the other as "obsolete/to be removed".

        The decision between which one is kept and which one is removed
        depends on which Data object contains the OUTPUT of the very last
        iteration.

        For an odd number of iterations, this is the "original" Data object.
        For an even number of iteration, this is the "clone" Data object.
        '''
        for s1, s2 in self._ip_data_dict['iterating'].items():
            name = s1.get_name()
            name = name if 'itr_clone' not in name else s2.get_name()
            final_dataset = s1 if s1 in self.end_plugin.parameters['out_datasets'] else s2
            obsolete = s1 if s1 is not final_dataset else s2
            obsolete.remove = True

            # switch names if necessary
            if final_dataset.get_name() != name:
                # If this is true, then the output dataset of the last
                # iteration is the clone Data object (hence, the mismatched
                # names).
                #
                # So then:
                # - obsolete = original
                # - final_dataset = clone
                #
                # which means that the CLONED dataset needs to be set in the
                # Experiment object (self.exp) as the "out data", but under
                # the name of the ORIGINAL dataset.
                # And also, the ORIGINAL dataset is set in the Experiment
                # object, but under the name of the CLONED/OBSOLETE dataset
                temp = obsolete
                exp.index['out_data'][name] = final_dataset
                exp.index['out_data'][s2.get_name()] = temp
                # One last thing to do in this case is to set the "name"
                # inside the Data object that final_result is set to.
                #
                # This is because, in this case, the CLONED dataset is in
                # final_result, and the "name" within the Data object will
                # be some value like "itr_0".
                #
                # However, the name within the Data object needs to be the
                # name of the ORIGINAL Data object in order for the creation
                # of the output NeXuS file to work.
                final_dataset._set_name(name)

    def set_alternating_datasets(self):
        d1 = self.end_plugin.parameters['out_datasets'][0]
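
All three locations implement the same finalisation logic and differ only in how they reach the plugin's 'out_datasets' parameter and the Experiment object's 'out_data' index, so the duplication could plausibly be removed by delegating to one shared helper. The sketch below is only an illustration of that idea; the function finalise_iterated_datasets and its argument names are hypothetical and not part of the existing Savu code. It assumes only the Data interface already used above (get_name(), _set_name() and the 'remove' attribute).

    # Hypothetical shared helper (a sketch, not existing Savu API).
    def finalise_iterated_datasets(iterating, out_datasets, out_data_index):
        '''
        For each original/clone pair in `iterating`, keep whichever Data
        object holds the output of the final iteration (under the original
        dataset's name) and mark the other one for removal.
        '''
        for s1, s2 in iterating.items():
            name = s1.get_name()
            name = name if 'itr_clone' not in name else s2.get_name()
            final_dataset = s1 if s1 in out_datasets else s2
            obsolete = s1 if s1 is not final_dataset else s2
            obsolete.remove = True

            if final_dataset.get_name() != name:
                # The clone holds the final result: register it under the
                # original name, park the original under the clone's name,
                # and rename the clone so the NeXuS output file is written
                # correctly.
                out_data_index[name] = final_dataset
                out_data_index[s2.get_name()] = obsolete
                final_dataset._set_name(name)

Each of the three duplicated methods above would then reduce to a single call, for example (illustrative only, using the attributes visible in the first two locations):

    finalise_iterated_datasets(self._ip_data_dict['iterating'],
                               self.parameters['out_datasets'],
                               self.exp.index['out_data'])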