| Metric | Value |
| --- | --- |
| Conditions | 3 |
| Total Lines | 54 |
| Code Lines | 26 |
| Lines | 54 |
| Ratio | 100 % |
| Changes | 0 |
Small methods make your code easier to understand, particularly when combined with a good name. And if your method is small, finding a good name is usually much easier.

For example, if you find yourself adding comments to a method's body, this is usually a good sign that you should extract the commented part into a new method, using the comment as a starting point for the new method's name (see the sketch after the lists below).

Commonly applied refactorings include:

- Extract Method

If many parameters/temporary variables are present:

- Replace Temp with Query
- Introduce Parameter Object
- Preserve Whole Object
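As a concrete illustration of Extract Method, the commented block below is lifted into its own function and the comment becomes the function name. This is a minimal, hypothetical sketch: the `Order` type and the discount rule are invented for the example and are not taken from this repository.

```python
from dataclasses import dataclass
import datetime


@dataclass
class Order:            # toy type, purely for illustration
    date: datetime.date
    total: float


# Before: one long function with an explanatory comment inside it.
def process_order_before(order: Order) -> Order:
    # apply seasonal discount
    if order.date.month == 12:
        order.total *= 0.9
    return order


# After Extract Method: the comment has become the function name.
def apply_seasonal_discount(order: Order) -> None:
    if order.date.month == 12:
        order.total *= 0.9


def process_order(order: Order) -> Order:
    apply_seasonal_discount(order)
    return order


if __name__ == "__main__":
    print(process_order(Order(datetime.date(2024, 12, 1), 100.0)).total)  # 90.0
```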
The duplicated block flagged above is the `setup` method of this Savu plugin (file lines 44–98; only the copyright header from line 1 is shown for context, and lines 2–43 are not part of the duplication view):

```python
# Copyright 2014 Diamond Light Source Ltd.
# (file lines 2-43 are omitted here; they include the module-level imports,
#  such as "import copy", which setup() relies on)

def setup(self):
    in_dataset, out_dataset = self.get_datasets()

    #=================== populate output dataset ==========================
    # Due to the reduction in dimensions, the out_dataset will have
    # different axis_labels, patterns and shape to the in_dataset and
    # these will need to be defined.
    # For more information about the syntax used here see:
    # http://savu.readthedocs.io/en/latest/api_plugin/savu.data.data_structures.data_create

    # AMEND THE PATTERNS: The output dataset will have one dimension less
    # than the in_dataset, so remove the final slice dimension from any
    # patterns you want to keep.
    rm_dim = str(in_dataset[0].get_data_patterns()
                 ['SINOGRAM']['slice_dims'][-1])
    patterns = ['SINOGRAM.' + rm_dim, 'PROJECTION.' + rm_dim]

    # AMEND THE AXIS LABELS: Find the dimensions to remove using their
    # axis_labels to ensure the plugin is as generic as possible and will
    # work for data in all orientations.
    axis_labels = copy.copy(in_dataset[0].get_axis_labels())
    rm_labels = ['detector_x', 'detector_y']
    rm_dims = sorted([in_dataset[0].get_data_dimension_by_axis_label(a)
                      for a in rm_labels])[::-1]
    for d in rm_dims:
        del axis_labels[d]
    # Add a new axis label to the list
    axis_labels.append({'Q': 'Angstrom^-1'})

    # AMEND THE SHAPE: Remove the two unrequired dimensions from the
    # original shape and add a new dimension shape.
    shape = list(in_dataset[0].get_shape())
    for d in rm_dims:
        del shape[d]
    shape += (self.get_parameters('num_bins'),)

    # populate the output dataset
    out_dataset[0].create_dataset(
            patterns={in_dataset[0]: patterns},
            axis_labels=axis_labels,
            shape=tuple(shape))

    # ASSOCIATE AN EXTRA PATTERN WITH THE DATASET: SINOGRAM and PROJECTION
    # patterns are already associated with the output dataset, but add
    # another one.
    spectrum = \
        {'core_dims': (-1,), 'slice_dims': tuple(range(len(shape)-1))}
    out_dataset[0].add_pattern("SPECTRUM", **spectrum)
    #======================================================================

    #================== populate plugin datasets ==========================
    in_pData, out_pData = self.get_plugin_datasets()
    in_pData[0].plugin_data_setup('DIFFRACTION', 'single')
    out_pData[0].plugin_data_setup('SPECTRUM', 'single')
    #======================================================================
```
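One detail of the `setup` method worth calling out: `rm_dims` is sorted in descending order before the `del` loops, so removing a lower-indexed dimension cannot shift the indices of dimensions still to be deleted. Below is a standalone sketch of that pattern, using made-up axis labels and a made-up shape rather than the plugin's real datasets.

```python
# Illustration of deleting list entries by index in reverse order.
# The labels and shape are invented for this example.
axis_labels = ['rotation_angle', 'detector_y', 'detector_x']
shape = [91, 135, 160]

rm_dims = sorted([axis_labels.index(a)
                  for a in ['detector_x', 'detector_y']])[::-1]
# rm_dims == [2, 1]: deleting index 2 first leaves index 1 still pointing
# at 'detector_y'. Deleting in ascending order ([1, 2]) would shift the
# remaining entries and, here, raise an IndexError on the second del.
for d in rm_dims:
    del axis_labels[d]
    del shape[d]

print(axis_labels)  # ['rotation_angle']
print(shape)        # [91]
```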