| Metric | Value |
| --- | --- |
| Conditions | 8 |
| Total Lines | 58 |
| Code Lines | 44 |
| Lines | 0 |
| Ratio | 0 % |
| Changes | 0 |
Small methods make your code easier to understand, especially in combination with a good name. And if a method is small, finding a good name for it is usually much easier.
For example, if you find yourself adding a comment to explain part of a method's body, that is usually a good sign that the commented part should be extracted into a new method, with the comment serving as a starting point for its name, as sketched below.
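To make this comment-driven extraction concrete, here is a minimal before/after sketch. The `invoice_total` example, its fields, and the 19 % tax rate are invented purely for illustration and are not taken from the listing discussed later:

```python
from dataclasses import dataclass
from typing import List


@dataclass
class Item:
    price: float
    quantity: int


TAX_RATE = 1.19  # assumed 19 % tax, purely illustrative


# Before: comments mark blocks that each do one identifiable job.
def invoice_total(items: List[Item], discount: float) -> float:
    # calculate the gross total including tax
    total = sum(i.price * i.quantity for i in items)
    total *= TAX_RATE
    # apply the customer discount
    return total * (1.0 - discount)


# After: each commented block becomes its own well-named function,
# and the comments are no longer needed.
def gross_total_including_tax(items: List[Item]) -> float:
    return sum(i.price * i.quantity for i in items) * TAX_RATE


def apply_customer_discount(total: float, discount: float) -> float:
    return total * (1.0 - discount)


def invoice_total_refactored(items: List[Item], discount: float) -> float:
    return apply_customer_discount(gross_total_including_tax(items), discount)


if __name__ == '__main__':
    items = [Item(10.0, 2), Item(5.0, 1)]
    # The refactoring preserves behaviour for this toy input.
    assert invoice_total(items, 0.1) == invoice_total_refactored(items, 0.1)
```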
Commonly applied refactorings include Extract Method. If many parameters or temporary variables get in the way of extraction, Replace Temp with Query, Introduce Parameter Object, or Preserve Whole Object can reduce them first (see the sketch below).
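As a sketch of Replace Temp with Query, under the same caveat that the `Order` class and its pricing rules are invented for illustration: turning the temporary into a query method removes the local variable that would otherwise have to be passed into every extracted method.

```python
from dataclasses import dataclass


@dataclass
class Order:
    quantity: int
    item_price: float

    # Before: a temporary variable ties the calculation to this one method
    # and has to be passed along if any part of the logic is extracted.
    def price_with_temp(self) -> float:
        base_price = self.quantity * self.item_price
        if base_price > 1000:
            return base_price * 0.95
        return base_price * 0.98

    # After: the temp becomes a query, so extracted methods can simply call it.
    def base_price(self) -> float:
        return self.quantity * self.item_price

    def price(self) -> float:
        if self.base_price() > 1000:
            return self.base_price() * 0.95
        return self.base_price() * 0.98


if __name__ == '__main__':
    order = Order(quantity=120, item_price=9.5)
    assert order.price_with_temp() == order.price()
```

The longer listing below shows a method where several such extraction opportunities accumulate.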
```python
# Copyright 2014 Diamond Light Source Ltd.
# ...

def __get_backing_file(self, data_obj):
    fname = '%s/%s.h5' % \
        (self.exp.get('out_path'), self.parameters['file_name'])

    if os.path.exists(fname):
        return h5py.File(fname, 'r')

    self.hdf5 = Hdf5Utils(self.exp)

    size = tuple(self.parameters['size'])

    patterns = data_obj.get_data_patterns()
    p_name = patterns[self.parameters['pattern']] if \
        self.parameters['pattern'] is not None else list(patterns.keys())[0]
    p_name = list(patterns.keys())[0]
    p_dict = patterns[p_name]
    p_dict['max_frames_transfer'] = 1
    nnext = {p_name: p_dict}

    pattern_idx = {'current': nnext, 'next': nnext}
    chunking = Chunking(self.exp, pattern_idx)
    chunks = chunking._calculate_chunking(size, np.int16)

    h5file = self.hdf5._open_backing_h5(fname, 'w')
    dset = h5file.create_dataset('test', size, chunks=chunks)

    self.exp._barrier()

    slice_dirs = list(nnext.values())[0]['slice_dims']
    nDims = len(dset.shape)
    total_frames = np.prod([dset.shape[i] for i in slice_dirs])
    sub_size = \
        [1 if i in slice_dirs else dset.shape[i] for i in range(nDims)]

    # need an mpi barrier after creating the file before populating it
    idx = 0
    sl, total_frames = \
        self.__get_start_slice_list(slice_dirs, dset.shape, total_frames)
    # calculate the first slice
    for i in range(total_frames):
        low, high = self.parameters['range']
        dset[tuple(sl)] = np.random.randint(
            low, high=high, size=sub_size, dtype=self.parameters['dtype_'])
        if sl[slice_dirs[idx]].stop == dset.shape[slice_dirs[idx]]:
            idx += 1
            if idx == len(slice_dirs):
                break
        tmp = sl[slice_dirs[idx]]
        sl[slice_dirs[idx]] = slice(tmp.start+1, tmp.stop+1)

    self.exp._barrier()

    try:
        h5file.close()
    except:
        logging.debug('There was a problem trying to close the file in random_hdf5_loader')

    return self.hdf5._open_backing_h5(fname, 'r')
```
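To tie this back to the advice above, the following is a deliberately simplified, self-contained stand-in for the random-fill loop in the listing, using plain NumPy only and none of the Hdf5Utils/Chunking/MPI machinery. The helper names, the single slice dimension, and the overall simplification are assumptions made for illustration; this is not a refactoring of the real plugin.

```python
import numpy as np


def random_block(shape, low, high, dtype=np.int16):
    """One random block of the requested shape, mirroring the
    np.random.randint call inside the loop above."""
    return np.random.randint(low, high=high, size=shape, dtype=dtype)


def fill_with_random_frames(data, slice_dim, low, high):
    """Fill `data` one frame at a time along `slice_dim`; a much simplified
    analogue of the slice-advancing loop in __get_backing_file."""
    frame_shape = [1 if i == slice_dim else n for i, n in enumerate(data.shape)]
    for frame in range(data.shape[slice_dim]):
        sl = [slice(None)] * data.ndim
        sl[slice_dim] = slice(frame, frame + 1)
        data[tuple(sl)] = random_block(frame_shape, low, high, data.dtype)


if __name__ == '__main__':
    data = np.zeros((4, 3, 2), dtype=np.int16)
    fill_with_random_frames(data, slice_dim=0, low=0, high=10)
    print(data.shape, int(data.min()), int(data.max()))
```

Each helper carries a name that states its intent, which is exactly what the comment-to-method advice above aims for.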