| Conditions | 22 |
| Total Lines | 100 |
| Lines | 0 |
| Ratio | 0 % |
| Changes | 6 | ||
| Bugs | 1 | Features | 0 |
Small methods make your code easier to understand, in particular if combined with a good name. Besides, if your method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.
Commonly applied refactorings include:
If many parameters/temporary variables are present: Replace Temp with Query, Introduce Parameter Object, or Preserve Whole Object.
Complex functions like fit() often do a lot of different things. To break such a function down, we need to identify a cohesive component within it. A common approach to find such a component is to look for fields/methods that share the same prefixes, or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.
| 1 | #!/usr/bin/env python |
||
def _process_chunk(model, fluxes, ivars, output_filenames, initial_labels,
    fit_velocity, delete_meta_keys, logger):
    """
    Fit the currently buffered spectra as one chunk, pickle each result to its
    output file, and clear the buffers in place so the caller can keep
    re-using the same list objects.

    :param model:
        A trained AnniesLasso model (provides the `fit` method).
    :param fluxes:
        Buffered flux arrays; cleared in place on return.
    :param ivars:
        Buffered inverse-variance arrays; cleared in place on return.
    :param output_filenames:
        Output pickle path for each buffered spectrum; cleared in place.
    :param initial_labels:
        Initial label estimates passed through to `model.fit`.
    :param fit_velocity:
        Whether to model the redshift during fitting.
    :param delete_meta_keys:
        Metadata keys to strip from each result before saving.
    :param logger:
        Logger used to report each saved output.
    """
    results, covs, metas = model.fit(fluxes, ivars,
        initial_labels=initial_labels, model_redshift=fit_velocity,
        full_output=True)

    for result, cov, meta, output_filename \
    in zip(results, covs, metas, output_filenames):

        # Strip bulky entries (e.g. the fitting Jacobian) to save disk space.
        for key in delete_meta_keys:
            meta.pop(key, None)

        with open(output_filename, "wb") as fp:
            pickle.dump((result, cov, meta), fp, 2) # For legacy.
        logger.info("Saved output to {}".format(output_filename))

    # Slice deletion empties the caller's lists without rebinding them.
    del output_filenames[:], fluxes[:], ivars[:]


def fit(model_filename, spectrum_filenames, threads, clobber, from_filename,
    **kwargs):
    """
    Fit a series of spectra with a trained model.

    :param model_filename:
        Path to a saved, trained AnniesLasso model.
    :param spectrum_filenames:
        Paths of pickled (flux, ivar) spectra to fit, or -- when
        `from_filename` is true -- a sequence whose first entry is a text
        file listing one spectrum path per line.
    :param threads:
        Number of threads used to load the model; values > 1 also enable
        chunked (batched) fitting.
    :param clobber:
        Overwrite existing output files; otherwise existing ones are skipped.
    :param from_filename:
        Treat `spectrum_filenames[0]` as a file containing the actual list
        of spectrum paths.
    :param kwargs:
        Optional keywords: `parallel_chunks` (spectra fitted per batch when
        threads > 1; default 1000), `fit_velocity` (model the redshift;
        default False), `output_suffix` (suffix appended to each output
        filename; default "result").
    """

    import AnniesLasso as tc

    model = tc.load_model(model_filename, threads=threads)
    logger = logging.getLogger("AnniesLasso")
    assert model.is_trained

    # Batch the spectra when multi-threaded so model.fit can parallelise;
    # single-threaded runs fit (and save) one spectrum at a time.
    chunk_size = kwargs.pop("parallel_chunks", 1000) if threads > 1 else 1
    fluxes = []
    ivars = []
    output_filenames = []
    failures = 0

    fit_velocity = kwargs.pop("fit_velocity", False)

    # MAGIC HACK: initial labels are read from a hard-coded file in the
    # current working directory.
    delete_meta_keys = ("fjac", ) # To save space...
    initial_labels = loadtxt("initial_labels.txt")

    if from_filename:
        with open(spectrum_filenames[0], "r") as fp:
            spectrum_filenames = [line.strip() for line in fp]

    output_suffix = kwargs.get("output_suffix", None)
    output_suffix = "result" if output_suffix is None else str(output_suffix)
    N = len(spectrum_filenames)
    for i, filename in enumerate(spectrum_filenames):
        logger.info("At spectrum {0}/{1}: {2}".format(i + 1, N, filename))

        basename, _ = os.path.splitext(filename)
        output_filename = "-".join([basename, output_suffix]) + ".pkl"

        if os.path.exists(output_filename) and not clobber:
            logger.info("Output filename {} already exists and not clobbering."\
                .format(output_filename))
            continue

        try:
            # NOTE(review): pickle.load is unsafe on untrusted input; these
            # spectra are assumed to be trusted local data products.
            with open(filename, "rb") as fp:
                flux, ivar = pickle.load(fp)
            fluxes.append(flux)
            ivars.append(ivar)
            output_filenames.append(output_filename)

        except Exception:
            # Was a bare `except:`, which also swallowed KeyboardInterrupt.
            logger.exception("Error occurred loading {}".format(filename))
            failures += 1

        else:
            if len(output_filenames) >= chunk_size:
                _process_chunk(model, fluxes, ivars, output_filenames,
                    initial_labels, fit_velocity, delete_meta_keys, logger)

    # Fit whatever remains that did not fill a complete chunk.
    if len(output_filenames) > 0:
        _process_chunk(model, fluxes, ivars, output_filenames,
            initial_labels, fit_velocity, delete_meta_keys, logger)

    logger.info("Number of failures: {}".format(failures))
    logger.info("Number of successes: {}".format(N - failures))

    return None
||
| 117 | |||
| 318 |