Conditions | 3 |
Total Lines | 51 |
Code Lines | 24 |
Lines | 0 |
Ratio | 0 % |
Tests | 18 |
CRAP Score | 3.009 |
Changes | 0 |
Small methods make your code easier to understand, in particular if combined with a good name. Besides, if your method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.
Commonly applied refactorings include:
If many parameters/temporary variables are present:
Methods with many parameters are not only hard to understand, but their parameters also often become inconsistent when you need more, or different data.
There are several approaches to avoid long parameter lists:
1 | # -*- coding: utf-8 -*- |
||
def plot_random_samples(model, times, data, errs,
                        samples, scale, filename, size=8,
                        extra_years=(0, 0)):
    """Plot random samples and data

    Parameters
    ----------
    model: `celerite.Model`, `george.Model`, or `celerite.ModelSet`
        The Gaussian Process model to plot samples from.
    times: array_like
        Time axis values.
    data: array_like
        Data values.
    errs: array_like
        Data uncertainties for the errorbars.
    samples: array_like
        The (MCMC) sample of the parameter vector.
    scale: float
        The scale factor of the data to adjust the value axis.
    filename: string
        Output filename of the figure file.
    size: int, optional
        Number of samples to plot.
    extra_years: list or tuple, optional
        Extend the prediction period extra_years[0] into the past
        and extra_years[1] into the future.
        NOTE: default changed from the mutable ``[0, 0]`` to the
        immutable ``(0, 0)`` — behaviorally identical for callers,
        but avoids the shared-mutable-default pitfall.
    """
    # Prediction grid, extended beyond the observed time range.
    t = np.linspace(times.min() - extra_years[0],
                    times.max() + extra_years[1], 2000)
    fig, ax = plt.subplots()
    # Plot the data with 2-sigma errorbars.
    ax.errorbar(times, data, yerr=2 * errs, fmt=".k", zorder=8)
    ax.set_xlim(np.min(t), np.max(t))
    ax.set_xlabel("time [years]")
    ax.set_ylabel("number density [10$^{{{0:.0f}}}$ cm$^{{-3}}$]"
                  .format(-np.log10(scale)))
    # Draw `size` random posterior samples over the prediction grid.
    for i in np.random.randint(len(samples), size=size):
        logging.info("plotting random sample %s.", i)
        logging.debug("sample values: %s", samples[i])
        model.set_parameter_vector(samples[i])
        logging.debug("sample log likelihood: %s", model.log_likelihood(data))
        # Compute the prediction conditioned on the observations and plot it.
        mu, cov = model.predict(data, t=t)
        try:
            # celerite kernels expose a jitter term; other models
            # (e.g. george) raise AttributeError, which we ignore.
            cov[np.diag_indices_from(cov)] += model.kernel.jitter
        except AttributeError:
            pass
        sample = np.random.multivariate_normal(mu, cov, 1)[0]
        ax.plot(t, sample, color="C0", alpha=0.25, zorder=12)
    fig.savefig(filename, transparent=True)
    # Close the figure so repeated calls don't accumulate open
    # matplotlib figures (a memory leak in batch plotting).
    plt.close(fig)