| Metric | Value |
| --- | --- |
| Conditions | 29 |
| Total Lines | 187 |
| Code Lines | 153 |
| Lines | 0 |
| Ratio | 0 % |
| Changes | 0 |
Small methods make your code easier to understand, especially when combined with a good name. And if your method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.
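As a hedged sketch of that advice (the functions and the comment here are invented for illustration, not taken from mutis), the commented block becomes a helper whose name says what the comment said:

```python
import numpy as np


# Before: a comment explains what the block of statements does.
def analyse(values):
    # detrend: subtract the linear best fit from the values
    x = np.arange(values.size)
    slope, intercept = np.polyfit(x, values, 1)
    detrended = values - (slope * x + intercept)
    return detrended.std()


# After: the comment has become the name of a small, focused method.
def detrend(values):
    """Subtract the linear best fit from the values."""
    x = np.arange(values.size)
    slope, intercept = np.polyfit(x, values, 1)
    return values - (slope * x + intercept)


def analyse_extracted(values):
    return detrend(values).std()
```

The comment can then disappear, because the method name carries the same information.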
Commonly applied refactorings include:

- Extract Method: pull a cohesive block of statements into its own, well-named method.
- If many parameters/temporary variables are present: Introduce Parameter Object or Replace Method with Method Object, so the extracted pieces do not need long argument lists (a minimal sketch follows this list).
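As a hedged illustration of the second point (the class name and fields are hypothetical, not part of the mutis API), the six arrays that gen_corr() passes to every correlation call could travel together as one parameter object:

```python
from dataclasses import dataclass

import numpy as np


@dataclass
class CorrelationInput:
    """Bundles the arguments that each correlation call in gen_corr() receives."""

    times1: np.ndarray   # sampling times of the first signal
    values1: np.ndarray  # values of the first signal
    times2: np.ndarray   # sampling times of the second signal
    values2: np.ndarray  # values of the second signal
    times: np.ndarray    # lag bins on which the correlation is evaluated
    dts: np.ndarray      # widths of those lag bins


def correlate(inp: CorrelationInput) -> np.ndarray:
    """Placeholder standing in for welsh/kroedel; it only shows the call shape."""
    return np.zeros(inp.times.size)


# Usage: one object instead of six positional arguments at every call site.
inp = CorrelationInput(
    times1=np.linspace(0, 10, 50),
    values1=np.random.randn(50),
    times2=np.linspace(0, 10, 50),
    values2=np.random.randn(50),
    times=np.linspace(-5, 5, 21),
    dts=np.full(21, 0.5),
)
print(correlate(inp).shape)  # (21,)
```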
gen_corr() is a method of mutis.correlation.Correlation, and like many complex methods it does a lot of different things. When the complexity sits at class level, a common approach is to look for fields/methods that share the same prefixes or suffixes; once you have determined the fields that belong together, you can apply the Extract Class refactoring (if the component makes sense as a subclass, Extract Subclass is also a candidate and is often the faster change).
Here, however, the complexity sits inside a single method, so Extract Method, as sketched after the listing below, is the more direct fix, and it is also what the TODO comment in the code itself suggests.
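For the class-level case, a minimal, generic sketch of Extract Class (the names are invented for illustration and have nothing to do with mutis): fields sharing a prefix move into their own class.

```python
# Before: the header_* prefix hints at a component hiding inside the class.
class Report:
    def __init__(self):
        self.header_title = ""
        self.header_author = ""
        self.header_date = ""
        self.body = ""


# After: the prefixed fields become a class of their own.
class Header:
    def __init__(self, title="", author="", date=""):
        self.title = title
        self.author = author
        self.date = date


class ReportRefactored:
    def __init__(self):
        self.header = Header()  # one cohesive component instead of three loose fields
        self.body = ""
```

The gen_corr() excerpt under review: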
```python
# Licensed under a 3-clause BSD style license - see LICENSE

# ...

def gen_corr(self, uncert=True, dsamples=500):
    """Generates the correlation of the signals.

    Generates the correlation of the signals, and computes their
    confidence level from the synthetic light curves, which must
    have been generated before.
    """

    if uncert and self.signal1.dvalues is None:
        log.error(
            "uncert is True but no uncertainties for Signal 1 were specified, setting uncert to False"
        )
        uncert = False
    if uncert and self.signal2.dvalues is None:
        log.error(
            "uncert is True but no uncertainties for Signal 2 were specified, setting uncert to False"
        )
        uncert = False

    if len(self.times) == 0 or len(self.dts) == 0:
        raise Exception(
            "You need to define the times on which to calculate the correlation."
            "Please use gen_times() or manually set them."
        )

    # TODO: refactor if/elif with a helper function
    mc_corr = np.empty((self.samples, self.times.size))
    if uncert:
        mc_sig = np.empty((dsamples, self.times.size))

    if self.fcorr == "welsh":
        for idx in range(self.samples):
            mc_corr[idx] = welsh(
                self.signal1.times,
                self.signal1.synth[idx],
                self.signal2.times,
                self.signal2.synth[idx],
                self.times,
                self.dts,
            )
        if uncert:
            for idx in range(dsamples):
                mc_sig[idx] = welsh(
                    self.signal1.times,
                    self.signal1.values
                    + self.signal1.dvalues * np.random.randn(self.signal1.values.size),
                    self.signal2.times,
                    self.signal2.values
                    + self.signal2.dvalues * np.random.randn(self.signal2.values.size),
                    self.times,
                    self.dts,
                )
        self.values = welsh(
            self.signal1.times,
            self.signal1.values,
            self.signal2.times,
            self.signal2.values,
            self.times,
            self.dts,
        )
    elif self.fcorr == "kroedel":
        for idx in range(self.samples):
            mc_corr[idx] = kroedel(
                self.signal1.times,
                self.signal1.synth[idx],
                self.signal2.times,
                self.signal2.synth[idx],
                self.times,
                self.dts,
            )
        if uncert:
            for idx in range(dsamples):
                mc_sig[idx] = kroedel(
                    self.signal1.times,
                    self.signal1.values
                    + self.signal1.dvalues * np.random.randn(self.signal1.values.size),
                    self.signal2.times,
                    self.signal2.values
                    + self.signal2.dvalues * np.random.randn(self.signal2.values.size),
                    self.times,
                    self.dts,
                )
        self.values = kroedel(
            self.signal1.times,
            self.signal1.values,
            self.signal2.times,
            self.signal2.values,
            self.times,
            self.dts,
        )
    elif self.fcorr == "welsh_old":  # should produce the exactly same results, but we keep it for debugs and testcov
        for idx in range(self.samples):
            mc_corr[idx] = welsh_old(
                self.signal1.times,
                self.signal1.synth[idx],
                self.signal2.times,
                self.signal2.synth[idx],
                self.times,
                self.dts,
            )
        if uncert:
            for idx in range(dsamples):
                mc_sig[idx] = welsh_old(
                    self.signal1.times,
                    self.signal1.values
                    + self.signal1.dvalues * np.random.randn(self.signal1.values.size),
                    self.signal2.times,
                    self.signal2.values
                    + self.signal2.dvalues * np.random.randn(self.signal2.values.size),
                    self.times,
                    self.dts,
                )
        self.values = welsh_old(
            self.signal1.times,
            self.signal1.values,
            self.signal2.times,
            self.signal2.values,
            self.times,
            self.dts,
        )
    elif self.fcorr == "kroedel_old":  # should produce the exactly same results, but we keep it for debugs and testcov
        for idx in range(self.samples):
            mc_corr[idx] = kroedel_old(
                self.signal1.times,
                self.signal1.synth[idx],
                self.signal2.times,
                self.signal2.synth[idx],
                self.times,
                self.dts,
            )
        if uncert:
            for idx in range(dsamples):
                mc_sig[idx] = kroedel_old(
                    self.signal1.times,
                    self.signal1.values
                    + self.signal1.dvalues * np.random.randn(self.signal1.values.size),
                    self.signal2.times,
                    self.signal2.values
                    + self.signal2.dvalues * np.random.randn(self.signal2.values.size),
                    self.times,
                    self.dts,
                )
        self.values = kroedel_old(
            self.signal1.times,
            self.signal1.values,
            self.signal2.times,
            self.signal2.values,
            self.times,
            self.dts,
        )
    elif self.fcorr == "numpy":
        for idx in range(self.samples):
            mc_corr[idx] = nindcf(
                self.signal1.times,
                self.signal1.synth[idx],
                self.signal2.times,
                self.signal2.synth[idx],
            )
        if uncert:
            for idx in range(dsamples):
                mc_sig[idx] = nindcf(
                    self.signal1.times,
                    self.signal1.values
                    + self.signal1.dvalues * np.random.randn(self.signal1.values.size),
                    self.signal2.times,
                    self.signal2.values
                    + self.signal2.dvalues * np.random.randn(self.signal2.values.size),
                )
        self.values = nindcf(
            self.signal1.times,
            self.signal1.values,
            self.signal2.times,
            self.signal2.values,
        )
    else:
        raise Exception("Unknown method " + self.fcorr + " for correlation.")

    self.l3s = np.percentile(mc_corr, [0.135, 99.865], axis=0)
    self.l2s = np.percentile(mc_corr, [2.28, 97.73], axis=0)
    self.l1s = np.percentile(mc_corr, [15.865, 84.135], axis=0)

    self.mc_corr = mc_corr  # save them to be able to compute exact significance later...

    if uncert:
        self.s3s = np.percentile(mc_sig, [0.135, 99.865], axis=0)
        self.s2s = np.percentile(mc_sig, [2.28, 97.73], axis=0)
        self.s1s = np.percentile(mc_sig, [15.865, 84.135], axis=0)
```
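The TODO comment in the listing points at the most direct simplification: the five branches differ only in which correlation function they call and in whether that function receives the (times, dts) binning arguments. Below is a minimal sketch of that Extract Method refactoring. It is an illustration, not the mutis implementation: the helper names (_corr_function, _correlate) are invented, it assumes the same module-level functions and attributes used in the listing, and the validation and percentile code from the original is elided.

```python
class Correlation:
    # ... existing attributes (signal1, signal2, times, dts, samples, fcorr) ...

    def _corr_function(self):
        """Look up the correlation function once. The flag says whether the
        function also takes the (times, dts) binning arguments (nindcf does not)."""
        table = {
            "welsh": (welsh, True),
            "kroedel": (kroedel, True),
            "welsh_old": (welsh_old, True),
            "kroedel_old": (kroedel_old, True),
            "numpy": (nindcf, False),
        }
        if self.fcorr not in table:
            raise Exception("Unknown method " + self.fcorr + " for correlation.")
        return table[self.fcorr]

    def _correlate(self, fcorr, needs_bins, values1, values2):
        """Correlate one pair of value arrays with the chosen function."""
        args = (self.signal1.times, values1, self.signal2.times, values2)
        if needs_bins:
            args += (self.times, self.dts)
        return fcorr(*args)

    def gen_corr(self, uncert=True, dsamples=500):
        # ... validation of uncert, times and dts exactly as in the original ...
        fcorr, needs_bins = self._corr_function()

        # Correlations of the synthetic light curves (for the significance bands).
        mc_corr = np.empty((self.samples, self.times.size))
        for idx in range(self.samples):
            mc_corr[idx] = self._correlate(
                fcorr, needs_bins, self.signal1.synth[idx], self.signal2.synth[idx]
            )

        # Correlations of the measured values perturbed by their uncertainties.
        if uncert:
            mc_sig = np.empty((dsamples, self.times.size))
            for idx in range(dsamples):
                mc_sig[idx] = self._correlate(
                    fcorr,
                    needs_bins,
                    self.signal1.values
                    + self.signal1.dvalues * np.random.randn(self.signal1.values.size),
                    self.signal2.values
                    + self.signal2.dvalues * np.random.randn(self.signal2.values.size),
                )

        # Correlation of the measured values themselves.
        self.values = self._correlate(
            fcorr, needs_bins, self.signal1.values, self.signal2.values
        )
        # ... percentile/confidence-band computation exactly as in the original ...
```

With the branching isolated in one lookup table, supporting a new correlation method becomes a one-line change, and gen_corr() itself shrinks to the loops that actually compute something.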