Conditions        | 20
Total Lines       | 114
Code Lines        | 89
Duplicated Lines  | 18
Duplication Ratio | 15.79 % (18 / 114)
Changes           | 0
Small methods make your code easier to understand, particularly when combined with a good name. Moreover, if your method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, that is usually a sign that the commented part should be extracted into a new method, with the comment serving as a starting point for its name.
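As a hypothetical illustration (the names below are invented, not taken from this codebase), the comment that describes a block often becomes the name of the extracted method:

```python
# Before: a comment explains what the next few lines do.
def summarize(values):
    # drop obvious outliers before averaging
    cleaned = [v for v in values if abs(v) < 1e6]
    return sum(cleaned) / len(cleaned)

# After: the commented block is its own method, named after the comment.
def drop_obvious_outliers(values):
    return [v for v in values if abs(v) < 1e6]

def summarize_extracted(values):
    cleaned = drop_obvious_outliers(values)
    return sum(cleaned) / len(cleaned)
```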
Commonly applied refactorings for long methods include Extract Method. If many parameters or temporary variables are present, refactorings such as Replace Temp with Query or Introduce Parameter Object can reduce them first.
Complex methods like mutis.correlation.Correlation.gen_corr() often do a lot of different things. To break such a method and its enclosing class down, we need to identify a cohesive component within the class. A common approach to finding such a component is to look for fields and methods that share the same prefixes or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.
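In gen_corr() (listed below), the fields l1s, l2s, l3s and s1s, s2s, s3s share suffixes and are always computed together from a Monte Carlo sample, which makes them one plausible component. A minimal sketch of extracting them, using a hypothetical ConfidenceBands class that is not part of MUTIS:

```python
import numpy as np

class ConfidenceBands:
    """Hypothetical container for the 1/2/3-sigma percentile bands of a Monte Carlo sample."""

    def __init__(self, mc_sample):
        # Same percentile pairs as gen_corr() uses for l3s/l2s/l1s and s3s/s2s/s1s.
        self.band3 = np.percentile(mc_sample, [0.135, 99.865], axis=0)
        self.band2 = np.percentile(mc_sample, [2.28, 97.73], axis=0)
        self.band1 = np.percentile(mc_sample, [15.865, 84.135], axis=0)

# gen_corr() could then end with something like:
#     self.corr_bands = ConfidenceBands(mc_corr)
#     if uncert:
#         self.sig_bands = ConfidenceBands(mc_sig)
```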
```python
# Licensed under a 3-clause BSD style license - see LICENSE

# ... (module imports and the rest of the Correlation class are omitted in this excerpt;
#      np, log, welsh_ab, kroedel_ab and nindcf are defined at module level) ...

def gen_corr(self, uncert=True, dsamples=500):
    """Generates the correlation of the signals.

    Generates the correlation of the signals, and computes their
    confidence level from the synthetic light curves, which must
    have been generated before.
    """

    if (uncert is True) and (self.signal1.dvalues is None):
        log.error("uncert is True but no uncertainties for Signal 1 were specified")
        uncert = False
    if (uncert is True) and (self.signal2.dvalues is None):
        log.error("uncert is True but no uncertainties for Signal 2 were specified")
        uncert = False

    if not len(self.times) or not len(self.dts):
        raise Exception(
            "You need to define the times on which to calculate the correlation. "
            "Please use gen_times() or manually set them."
        )

    # TODO: refactor if/elif with a helper function
    mc_corr = np.empty((self.samples, self.times.size))
    mc_sig = np.empty((dsamples, self.times.size))

    if self.fcorr == "welsh_ab":
        for n in range(self.samples):
            mc_corr[n] = welsh_ab(
                self.signal1.times,
                self.signal1.synth[n],
                self.signal2.times,
                self.signal2.synth[n],
                self.times,
                self.dts,
            )
        if uncert is True:  # flagged by the review tool as a duplicated block
            for n in range(dsamples):
                mc_sig[n] = welsh_ab(
                    self.signal1.times,
                    self.signal1.values + self.signal1.dvalues * np.random.randn(self.signal1.values.size),
                    self.signal2.times,
                    self.signal2.values + self.signal2.dvalues * np.random.randn(self.signal2.values.size),
                    self.times,
                    self.dts,
                )
        self.values = welsh_ab(
            self.signal1.times,
            self.signal1.values,
            self.signal2.times,
            self.signal2.values,
            self.times,
            self.dts,
        )
    elif self.fcorr == "kroedel_ab":
        for n in range(self.samples):
            mc_corr[n] = kroedel_ab(
                self.signal1.times,
                self.signal1.synth[n],
                self.signal2.times,
                self.signal2.synth[n],
                self.times,
                self.dts,
            )
        if uncert is True:  # flagged by the review tool as a duplicated block
            for n in range(dsamples):
                mc_sig[n] = kroedel_ab(
                    self.signal1.times,
                    self.signal1.values + self.signal1.dvalues * np.random.randn(self.signal1.values.size),
                    self.signal2.times,
                    self.signal2.values + self.signal2.dvalues * np.random.randn(self.signal2.values.size),
                    self.times,
                    self.dts,
                )
        self.values = kroedel_ab(
            self.signal1.times,
            self.signal1.values,
            self.signal2.times,
            self.signal2.values,
            self.times,
            self.dts,
        )
    elif self.fcorr == "numpy":
        for n in range(self.samples):
            mc_corr[n] = nindcf(
                self.signal1.times,
                self.signal1.synth[n],
                self.signal2.times,
                self.signal2.synth[n],
            )
        if uncert is True:
            for n in range(dsamples):
                mc_sig[n] = nindcf(
                    self.signal1.times,
                    self.signal1.values + self.signal1.dvalues * np.random.randn(self.signal1.values.size),
                    self.signal2.times,
                    self.signal2.values + self.signal2.dvalues * np.random.randn(self.signal2.values.size),
                )
        self.values = nindcf(
            self.signal1.times,
            self.signal1.values,
            self.signal2.times,
            self.signal2.values,
        )
    else:
        raise Exception("Unknown method " + self.fcorr + " for correlation.")

    # Confidence bands from the correlations of the synthetic light curves
    self.l3s = np.percentile(mc_corr, [0.135, 99.865], axis=0)
    self.l2s = np.percentile(mc_corr, [2.28, 97.73], axis=0)
    self.l1s = np.percentile(mc_corr, [15.865, 84.135], axis=0)

    if uncert is True:
        # Uncertainty bands from resampling the measured values within their errors
        self.s3s = np.percentile(mc_sig, [0.135, 99.865], axis=0)
        self.s2s = np.percentile(mc_sig, [2.28, 97.73], axis=0)
        self.s1s = np.percentile(mc_sig, [15.865, 84.135], axis=0)
```
192 | |||
357 |
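Finally, the # TODO and the two duplicated if uncert blocks point at the same Extract Method opportunity: the three fcorr branches differ only in which correlation function they call and whether the binning arguments self.times and self.dts are passed. Below is a rough sketch of such a helper, with a hypothetical name _gen_corr_with and assuming the correlation functions keep their current signatures; it is an illustration, not code from the MUTIS repository:

```python
import numpy as np

def _gen_corr_with(self, corr_fn, uncert, dsamples, extra_args=()):
    """Hypothetical helper capturing the loop structure shared by the three fcorr branches.

    corr_fn is one of welsh_ab, kroedel_ab or nindcf; extra_args is (self.times, self.dts)
    for the binned methods and () for the numpy one.
    """
    s1, s2 = self.signal1, self.signal2

    # Correlations of the synthetic light curves (significance Monte Carlo).
    mc_corr = np.empty((self.samples, self.times.size))
    for n in range(self.samples):
        mc_corr[n] = corr_fn(s1.times, s1.synth[n], s2.times, s2.synth[n], *extra_args)

    # Correlations of the measured values resampled within their uncertainties.
    mc_sig = np.empty((dsamples, self.times.size))
    if uncert:
        for n in range(dsamples):
            mc_sig[n] = corr_fn(
                s1.times, s1.values + s1.dvalues * np.random.randn(s1.values.size),
                s2.times, s2.values + s2.dvalues * np.random.randn(s2.values.size),
                *extra_args,
            )

    # Correlation of the measured values themselves.
    self.values = corr_fn(s1.times, s1.values, s2.times, s2.values, *extra_args)
    return mc_corr, mc_sig

# gen_corr() would then reduce to dispatching on self.fcorr, e.g.:
#     mc_corr, mc_sig = self._gen_corr_with(welsh_ab, uncert, dsamples, (self.times, self.dts))
```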