Total Complexity | 56
Total Lines | 548
Duplicated Lines | 7.12 %
Changes | 0
Duplicate code is one of the most pungent code smells. A rule that is often used is to restructure code once it is duplicated in three or more places.

Complex classes like mutis.lib.correlation often do a lot of different things. To break such a class down, we need to identify a cohesive component within that class. A common approach to find such a component is to look for fields/methods that share the same prefixes, or suffixes.

Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster; a minimal sketch follows.
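For illustration only, a minimal sketch of Extract Class under the assumption that the `gen_times_*` helpers (which share a prefix and operate on the same pair of time arrays) form the cohesive component; the `TimeBinning` class and its method names are hypothetical and not part of MUTIS:

    # Hypothetical Extract Class sketch -- TimeBinning is NOT part of MUTIS;
    # it only illustrates grouping the gen_times_* helpers behind one component.
    import numpy as np

    from mutis.lib.correlation import gen_times_canopy, gen_times_uniform


    class TimeBinning:
        """Cohesive component for the binning helpers sharing the gen_times_ prefix."""

        def __init__(self, t1, t2):
            self.t1 = np.asarray(t1)
            self.t2 = np.asarray(t2)

        def uniform(self, **kwargs):
            # Delegates to the existing module-level function.
            return gen_times_uniform(self.t1, self.t2, **kwargs)

        def canopy(self, **kwargs):
            return gen_times_canopy(self.t1, self.t2, **kwargs)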
# Licensed under a 3-clause BSD style license - see LICENSE
"""Methods for correlation of light curves."""

import logging

import numpy as np

from numba import float64, guvectorize

from mutis.lib.utils import get_grid

__all__ = [
    "kroedel_old",
    "welsh_old",
    "kroedel",
    "welsh",
    "nindcf",
    "gen_times_rawab",
    "gen_times_uniform",
    "gen_times_canopy",
]

log = logging.getLogger(__name__)

def _kroedel_old(t1, d1, t2, d2, t, dt):
    """Correlation over a single bin centred on t with width dt (helper for kroedel_old)."""
    t1m, t2m = get_grid(t1, t2)
    d1m, d2m = np.meshgrid(d1, d2)

    # Keep only the pairs (i, j) whose time difference t2[j] - t1[i] falls inside the bin.
    mask = ((t - dt / 2) < (t2m - t1m)) & ((t2m - t1m) < (t + dt / 2))

    udcf = (d1m - np.mean(d1)) * (d2m - np.mean(d2)) / np.std(d1) / np.std(d2)

    return np.mean(udcf[mask])

def kroedel_old(t1, d1, t2, d2, t, dt):
    """Krolik & Edelson (1988) correlation with adaptive binning.

    Old version, not using numba. Kept for debugging purposes.
    """
    if t.size != dt.size:
        log.error("Error: t and dt are not the same size")
        return False
    if t1.size != d1.size:
        log.error("Error: t1 and d1 are not the same size")
        return False
    if t2.size != d2.size:
        log.error("Error: t2 and d2 are not the same size")
        return False

    res = np.empty(t.size)
    for i in range(t.size):
        res[i] = _kroedel_old(t1, d1, t2, d2, t[i], dt[i])
    return res

def _welsh_old(t1, d1, t2, d2, t, dt):
    """Correlation over a single bin centred on t with width dt (helper for welsh_old)."""
    t1m, t2m = get_grid(t1, t2)
    d1m, d2m = np.meshgrid(d1, d2)

    msk = ((t - dt / 2) < (t2m - t1m)) & ((t2m - t1m) < (t + dt / 2))

    # Welsh's variant normalises with the local (masked) means and deviations.
    udcf = (
        (d1m - np.mean(d1m[msk])) * (d2m - np.mean(d2m[msk])) / np.std(d1m[msk]) / np.std(d2m[msk])
    )

    return np.mean(udcf[msk])

def welsh_old(t1, d1, t2, d2, t, dt):
    """Welsh (1999) correlation with adaptive binning.

    Old version, not using numba. Kept for debugging purposes.
    """
    if t.size != dt.size:
        log.error("Error: t and dt are not the same size")
        return False
    if t1.size != d1.size:
        log.error("Error: t1 and d1 are not the same size")
        return False
    if t2.size != d2.size:
        log.error("Error: t2 and d2 are not the same size")
        return False

    res = np.empty(t.size)
    for i in range(t.size):
        res[i] = _welsh_old(t1, d1, t2, d2, t[i], dt[i])
    return res

@guvectorize(
    [(float64[:], float64[:], float64[:], float64[:], float64[:], float64[:], float64[:])],
    '(n),(n),(m),(m),(c),(c)->(c)',
)
def _kroedel_numba(t1, d1, t2, d2, t, dt, res):  # pragma: no cover
    """Helper function for kroedel()."""
    d1_mean = np.mean(d1)
    d2_mean = np.mean(d2)

    d1_std = np.std(d1)
    d2_std = np.std(d2)

    for k in range(len(t)):
        # Accumulate the UDCF over every pair whose time difference falls
        # inside the bin centred on t[k] with width dt[k].
        N = 0
        numerator = 0

        for i in range(len(t1)):
            for j in range(len(t2)):
                if ((t[k] - dt[k] / 2) < (t2[j] - t1[i])) & ((t2[j] - t1[i]) < (t[k] + dt[k] / 2)):
                    numerator = numerator + (d1[i] - d1_mean) * (d2[j] - d2_mean)
                    N = N + 1

        res[k] = numerator / N / d1_std / d2_std

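The layout string '(n),(n),(m),(m),(c),(c)->(c)' in the decorator above declares that the two signals may have different lengths (`n` and `m`) and that the output takes the shape of the binning arrays (`c`). As a reference, here is a minimal toy kernel using the same mechanism; `sum_below` is a made-up name for illustration and is not part of MUTIS:

    # Toy guvectorize kernel (illustrative only): for each threshold b[k],
    # sum the elements of a that lie below it. The layout "(n),(c)->(c)"
    # maps an n-vector and a c-vector of thresholds to a c-vector of sums.
    import numpy as np
    from numba import float64, guvectorize

    @guvectorize([(float64[:], float64[:], float64[:])], "(n),(c)->(c)")
    def sum_below(a, b, out):
        for k in range(len(b)):
            acc = 0.0
            for i in range(len(a)):
                if a[i] < b[k]:
                    acc += a[i]
            out[k] = acc

    # sum_below(np.arange(5.0), np.array([2.0, 4.0])) -> array([1., 6.])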
def kroedel(t1, d1, t2, d2, t, dt):
    """Krolik & Edelson (1988) correlation with adaptive binning.

    This function implements the correlation function proposed by
    Krolik & Edelson (1988), which allows for the computation of
    the correlation for *discrete* signals non-uniformly sampled
    in time.

    Parameters
    ----------
    t1 : :class:`~numpy.ndarray`
        Times corresponding to the first signal.
    d1 : :class:`~numpy.ndarray`
        Values of the first signal.
    t2 : :class:`~numpy.ndarray`
        Times corresponding to the second signal.
    d2 : :class:`~numpy.ndarray`
        Values of the second signal.
    t : :class:`~numpy.ndarray`
        Times on which to compute the correlation (binning).
    dt : :class:`~numpy.ndarray`
        Size of the bins on which to compute the correlation.

    Returns
    -------
    res : :class:`~numpy.ndarray` (size `len(t)`)
        Values of the correlation at the times `t`.

    Examples
    --------
    An example of raw usage would be:

    >>> import numpy as np
    >>> from mutis.lib.correlation import kroedel
    >>> t1 = np.linspace(1, 10, 100); d1 = np.sin(t1)
    >>> t2 = np.linspace(1, 10, 100); d2 = np.cos(t2)
    >>> t = np.linspace(1, 10, 100); dt = np.full(t.shape, 0.1)
    >>> kroedel(t1, d1, t2, d2, t, dt)

    However, it is recommended to use it as explained in the
    standard MUTIS workflow notebook.
    """
    if t.size != dt.size:
        log.error("Error: t and dt are not the same size")
        return False
    if t1.size != d1.size:
        log.error("Error: t1 and d1 are not the same size")
        return False
    if t2.size != d2.size:
        log.error("Error: t2 and d2 are not the same size")
        return False

    return _kroedel_numba(t1, d1, t2, d2, t, dt)

@guvectorize(
    [(float64[:], float64[:], float64[:], float64[:], float64[:], float64[:], float64[:])],
    '(n),(n),(m),(m),(c),(c)->(c)',
)
def _welsh_numba(t1, d1, t2, d2, t, dt, res):  # pragma: no cover
    """Helper function for welsh()."""
    for k in range(len(t)):
        # Running sums over the pairs falling inside the bin, from which the
        # local means and standard deviations are computed afterwards.
        N = 0

        d1_M_sum = 0
        d1_M_sq_sum = 0

        d2_M_sum = 0
        d2_M_sq_sum = 0

        d1_d2_M_sum = 0

        for i in range(len(t1)):
            for j in range(len(t2)):
                if ((t[k] - dt[k] / 2) < (t2[j] - t1[i])) & ((t2[j] - t1[i]) < (t[k] + dt[k] / 2)):
                    d1_M_sum = d1_M_sum + d1[i]
                    d2_M_sum = d2_M_sum + d2[j]

                    d1_M_sq_sum = d1_M_sq_sum + d1[i] ** 2
                    d2_M_sq_sum = d2_M_sq_sum + d2[j] ** 2

                    d1_d2_M_sum = d1_d2_M_sum + d1[i] * d2[j]

                    N = N + 1

        d1_M_mean = d1_M_sum / N
        d2_M_mean = d2_M_sum / N

        # Local standard deviations via E[x^2] - E[x]^2 over the masked pairs.
        d1_M_std = (d1_M_sq_sum / N - d1_M_mean ** 2) ** 0.5
        d2_M_std = (d2_M_sq_sum / N - d2_M_mean ** 2) ** 0.5

        res[k] = (d1_d2_M_sum / N - d1_M_mean * d2_M_mean) / d1_M_std / d2_M_std

def welsh(t1, d1, t2, d2, t, dt):
    """Welsh (1999) correlation with adaptive binning.

    This function implements the correlation function proposed
    by Welsh (1999), which allows for the computation of the correlation
    for *discrete* signals non-uniformly sampled in time.

    Parameters
    ----------
    t1 : :class:`~numpy.ndarray`
        Times corresponding to the first signal.
    d1 : :class:`~numpy.ndarray`
        Values of the first signal.
    t2 : :class:`~numpy.ndarray`
        Times corresponding to the second signal.
    d2 : :class:`~numpy.ndarray`
        Values of the second signal.
    t : :class:`~numpy.ndarray`
        Times on which to compute the correlation (binning).
    dt : :class:`~numpy.ndarray`
        Size of the bins on which to compute the correlation.

    Returns
    -------
    res : :class:`~numpy.ndarray` (size `len(t)`)
        Values of the correlation at the times `t`.

    Examples
    --------
    An example of raw usage would be:

    >>> import numpy as np
    >>> from mutis.lib.correlation import welsh
    >>> t1 = np.linspace(1, 10, 100); d1 = np.sin(t1)
    >>> t2 = np.linspace(1, 10, 100); d2 = np.cos(t2)
    >>> t = np.linspace(1, 10, 100); dt = np.full(t.shape, 0.1)
    >>> welsh(t1, d1, t2, d2, t, dt)

    However, it is recommended to use it as explained in the
    standard MUTIS workflow notebook.
    """
    if t.size != dt.size:
        log.error("Error: t and dt are not the same size")
        return False
    if t1.size != d1.size:
        log.error("Error: t1 and d1 are not the same size")
        return False
    if t2.size != d2.size:
        log.error("Error: t2 and d2 are not the same size")
        return False

    return _welsh_numba(t1, d1, t2, d2, t, dt)

def ndcf(x, y):
    """Computes the normalised correlation of two discrete signals (ignoring times)."""
    # Normalise so that the zero-lag autocorrelation of a signal with itself is 1.
    x = (x - np.mean(x)) / np.std(x) / len(x)
    y = (y - np.mean(y)) / np.std(y)
    return np.correlate(y, x, "full")

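For reference, a short usage sketch of `ndcf`; it relies on `numpy.correlate` in "full" mode, which for two length-N inputs returns 2N - 1 lags with zero lag at index N - 1:

    >>> import numpy as np
    >>> from mutis.lib.correlation import ndcf
    >>> x = np.sin(np.linspace(0, 10, 100))
    >>> corr = ndcf(x, x)
    >>> corr.size
    199
    >>> float(round(corr[x.size - 1], 2))  # zero-lag autocorrelation is normalised to 1
    1.0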
def nindcf(t1, s1, t2, s2):
    """Computes the normalised correlation of two discrete signals (interpolating them)."""
    # Interpolate both signals onto uniform grids roughly 10x finer than the
    # coarsest original sampling, then correlate the interpolated values.
    dt = np.max([(t1.max() - t1.min()) / t1.size, (t2.max() - t2.min()) / t2.size])
    n1 = int(np.ptp(t1) / dt * 10.0)
    n2 = int(np.ptp(t2) / dt * 10.0)
    s1i = np.interp(np.linspace(t1.min(), t1.max(), n1), t1, s1)
    s2i = np.interp(np.linspace(t2.min(), t2.max(), n2), t2, s2)
    return ndcf(s1i, s2i)

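A usage sketch with unevenly sampled toy data (illustrative values only):

    >>> import numpy as np
    >>> from mutis.lib.correlation import nindcf
    >>> rng = np.random.default_rng(0)
    >>> t1 = np.sort(rng.uniform(0, 10, 50)); s1 = np.sin(t1)
    >>> t2 = np.sort(rng.uniform(0, 10, 80)); s2 = np.sin(t2 - 1.0)
    >>> corr = nindcf(t1, s1, t2, s2)

The index of the peak of `corr` then corresponds to the lag between the two signals on the interpolated, uniformly sampled grids.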
def gen_times_rawab(t1, t2, dt0=None, ndtmax=1.0, nbinsmin=121, force=None):
    """LEGACY. Returns t, dt for use with adaptive binning methods.

    Uses a crude algorithm to find a time binning in which each bin contains
    a minimum number of points (specified by `nbinsmin`), with a starting bin
    size (`dt0`) and a maximum bin size (`ndtmax * dt0`).

    The algorithm starts at the first time bin, and enlarges the bin size
    until it has enough points or it reaches the maximum length, then creates
    another bin starting at that point.

    `force` is a list of times that are forced to appear in the binning
    (by default `[0]`, so that zero lag is always present); the bin around
    each of them is enlarged until it contains enough points or reaches
    the maximum size.
    """

    # Sensible values for these parameters must be found by hand, and depend
    # on the characteristics of the input data.
    #
    # dt0:
    #     minimum bin size, also used as step in adaptive binning.
    #     default: dt0 = 0.25 * (tmax - tmin) / np.sqrt(t1.size * t2.size + 1)
    #     (more or less a statistically reasonable binning,
    #     to increase precision)
    # ndtmax:
    #     maximum size of bins (in units of dt0).
    #     default: 1.0
    # nbinsmin:
    #     if the data has a lot of error, higher values are needed
    #     to soften the correlation beyond spurious variability.
    #     default: 121 (11x11)

    # tmin = -(np.min([t1.max(), t2.max()]) - np.max([t1.min(), t2.min()]))
    tmax = +(np.max([t1.max(), t2.max()]) - np.min([t1.min(), t2.min()]))
    tmin = -tmax

    if dt0 is None:
        dt0 = 0.25 * (tmax - tmin) / np.sqrt(t1.size * t2.size + 1)

    t = np.array([])
    dt = np.array([])
    nb = np.array([])
    t1m, t2m = np.meshgrid(t1, t2)

    ti = tmin
    tf = ti + dt0

    while tf < tmax:
        tm = (ti + tf) / 2
        dtm = tf - ti
        nbins = np.sum(((tm - dtm / 2) < (t2m - t1m)) & ((t2m - t1m) < (tm + dtm / 2)))
        if dtm <= dt0 * ndtmax:
            if nbins >= nbinsmin:
                t = np.append(t, tm)
                dt = np.append(dt, dtm)
                nb = np.append(nb, nbins)
                ti, tf = tf, tf + dt0
            else:
                tf = tf + 0.1 * dt0  # try small increments
        else:
            ti, tf = tf, tf + dt0

    # Force the given times (zero lag by default) to appear in t.
    if force is None:
        force = [0]
    for tm in force:
        dtm = dt0 / 2
        while dtm <= dt0 * ndtmax:
            # Recount the pairs each time the bin is enlarged.
            nbins = np.sum(((tm - dtm / 2) < (t2m - t1m)) & ((t2m - t1m) < (tm + dtm / 2)))
            if nbins >= nbinsmin:
                t = np.append(t, tm)
                dt = np.append(dt, dtm)
                nb = np.append(nb, nbins)
                break
            dtm = dtm + dt0

    idx = np.argsort(t)
    t = t[idx]
    dt = dt[idx]
    nb = nb[idx]

    return t, dt, nb

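A short sketch tying this generator to the correlation functions above (illustrative values; a sensible `nbinsmin` depends on the data):

    >>> import numpy as np
    >>> from mutis.lib.correlation import gen_times_rawab, kroedel
    >>> t1 = np.linspace(1, 10, 100); d1 = np.sin(t1)
    >>> t2 = np.linspace(1, 10, 100); d2 = np.cos(t2)
    >>> t, dt, nb = gen_times_rawab(t1, t2, nbinsmin=11)
    >>> res = kroedel(t1, d1, t2, d2, t, dt)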
def gen_times_uniform(t1, t2, dtm=None, tmin=None, tmax=None, nbinsmin=121, n=200):
    """Returns a uniform t, dt time binning for use with adaptive binning methods.

    The time interval on which the correlation is defined is split into
    `n` bins. Bins containing fewer points than `nbinsmin` are discarded.

    Parameters
    ----------
    t1 : :class:`~numpy.ndarray`
        Times of the first signal.
    t2 : :class:`~numpy.ndarray`
        Times of the second signal.
    dtm : :class:`~float`
        Size of the bins (if not specified, the full interval divided by `n`).
    tmin : :class:`~float`
        Start of the time interval (if not specified, the start of the
        interval on which the correlation is defined).
    tmax : :class:`~float`
        End of the time interval (if not specified, the end of the
        interval on which the correlation is defined).
    nbinsmin : :class:`~int`
        Minimum number of points falling on each bin.
    n : :class:`~int`
        Number of bins into which to split (need not be the number of
        bins returned).

    Returns
    -------
    t : :class:`~numpy.ndarray`
        Time binning on which to compute the correlation.
    dt : :class:`~numpy.ndarray`
        Size of the bins defined by `t`.
    nb : :class:`~numpy.ndarray`
        Number of points falling on each bin defined by `t` and `dt`.
    """

    if tmax is None:
        tmax = +(np.max([t1.max(), t2.max()]) - np.min([t1.min(), t2.min()]))
    if tmin is None:
        tmin = -tmax

    t = np.linspace(tmin, tmax, n)
    if dtm is None:
        dtm = (tmax - tmin) / n
    dt = np.full(t.shape, dtm)
    nb = np.empty(t.shape)
    t1m, t2m = np.meshgrid(t1, t2)

    # Count the pairs falling on each bin, then drop under-populated bins.
    for im, tm in enumerate(t):
        nb[im] = np.sum(((tm - dtm / 2) < (t2m - t1m)) & ((t2m - t1m) < (tm + dtm / 2)))
    idx = nb < nbinsmin
    t = np.delete(t, idx)
    dt = np.delete(dt, idx)
    nb = np.delete(nb, idx)

    return t, dt, nb

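The uniform generator is used the same way (toy signals as in the previous sketch; discarding under-populated bins simply shortens the returned arrays):

    >>> from mutis.lib.correlation import gen_times_uniform, welsh
    >>> t, dt, nb = gen_times_uniform(t1, t2, n=100, nbinsmin=11)
    >>> res = welsh(t1, d1, t2, d2, t, dt)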
def gen_times_canopy(t1, t2, dtmin=0.01, dtmax=0.5, nbinsmin=500, nf=0.5):
    """Returns a non-uniform t, dt time binning for use with adaptive binning methods.

    This somewhat cumbersome algorithm does more or less the following:
    1) Divides the time interval on which the correlation is defined into
       the maximum number of bins (minimum bin size defined by `dtmin`).
    2) Checks the number of points falling on each bin.
    3) If there are several consecutive intervals with a number of points
       below `nbinsmin`, it merges them (reducing the number of bins
       exponentially, as defined by `nf`, if the number of intervals in
       the group is high, or one by one if it is low).
    4) Repeats until the intervals have approximately reached size `dtmax`.

    The exact implementation is convoluted, but the resulting binnings
    are reasonably good.

    Parameters
    ----------
    t1 : :class:`~numpy.ndarray`
        Times of the first signal.
    t2 : :class:`~numpy.ndarray`
        Times of the second signal.
    dtmin : :class:`~float`
        Minimum (starting) bin size.
    dtmax : :class:`~float`
        Approximate maximum bin size, at which the merging stops.
    nbinsmin : :class:`~int`
        Minimum number of points falling on each bin.
    nf : :class:`~float`
        How fast the intervals are merged.

    Returns
    -------
    t : :class:`~numpy.ndarray`
        Time binning on which to compute the correlation.
    dt : :class:`~numpy.ndarray`
        Size of the bins defined by `t`.
    nb : :class:`~numpy.ndarray`
        Number of points falling on each bin defined by `t` and `dt`.
    """

    t1m, t2m = np.meshgrid(t1, t2)

    def _comp_nb(t, dt):
        # Number of pairs whose time difference falls on each bin.
        nb = np.empty(len(t))
        for i in range(len(t)):
            nb[i] = np.sum(
                ((t[i] - dt[i] / 2) < (t2m - t1m)) & ((t2m - t1m) < (t[i] + dt[i] / 2))
            )
        return nb

    tmax = +(np.max([t1.max(), t2.max()]) - np.min([t1.min(), t2.min()]))
    tmin = -tmax

    t = np.linspace(tmin, tmax, int((tmax - tmin) / dtmin))
    dt = np.full(t.size, np.ptp(t) / t.size)
    nb = _comp_nb(t, dt)

    k = 0
    while k < int(np.log(dtmax / dtmin) / np.log(1 / nf)):
        k = k + 1

        # Bins still under-populated in this pass.
        idx = nb < nbinsmin

        ts, dts = t, dt

        t, dt = np.copy(ts), np.copy(dts)

        n_grp = 0
        # Runs of consecutive under-populated bins; each row of grps holds
        # the first index of a run and one past its last index.
        grps = (
            np.where(np.diff(np.concatenate(([False], idx, [False]), dtype=int)) != 0)[0]
        ).reshape(-1, 2)
        for grp in grps:
            # a and b bracket the group in time, taken from the
            # neighbouring bins when they exist.
            if grp[0] > 0:
                ar = grp[0]
                a = t[grp[0] - 1]
            else:
                ar = grp[0]
                a = t[grp[0]]

            if grp[1] < t.size - 1:
                br = grp[1] - 1
                b = t[grp[1]]
            else:
                br = grp[1] - 1
                b = t[grp[1] - 1]

            if (br - ar) < 8:
                # Small groups: merge the bins one by one.
                if br - ar >= 1:
                    n = br - ar + 1
                else:
                    n = br - ar + 2

                tins = np.linspace(a, b, n, endpoint=False)[1:]

                ts = np.delete(ts, np.arange(ar, br + 1) - n_grp)
                dts = np.delete(dts, np.arange(ar, br + 1) - n_grp)

                ts = np.insert(ts, grp[0] - n_grp, tins)
                dts = np.insert(dts, grp[0] - n_grp, np.full(n - 1, (b - a) / (n - 1)))

                if br - ar >= 1:
                    n_grp = n_grp + 1
            else:
                # Large groups: cut the number of bins down by the factor nf.
                n = int(nf * (br - ar + 1))

                tins = np.linspace(a, b, n, endpoint=False)[1:]

                ts = np.delete(ts, np.arange(ar, br + 1) - n_grp)
                dts = np.delete(dts, np.arange(ar, br + 1) - n_grp)

                ts = np.insert(ts, grp[0] - n_grp, tins)
                dts = np.insert(dts, grp[0] - n_grp, np.full(n - 1, (b - a) / (n - 1)))

                if br - ar >= 1:
                    n_grp = n_grp + (grp[1] - grp[0] - n) + 1

        t = ts
        dt = dts
        nb = _comp_nb(t, dt)

    # Drop any bins that are still under-populated.
    idx = nb < nbinsmin

    t = np.delete(t, idx)
    dt = np.delete(dt, idx)
    nb = np.delete(nb, idx)

    return t, dt, nb
548 |