# -*- coding: utf-8 -*-

"""
This file contains the Qudi logic for the extraction of laser pulses.

Qudi is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.

Qudi is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with Qudi. If not, see <http://www.gnu.org/licenses/>.

Copyright (c) the Qudi Developers. See the COPYRIGHT.txt file at the
top-level directory of this distribution and at <https://github.com/Ulm-IQO/qudi/>
"""

import numpy as np
from scipy import ndimage

# netobtain is used in get_data_laserpulses to receive numpy arrays from a
# (possibly remote) fast counter module over the network
from core.util.network import netobtain
from logic.generic_logic import GenericLogic


class PulseExtractionLogic(GenericLogic):
    """unstable: Nikolas Tomek """

    _modclass = 'PulseExtractionLogic'
    _modtype = 'logic'

    # declare connectors
    _out = {'pulseextractionlogic': 'PulseExtractionLogic'}

    def __init__(self, config, **kwargs):
        super().__init__(config=config, **kwargs)

        self.log.info('The following configuration was found.')
        # checking for the right configuration
        for key in config.keys():
            self.log.info('{0}: {1}'.format(key, config[key]))

    def on_activate(self, e):
        """ Initialisation performed during activation of the module.

        @param object e: Event class object from Fysom.
                         An object created by the state machine module Fysom,
                         which is connected to a specific event (have a look in
                         the Base Class). This object contains the passed event,
                         the state before the event happened and the destination
                         state which should be reached after the event had
                         happened.
        """
        self.extraction_method = None  # will later on be used to switch between different methods
        # raw data of the previous readout; get_data_laserpulses accumulates
        # newly polled data onto this array if it is not None
        self.old_raw_data = None
        # FIXME: the fast counter hardware connector (self._fast_counter_device)
        #        still has to be obtained here and _check_if_counter_gated()
        #        called before get_data_laserpulses() can be used.
        return

    def on_deactivate(self, e):
        """ Deinitialisation performed during deactivation of the module.

        @param object e: Event class object from Fysom. A more detailed
                         explanation can be found in the on_activate method.
        """
        pass

    def gated_extraction(self, count_data, conv_std_dev):
        """ Detects the rising flank in the gated timetrace data and extracts
        just the laser pulses.

        @param numpy.ndarray count_data: 2D array, the raw timetrace data from a
                                         gated fast counter, dimensions:
                                         0: gate number,
                                         1: time bin
        @param float conv_std_dev: standard deviation of the gaussian filter to be
                                   applied for smoothing

        @return numpy.ndarray: The extracted laser pulses of the timetrace,
                               dimensions:
                               0: laser number,
                               1: time bin
        """
        # sum up all gated timetraces to ease flank detection
        timetrace_sum = np.sum(count_data, 0)

        # apply gaussian filter to remove noise and compute the gradient of the
        # timetrace sum
        # FIXME: That option should be stated in the config, or should be
        #        choosable in the GUI, since it is not always desired.
        #        It should also be possible to display the bare laser pulse,
        #        without cutting anything away.

        conv_deriv = self._convolve_derive(timetrace_sum.astype(float), conv_std_dev)

        # get indices of rising and falling flank
        rising_ind = conv_deriv.argmax()
        falling_ind = conv_deriv.argmin()

        # slice the data array to cut off anything but laser pulses
        laser_arr = count_data[:, rising_ind:falling_ind]
        return laser_arr.astype(int)
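
    # Note: for a gated counter the same rising/falling indices (taken from the
    # summed trace) are applied to every gate row, so all extracted laser
    # pulses share identical start and stop bins.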

    def ungated_extraction(self, count_data, conv_std_dev, num_of_lasers):
        """ Detects the laser pulses in the ungated timetrace data and extracts
        them.

        @param numpy.ndarray count_data: 1D array, the raw timetrace data from an
                                         ungated fast counter
        @param float conv_std_dev: standard deviation of the gaussian filter to be
                                   applied for smoothing
        @param int num_of_lasers: The total number of laser pulses inside the
                                  pulse sequence

        @return numpy.ndarray: 2D array, the extracted laser pulses of the
                               timetrace, dimensions:
                               0: laser number,
                               1: time bin

        Procedure:
            Edge Detection:
            ---------------

            The count_data array with the laser pulses is smoothed with a
            gaussian filter (convolution) of the given standard deviation
            conv_std_dev. Then the derivative of the convolved time trace is
            taken to obtain the maxima and minima, which correspond to the
            rising and falling edges of the pulses.

            The convolution with a gaussian removes nasty peaks due to count
            fluctuations within a laser pulse and at the same time ensures a
            clear distinction of the maxima and minima in the derived convolved
            trace.

            The maxima and minima are not found sequentially, pulse by pulse,
            but are rather obtained globally. I.e. the convolved and derived
            array is searched iteratively for a maximum and a minimum, and
            after finding those, the array entries within 4 times conv_std_dev
            (2*conv_std_dev to the left and 2*conv_std_dev to the right) are
            set to zero.

            The crucial part is the knowledge of the number of laser pulses and
            the choice of an appropriate std_dev for the gauss filter.

            To ensure a good performance of the edge detection, you have to
            ensure a steep rising and falling edge of the laser pulse! Also be
            careful not to choose a large conv_std_dev value for a short laser
            pulse (rule of thumb: conv_std_dev < laser_length/10).
        """

        # apply gaussian filter to remove noise and compute the gradient of the
        # timetrace

        conv_deriv = self._convolve_derive(count_data.astype(float), conv_std_dev)

        # use a reference array, because the exact positions of the peaks or
        # dips (i.e. maxima or minima, which are the inflection points in the
        # pulse) are distorted by a large conv_std_dev value.
        conv_deriv_ref = self._convolve_derive(count_data.astype(float), 10)

        # initialize arrays to contain the indices of all rising and falling
        # flanks, respectively
        rising_ind = np.empty(num_of_lasers, dtype=int)
        falling_ind = np.empty(num_of_lasers, dtype=int)

        # Find as many rising and falling flanks as there are laser pulses in
        # the trace:
        for i in range(num_of_lasers):

            # save the index of the absolute maximum of the derived time trace
            # as rising edge position
            rising_ind[i] = np.argmax(conv_deriv)

            # refine the rising edge detection by using a small and fixed
            # conv_std_dev parameter to find the inflection point more precisely
            start_ind = int(rising_ind[i] - conv_std_dev)
            if start_ind < 0:
                start_ind = 0

            stop_ind = int(rising_ind[i] + conv_std_dev)
            if stop_ind > len(conv_deriv):
                stop_ind = len(conv_deriv)

            if start_ind == stop_ind:
                stop_ind = start_ind + 1

            rising_ind[i] = start_ind + np.argmax(conv_deriv_ref[start_ind:stop_ind])

            # set this position and the surrounding of the saved edge to 0 to
            # avoid a second detection
            if rising_ind[i] < 2 * conv_std_dev:
                del_ind_start = 0
            else:
                del_ind_start = int(rising_ind[i] - 2 * conv_std_dev)
            if (conv_deriv.size - rising_ind[i]) < 2 * conv_std_dev:
                del_ind_stop = conv_deriv.size - 1
            else:
                del_ind_stop = int(rising_ind[i] + 2 * conv_std_dev)
            conv_deriv[del_ind_start:del_ind_stop] = 0

            # save the index of the absolute minimum of the derived time trace
            # as falling edge position
            falling_ind[i] = np.argmin(conv_deriv)

            # refine the falling edge detection by using a small and fixed
            # conv_std_dev parameter to find the inflection point more precisely
            start_ind = int(falling_ind[i] - conv_std_dev)
            if start_ind < 0:
                start_ind = 0

            stop_ind = int(falling_ind[i] + conv_std_dev)
            if stop_ind > len(conv_deriv):
                stop_ind = len(conv_deriv)

            if start_ind == stop_ind:
                stop_ind = start_ind + 1

            falling_ind[i] = start_ind + np.argmin(conv_deriv_ref[start_ind:stop_ind])

            # set this position and the surrounding of the saved flank to 0 to
            # avoid a second detection
            if falling_ind[i] < 2 * conv_std_dev:
                del_ind_start = 0
            else:
                del_ind_start = int(falling_ind[i] - 2 * conv_std_dev)
            if (conv_deriv.size - falling_ind[i]) < 2 * conv_std_dev:
                del_ind_stop = conv_deriv.size - 1
            else:
                del_ind_stop = int(falling_ind[i] + 2 * conv_std_dev)
            conv_deriv[del_ind_start:del_ind_stop] = 0

        # sort all indices of rising and falling flanks
        rising_ind.sort()
        falling_ind.sort()

        # find the maximum laser length to use as size for the laser array
        laser_length = np.max(falling_ind - rising_ind)

        # Todo: Find a better method; the idea here is to take a histogram to
        #       find the length of the pulses
        # diff = (falling_ind-rising_ind)[np.where(falling_ind-rising_ind > 0)]
        # self.histo = np.histogram(diff)
        # laser_length = int(self.histo[1][self.histo[0].argmax()])

        # initialize the empty output array
        laser_arr = np.zeros((num_of_lasers, laser_length), dtype=int)
        # slice the detected laser pulses out of the timetrace and save them in
        # the output array according to the found rising edges
        for i in range(num_of_lasers):
            if rising_ind[i] + laser_length > count_data.size:
                lenarr = count_data[rising_ind[i]:].size
                laser_arr[i, 0:lenarr] = count_data[rising_ind[i]:]
            else:
                laser_arr[i] = count_data[rising_ind[i]:rising_ind[i] + laser_length]
        return laser_arr.astype(int)

    def _convolve_derive(self, data, std_dev):
        """ Smooth the input data by applying a gaussian filter.

        @param numpy.ndarray data: 1D array, the raw data to be smoothed and
                                   derived
        @param float std_dev: standard deviation of the gaussian filter to be
                              applied for smoothing

        @return numpy.ndarray: 1D array, the smoothed and derived data

        The convolution is applied with the specified standard deviation. The
        derivative of the smoothed data is computed afterwards and returned. If
        the input data is some kind of rectangular signal containing high
        frequency noise, the output data will show sharp peaks corresponding to
        the rising and falling flanks of the input signal.
        """
        conv = ndimage.gaussian_filter1d(data, std_dev)
        conv_deriv = np.gradient(conv)
        return conv_deriv

    def get_data_laserpulses(self, num_of_lasers, conv_std_dev):
        """ Capture the fast counter data and extract the laser pulses.

        @param int num_of_lasers: The total number of laser pulses inside the
                                  pulse sequence
        @param int conv_std_dev: Standard deviation of the gaussian convolution

        @return tuple (numpy.ndarray, numpy.ndarray):
            Explanation of the return value:

            numpy.ndarray: 2D array, the extracted laser pulses of the
                           timetrace, with the dimensions:
                           0: laser number
                           1: time bin
            numpy.ndarray: 1D or 2D array, the raw timetrace from the fast
                           counter
        """
        # poll data from the fast counting device; netobtain is needed for
        # getting a numpy array over the network
        raw_data = netobtain(self._fast_counter_device.get_data_trace())
        if self.old_raw_data is not None:
            # if raw_data.shape == self.old_raw_data.shape:
            raw_data = np.add(raw_data, self.old_raw_data)

        # Saving data for testing

        # name = str(self._iter) + '.dat'
        # self._iter = self._iter + 1
        # np.savetxt(name, raw_data.transpose())

        # call the appropriate laser extraction method depending on whether the
        # fast counter is gated or not.
        if self.is_counter_gated:
            laser_data = self.gated_extraction(raw_data, conv_std_dev)
        else:
            laser_data = self.ungated_extraction(raw_data, conv_std_dev, num_of_lasers)
        return laser_data.astype(dtype=int), raw_data.astype(dtype=int)
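
    # Illustrative call (parameter values are examples, not from the original
    # code):
    #     laser_data, raw = pulseextractionlogic.get_data_laserpulses(
    #         num_of_lasers=50, conv_std_dev=20)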

    def _check_if_counter_gated(self):
        """ Check whether the fast counter is gated or not.
        """
        self.is_counter_gated = self._fast_counter_device.is_gated()
        return
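

# ---------------------------------------------------------------------------
# Illustrative, self-contained sketch (not part of the Qudi module API): it
# mimics the convolve-and-derive flank detection described in
# ungated_extraction()/_convolve_derive() on a synthetic timetrace. All numbers
# (pulse length, count rates, conv_std_dev) are arbitrary example values, and
# running this block requires the module-level imports to resolve.
# ---------------------------------------------------------------------------
if __name__ == '__main__':
    # synthetic ungated timetrace: 5 rectangular laser pulses of 300 bins each,
    # separated by 700 bins of background, with Poisson (shot) noise on top
    num_pulses = 5
    pulse_len = 300
    gap_len = 700
    ideal = np.zeros(num_pulses * (pulse_len + gap_len))
    for k in range(num_pulses):
        start = k * (pulse_len + gap_len) + gap_len // 2
        ideal[start:start + pulse_len] = 100.0
    trace = np.random.poisson(ideal + 5.0).astype(float)

    # smooth and differentiate; the pulse flanks appear as pronounced maxima
    # (rising) and minima (falling) of the derivative
    conv_std_dev = 20  # rule of thumb: < pulse_len / 10
    deriv = np.gradient(ndimage.gaussian_filter1d(trace, conv_std_dev))

    rising, falling = [], []
    for _ in range(num_pulses):
        # strongest remaining rising flank, then blank its neighbourhood so it
        # is not found again (same idea as in ungated_extraction)
        r = int(np.argmax(deriv))
        rising.append(r)
        deriv[max(r - 2 * conv_std_dev, 0):r + 2 * conv_std_dev] = 0
        # strongest remaining falling flank
        f = int(np.argmin(deriv))
        falling.append(f)
        deriv[max(f - 2 * conv_std_dev, 0):f + 2 * conv_std_dev] = 0

    print('rising flank indices: ', sorted(rising))
    print('falling flank indices:', sorted(falling))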