"""Tools to perform particle tracking with the TrackMate ImageJ plugin.

This module includes functions for prepping images and performing tracking
using the TrackMate ImageJ plugin [1]_.

References
----------
.. [1] Tinevez, J.-Y.; Perry, N. & Schindelin, J. et al. (2016), "TrackMate: an
   open and extensible platform for single-particle tracking.", Methods 115:
   80-90, PMID 27713081.
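
Examples
--------
A hypothetical workflow sketch: split a large video into tiles, then track
each tile headlessly. The file names below are placeholders.

>>> names = partition_im('full_video.tif', irows=4, icols=4)
>>> for name in names:  # output csv paths are placeholders
...     track(name, name.split('.tif')[0] + '.csv')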
"""

import os
import sys
import subprocess
import tempfile
import random
from itertools import compress

import os.path as op
import numpy as np
import skimage.io as sio

import diff_classifier as dc
import diff_classifier.aws as aws

from sklearn import linear_model
from sklearn import svm


def _get_fiji():
    """Checks if Fiji is downloaded.

    Checks if an existing version of Fiji is downloaded and whether it is
    identified by the environment variable "FIJI_BIN". Only compatible with
    Linux and Mac systems; downloads Fiji if no installation is found.

    Returns
    -------
    fiji_bin : str
        Full path to the ImageJ/Fiji executable.
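
    Examples
    --------
    A minimal sketch; the returned path depends on the local installation
    (or a fresh download if none is found):

    >>> fiji_bin = _get_fiji()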
    """
    home = os.path.expanduser("~")
    paths = [
        os.path.join(home, 'Applications/Fiji.app/Contents/MacOS/ImageJ-macosx'),
        os.path.join(home, 'Fiji.app/Contents/MacOS/ImageJ-macosx'),
        os.path.join(home, 'Fiji.app/ImageJ-linux64')
    ]
    exists = [os.path.exists(p) for p in paths]
    # paths = list(compress(paths, exists))

    # Currently only works on Mac and Linux
    if sys.platform != 'darwin' and not sys.platform.startswith('linux'):
        raise ValueError('System is not Linux or Mac')

    # Has the user specified Fiji for us already as an environment variable?
    # Want to use it if it is already installed
    elif "FIJI_BIN" in os.environ:
        print("FIJI_BIN defined.")
        return os.environ["FIJI_BIN"]

    # Is Fiji installed somewhere, but not defined in FIJI_BIN?
    # Checks the locations above, doesn't search the entire system
    elif any(exists):
        print('Exists elsewhere')
        # Return the first existing installation
        for path, exist in zip(paths, exists):
            if exist:
                return path

    else:
        # Download Fiji if no installation was found
        if sys.platform == 'darwin':
            subprocess.run(['wget', 'https://downloads.imagej.net/fiji/latest/fiji-macosx.zip'], cwd=home)
            subprocess.run(['unzip', 'fiji-macosx.zip'], cwd=home)
            os.environ['FIJI_BIN'] = os.path.join(home, 'Fiji.app/Contents/MacOS/ImageJ-macosx')

        elif sys.platform.startswith('linux'):
            subprocess.run(['wget', 'https://downloads.imagej.net/fiji/latest/fiji-linux64.zip'], cwd=home)
            subprocess.run(['unzip', 'fiji-linux64.zip'], cwd=home)
            os.environ['FIJI_BIN'] = os.path.join(home, 'Fiji.app/ImageJ-linux64')

        print("Downloaded Fiji")
        return _get_fiji()


def partition_im(tiffname, irows=4, icols=4, ores=(2048, 2048),
                 ires=(512, 512)):
    """Partitions an image into smaller images.

    Partitions a large image into irows x icols images of size ires and writes
    them to disk. Default input image sizes are from a Nikon/Hamamatsu camera
    setup (2048 x 2044 pixels).

    Parameters
    ----------
    tiffname : string
        Location of input image to be partitioned.
    irows : int
        Number of rows of size ires pixels to be partitioned from the source
        image.
    icols : int
        Number of columns of size ires pixels to be partitioned from the
        source image.
    ores : tuple of int
        Input images are zero-padded to size ores pixels prior to splitting.
    ires : tuple of int
        Output images are of size ires pixels.

    Returns
    -------
    names : list of str
        List of output image filenames.

    Examples
    --------
    >>> partition_im('your/sample/image.tif', irows=8, icols=8, ires=(256, 256))

    """
    test = sio.imread(tiffname)
    oshape = test.shape
    # Zero-pad the input video to ores in the spatial dimensions
    test2 = np.zeros((oshape[0], ores[0], ores[1]), dtype=test.dtype)
    test2[:, 0:oshape[1], 0:oshape[2]] = test

    names = []

    for row in range(irows):
        for col in range(icols):
            new_image = test2[:, row*ires[0]:(row+1)*ires[0],
                              col*ires[1]:(col+1)*ires[1]]
            current = tiffname.split('.tif')[0] + '_%s_%s.tif' % (row, col)
            sio.imsave(current, new_image)
            names.append(current)

    return names


def mean_intensity(local_im, frame=0):
    """Calculates the mean intensity of a single frame of the input image.

    Parameters
    ----------
    local_im : string
        Location of input image.
    frame : int
        Frame over which to calculate the mean.

    Returns
    -------
    test_intensity : float
        Mean intensity of the selected frame of the input image.

    Examples
    --------
    >>> mean_intensity('your/sample/image.tif')

    """
    test_image = sio.imread(local_im)
    test_intensity = np.mean(test_image[frame, :, :])

    return test_intensity


def track(target, out_file, template=None, fiji_bin=None,
          tparams=None):
    """Performs particle tracking on an input video.

    Particle tracking is performed with the TrackMate ImageJ plugin. Outputs
    a csv file containing analysis settings and particle trajectories.

    Parameters
    ----------
    target : str
        Full path to a tif file to do tracking on. Can also be a URL
        (e.g., 'http://fiji.sc/samples/FakeTracks.tif').
    out_file : str
        Full path to a csv file to store the results.
    template : str, optional
        Full path of a template script for tracking. Defaults to the
        `data/trackmate_template3.py` file stored in the diff_classifier
        source code.
    fiji_bin : str, optional
        Full path to the ImageJ/Fiji executable. If None, default search
        locations for Mac and Linux systems are checked.
    tparams : dict, optional
        Tracking parameters inserted into the template. Missing keys are
        filled in from the defaults defined below. Supported keys:

        frames : int
            Number of frames in the input video.
        radius : float
            Estimated radius of particles in the image.
        threshold : float
            Threshold value for the particle detection step.
        do_median_filtering : bool
            If True, performs a median filter on the video prior to tracking.
        quality : float
            Lower quality cutoff value for particle filtering.
        xdims : tuple of int
            Lower and upper x limits for particle filtering.
        ydims : tuple of int
            Lower and upper y limits for particle filtering.
        median_intensity : float
            Lower median intensity cutoff value for particle filtering.
        snr : float
            Lower signal-to-noise ratio cutoff value for particle filtering.
        linking_max_distance : float
            Maximum allowable distance in pixels between two frames to join
            particles in a track.
        gap_closing_max_distance : float
            Maximum allowable distance in pixels across more than two frames
            to join particles in a track.
        max_frame_gap : int
            Maximum number of frames a particle may leave the video and still
            be counted as the same trajectory.
        track_duration : float
            Lower duration cutoff in frames for trajectory filtering.
        detector : str
            Name of the TrackMate spot detector to use (e.g. 'Dog').
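
    Examples
    --------
    A hypothetical call using the sample video URL mentioned above; the
    output path and parameter overrides below are placeholders:

    >>> track('http://fiji.sc/samples/FakeTracks.tif', 'FakeTracks.csv',
    ...       tparams={'frames': 50, 'quality': 10.0})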
    """
    tdefault = {'frames': 651, 'radius': 3.0, 'threshold': 0.0,
                'do_median_filtering': False, 'quality': 15.0,
                'xdims': (0, 511), 'ydims': (1, 511),
                'median_intensity': 300.0,
                'snr': 0.0, 'linking_max_distance': 6.0,
                'gap_closing_max_distance': 10.0,
                'max_frame_gap': 3, 'track_duration': 20.0,
                'detector': 'Dog'}

    if tparams is None:
        tparams = {}

    # Fill in any missing tracking parameters with the defaults
    for key, value in tdefault.items():
        if key not in tparams:
            tparams[key] = value

    if template is None:
        template = op.join(op.split(dc.__file__)[0],
                           'data',
                           'trackmate_template3.py')

    if fiji_bin is None:
        fiji_bin = _get_fiji()

    # Fill the template script with the tracking parameters and write it to a
    # temporary file that Fiji can run headlessly
    script = ''.join(open(template).readlines())
    tpfile = tempfile.NamedTemporaryFile(suffix=".py")
    fid = open(tpfile.name, 'w')
    fid.write(script.format(target_file=target, frames=str(tparams['frames']),
                            radius=str(tparams['radius']),
                            threshold=str(tparams['threshold']),
                            do_median_filtering=str(tparams['do_median_filtering']),
                            quality=str(tparams['quality']),
                            xd=str(tparams['xdims'][1]),
                            yd=str(tparams['ydims'][1]),
                            ylo=str(tparams['ydims'][0]),
                            median_intensity=str(tparams['median_intensity']),
                            snr=str(tparams['snr']),
                            linking_max_distance=str(tparams['linking_max_distance']),
                            gap_closing_max_distance=str(tparams['gap_closing_max_distance']),
                            max_frame_gap=str(tparams['max_frame_gap']),
                            track_duration=str(tparams['track_duration']),
                            set_detector=str(tparams['detector'])))
    fid.close()

    # Run Fiji headlessly and capture the csv output printed to stdout
    cmd = "%s --ij2 --headless --run %s" % (fiji_bin, tpfile.name)
    print(cmd)
    subp = subprocess.run(cmd, stdout=subprocess.PIPE, shell=True)
    fid = open(out_file, 'w')
    fid.write(subp.stdout.decode())
    fid.close()


def regress_sys(folder, all_videos, yfit, training_size, randselect=True,
                trainingdata=[], frame=0, have_output=True, download=True,
                bucket_name='ccurtis.data'):
    """Uses regression on image intensities to select tracking parameters.

    This function uses regression methods from scikit-learn to predict the
    lower quality cutoff values for particle filtering in TrackMate based on
    the intensity distributions of input images. It currently uses only a
    single frame of each video for analysis and is limited to predicting
    quality values.

    In practice, users run regress_sys twice in different modes to build a
    regression system. First, set have_output to False: the function returns
    a list of randomly selected videos to include in the training dataset.
    The user should then manually track particles in these videos using the
    TrackMate GUI and supply the resulting quality values as the yfit input
    during the second run.

    Parameters
    ----------
    folder : str
        S3 directory containing the video files specified in all_videos.
    all_videos : list of str
        Prefixes of the video filenames of the entire video set to be
        tracked. The training dataset will be some subset of these videos.
    yfit : numpy.ndarray
        Manually acquired quality levels (from TrackMate) for the files in
        the training dataset.
    training_size : int
        Number of files in the training dataset.
    randselect : bool
        If True, training videos are randomly selected from all_videos.
        If False, trainingdata is used as the training dataset.
    trainingdata : list of str
        Manually selected prefixes of video filenames to be used as the
        training dataset when randselect is False.
    frame : int
        Frame of each video from which intensity descriptors are calculated.
    have_output : bool
        If you have already acquired the quality values (yfit) for the
        training dataset, set to True. If False, the function returns the
        files the user needs to acquire quality values for.
    download : bool
        If True, training videos are downloaded from S3 prior to analysis.
    bucket_name : str
        S3 bucket containing the videos to be downloaded for the regression
        calculations.

    Returns
    -------
    regress_object : list of fitted scikit-learn regression estimators
        Regression objects assembled from the training dataset. Uses the
        mean, standard deviation, 10th percentile, and 90th percentile
        intensities to predict the quality parameter in TrackMate. Returned
        when have_output is True.
    tprefix : list of str
        Videos randomly selected from all_videos to be included in the
        training dataset. Returned when have_output is False.
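
    Examples
    --------
    A hypothetical two-pass sketch; the S3 folder, prefixes, and quality
    values below are placeholders:

    >>> videos = ['vid_{}'.format(i) for i in range(10)]
    >>> to_label = regress_sys('folder', videos, None, 3, have_output=False)
    >>> yfit = np.array([12.0, 15.0, 9.0])  # qualities from the TrackMate GUI
    >>> models = regress_sys('folder', videos, yfit, 3, randselect=False,
    ...                      trainingdata=to_label)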
    """
    if randselect:
        tprefix = []
        for i in range(0, training_size):
            random.seed(i+1)
            tprefix.append(all_videos[random.randint(0, len(all_videos)-1)])
            if have_output is False:
                print("Get parameters for: {}".format(tprefix[i]))
    else:
        tprefix = trainingdata

    if have_output is True:
        # Define intensity descriptors for each training video
        descriptors = np.zeros((training_size, 4))
        counter = 0
        for name in tprefix:
            local_im = name + '.tif'
            remote_im = "{}/{}".format(folder, local_im)
            if download:
                aws.download_s3(remote_im, local_im, bucket_name=bucket_name)
            test_image = sio.imread(local_im)
            descriptors[counter, 0] = np.mean(test_image[frame, :, :])
            descriptors[counter, 1] = np.std(test_image[frame, :, :])
            descriptors[counter, 2] = np.percentile(test_image[frame, :, :], 10)
            descriptors[counter, 3] = np.percentile(test_image[frame, :, :], 90)
            counter = counter + 1

        # Define regression techniques
        xfit = descriptors
        classifiers = [
            svm.SVR(),
            linear_model.SGDRegressor(),
            linear_model.BayesianRidge(),
            linear_model.LassoLars(),
            linear_model.ARDRegression(),
            linear_model.PassiveAggressiveRegressor(),
            linear_model.TheilSenRegressor(),
            linear_model.LinearRegression()]

        regress_object = []
        for clf in classifiers:
            regress_object.append(clf.fit(xfit, yfit))

        return regress_object

    else:
        return tprefix


def regress_tracking_params(regress_object, to_track,
                            regmethod='LinearRegression', frame=0):
    """Predicts quality values to be used in particle tracking.

    Uses the regression objects from regress_sys to predict the quality
    tracking parameter for TrackMate analysis.

    Parameters
    ----------
    regress_object : list of fitted scikit-learn regression estimators
        Obtained from regress_sys.
    to_track : string
        Prefix of the video file to be tracked.
    regmethod : {'LinearRegression', 'SVR', 'SGDRegressor', 'BayesianRidge',
                 'LassoLars', 'ARDRegression', 'PassiveAggressiveRegressor',
                 'TheilSenRegressor'}
        Desired regression method.
    frame : int
        Frame of the video from which intensity descriptors are calculated.

    Returns
    -------
    fqual : float
        Predicted quality cutoff to be used in TrackMate analysis.
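
    Examples
    --------
    A hypothetical call; `models` is a list of regression objects returned
    by regress_sys, and the video prefix is a placeholder:

    >>> fqual = regress_tracking_params(models, 'your/sample/video',
    ...                                 regmethod='BayesianRidge')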
    """
    local_im = to_track + '.tif'
    pX = np.zeros((1, 4))
    test_image = sio.imread(local_im)
    # Same intensity descriptors used to train the regression objects
    pX[0, 0] = np.mean(test_image[frame, :, :])
    pX[0, 1] = np.std(test_image[frame, :, :])
    pX[0, 2] = np.percentile(test_image[frame, :, :], 10)
    pX[0, 3] = np.percentile(test_image[frame, :, :], 90)

    quality = []
    for item in regress_object:
        quality.append(item.predict(pX)[0])

    if regmethod == 'SVR':
        fqual = quality[0]
    elif regmethod == 'SGDRegressor':
        fqual = quality[1]
    elif regmethod == 'BayesianRidge':
        fqual = quality[2]
    elif regmethod == 'LassoLars':
        fqual = quality[3]
    elif regmethod == 'ARDRegression':
        fqual = quality[4]
    elif regmethod == 'PassiveAggressiveRegressor':
        fqual = quality[5]
    elif regmethod == 'TheilSenRegressor':
        fqual = quality[6]
    elif regmethod == 'LinearRegression':
        fqual = quality[7]
    else:
        fqual = 3.0

    return fqual