# -*- coding: utf-8 -*-
#
# This file is part of SENAITE.CORE.
#
# SENAITE.CORE is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright 2018-2025 by its authors.
# Some rights reserved, see README and LICENSE.

import six
from bika.lims import api
from bika.lims import bikaMessageFactory as _
from bika.lims import logger
from bika.lims import safe_unicode
from bika.lims.interfaces import IReferenceAnalysis
from bika.lims.interfaces import IRoutineAnalysis
from plone.memoize.view import memoize_contextless
from senaite.core.api import dtime
from senaite.core.catalog import ANALYSIS_CATALOG
from senaite.core.catalog import SAMPLE_CATALOG
from senaite.core.catalog import SENAITE_CATALOG
from senaite.core.catalog import SETUP_CATALOG
from senaite.core.exportimport.instruments.logger import Logger
from senaite.core.i18n import translate as t
from senaite.core.registry import get_registry_record
from zope.cachedescriptors.property import Lazy as lazy_property
from zope.deprecation import deprecate

ALLOWED_SAMPLE_STATES = ["sample_received", "to_be_verified"]
ALLOWED_ANALYSIS_STATES = ["unassigned", "assigned", "to_be_verified"]
DEFAULT_RESULT_KEY = "DefaultResult"
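# sentinel to tell "no value provided" apart from legitimate falsy values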
EMPTY_MARKER = object()


class AnalysisResultsImporter(Logger):
    """Results importer
    """
    def __init__(self, parser, context,
                 override=None,
                 allowed_sample_states=None,
                 allowed_analysis_states=None,
                 instrument_uid=None):
        super(AnalysisResultsImporter, self).__init__()

        self.context = context

        # results override settings
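        # flags: [override non-empty results, allow writing empty results]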
        self.override = override
        if override is None:
            self.override = [False, False]

        # allowed sample states
        self.allowed_sample_states = allowed_sample_states
        if not allowed_sample_states:
            self.allowed_sample_states = ALLOWED_SAMPLE_STATES
        # translated states
        self.allowed_sample_states_msg = [
            t(_(s)) for s in self.allowed_sample_states]

        # allowed analyses states
        self.allowed_analysis_states = allowed_analysis_states
        if not allowed_analysis_states:
            self.allowed_analysis_states = ALLOWED_ANALYSIS_STATES
        self.allowed_analysis_states_msg = [
            t(_(s)) for s in self.allowed_analysis_states]

        # instrument UID
        self.instrument_uid = instrument_uid
        self.priorizedsearchcriteria = ""
        # Search Indexes for Sample IDs
        self.searchcriteria = ["getId", "getClientSampleID"]

        # BBB
        self._parser = parser
        self.allowed_ar_states = self.allowed_sample_states
        self._allowed_analysis_states = self.allowed_analysis_states
        self._override = self.override
        self._idsearch = ["getId", "getClientSampleID"]
        self._priorizedsearchcriteria = self.priorizedsearchcriteria

    @property
    @deprecate("Please use self.wf_tool instead")
    def wf(self):
        # BBB
        return self.wf_tool

    @property
    @deprecate("Please use self.sample_catalog instead")
    def ar_catalog(self):
        # BBB
        return self.sample_catalog

    @property
    @deprecate("Please use self.analysis_catalog instead")
    def bac(self):
        # BBB
        return self.analysis_catalog

    @property
    @deprecate("Please use self.senaite_catalog instead")
    def bc(self):
        # BBB
        return self.senaite_catalog

    @property
    @deprecate("Please use self.setup_catalog instead")
    def bsc(self):
        # BBB
        return self.setup_catalog

    @lazy_property
    def sample_catalog(self):
        return api.get_tool(SAMPLE_CATALOG)

    @lazy_property
    def analysis_catalog(self):
        return api.get_tool(ANALYSIS_CATALOG)

    @lazy_property
    def setup_catalog(self):
        return api.get_tool(SETUP_CATALOG)

    @lazy_property
    def senaite_catalog(self):
        return api.get_tool(SENAITE_CATALOG)

    @lazy_property
    def wf_tool(self):
        return api.get_tool("portal_workflow")

    @lazy_property
    def bika_setup(self):
        """Get the bika setup object
        """
        return api.get_bika_setup()

    @lazy_property
    def setup(self):
        """Get the Senaite setup object
        """
        return api.get_senaite_setup()

    @lazy_property
    def attachment_types(self):
        """Get the attachment types folder of the Senaite setup
        """
        return self.setup.attachmenttypes

    @lazy_property
    def instrument(self):
        if not self.instrument_uid:
            return None
        return api.get_object(self.instrument_uid, None)

    @lazy_property
    def services(self):
        """Return all analysis services
        """
        services = self.setup_catalog(portal_type="AnalysisService")
        return list(map(api.get_object, services))

    @property
    def parser(self):
        """Returns the parser that is used for the import
        """
        # Maybe we can use an adapter lookup here?
        return self._parser

    @parser.setter
    def parser(self, value):
        self._parser = value

    @deprecate("Please use self.parser instead")
    def getParser(self):
        return self.parser

    def get_automatic_importer(self, instrument, parser, **kw):
        """Return the automatic importer
        """
        raise NotImplementedError("Must be provided by Adapter Implementation")

    def get_automatic_parser(self, infile, **kw):
        """Return the automatic parser
        """
        raise NotImplementedError("Must be provided by Adapter Implementation")

    @deprecate("Please use self.allowed_sample_states instead")
    def getAllowedARStates(self):
        """BBB: Return allowed sample states

        The results import will only take into account analyses contained
        inside a Sample whose current state is one of these.
        """
        return self.allowed_sample_states

    @deprecate("Please use self.allowed_analysis_states instead")
    def getAllowedAnalysisStates(self):
        """BBB: Return allowed analysis states

        The results import will only take analyses into account if their
        current state is one of the allowed analysis states.
        """
        return self.allowed_analysis_states

    @deprecate("Please use self.override instead")
    def getOverride(self):
        """BBB: Return result override flags

        Flags if the importer can override previously entered results.

        [False, False]: Results are never overridden (default)
        [True, False]: Existing results are overridden, but not with empties
        [True, True]: Existing results are always overridden, also with empties
        """
        return self.override

    @property
    def override_non_empty(self):
        """Returns whether existing (non-empty) results may be overwritten
        """
        return self.override[0] is True

    @property
    def override_with_empty(self):
        """Returns whether results may be overwritten with empty values
        """
        return self.override[1] is True

    def can_override_analysis_result(self, analysis, result):
        """Checks if the result can be overwritten or not

        :returns: True if existing results can be overwritten
        """
        analysis_result = analysis.getResult()
        empty_result = False
        if not result:
            empty_result = len(str(result).strip()) == 0
        if analysis_result and not self.override_non_empty:
            return False
        elif empty_result and not self.override_with_empty:
            return False
        return True

    def convert_analysis_result(self, analysis, result):
        """Convert the analysis result

        :returns: Converted analysis result
        """

        if api.is_floatable(result) and not analysis.getStringResult():
            # ensure the floatable string result contains a decimal point
            result = str(result)
            if "." not in result:
                result = "{}.0".format(result)

        result_options = analysis.getResultOptions()
        result_type = analysis.getResultType()

        if result_options:
            # NOTE: Result options can be set as integer or float values!
            result_values = map(
                lambda r: r.get("ResultValue"), result_options)
            if result_type == "select" and api.is_floatable(result):
                # check if the integer result matches a result option
                selection = str(int(float(result)))
                if selection in result_values:
                    # XXX: Results like e.g. "1.1" or 1.2 match result options
                    # with the value set to "1" as well!
                    return selection

        return result

    def getKeywordsToBeExcluded(self):
        """Returns a list of analysis keywords to be excluded
        """
        return []

    def parse_results(self):
        """Parse the results file and return the raw results
        """
        parsed = self.parser.parse()

        if not parsed:
            return {}

        self.errors = self.parser.errors
        self.warns = self.parser.warns
        self.logs = self.parser.logs

        return self.parser.getRawResults()

    @lazy_property
    def keywords(self):
        """Return the parsed keywords
        """
        keywords = []
        for keyword in self.parser.getAnalysisKeywords():
            if not keyword:
                continue
            if keyword in self.getKeywordsToBeExcluded():
                continue
            # check if keyword is valid
            if not self.is_valid_keyword(keyword):
                self.warn(_("Service keyword {analysis_keyword} not found"
                            .format(analysis_keyword=keyword)))
                continue
            # remember the valid service keyword
            keywords.append(keyword)

        if len(keywords) == 0:
            self.warn(_("No services could be found for parsed keywords"))

        return keywords

    @memoize_contextless
    def is_valid_keyword(self, keyword):
        """Check if the keyword is valid
        """
        results = self.setup_catalog(getKeyword=keyword)
        if not results:
            return False
        return True

    def get_reference_sample_by_id(self, sid):
        """Get a reference sample by ID
        """
        query = {"portal_type": "ReferenceSample", "getId": sid}
        results = api.search(query, SENAITE_CATALOG)
        if len(results) == 0:
            return None
        return api.get_object(results[0])

    def get_attachment_type_by_title(self, title):
        """Get an attachment type by title

        :param title: Attachment type title
        :returns: AttachmentType object or None
        """
        query = {
            "portal_type": "AttachmentType",
            "title": title,
            "is_active": True,
        }
        results = self.setup_catalog(query)
        if not results:
            return None
        return api.get_object(results[0])

    def process(self):
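        """Parse the input file and import the results

        Resolves the target analyses per sample ID, writes results and
        interims, attaches the import file where configured and recalculates
        dependent analyses afterwards.
        """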
        parsed_results = self.parse_results()

        # no parsed results, return
        if not parsed_results:
            return False

        # Log allowed sample and analyses states
        self.log(_("Allowed sample states: {allowed_states}"
                   .format(allowed_states=", ".join(
                       self.allowed_sample_states_msg))))
        self.log(_("Allowed analysis states: {allowed_states}"
                   .format(allowed_states=", ".join(
                       self.allowed_analysis_states_msg))))
        if not any([self.override_non_empty, self.override_with_empty]):
            self.log(_("Don't override analysis results"))
        if self.override_non_empty:
            self.log(_("Override non-empty analysis results"))
        if self.override_with_empty:
            self.log(_("Override non-empty analysis results, also with empty"))

        # Attachments will be created in any worksheet that contains
        # analyses that are updated by this import
        attachments = {}
        infile = self.parser.getInputFile()

        analysis_attach_importfile = get_registry_record(
            "import_analysis_attach_importfile")

        ancount = 0
        updated_analyses = []
        importedinsts = {}
        importedars = {}

        for sid, results in parsed_results.items():
            refsample = None

            # fetch all analyses for the given sample ID
            analyses = self.get_analyses_for(sid)

            # No registered analyses found, but maybe we need to
            # create them first if we have an instrument
            if len(analyses) == 0 and not self.instrument:
                self.warn(_("Instrument not found"))
                self.warn(_("No Sample with '{allowed_ar_states}' states "
                            "found, and no QC analyses found for {sid}"
                            .format(allowed_ar_states=", ".join(
                                self.allowed_sample_states_msg),
                                sid=sid)))
                continue

            # we have an instrument
            elif len(analyses) == 0 and self.instrument:
                # Create a new ReferenceAnalysis and link it to the Instrument.
                refsample = self.get_reference_sample_by_id(sid)
                if not refsample:
                    self.warn(_("No Sample found for {sid}"
                                .format(sid=sid)))
                    continue

                # More than one result is allowed for the same sample and
                # analysis. Needed for calibration tests.
                service_uids = []
                for result in results:
                    # For each keyword, create a ReferenceAnalysis and attach
                    # it to the ReferenceSample
                    service_uids.extend([
                        api.get_uid(service) for service in self.services
                        if service.getKeyword() in result.keys()])

                analyses = self.instrument.addReferences(
                    refsample, list(set(service_uids)))

            # No analyses found
            elif len(analyses) == 0:
                self.warn(_("No analyses found for {sid} "
                            "in the states '{allowed_sample_states}'"
                            .format(allowed_sample_states=", ".join(
                                self.allowed_sample_states_msg),
                                sid=sid)))
                continue

            # import the results
            for result in results:

                for keyword, values in result.items():

                    # keyword might be excluded
                    if keyword not in self.keywords:
                        continue

                    ans = [a for a in analyses if a.getKeyword() == keyword
                           and api.get_workflow_status_of(a)
                           in self.allowed_analysis_states]

                    analysis = None

                    if len(ans) == 0:
                        # no analysis found for keyword
                        self.warn(_("No analyses found for {sid} "
                                    "and keyword '{keyword}'"
                                    .format(sid=sid, keyword=keyword)))
                        continue
                    elif len(ans) > 1:
                        # multiple analyses found for keyword
                        self.warn(_("More than one analysis found for "
                                    "{sid} and keyword '{keyword}'"
                                    .format(sid=sid, keyword=keyword)))
                        continue
                    else:
                        analysis = ans[0]

                    # Create an attachment in the worksheet linked to this
                    # analysis, but only if this import has not already
                    # created it and only if the filename is unique within
                    # the worksheet. Otherwise the existing attachment is
                    # reused.
                    ws = analysis.getWorksheet()
                    if ws:
                        wsid = ws.getId()
                        if wsid not in attachments:
                            fn = infile.filename
                            fn_attachments = self.get_attachment_filenames(ws)
                            if fn in fn_attachments.keys():
                                attachments[wsid] = fn_attachments[fn]
                            else:
                                attachments[wsid] = self.create_attachment(
                                    ws, infile)

                    # Process the analysis
                    processed = self.process_analysis(sid, analysis, values)

                    if processed:
                        updated_analyses.append(analysis)
                        ancount += 1

                    if refsample and self.instrument:
                        inst = self.instrument
                        # Calibration Test (import to Instrument)
                        importedinst = importedinsts.get(inst.title, [])
                        if keyword not in importedinst:
                            importedinst.append(keyword)
                            importedinsts[inst.title] = importedinst
                    else:
                        ar = analysis.aq_parent \
                            if analysis.portal_type == "Analysis" else None
                        if ar is not None:
                            importedar = importedars.get(ar.getId(), [])
                            if keyword not in importedar:
                                importedar.append(keyword)
                                importedars[ar.getId()] = importedar

                    if ws and analysis_attach_importfile:
                        # attach import file
                        self.attach_attachment(
                            analysis, attachments[ws.getId()])

        # recalculate analyses with calculations after all results are set
        for analysis in updated_analyses:
            # only routine analyses can be used in calculations
            if IRoutineAnalysis.providedBy(analysis):
                sample_id = analysis.getRequestID()
                self.calculateTotalResults(sample_id, analysis)

        # reindex samples to update progress (and other indexes/metadata)
        samples = set(map(api.get_parent, updated_analyses))
        for sample in samples:
            sample.reindexObject()

        for arid, acodes in six.iteritems(importedars):
            acodesmsg = "Analysis %s" % ', '.join(acodes)
            self.log(_("{request_id}: {keywords} imported successfully"
                       .format(request_id=arid, keywords=acodesmsg)))

        for instid, acodes in six.iteritems(importedinsts):
            acodesmsg = "Analysis %s" % ', '.join(acodes)
            msg = "%s: %s %s" % (instid, acodesmsg, "imported successfully")
            self.log(msg)

        if refsample and self.instrument:
            self.log(_("Import finished successfully: {updated_ars} Samples, "
                       "{updated_instruments} Instruments and "
                       "{updated_results} results updated"
                       .format(updated_ars=str(len(importedars)),
                               updated_instruments=str(len(importedinsts)),
                               updated_results=str(ancount))))
        else:
            self.log(_("Import finished successfully: {updated_ars} Samples "
                       "and {updated_results} results updated"
                       .format(updated_ars=str(len(importedars)),
                               updated_results=str(ancount))))

    @deprecate("Please use self.process_analysis instead")
    def _process_analysis(self, sid, analysis, values):
        return self.process_analysis(sid, analysis, values)

    def process_analysis(self, sid, analysis, values):
        """Process a single analysis result

        :param sid: Sample ID
        :param analysis: Analysis object
        :param values: Dictionary of values, including the result to set
        :returns: True if the analysis was updated
        """

        # set the analysis interim fields
        interims_updated = self.set_analysis_interims(sid, analysis, values)

        # set the analysis result
        result_updated = self.set_analysis_result(sid, analysis, values)

        # set additional field values
        fields_updated = self.set_analysis_fields(sid, analysis, values)

        # Nothing updated
        if not any([result_updated, interims_updated, fields_updated]):
            return False

        # submit the result
        self.save_submit_analysis(analysis)
        analysis.reindexObject()

        return True

    def set_analysis_interims(self, sid, analysis, values):
        """Set the analysis interim fields

        :param sid: Sample ID
        :param analysis: Analysis object
        :param values: Dictionary of values, including the result to set
        :returns: True if the interims were written
        """
        updated = False
        keys = values.keys()
        interims = self.get_interim_fields(analysis)
        interims_out = []

        for interim in interims:
            value = EMPTY_MARKER
            keyword = interim.get("keyword")
            title = interim.get("title")
            interim_copy = interim.copy()
            # Check if we have an interim value set
            if keyword in keys:
                value = values.get(keyword)
            elif title in keys:
                value = values.get(title)
            if value is not EMPTY_MARKER:
                # set the value
                interim_copy["value"] = value
                updated = True
                # TODO: change test not to rely on this logline!
                self.log(_("{sid} result for '{analysis_keyword}:"
                           "{interim_keyword}': '{value}'"
                           .format(sid=sid,
                                   analysis_keyword=analysis.getKeyword(),
                                   interim_keyword=keyword,
                                   value=str(value))))
            interims_out.append(interim_copy)

        # write back interims
        if len(interims_out) > 0:
            analysis.setInterimFields(interims_out)
            analysis.calculateResult(override=self.override[0])

        return updated

    def set_analysis_result(self, sid, analysis, values):
        """Set the analysis result field

        Results can only be set for analyses with no calculation assigned.

        If the analysis already has a result, it is only overridden
        when the right override option is set.

        :param sid: Sample ID
        :param analysis: Analysis object
        :param values: Dictionary of values, including the result to set
        :returns: True if the result was written
        """
        keyword = analysis.getKeyword()
        result_key = values.get(DEFAULT_RESULT_KEY, "")
        result = values.get(result_key, "")
        calculation = analysis.getCalculation()

        # check if analysis has a calculation set
        if calculation:
            self.log(_(u"Skipping result for analysis '{keyword}' of sample "
                       "'{sid}' with calculation '{calculation}'"
                       .format(keyword=keyword,
                               sid=sid,
                               calculation=safe_unicode(calculation.Title()))))
            return False

        # check if a non-empty result can be overwritten
        if not self.can_override_analysis_result(analysis, result):
            self.log(_(u"Analysis '{keyword}' of sample '{sid}' has the "
                       "result '{result}' set, which is kept due to the "
                       "selected override option"
                       .format(sid=sid,
                               result=safe_unicode(analysis.getResult()),
                               keyword=keyword)))
            return False

        # convert result for result options
        result = self.convert_analysis_result(analysis, result)

        # convert capture date if set
        date_captured = values.get("DateTime")
        if date_captured:
            date_captured = dtime.to_DT(date_captured)

        # set the analysis result
        analysis.setResult(result)

        # set the result capture date
        if date_captured:
            analysis.setResultCaptureDate(date_captured)

        self.log(_(u"{sid} result for '{keyword}': '{result}'"
                   .format(sid=sid, keyword=keyword,
                           result=safe_unicode(result))))

        return True

    def set_analysis_fields(self, sid, analysis, values):
        """Set additional analysis fields

        This allows setting additional analysis fields like
        Remarks, Uncertainty, LDL/UDL etc.

        :param sid: Sample ID
        :param analysis: Analysis object
        :param values: Dictionary of values, including the result to set
        :returns: True if any field was written
        """
        updated = False

        fields = api.get_fields(analysis)
        interim_fields = self.get_interim_fields(analysis)

        for key, value in values.items():
            if key not in fields:
                # skip nonexisting fields
                continue
            elif key == "Result":
                # skip the result field
                continue
            elif key in interim_fields:
                # skip the interim fields
                continue

            field = fields.get(key)
            field_value = field.get(analysis)

            if field_value and not self.override_non_empty:
                # skip fields with existing values
                continue

            # set the new field value, preferably with the setter
            setter = "set{}".format(field.getName().capitalize())
            mutator = getattr(analysis, setter, None)
            if mutator:
                # we have a setter
                mutator(value)
            else:
                # set with the field's set method
                field.set(analysis, value)

            updated = True
            self.log(_(u"{sid} Updated field '{field}' with '{value}'"
                       .format(sid=sid, field=key,
                               value=safe_unicode(value))))
        return updated

    def save_submit_analysis(self, analysis):
        """Submit the analysis and ignore transition errors
        """
        # Allow manual submission if this setting is disabled
        submit = get_registry_record("import_analysis_submit")
        if submit is False:
            return
        try:
            api.do_transition_for(analysis, "submit")
        except api.APIError:
            pass

    def get_interim_fields(self, analysis):
        """Return the interim fields of the analysis
        """
        interim_fields = getattr(analysis, "getInterimFields", None)
        if not callable(interim_fields):
            return []
        return interim_fields()

    def calculateTotalResults(self, objid, analysis):
        """If the AR (objid) has an analysis with a calculation assigned,
        check whether the given analysis is used in the calculation's formula.

        Here we are dealing with two types of analysis:
        1. Calculated Analysis - results are calculated
        2. Analysis - results are captured, not calculated

        :param objid: AR ID or Worksheet's Reference Sample IDs
        :param analysis: Analysis Object
        """
        for obj in self.get_analyses_for(objid):
            # skip analyses w/o calculations
            if not obj.getCalculation():
                continue
            # get the calculation
            calculation = obj.getCalculation()
            # get the dependent services of the calculation
            dependencies = calculation.getDependentServices()
            # get the analysis service of the passed in analysis
            service = analysis.getAnalysisService()
            # skip when the service is not a dependency of the calculation
            if service not in dependencies:
                continue
            # recalculate analysis result
            success = obj.calculateResult(override=self.override[0])
            if success:
                self.save_submit_analysis(obj)
                obj.reindexObject(idxs=["Result"])
                self.log(_("{request_id}: calculated result for "
                           "'{analysis_keyword}': '{analysis_result}'"
                           .format(request_id=objid,
                                   analysis_keyword=obj.getKeyword(),
                                   analysis_result=str(obj.getResult()))))
                # recursively recalculate analyses that have this analysis as
                # a dependent service
                self.calculateTotalResults(objid, obj)

    def create_attachment(self, ws, infile):
        """Create a new attachment in the worksheet

        :param ws: Worksheet
        :param infile: upload file wrapper
        :returns: Attachment object
        """
        if not infile:
            return None

        att_type = self.create_mime_attachmenttype()
        filename = infile.filename

        attachment = api.create(ws, "Attachment")
        attachment.edit(
            title=filename,
            AttachmentFile=infile,
            AttachmentType=api.get_uid(att_type),
            AttachmentKeys="Results, Automatic import",
            RenderInReport=False,
        )
        attachment.reindexObject()

        logger.info(_(u"Attached file '{filename}' to worksheet {worksheet}"
                      .format(filename=api.safe_unicode(filename),
                              worksheet=ws.getId())))

        return attachment

    def create_mime_attachmenttype(self):
        """Create (or get) an attachment file type
        """
        file_type = self.parser.getAttachmentFileType()
        obj = self.get_attachment_type_by_title(file_type)
        if not obj:
            obj = api.create(self.attachment_types, "AttachmentType")
            obj.edit(title=file_type,
                     description="Auto generated")
        return obj

    def attach_attachment(self, analysis, attachment):
        """Attach a file or a given set of files to an analysis

        :param analysis: analysis where the files are to be attached
        :param attachment: files to be attached. This can be either a
                           single file or a list of files
        :returns: None
        """
        if not attachment:
            return
        if isinstance(attachment, list):
            for attach in attachment:
                self.attach_attachment(analysis, attach)
            return
        # current attachments
        an_atts = analysis.getAttachment()
        atts_filenames = [att.getAttachmentFile().filename for att in an_atts]
        filename = attachment.getAttachmentFile().filename

        if filename not in atts_filenames:
            an_atts.append(attachment)
            logger.info(
                _(u"Attaching '{attachment}' to Analysis '{analysis}'"
                  .format(attachment=api.safe_unicode(filename),
                          analysis=analysis.getKeyword())))
            analysis.setAttachment([att.UID() for att in an_atts])
            analysis.reindexObject()
        else:
            self.log(_(u"Attachment '{attachment}' was already linked "
                       "to analysis {analysis}"
                       .format(attachment=api.safe_unicode(filename),
                               analysis=analysis.getKeyword())))

    def get_attachment_filenames(self, ws):
        """Returns all attachment filenames in the given worksheet
        """
        fn_attachments = {}
        for att in ws.objectValues("Attachment"):
            fn = att.getAttachmentFile().filename
            if fn not in fn_attachments:
                fn_attachments[fn] = []
            fn_attachments[fn].append(att)
        return fn_attachments

    def is_analysis_allowed(self, analysis):
        """Checks if the analysis matches the import criteria
        """
        if IReferenceAnalysis.providedBy(analysis):
            return True
        # Routine Analyses must be in the allowed WF states
        status = api.get_workflow_status_of(analysis)
        if status in self.allowed_analysis_states:
            return True
        return False

    def get_analyses_for(self, sid):
        """Get analyses for the given sample ID

        Only analyses that are in the allowed analysis states are returned.
        If not a ReferenceAnalysis, allowed sample states are also checked.

        :param sid: sample ID or Worksheet Reference Sample ID
        :returns: list of analyses / empty list if no analyses were found
        """
        analyses = []

        # Acceleration of searches using prioritization
        if self.priorizedsearchcriteria in ["rgid", "rid", "ruid"]:
            # Look from reference analyses
            analyses = self._getZODBAnalysesFromReferenceAnalyses(
                sid, self.priorizedsearchcriteria)

        if len(analyses) == 0:
            # Look from ar and derived
            analyses = self._getZODBAnalysesFromAR(
                sid, "", self.searchcriteria, self.allowed_sample_states)

        return list(filter(self.is_analysis_allowed, analyses))

    @deprecate("Please use self.find_objects instead")
    def _getObjects(self, oid, criteria, states):
        return self.find_objects(oid, criteria, states)

    def find_objects(self, oid, criteria, states):
        """Find objects

        :param oid: Primary search ID
        """
        results = []

        if criteria == "arid":
            query = {"getId": oid, "review_state": states}
            results = self.sample_catalog(query)
        elif criteria == "csid":
            query = {"getClientSampleID": oid, "review_state": states}
            results = self.sample_catalog(query)
        elif criteria == "aruid":
            query = {"UID": oid, "review_state": states}
            results = self.sample_catalog(query)
        elif criteria == "rgid":
            query = {
                "portal_type": ["ReferenceAnalysis", "DuplicateAnalysis"],
                "getReferenceAnalysesGroupID": oid,
            }
            results = self.analysis_catalog(query)
        elif criteria == "rid":
            query = {
                "portal_type": ["ReferenceAnalysis", "DuplicateAnalysis"],
                "getId": oid,
            }
            results = self.analysis_catalog(query)
        elif criteria == "ruid":
            query = {
                "portal_type": ["ReferenceAnalysis", "DuplicateAnalysis"],
                "UID": oid,
            }
            results = self.analysis_catalog(query)

        if len(results) > 0:
            self.priorizedsearchcriteria = criteria

        return results

    @deprecate("Please use self.get_analyses_for instead")
    def _getZODBAnalyses(self, sid):
        return self.get_analyses_for(sid)

    def _getZODBAnalysesFromAR(self, objid, criteria,
                               allowedsearches, arstates):
        ars = []
        analyses = []
        if criteria:
            ars = self.find_objects(objid, criteria, arstates)
            if not ars:
                return self._getZODBAnalysesFromAR(objid, None,
                                                   allowedsearches, arstates)
        else:
            sortorder = ["arid", "csid", "aruid"]
            for crit in sortorder:
                if (crit == "arid" and "getId" in allowedsearches) \
                        or (crit == "csid" and "getClientSampleID"
                            in allowedsearches) \
                        or (crit == "aruid" and "getId" in allowedsearches):
                    ars = self.find_objects(objid, crit, arstates)
                    if ars:
                        break

        if not ars:
            return self._getZODBAnalysesFromReferenceAnalyses(objid, None)

        elif len(ars) > 1:
            self.err("More than one Sample found for {object_id}"
                     .format(object_id=objid))
            return []

        ar = ars[0].getObject()
        analyses = [analysis.getObject() for analysis in ar.getAnalyses()]

        return analyses

    def _getZODBAnalysesFromReferenceAnalyses(self, objid, criteria):
        analyses = []
        if criteria:
            refans = self.find_objects(objid, criteria, [])
            if len(refans) == 0:
                return []

            elif criteria == "rgid":
                return [an.getObject() for an in refans]

            elif len(refans) == 1:
                # The search has been made using the internal identifier
                # of a Reference Analysis (id or uid). That is not usual.
                an = refans[0].getObject()
                worksheet = an.getWorksheet()
                if worksheet:
                    # A regular QC test (assigned to a Worksheet)
                    return [an, ]
                elif an.getInstrument():
                    # An Internal Calibration Test
                    return [an, ]
                else:
                    # Oops. This should never happen!
                    # A ReferenceAnalysis must always be assigned to
                    # a Worksheet (Regular QC) or to an Instrument
                    # (Internal Calibration Test)
                    self.err("The Reference Analysis {object_id} has neither "
                             "instrument nor worksheet assigned"
                             .format(object_id=objid))
                    return []
            else:
                # This should never happen!
                # Fetching a ReferenceAnalysis by its id or uid should
                # *always* return a unique result
                self.err(
                    "More than one Reference Analysis found for {object_id}"
                    .format(object_id=objid))
                return []

        else:
            sortorder = ["rgid", "rid", "ruid"]
            for crit in sortorder:
                analyses = self._getZODBAnalysesFromReferenceAnalyses(objid,
                                                                      crit)
                if len(analyses) > 0:
                    return analyses

        return analyses