1
|
|
|
# -*- coding: utf-8 -*- |
2
|
|
|
# |
3
|
|
|
# This file is part of SENAITE.CORE. |
4
|
|
|
# |
5
|
|
|
# SENAITE.CORE is free software: you can redistribute it and/or modify it under |
6
|
|
|
# the terms of the GNU General Public License as published by the Free Software |
7
|
|
|
# Foundation, version 2. |
8
|
|
|
# |
9
|
|
|
# This program is distributed in the hope that it will be useful, but WITHOUT |
10
|
|
|
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS |
11
|
|
|
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more |
12
|
|
|
# details. |
13
|
|
|
# |
14
|
|
|
# You should have received a copy of the GNU General Public License along with |
15
|
|
|
# this program; if not, write to the Free Software Foundation, Inc., 51 |
16
|
|
|
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. |
17
|
|
|
# |
18
|
|
|
# Copyright 2018-2021 by it's authors. |
19
|
|
|
# Some rights reserved, see README and LICENSE. |
20
|
|
|
|
21
|
|
|
import cgi |
22
|
|
|
import json |
23
|
|
|
import math |
24
|
|
|
from decimal import Decimal |
25
|
|
|
|
26
|
|
|
from AccessControl import ClassSecurityInfo |
27
|
|
|
from bika.lims import api |
28
|
|
|
from bika.lims import bikaMessageFactory as _ |
29
|
|
|
from bika.lims import deprecated |
30
|
|
|
from bika.lims import logger |
31
|
|
|
from bika.lims import workflow as wf |
32
|
|
|
from bika.lims.browser.fields import HistoryAwareReferenceField |
33
|
|
|
from bika.lims.browser.fields import InterimFieldsField |
34
|
|
|
from bika.lims.browser.fields import ResultRangeField |
35
|
|
|
from bika.lims.browser.fields import UIDReferenceField |
36
|
|
|
from bika.lims.browser.fields.uidreferencefield import get_backreferences |
37
|
|
|
from bika.lims.browser.widgets import RecordsWidget |
38
|
|
|
from bika.lims.config import LDL |
39
|
|
|
from bika.lims.config import UDL |
40
|
|
|
from bika.lims.content.abstractbaseanalysis import AbstractBaseAnalysis |
41
|
|
|
from bika.lims.content.abstractbaseanalysis import schema |
42
|
|
|
from bika.lims.interfaces import IDuplicateAnalysis |
43
|
|
|
from bika.lims.permissions import FieldEditAnalysisResult |
44
|
|
|
from bika.lims.utils import drop_trailing_zeros_decimal |
45
|
|
|
from bika.lims.utils import formatDecimalMark |
46
|
|
|
from bika.lims.utils.analysis import format_numeric_result |
47
|
|
|
from bika.lims.utils.analysis import get_significant_digits |
48
|
|
|
from bika.lims.workflow import getTransitionActor |
49
|
|
|
from bika.lims.workflow import getTransitionDate |
50
|
|
|
from DateTime import DateTime |
51
|
|
|
from Products.Archetypes.Field import DateTimeField |
52
|
|
|
from Products.Archetypes.Field import FixedPointField |
53
|
|
|
from Products.Archetypes.Field import IntegerField |
54
|
|
|
from Products.Archetypes.Field import StringField |
55
|
|
|
from Products.Archetypes.references import HoldingReference |
56
|
|
|
from Products.Archetypes.Schema import Schema |
57
|
|
|
from Products.CMFCore.permissions import View |
58
|
|
|
|
59
|
|
|
# A link directly to the AnalysisService object used to create the analysis
AnalysisService = UIDReferenceField(
    'AnalysisService'
)

# Attachments which are added manually in the UI, or automatically when
# results are imported from a file supplied by an instrument.
Attachment = UIDReferenceField(
    'Attachment',
    multiValued=1,
    allowed_types=('Attachment',)
)

# The final result of the analysis is stored here. The field contains a
# String value, but the result itself is required to be numeric. If
# a non-numeric result is needed, ResultOptions can be used.
Result = StringField(
    'Result',
    read_permission=View,
    write_permission=FieldEditAnalysisResult,
)

# When the result is changed, this value is updated to the current time.
# Only the most recent result capture date is recorded here and used to
# populate catalog values, however the workflow review_history can be
# used to get all dates of result capture
ResultCaptureDate = DateTimeField(
    'ResultCaptureDate'
)

# Returns the retracted analysis this analysis is a retest of
RetestOf = UIDReferenceField(
    'RetestOf'
)

# If the result is outside of the detection limits of the method or instrument,
# the operand (< or >) is stored here. For routine analyses this is taken
# from the Result, if the result entered explicitly startswith "<" or ">"
DetectionLimitOperand = StringField(
    'DetectionLimitOperand',
    read_permission=View,
    write_permission=FieldEditAnalysisResult,
)

# The ID of the logged in user who submitted the result for this Analysis.
Analyst = StringField(
    'Analyst'
)

# The actual uncertainty for this analysis' result, populated from the ranges
# specified in the analysis service when the result is submitted.
# NOTE(review): the write permission is given here as the literal string
# "Field: Edit Result" instead of the FieldEditAnalysisResult constant used
# by the other fields — presumably equivalent; confirm before unifying.
Uncertainty = FixedPointField(
    'Uncertainty',
    read_permission=View,
    write_permission="Field: Edit Result",
    precision=10,
)

# Number of verifications required before the analysis can be
# transitioned to a 'verified' state. This value is set automatically
# when the analysis is created, based on the value set for the property
# NumberOfRequiredVerifications from the Analysis Service
NumberOfRequiredVerifications = IntegerField(
    'NumberOfRequiredVerifications',
    default=1
)

# Routine Analyses and Reference Analysis have a versioned link to
# the calculation at creation time.
Calculation = HistoryAwareReferenceField(
    'Calculation',
    read_permission=View,
    write_permission=FieldEditAnalysisResult,
    allowed_types=('Calculation',),
    relationship='AnalysisCalculation',
    referenceClass=HoldingReference
)

# InterimFields are defined in Calculations, Services, and Analyses.
# In Analysis Services, the default values are taken from Calculation.
# In Analyses, the default values are taken from the Analysis Service.
# When instrument results are imported, the values in analysis are overridden
# before the calculation is performed.
InterimFields = InterimFieldsField(
    'InterimFields',
    read_permission=View,
    write_permission=FieldEditAnalysisResult,
    schemata='Method',
    widget=RecordsWidget(
        label=_("Calculation Interim Fields"),
        description=_(
            "Values can be entered here which will override the defaults "
            "specified in the Calculation Interim Fields."),
    )
)

# Results Range that applies to this analysis
ResultsRange = ResultRangeField(
    "ResultsRange",
    required=0
)

# Extend the base analysis schema (imported from abstractbaseanalysis) with
# the fields defined above. The base schema is copied so the original stays
# untouched for other consumers.
schema = schema.copy() + Schema((
    AnalysisService,
    Analyst,
    Attachment,
    DetectionLimitOperand,
    # NumberOfRequiredVerifications overrides AbstractBaseClass
    NumberOfRequiredVerifications,
    Result,
    ResultCaptureDate,
    RetestOf,
    Uncertainty,
    Calculation,
    InterimFields,
    ResultsRange,
))
175
|
|
|
|
176
|
|
|
|
177
|
|
|
class AbstractAnalysis(AbstractBaseAnalysis):
    # Zope security machinery; methods below are declared via
    # @security.public on a per-method basis
    security = ClassSecurityInfo()
    # Hide the default "Contents" tab in the management UI
    displayContentsTab = False
    # Base analysis schema extended with the fields defined in this module
    schema = schema
|
|
|
|
181
|
|
|
|
182
|
|
|
    @deprecated('[1705] Currently returns the Analysis object itself. If you '
                'need to get the service, use getAnalysisService instead')
    @security.public
    def getService(self):
        """Deprecated. Returns the analysis object itself, NOT the service.

        Kept only for backwards compatibility with old callers; use
        getAnalysisService to obtain the real AnalysisService object.
        """
        return self
187
|
|
|
|
188
|
|
|
    def getServiceUID(self):
        """Return the UID of the associated service.

        Reads the raw (stored) UID from the AnalysisService reference
        field, so the service object itself is never woken up.
        """
        return self.getRawAnalysisService()
192
|
|
|
|
193
|
|
|
    @security.public
    def getNumberOfVerifications(self):
        # Number of "verify"/"multi_verify" transitions recorded in the
        # workflow review history (see getVerificators)
        return len(self.getVerificators())
196
|
|
|
|
197
|
|
|
@security.public |
198
|
|
|
def getNumberOfRemainingVerifications(self): |
199
|
|
|
required = self.getNumberOfRequiredVerifications() |
200
|
|
|
done = self.getNumberOfVerifications() |
201
|
|
|
if done >= required: |
202
|
|
|
return 0 |
203
|
|
|
return required - done |
204
|
|
|
|
205
|
|
|
# TODO Workflow - analysis . Remove? |
206
|
|
|
@security.public |
207
|
|
|
def getLastVerificator(self): |
208
|
|
|
verifiers = self.getVerificators() |
209
|
|
|
return verifiers and verifiers[-1] or None |
210
|
|
|
|
211
|
|
|
@security.public |
212
|
|
|
def getVerificators(self): |
213
|
|
|
"""Returns the user ids of the users that verified this analysis |
214
|
|
|
""" |
215
|
|
|
verifiers = list() |
216
|
|
|
actions = ["verify", "multi_verify"] |
217
|
|
|
for event in wf.getReviewHistory(self): |
218
|
|
|
if event['action'] in actions: |
219
|
|
|
verifiers.append(event['actor']) |
220
|
|
|
sorted(verifiers, reverse=True) |
221
|
|
|
return verifiers |
222
|
|
|
|
223
|
|
|
    @security.public
    def getDefaultUncertainty(self, result=None):
        """Return the uncertainty value, if the result falls within
        specified ranges for the service from which this analysis was derived.

        :param result: numeric result to match against the uncertainty
            ranges; defaults to the analysis' current result
        :return: the uncertainty as a float, or None when the result is not
            numeric, no range matches, or a percentage value is malformed
        """

        if result is None:
            result = self.getResult()

        uncertainties = self.getUncertainties()
        if uncertainties:
            try:
                res = float(result)
            except (TypeError, ValueError):
                # if analysis result is not a number, then we assume in range
                return None

            # Each range record carries an inclusive [intercept_min,
            # intercept_max] interval and an errorvalue that is either an
            # absolute number or a percentage (suffixed with '%')
            for d in uncertainties:
                _min = float(d['intercept_min'])
                _max = float(d['intercept_max'])
                if _min <= res and res <= _max:
                    if str(d['errorvalue']).strip().endswith('%'):
                        try:
                            percvalue = float(d['errorvalue'].replace('%', ''))
                        except ValueError:
                            return None
                        # Percentage uncertainty is relative to the result
                        uncertainty = res / 100 * percvalue
                    else:
                        uncertainty = float(d['errorvalue'])

                    # First matching range wins
                    return uncertainty
        return None
255
|
|
|
|
256
|
|
|
    @security.public
    def getUncertainty(self, result=None):
        """Returns the uncertainty for this analysis and result.

        Returns the value from Schema's Uncertainty field if the Service has
        the option 'Allow manual uncertainty'. Otherwise, do a callback to
        getDefaultUncertainty(). Returns None if no result specified and the
        current result for this analysis is below or above detections limits.

        :param result: optional result to compute the uncertainty against;
            when None, the stored result applies
        :return: the uncertainty as a float, or None
        """
        # Raw (possibly manually entered) value from the schema field
        uncertainty = self.getField('Uncertainty').get(self)
        # Out-of-detection-limit results carry no meaningful uncertainty
        if result is None and (self.isAboveUpperDetectionLimit() or
                               self.isBelowLowerDetectionLimit()):
            return None

        # Manual value wins only when manual uncertainty is allowed AND the
        # stored value is a valid number
        if uncertainty and self.getAllowManualUncertainty() is True:
            try:
                uncertainty = float(uncertainty)
                return uncertainty
            except (TypeError, ValueError):
                # if uncertainty is not a number, return default value
                pass
        return self.getDefaultUncertainty(result)
277
|
|
|
|
278
|
|
|
@security.public |
279
|
|
|
def setUncertainty(self, unc): |
280
|
|
|
"""Sets the uncertainty for this analysis. If the result is a |
281
|
|
|
Detection Limit or the value is below LDL or upper UDL, sets the |
282
|
|
|
uncertainty value to 0 |
283
|
|
|
""" |
284
|
|
|
# Uncertainty calculation on DL |
285
|
|
|
# https://jira.bikalabs.com/browse/LIMS-1808 |
286
|
|
|
if self.isAboveUpperDetectionLimit() or \ |
287
|
|
|
self.isBelowLowerDetectionLimit(): |
288
|
|
|
self.getField('Uncertainty').set(self, None) |
289
|
|
|
else: |
290
|
|
|
self.getField('Uncertainty').set(self, unc) |
291
|
|
|
|
292
|
|
|
    @security.public
    def setDetectionLimitOperand(self, value):
        """Set detection limit operand for this analysis

        Allowed detection limit operands are `<` and `>`. Any other value
        clears the operand. Setting an operand has side effects on the
        Result field, the Uncertainty and the DetectionLimitSelector flag.

        :param value: the operand to store (LDL, UDL or anything else to
            clear it)
        """
        manual_dl = self.getAllowManualDetectionLimit()
        selector = self.getDetectionLimitSelector()
        if not manual_dl and not selector:
            # Don't allow the user to set the limit operand if manual assignment
            # is not allowed and selector is not visible
            return

        # Changing the detection limit operand has a side effect on the result
        result = self.getResult()
        if value in [LDL, UDL]:
            # flush uncertainty
            self.setUncertainty("")

            # If no previous result or user is not allowed to manually set the
            # the detection limit, override the result with default LDL/UDL
            has_result = api.is_floatable(result)
            if not has_result or not manual_dl:
                # set the result according to the system default UDL/LDL values
                if value == LDL:
                    result = self.getLowerDetectionLimit()
                else:
                    result = self.getUpperDetectionLimit()

        else:
            # Not a valid operand: clear it
            value = ""
            # Restore the DetectionLimitSelector, cause maybe its visibility
            # was changed because allow manual detection limit was enabled and
            # the user set a result with "<" or ">"
            if manual_dl:
                service = self.getAnalysisService()
                selector = service.getDetectionLimitSelector()
                self.setDetectionLimitSelector(selector)

        # Set the result
        self.getField("Result").set(self, result)

        # Set the detection limit to the field
        self.getField("DetectionLimitOperand").set(self, value)
335
|
|
|
|
336
|
|
|
# Method getLowerDetectionLimit overrides method of class BaseAnalysis |
337
|
|
|
@security.public |
338
|
|
|
def getLowerDetectionLimit(self): |
339
|
|
|
"""Returns the Lower Detection Limit (LDL) that applies to this |
340
|
|
|
analysis in particular. If no value set or the analysis service |
341
|
|
|
doesn't allow manual input of detection limits, returns the value set |
342
|
|
|
by default in the Analysis Service |
343
|
|
|
""" |
344
|
|
|
if self.isLowerDetectionLimit(): |
345
|
|
|
result = self.getResult() |
346
|
|
|
try: |
347
|
|
|
# in this case, the result itself is the LDL. |
348
|
|
|
return float(result) |
349
|
|
|
except (TypeError, ValueError): |
350
|
|
|
logger.warn("The result for the analysis %s is a lower " |
351
|
|
|
"detection limit, but not floatable: '%s'. " |
352
|
|
|
"Returnig AS's default LDL." % |
353
|
|
|
(self.id, result)) |
354
|
|
|
return AbstractBaseAnalysis.getLowerDetectionLimit(self) |
355
|
|
|
|
356
|
|
|
# Method getUpperDetectionLimit overrides method of class BaseAnalysis |
357
|
|
|
@security.public |
358
|
|
|
def getUpperDetectionLimit(self): |
359
|
|
|
"""Returns the Upper Detection Limit (UDL) that applies to this |
360
|
|
|
analysis in particular. If no value set or the analysis service |
361
|
|
|
doesn't allow manual input of detection limits, returns the value set |
362
|
|
|
by default in the Analysis Service |
363
|
|
|
""" |
364
|
|
|
if self.isUpperDetectionLimit(): |
365
|
|
|
result = self.getResult() |
366
|
|
|
try: |
367
|
|
|
# in this case, the result itself is the LDL. |
368
|
|
|
return float(result) |
369
|
|
|
except (TypeError, ValueError): |
370
|
|
|
logger.warn("The result for the analysis %s is a lower " |
371
|
|
|
"detection limit, but not floatable: '%s'. " |
372
|
|
|
"Returnig AS's default LDL." % |
373
|
|
|
(self.id, result)) |
374
|
|
|
return AbstractBaseAnalysis.getUpperDetectionLimit(self) |
375
|
|
|
|
376
|
|
|
@security.public |
377
|
|
|
def isBelowLowerDetectionLimit(self): |
378
|
|
|
"""Returns True if the result is below the Lower Detection Limit or |
379
|
|
|
if Lower Detection Limit has been manually set |
380
|
|
|
""" |
381
|
|
|
if self.isLowerDetectionLimit(): |
382
|
|
|
return True |
383
|
|
|
|
384
|
|
|
result = self.getResult() |
385
|
|
|
if result and str(result).strip().startswith(LDL): |
386
|
|
|
return True |
387
|
|
|
|
388
|
|
|
if api.is_floatable(result): |
389
|
|
|
return api.to_float(result) < self.getLowerDetectionLimit() |
390
|
|
|
|
391
|
|
|
return False |
392
|
|
|
|
393
|
|
|
@security.public |
394
|
|
|
def isAboveUpperDetectionLimit(self): |
395
|
|
|
"""Returns True if the result is above the Upper Detection Limit or |
396
|
|
|
if Upper Detection Limit has been manually set |
397
|
|
|
""" |
398
|
|
|
if self.isUpperDetectionLimit(): |
399
|
|
|
return True |
400
|
|
|
|
401
|
|
|
result = self.getResult() |
402
|
|
|
if result and str(result).strip().startswith(UDL): |
403
|
|
|
return True |
404
|
|
|
|
405
|
|
|
if api.is_floatable(result): |
406
|
|
|
return api.to_float(result) > self.getUpperDetectionLimit() |
407
|
|
|
|
408
|
|
|
return False |
409
|
|
|
|
410
|
|
|
@security.public |
411
|
|
|
def getDetectionLimits(self): |
412
|
|
|
"""Returns a two-value array with the limits of detection (LDL and |
413
|
|
|
UDL) that applies to this analysis in particular. If no value set or |
414
|
|
|
the analysis service doesn't allow manual input of detection limits, |
415
|
|
|
returns the value set by default in the Analysis Service |
416
|
|
|
""" |
417
|
|
|
return [self.getLowerDetectionLimit(), self.getUpperDetectionLimit()] |
418
|
|
|
|
419
|
|
|
    @security.public
    def isLowerDetectionLimit(self):
        """Returns True if the result for this analysis represents a Lower
        Detection Limit. Otherwise, returns False
        """
        # True only when the stored operand is exactly the LDL marker ("<")
        return self.getDetectionLimitOperand() == LDL
425
|
|
|
|
426
|
|
|
    @security.public
    def isUpperDetectionLimit(self):
        """Returns True if the result for this analysis represents an Upper
        Detection Limit. Otherwise, returns False
        """
        # True only when the stored operand is exactly the UDL marker (">")
        return self.getDetectionLimitOperand() == UDL
432
|
|
|
|
433
|
|
|
    @security.public
    def getDependents(self):
        """Return a list of analyses who depend on us to calculate their result

        Abstract: concrete analysis types must provide the implementation.

        :raises NotImplementedError: always, in this abstract base
        """
        raise NotImplementedError("getDependents is not implemented.")
438
|
|
|
|
439
|
|
|
    @security.public
    def getDependencies(self, with_retests=False):
        """Return a list of siblings who we depend on to calculate our result.

        Abstract: concrete analysis types must provide the implementation.

        :param with_retests: If false, siblings with retests are dismissed
        :type with_retests: bool
        :return: Analyses the current analysis depends on
        :rtype: list of IAnalysis
        :raises NotImplementedError: always, in this abstract base
        """
        raise NotImplementedError("getDependencies is not implemented.")
448
|
|
|
|
449
|
|
|
    @security.public
    def setResult(self, value):
        """Validate and set a value into the Result field, taking into
        account the Detection Limits.

        Side effects: may update DetectionLimitOperand, the
        DetectionLimitSelector visibility and the ResultCaptureDate.

        :param value: is expected to be a string.
        """
        # Remember previous value to decide whether the capture date changes
        prev_result = self.getField("Result").get(self) or ""

        # Convert to list if the analysis has result options set with multi
        if self.getResultOptions() and "multi" in self.getResultOptionsType():
            if not isinstance(value, (list, tuple)):
                # NOTE: Python-2 filter returns a list here
                value = filter(None, [value])

        # Handle list results
        if isinstance(value, (list, tuple)):
            value = json.dumps(value)

        # Ensure result integrity regards to None, empty and 0 values
        val = str("" if not value and value != 0 else value).strip()

        # UDL/LDL directly entered in the results field
        if val and val[0] in [LDL, UDL]:
            # Result prefixed with LDL/UDL
            oper = val[0]
            # Strip off LDL/UDL from the result
            val = val.replace(oper, "", 1)
            # Check if the value is indeterminate / non-floatable
            try:
                val = float(val)
            except (ValueError, TypeError):
                # keep the original (un-stripped) value
                val = value

            # We dismiss the operand and the selector visibility unless the user
            # is allowed to manually set the detection limit or the DL selector
            # is visible.
            allow_manual = self.getAllowManualDetectionLimit()
            selector = self.getDetectionLimitSelector()
            if allow_manual or selector:
                # Ensure visibility of the detection limit selector
                self.setDetectionLimitSelector(True)

                # Set the detection limit operand
                self.setDetectionLimitOperand(oper)

                if not allow_manual:
                    # Override value by default DL
                    if oper == LDL:
                        val = self.getLowerDetectionLimit()
                    else:
                        val = self.getUpperDetectionLimit()

        # Update ResultCapture date if necessary
        if not val:
            # empty result: clear the capture date
            self.setResultCaptureDate(None)
        elif prev_result != val:
            # result changed: stamp with the current time
            self.setResultCaptureDate(DateTime())

        # Set the result field
        self.getField("Result").set(self, val)
508
|
|
|
|
509
|
|
|
    @security.public
    def calculateResult(self, override=False, cascade=False):
        """Calculates the result for the current analysis if it depends of
        other analysis/interim fields. Otherwise, do nothing

        :param override: when False, an already-set result is left untouched
        :param cascade: when True, dependencies without a result get their
            own calculateResult called first
        :return: True if a result (possibly "NA"/"0/0") was set,
            False otherwise
        :rtype: bool
        """
        if self.getResult() and override is False:
            return False

        calc = self.getCalculation()
        if not calc:
            return False

        # Include the current context UID in the mapping, so it can be passed
        # as a param in built-in functions, like 'get_result(%(context_uid)s)'
        mapping = {"context_uid": '"{}"'.format(self.UID())}

        # Interims' priority order (from low to high):
        # Calculation < Analysis
        interims = calc.getInterimFields() + self.getInterimFields()

        # Add interims to mapping
        for i in interims:
            if 'keyword' not in i:
                continue
            # skip unset values
            if i['value'] == '':
                continue
            try:
                ivalue = float(i['value'])
                mapping[i['keyword']] = ivalue
            except (TypeError, ValueError):
                # Interim not float, abort
                return False

        # Add dependencies results to mapping
        dependencies = self.getDependencies()
        for dependency in dependencies:
            result = dependency.getResult()
            if not result:
                # Dependency without results found
                if cascade:
                    # Try to calculate the dependency result
                    dependency.calculateResult(override, cascade)
                    result = dependency.getResult()
                else:
                    return False
            if result:
                try:
                    result = float(str(result))
                    key = dependency.getKeyword()
                    ldl = dependency.getLowerDetectionLimit()
                    udl = dependency.getUpperDetectionLimit()
                    bdl = dependency.isBelowLowerDetectionLimit()
                    adl = dependency.isAboveUpperDetectionLimit()
                    # Expose the dependency under both its plain keyword and
                    # the dotted KEYWORD.ATTR forms usable in formulas
                    mapping[key] = result
                    mapping['%s.%s' % (key, 'RESULT')] = result
                    mapping['%s.%s' % (key, 'LDL')] = ldl
                    mapping['%s.%s' % (key, 'UDL')] = udl
                    mapping['%s.%s' % (key, 'BELOWLDL')] = int(bdl)
                    mapping['%s.%s' % (key, 'ABOVEUDL')] = int(adl)
                except (TypeError, ValueError):
                    return False

        # Calculate
        # The formula's "[keyword]" placeholders become "%(keyword)f"
        # interpolation slots, filled from the mapping built above.
        # SECURITY NOTE: the formula is executed with eval(); __builtins__ is
        # disabled for the interpolation step, but calc._getGlobals() governs
        # what the final eval can reach — formulas must come from trusted
        # (lab-manager-defined) Calculation objects only.
        formula = calc.getMinifiedFormula()
        formula = formula.replace('[', '%(').replace(']', ')f')
        try:
            formula = eval("'%s'%%mapping" % formula,
                           {"__builtins__": None,
                            'math': math,
                            'context': self},
                           {'mapping': mapping})
            result = eval(formula, calc._getGlobals())
        except TypeError:
            # non-numeric operand slipped through
            self.setResult("NA")
            return True
        except ZeroDivisionError:
            self.setResult('0/0')
            return True
        except KeyError:
            # formula references a keyword missing from the mapping
            self.setResult("NA")
            return True
        except ImportError:
            self.setResult("NA")
            return True

        self.setResult(str(result))
        return True
597
|
|
|
|
598
|
|
|
@security.public |
599
|
|
|
def getPrice(self): |
600
|
|
|
"""The function obtains the analysis' price without VAT and without |
601
|
|
|
member discount |
602
|
|
|
:return: the price (without VAT or Member Discount) in decimal format |
603
|
|
|
""" |
604
|
|
|
analysis_request = self.aq_parent |
605
|
|
|
client = analysis_request.aq_parent |
606
|
|
|
if client.getBulkDiscount(): |
607
|
|
|
price = self.getBulkPrice() |
608
|
|
|
else: |
609
|
|
|
price = self.getField('Price').get(self) |
610
|
|
|
return price |
611
|
|
|
|
612
|
|
|
    @security.public
    def getVATAmount(self):
        """Compute the VAT amount without member discount.

        :return: the VAT amount as a Decimal (price * vat / 100)
        """
        vat = self.getVAT()
        price = self.getPrice()
        return Decimal(price) * Decimal(vat) / 100
620
|
|
|
|
621
|
|
|
    @security.public
    def getTotalPrice(self):
        """Obtain the total price without client's member discount. The function
        keeps in mind the client's bulk discount.

        :return: price plus VAT amount, as a Decimal
        """
        return Decimal(self.getPrice()) + Decimal(self.getVATAmount())
628
|
|
|
|
629
|
|
|
    @security.public
    def getDuration(self):
        """Returns the time in minutes taken for this analysis.

        If the analysis is not yet 'ready to process', returns 0.
        If the analysis has been verified:
            duration = date_verified - date_start_process
        Otherwise (still in progress):
            duration = current_datetime - date_start_process

        :return: time in minutes taken for this analysis
        :rtype: int
        """
        starttime = self.getStartProcessDate()
        if not starttime:
            # The analysis is not yet ready to be processed
            return 0
        # Fall back to "now" while the analysis is not verified yet
        endtime = self.getDateVerified() or DateTime()

        # Duration in minutes (DateTime subtraction yields days)
        duration = (endtime - starttime) * 24 * 60
        return duration
649
|
|
|
|
650
|
|
|
@security.public |
651
|
|
|
def getEarliness(self): |
652
|
|
|
"""The remaining time in minutes for this analysis to be completed. |
653
|
|
|
Returns zero if the analysis is neither 'ready to process' nor a |
654
|
|
|
turnaround time is set. |
655
|
|
|
earliness = duration - max_turnaround_time |
656
|
|
|
The analysis is late if the earliness is negative |
657
|
|
|
:return: the remaining time in minutes before the analysis reaches TAT |
658
|
|
|
:rtype: int |
659
|
|
|
""" |
660
|
|
|
maxtime = self.getMaxTimeAllowed() |
661
|
|
|
if not maxtime: |
662
|
|
|
# No Turnaround time is set for this analysis |
663
|
|
|
return 0 |
664
|
|
|
return api.to_minutes(**maxtime) - self.getDuration() |
665
|
|
|
|
666
|
|
|
    @security.public
    def isLateAnalysis(self):
        """Returns true if the analysis is late in accordance with the maximum
        turnaround time. If no maximum turnaround time is set for this analysis
        or it is not yet ready to be processed, or there is still time
        remaining (earliness), returns False.

        :return: true if the analysis is late
        :rtype: bool
        """
        # Negative earliness means the turnaround time has been exceeded
        return self.getEarliness() < 0
676
|
|
|
|
677
|
|
|
    @security.public
    def getLateness(self):
        """The time in minutes that exceeds the maximum turnaround set for this
        analysis. If the analysis has no turnaround time set or is not ready
        for process yet, returns 0. The analysis is not late if the lateness is
        negative

        :return: the time in minutes that exceeds the maximum turnaround time
        :rtype: int
        """
        # Lateness is simply earliness negated
        return -self.getEarliness()
687
|
|
|
|
688
|
|
|
@security.public |
689
|
|
|
def isInstrumentAllowed(self, instrument): |
690
|
|
|
"""Checks if the specified instrument can be set for this analysis, |
691
|
|
|
|
692
|
|
|
:param instrument: string,Instrument |
693
|
|
|
:return: True if the assignment of the passed in instrument is allowed |
694
|
|
|
:rtype: bool |
695
|
|
|
""" |
696
|
|
|
uid = api.get_uid(instrument) |
697
|
|
|
return uid in map(api.get_uid, self.getAllowedInstruments()) |
698
|
|
|
|
699
|
|
|
@security.public |
700
|
|
|
def isMethodAllowed(self, method): |
701
|
|
|
"""Checks if the analysis can follow the method specified |
702
|
|
|
|
703
|
|
|
:param method: string,Method |
704
|
|
|
:return: True if the analysis can follow the method specified |
705
|
|
|
:rtype: bool |
706
|
|
|
""" |
707
|
|
|
uid = api.get_uid(method) |
708
|
|
|
return uid in map(api.get_uid, self.getAllowedMethods()) |
709
|
|
|
|
710
|
|
|
@security.public |
711
|
|
|
def getAllowedMethods(self): |
712
|
|
|
"""Returns the allowed methods for this analysis, either if the method |
713
|
|
|
was assigned directly (by using "Allows manual entry of results") or |
714
|
|
|
indirectly via Instrument ("Allows instrument entry of results") in |
715
|
|
|
Analysis Service Edit View. |
716
|
|
|
:return: A list with the methods allowed for this analysis |
717
|
|
|
:rtype: list of Methods |
718
|
|
|
""" |
719
|
|
|
service = self.getAnalysisService() |
720
|
|
|
if not service: |
721
|
|
|
return [] |
722
|
|
|
# get the available methods of the service |
723
|
|
|
return service.getMethods() |
724
|
|
|
|
725
|
|
|
@security.public |
726
|
|
|
def getAllowedInstruments(self): |
727
|
|
|
"""Returns the allowed instruments from the service |
728
|
|
|
|
729
|
|
|
:return: A list of instruments allowed for this Analysis |
730
|
|
|
:rtype: list of instruments |
731
|
|
|
""" |
732
|
|
|
service = self.getAnalysisService() |
733
|
|
|
if not service: |
734
|
|
|
return [] |
735
|
|
|
return service.getInstruments() |
736
|
|
|
|
737
|
|
|
@security.public
def getExponentialFormatPrecision(self, result=None):
    """ Returns the precision for the Analysis Service and result
    provided. Results with a precision value above this exponential
    format precision should be formatted as scientific notation.

    If the Calculate Precision according to Uncertainty is not set,
    the method will return the exponential precision value set in the
    Schema. Otherwise, will calculate the precision value according to
    the Uncertainty and the result.

    If Calculate Precision from the Uncertainty is set but no result
    provided neither uncertainty values are set, returns the fixed
    exponential precision.

    Will return positive values if the result is below 0 and will return
    0 or positive values if the result is above 0.

    Given an analysis service with fixed exponential format
    precision of 4:
        Result      Uncertainty     Returns
        5.234       0.22            0
        13.5        1.34            1
        0.0077      0.008          -3
        32092       0.81            4
        456021      423             5

    For further details, visit https://jira.bikalabs.com/browse/LIMS-1334

    :param result: if provided and "Calculate Precision according to the
        Uncertainty" is set, the result will be used to retrieve the
        uncertainty from which the precision must be calculated.
        Otherwise, the fixed-precision will be used.
    :returns: the precision
    """
    # NOTE(review): "not result" is also True for 0, "" and "0" is not —
    # a numeric zero result therefore falls back to the fixed precision;
    # confirm this is intended
    if not result or self.getPrecisionFromUncertainty() is False:
        return self._getExponentialFormatPrecision()
    else:
        # Derive the precision from the uncertainty of this result
        uncertainty = self.getUncertainty(result)
        if uncertainty is None:
            # No uncertainty available: fall back to the fixed precision
            return self._getExponentialFormatPrecision()

        try:
            float(result)
        except ValueError:
            # if analysis result is not a number, then we assume in range
            return self._getExponentialFormatPrecision()

        return get_significant_digits(uncertainty)
786
|
|
|
|
787
|
|
|
def _getExponentialFormatPrecision(self): |
788
|
|
|
field = self.getField('ExponentialFormatPrecision') |
789
|
|
|
value = field.get(self) |
790
|
|
|
if value is None: |
791
|
|
|
# https://github.com/bikalims/bika.lims/issues/2004 |
792
|
|
|
# We require the field, because None values make no sense at all. |
793
|
|
|
value = self.Schema().getField( |
794
|
|
|
'ExponentialFormatPrecision').getDefault(self) |
795
|
|
|
return value |
796
|
|
|
|
797
|
|
|
@security.public
def getFormattedResult(self, specs=None, decimalmark='.', sciformat=1,
                       html=True):
    """Formatted result:
    1. If the result is a detection limit, returns '< LDL' or '> UDL'
    2. Print ResultText of matching ResultOptions
    3. If the result is not floatable, return it without being formatted
    4. If the analysis specs has hidemin or hidemax enabled and the
       result is out of range, render result as '<min' or '>max'
    5. If the result is below Lower Detection Limit, show '<LDL'
    6. If the result is above Upper Detecion Limit, show '>UDL'
    7. Otherwise, render numerical value
    :param specs: Optional result specifications, a dictionary as follows:
        {'min': <min_val>,
         'max': <max_val>,
         'error': <error>,
         'hidemin': <hidemin_val>,
         'hidemax': <hidemax_val>}
    :param decimalmark: The string to be used as a decimal separator.
        default is '.'
    :param sciformat: 1. The sci notation has to be formatted as aE^+b
        2. The sci notation has to be formatted as a·10^b
        3. As 2, but with super html entity for exp
        4. The sci notation has to be formatted as a·10^b
        5. As 4, but with super html entity for exp
        By default 1
    :param html: if true, returns an string with the special characters
        escaped: e.g: '&lt;' and '&gt;' (LDL and UDL for results like < 23.4).
    """
    result = self.getResult()

    # 1. The result is a detection limit, return '< LDL' or '> UDL'
    dl = self.getDetectionLimitOperand()
    if dl:
        try:
            res = float(result)  # required, check if floatable
            res = drop_trailing_zeros_decimal(res)
            fdm = formatDecimalMark(res, decimalmark)
            hdl = cgi.escape(dl) if html else dl
            return '%s %s' % (hdl, fdm)
        except (TypeError, ValueError):
            logger.warn(
                "The result for the analysis %s is a detection limit, "
                "but not floatable: %s" % (self.id, result))
            return formatDecimalMark(result, decimalmark=decimalmark)

    # 2. Print ResultText of matching ResultOptions
    choices = self.getResultOptions()
    if choices:
        # Create a dict for easy mapping of result options
        values_texts = dict(map(
            lambda c: (str(c["ResultValue"]), c["ResultText"]), choices
        ))

        # Result might contain a single result option
        match = values_texts.get(str(result))
        if match:
            return match

        # Result might be a string with multiple options e.g. "['2', '1']"
        try:
            raw_result = json.loads(result)
            texts = map(lambda r: values_texts.get(str(r)), raw_result)
            texts = filter(None, texts)
            return "<br/>".join(texts)
        except (ValueError, TypeError):
            pass

    # 3. If the result is not floatable, return it without being formatted
    try:
        result = float(result)
    except (TypeError, ValueError):
        return formatDecimalMark(result, decimalmark=decimalmark)

    # 4. If the analysis specs has enabled hidemin or hidemax and the
    # result is out of range, render result as '<min' or '>max'
    specs = specs if specs else self.getResultsRange()
    hidemin = specs.get('hidemin', '')
    hidemax = specs.get('hidemax', '')
    try:
        belowmin = hidemin and result < float(hidemin) or False
    except (TypeError, ValueError):
        belowmin = False
    try:
        abovemax = hidemax and result > float(hidemax) or False
    except (TypeError, ValueError):
        abovemax = False

    # 4.1. If result is below min and hidemin enabled, return '<min'
    if belowmin:
        fdm = formatDecimalMark('< %s' % hidemin, decimalmark)
        # escape the operator so it renders literally in HTML output
        return fdm.replace('< ', '&lt; ', 1) if html else fdm

    # 4.2. If result is above max and hidemax enabled, return '>max'
    if abovemax:
        fdm = formatDecimalMark('> %s' % hidemax, decimalmark)
        return fdm.replace('> ', '&gt; ', 1) if html else fdm

    # Below Lower Detection Limit (LDL)?
    # NOTE(review): getLowerDetectionLimit may not return a float here;
    # confirm the comparison operands share a type
    ldl = self.getLowerDetectionLimit()
    if result < ldl:
        # LDL must not be formatted according to precision, etc.
        # Drop trailing zeros from decimal
        ldl = drop_trailing_zeros_decimal(ldl)
        fdm = formatDecimalMark('< %s' % ldl, decimalmark)
        return fdm.replace('< ', '&lt; ', 1) if html else fdm

    # Above Upper Detection Limit (UDL)?
    udl = self.getUpperDetectionLimit()
    if result > udl:
        # UDL must not be formatted according to precision, etc.
        # Drop trailing zeros from decimal
        udl = drop_trailing_zeros_decimal(udl)
        fdm = formatDecimalMark('> %s' % udl, decimalmark)
        return fdm.replace('> ', '&gt; ', 1) if html else fdm

    # Render numerical values
    return format_numeric_result(self, self.getResult(),
                                 decimalmark=decimalmark,
                                 sciformat=sciformat)
917
|
|
|
|
918
|
|
|
@security.public
def getPrecision(self, result=None):
    """Returns the precision for the Analysis.

    - If ManualUncertainty is set, calculates the precision of the result
      in accordance with the manual uncertainty set.

    - If Calculate Precision from Uncertainty is set in Analysis Service,
      calculates the precision in accordance with the uncertainty infered
      from uncertainties ranges.

    - If neither Manual Uncertainty nor Calculate Precision from
      Uncertainty are set, returns the precision from the Analysis Service

    - If you have a number with zero uncertainty: If you roll a pair of
      dice and observe five spots, the number of spots is 5. This is a raw
      data point, with no uncertainty whatsoever. So just write down the
      number. Similarly, the number of centimeters per inch is 2.54,
      by definition, with no uncertainty whatsoever. Again: just write
      down the number.

    Further information at AbstractBaseAnalysis.getPrecision()

    :param result: optional result used to infer the uncertainty when
        precision-from-uncertainty applies
    :returns: the precision (number of decimals) for this analysis
    """
    allow_manual = self.getAllowManualUncertainty()
    precision_unc = self.getPrecisionFromUncertainty()
    if allow_manual or precision_unc:
        uncertainty = self.getUncertainty(result)
        if uncertainty is None:
            # No uncertainty applies: use the fixed 'Precision' field
            return self.getField('Precision').get(self)
        if uncertainty == 0 and result is None:
            # Zero uncertainty but no result to infer decimals from
            return self.getField('Precision').get(self)
        if uncertainty == 0:
            # Zero uncertainty: use the number of decimals of the raw
            # result itself as the precision
            # NOTE(review): find('.') on the reversed string returns -1
            # when the result has no decimal point — confirm callers
            # handle a -1 precision
            strres = str(result)
            numdecimals = strres[::-1].find('.')
            return numdecimals
        return get_significant_digits(uncertainty)
    return self.getField('Precision').get(self)
955
|
|
|
|
956
|
|
|
@security.public
def getAnalyst(self):
    """Return the analyst in charge of this analysis.

    Resolution order: the value stored in the "Analyst" field, then the
    analyst assigned to the worksheet, then the user who submitted the
    result. Returns an empty string when none of them is available.
    """
    analyst = self.getField("Analyst").get(self)
    if not analyst:
        analyst = self.getAssignedAnalyst()
    if not analyst:
        analyst = self.getSubmittedBy()
    return analyst or ""
964
|
|
|
|
965
|
|
|
@security.public
def getAssignedAnalyst(self):
    """Return the analyst of the worksheet this analysis is assigned to.

    Returns an empty string when the analysis is not on a worksheet or
    the worksheet has no analyst set.
    """
    worksheet = self.getWorksheet()
    if worksheet:
        return worksheet.getAnalyst() or ""
    return ""
974
|
|
|
|
975
|
|
|
@security.public
def getAnalystName(self):
    """Return the full name of the currently assigned analyst.

    Falls back to the analyst id when no user (or no non-empty
    "fullname" property) can be resolved. Returns an empty string when
    no analyst is assigned at all.
    """
    analyst = self.getAnalyst()
    if not analyst:
        return ""
    user = api.get_user(analyst.strip())
    if user:
        fullname = user.getProperty("fullname")
        if fullname:
            return fullname
    return analyst
984
|
|
|
|
985
|
|
|
@security.public
def getSubmittedBy(self):
    """Return the id of the user who performed the last 'submit'
    transition of this analysis.

    Only meaningful once the analysis reached the "to_be_verified" or
    "verified" state.

    :return: the user_id of the user who did the last submission of result
    """
    return getTransitionActor(self, 'submit')
993
|
|
|
|
994
|
|
|
@security.public
def getDateSubmitted(self):
    """Return the time when the result of this analysis was submitted.

    :return: the date of the last 'submit' transition
    :rtype: DateTime
    """
    return getTransitionDate(self, 'submit', return_as_datetime=True)
1001
|
|
|
|
1002
|
|
|
@security.public
def getDateVerified(self):
    """Return the time this analysis was verified, or None when the
    analysis has not been verified yet.

    :return: the date of the last 'verify' transition or None
    :rtype: DateTime
    """
    return getTransitionDate(self, 'verify', return_as_datetime=True)
1010
|
|
|
|
1011
|
|
|
@security.public
def getStartProcessDate(self):
    """Return the datetime when this analysis became ready to be
    processed.

    The base implementation returns the creation date of the object.
    Subclasses may override this (e.g. routine analyses use the
    "Date Received" instead).

    :return: Date time when the analysis is ready to be processed.
    :rtype: DateTime
    """
    return self.created()
1021
|
|
|
|
1022
|
|
|
@security.public
def getParentURL(self):
    """Return the URL path of this analysis' parent.

    Used to populate catalog values. Returns None when the analysis has
    no acquisition parent.
    """
    container = self.aq_parent
    return container.absolute_url_path() if container else None
1030
|
|
|
|
1031
|
|
|
@security.public
def getWorksheetUID(self):
    """Return the UID of the worksheet this analysis is assigned to.

    Used to populate catalog values. Returns None when the analysis is
    not assigned to any worksheet.
    """
    worksheet = self.getWorksheet()
    return worksheet.UID() if worksheet else None
1039
|
|
|
|
1040
|
|
|
@security.public
def getWorksheet(self):
    """Return the Worksheet this analysis belongs to, or None.

    An analysis is expected to live in at most one worksheet. If more
    than one back reference exists, an error is logged and the first
    worksheet is returned anyway.
    """
    worksheets = self.getBackReferences('WorksheetAnalysis')
    if not worksheets:
        return None
    if len(worksheets) > 1:
        logger.error(
            "Analysis %s is assigned to more than one worksheet."
            % self.getId())
    return worksheets[0]
1052
|
|
|
|
1053
|
|
|
@security.public
def remove_duplicates(self, ws):
    """Remove from the worksheet the duplicates created for this analysis.

    Called when this analysis is unassigned from a worksheet, so the
    DuplicateAnalysis objects that point to it do not linger in the ws.

    :param ws: the worksheet to purge
    """
    my_uid = self.UID()
    for item in ws.objectValues():
        if not IDuplicateAnalysis.providedBy(item):
            continue
        if item.getAnalysis().UID() == my_uid:
            ws.removeAnalysis(item)
1062
|
|
|
|
1063
|
|
|
def setInterimValue(self, keyword, value):
    """Assign a value to the interim field identified by keyword.

    None and empty values are stored as '' (a numeric zero is kept),
    and the stored value is always the stripped string representation.

    :param keyword: the keyword of the interim
    :param value: the value for the interim
    """
    # Ensure result integrity regards to None, empty and 0 values
    if not value and value != 0:
        value = ''
    normalized = str(value).strip()
    interims = self.getInterimFields()
    for record in interims:
        if record['keyword'] != keyword:
            continue
        record['value'] = normalized
        self.setInterimFields(interims)
        return

    logger.warning("Interim '{}' for analysis '{}' not found"
                   .format(keyword, self.getKeyword()))
1079
|
|
|
|
1080
|
|
|
def getInterimValue(self, keyword):
    """Return the value of the interim field with the given keyword.

    Returns None (and logs a message) when no interim with that keyword
    exists or when the keyword is ambiguous (more than one match).

    :param keyword: the keyword of the interim
    :return: the interim value, '' when the interim has no value set,
        or None on a missing/ambiguous keyword
    """
    # Materialize as a list: a lazy `filter` object (Python 3) is always
    # truthy and does not support len()/indexing, which breaks the
    # checks below
    interims = [item for item in self.getInterimFields()
                if item["keyword"] == keyword]
    if not interims:
        logger.warning("Interim '{}' for analysis '{}' not found"
                       .format(keyword, self.getKeyword()))
        return None
    if len(interims) > 1:
        logger.error("More than one interim '{}' found for '{}'"
                     .format(keyword, self.getKeyword()))
        return None
    return interims[0].get('value', '')
1094
|
|
|
|
1095
|
|
|
def isRetest(self):
    """Return whether this analysis is a retest of another one.

    :rtype: bool
    """
    return bool(self.getRetestOf())
1099
|
|
|
|
1100
|
|
|
def getRetestOfUID(self):
    """Return the UID of the retracted analysis this is a retest of.

    Returns None when this analysis is not a retest.
    """
    source = self.getRetestOf()
    if not source:
        return None
    return api.get_uid(source)
1106
|
|
|
|
1107
|
|
|
def getRetest(self):
    """Return the retest generated from this analysis, if any.

    Resolved through the back references of the "<portal_type>RetestOf"
    relationship. When several retests exist, a warning is logged and
    the first one is used. Returns None when no retest exists or the
    referenced object cannot be resolved by UID.
    """
    relationship = "{}RetestOf".format(self.portal_type)
    back_refs = get_backreferences(self, relationship)
    if not back_refs:
        return None
    if len(back_refs) > 1:
        # at most one retest per analysis is expected
        logger.warn("Analysis {} with multiple retests".format(self.id))
    uid = back_refs[0]
    obj = api.get_object_by_uid(uid, default=None)
    if obj is None:
        logger.error("Retest with UID {} not found".format(uid))
    return obj
1121
|
|
|
|