# -*- coding: utf-8 -*-
#
# This file is part of SENAITE.CORE.
#
# SENAITE.CORE is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright 2018-2024 by its authors.
# Some rights reserved, see README and LICENSE.

import cgi
import copy
import json
import math
from decimal import Decimal
from six import string_types

from AccessControl import ClassSecurityInfo
from bika.lims import api
from bika.lims import bikaMessageFactory as _
from bika.lims import deprecated
from bika.lims import logger
from bika.lims.browser.fields import HistoryAwareReferenceField
from bika.lims.browser.fields import InterimFieldsField
from bika.lims.browser.fields import ResultRangeField
from bika.lims.browser.fields import UIDReferenceField
from bika.lims.browser.fields.uidreferencefield import get_backreferences
from bika.lims.browser.widgets import RecordsWidget
from bika.lims.config import LDL
from bika.lims.config import UDL
from bika.lims.content.abstractbaseanalysis import AbstractBaseAnalysis
from bika.lims.content.abstractbaseanalysis import schema
from bika.lims.interfaces import IDuplicateAnalysis
from senaite.core.permissions import FieldEditAnalysisResult
from senaite.core.permissions import ViewResults
from bika.lims.utils import formatDecimalMark
from bika.lims.utils.analysis import format_numeric_result
from bika.lims.utils.analysis import get_significant_digits
from bika.lims.workflow import getTransitionActor
from bika.lims.workflow import getTransitionDate
from DateTime import DateTime
from senaite.core.browser.fields.datetime import DateTimeField
from Products.Archetypes.Field import IntegerField
from Products.Archetypes.Field import StringField
from Products.Archetypes.references import HoldingReference
from Products.Archetypes.Schema import Schema
from Products.CMFCore.permissions import View

# A link directly to the AnalysisService object used to create the analysis
AnalysisService = UIDReferenceField(
    'AnalysisService'
)

# Attachments which are added manually in the UI, or automatically when
# results are imported from a file supplied by an instrument.
Attachment = UIDReferenceField(
    'Attachment',
    multiValued=1,
    allowed_types=('Attachment',),
    relationship='AnalysisAttachment'
)

# The final result of the analysis is stored here. The field contains a
# String value, but the result itself is required to be numeric. If
# a non-numeric result is needed, ResultOptions can be used.
Result = StringField(
    'Result',
    read_permission=ViewResults,
    write_permission=FieldEditAnalysisResult,
)

# When the result is changed, this value is updated to the current time.
# Only the most recent result capture date is recorded here and used to
# populate catalog values; however, the workflow review_history can be
# used to get all dates of result capture
ResultCaptureDate = DateTimeField(
    'ResultCaptureDate',
    read_permission=View,
    write_permission=FieldEditAnalysisResult,
    max="current",
)

# Returns the retracted analysis this analysis is a retest of
RetestOf = UIDReferenceField(
    'RetestOf',
    relationship="AnalysisRetestOf",
)

# If the result is outside of the detection limits of the method or instrument,
# the operand (< or >) is stored here. For routine analyses this is taken
# from the Result, if the result entered explicitly starts with "<" or ">"
DetectionLimitOperand = StringField(
    'DetectionLimitOperand',
    read_permission=View,
    write_permission=FieldEditAnalysisResult,
)

# The ID of the logged in user who submitted the result for this Analysis.
Analyst = StringField(
    'Analyst'
)

# The actual uncertainty for this analysis' result, populated from the ranges
# specified in the analysis service when the result is submitted.
Uncertainty = StringField(
    'Uncertainty',
    read_permission=View,
    write_permission="Field: Edit Result",
    precision=10,
)

# The number of verifications required before this analysis can be
# transitioned to a 'verified' state. This value is set automatically
# when the analysis is created, based on the value set for the property
# NumberOfRequiredVerifications from the Analysis Service
NumberOfRequiredVerifications = IntegerField(
    'NumberOfRequiredVerifications',
    default=1
)

# Routine Analyses and Reference Analyses have a versioned link to
# the calculation at creation time.
Calculation = HistoryAwareReferenceField(
    'Calculation',
    read_permission=View,
    write_permission=FieldEditAnalysisResult,
    allowed_types=('Calculation',),
    relationship='AnalysisCalculation',
    referenceClass=HoldingReference
)

# InterimFields are defined in Calculations, Services, and Analyses.
# In Analysis Services, the default values are taken from Calculation.
# In Analyses, the default values are taken from the Analysis Service.
# When instrument results are imported, the values in analysis are overridden
# before the calculation is performed.
InterimFields = InterimFieldsField(
    'InterimFields',
    read_permission=View,
    write_permission=FieldEditAnalysisResult,
    schemata='Method',
    widget=RecordsWidget(
        label=_("Calculation Interim Fields"),
        description=_(
            "Values can be entered here which will override the defaults "
            "specified in the Calculation Interim Fields."),
    )
)

# Results Range that applies to this analysis
ResultsRange = ResultRangeField(
    "ResultsRange",
    required=0
)

schema = schema.copy() + Schema((
    AnalysisService,
    Analyst,
    Attachment,
    DetectionLimitOperand,
    # NumberOfRequiredVerifications overrides AbstractBaseClass
    NumberOfRequiredVerifications,
    Result,
    ResultCaptureDate,
    RetestOf,
    Uncertainty,
    Calculation,
    InterimFields,
    ResultsRange,
))


class AbstractAnalysis(AbstractBaseAnalysis):
    security = ClassSecurityInfo()
    displayContentsTab = False
    schema = schema

    @deprecated('[1705] Currently returns the Analysis object itself. If you '
                'need to get the service, use getAnalysisService instead')
    @security.public
    def getService(self):
        return self

    def getServiceUID(self):
        """Return the UID of the associated service.
        """
        return self.getRawAnalysisService()

    @security.public
    def getNumberOfVerifications(self):
        return len(self.getVerificators())

    @security.public
    def getNumberOfRemainingVerifications(self):
        required = self.getNumberOfRequiredVerifications()
        done = self.getNumberOfVerifications()
        if done >= required:
            return 0
        return required - done
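
    # Illustrative note (not part of the original code): with
    # NumberOfRequiredVerifications = 2 and one verification already recorded,
    # getNumberOfRemainingVerifications() returns 2 - 1 = 1; once two or more
    # verifications are recorded, it returns 0.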

    # TODO Workflow - analysis. Remove?
    @security.public
    def getLastVerificator(self):
        verifiers = self.getVerificators()
        return verifiers and verifiers[-1] or None

    @security.public
    def getVerificators(self):
        """Returns the user ids of the users that verified this analysis
        """
        verifiers = list()
        actions = ["retest", "verify", "multi_verify"]
        for event in api.get_review_history(self, rev=False):
            if event.get("review_state") == "verified":
                # include all transitions whose end state is 'verified'
                verifiers.append(event["actor"])
            elif event.get("action") in actions:
                # include some transitions whose end state is not 'verified'
                verifiers.append(event["actor"])
        return verifiers

    @security.public
    def getDefaultUncertainty(self, result=None):
        """Return the uncertainty value, if the result falls within
        specified ranges for the service from which this analysis was derived.
        """

        if result is None:
            result = self.getResult()

        uncertainties = self.getUncertainties()
        if uncertainties:
            try:
                res = float(result)
            except (TypeError, ValueError):
                # if analysis result is not a number, then we assume in range
                return None

            for d in uncertainties:

                # convert to min/max
                unc_min = api.to_float(d["intercept_min"], default=0)
                unc_max = api.to_float(d["intercept_max"], default=0)

                if unc_min <= res and res <= unc_max:
                    _err = str(d["errorvalue"]).strip()
                    if _err.endswith("%"):
                        try:
                            percvalue = float(_err.replace("%", ""))
                        except ValueError:
                            return None
                        # calculate uncertainty from result
                        uncertainty = res / 100 * percvalue
                    else:
                        uncertainty = api.to_float(_err, default=0)

                    # convert back to string value
                    return api.float_to_string(uncertainty, default=None)
        return None
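
    # Illustrative example for getDefaultUncertainty() above (the range values
    # are assumptions, not taken from the original code): given a service
    # uncertainty entry such as
    #   {"intercept_min": "0", "intercept_max": "10", "errorvalue": "10%"}
    # a result of "5.0" falls within [0, 10], so the uncertainty becomes
    # 5.0 / 100 * 10 = 0.5 and is returned as the string "0.5". With an
    # absolute "errorvalue" of "0.2" (no percent sign), the same result would
    # simply return "0.2".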

    @security.public
    def getUncertainty(self, result=None):
        """Returns the uncertainty for this analysis and result.

        Returns the value from Schema's Uncertainty field if the Service has
        the option 'Allow manual uncertainty'.
        Otherwise, falls back to getDefaultUncertainty().

        Returns None if no result is specified and the current result for this
        analysis is below or above the detection limits.
        """
        uncertainty = self.getField("Uncertainty").get(self)
        if result is None:
            if self.isAboveUpperDetectionLimit():
                return None
            if self.isBelowLowerDetectionLimit():
                return None

        if uncertainty and self.getAllowManualUncertainty():
            return api.float_to_string(uncertainty, default=None)

        return self.getDefaultUncertainty(result)

    @security.public
    def setUncertainty(self, unc):
        """Sets the uncertainty for this analysis

        If the result is a Detection Limit or the value is below the LDL or
        above the UDL, the uncertainty is set to None
        """
        # Uncertainty calculation on DL
        # https://jira.bikalabs.com/browse/LIMS-1808
        if self.isAboveUpperDetectionLimit():
            unc = None
        if self.isBelowLowerDetectionLimit():
            unc = None

        field = self.getField("Uncertainty")
        field.set(self, api.float_to_string(unc, default=None))

    @security.public
    def setDetectionLimitOperand(self, value):
        """Set detection limit operand for this analysis
        Allowed detection limit operands are `<` and `>`.
        """
        manual_dl = self.getAllowManualDetectionLimit()
        selector = self.getDetectionLimitSelector()
        if not manual_dl and not selector:
            # Don't allow the user to set the limit operand if manual assignment
            # is not allowed and selector is not visible
            return

        # Changing the detection limit operand has a side effect on the result
        result = self.getResult()
        if value in [LDL, UDL]:
            # flush uncertainty
            self.setUncertainty("")

            # If no previous result or the user is not allowed to manually set
            # the detection limit, override the result with the default LDL/UDL
            has_result = api.is_floatable(result)
            if not has_result or not manual_dl:
                # set the result according to the system default UDL/LDL values
                if value == LDL:
                    result = self.getLowerDetectionLimit()
                else:
                    result = self.getUpperDetectionLimit()

        else:
            value = ""

        # Set the result
        self.getField("Result").set(self, result)

        # Set the detection limit to the field
        self.getField("DetectionLimitOperand").set(self, value)

    # Method getLowerDetectionLimit overrides method of class BaseAnalysis
    @security.public
    def getLowerDetectionLimit(self):
        """Returns the Lower Detection Limit (LDL) that applies to this
        analysis in particular. If no value is set or the analysis service
        doesn't allow manual input of detection limits, returns the value set
        by default in the Analysis Service
        """
        if self.isLowerDetectionLimit():
            result = self.getResult()
            if api.is_floatable(result):
                return result

            logger.warn("The result for the analysis %s is a lower detection "
                        "limit, but not floatable: '%s'. Returning AS's "
                        "default LDL." % (self.id, result))
        return AbstractBaseAnalysis.getLowerDetectionLimit(self)

    # Method getUpperDetectionLimit overrides method of class BaseAnalysis
    @security.public
    def getUpperDetectionLimit(self):
        """Returns the Upper Detection Limit (UDL) that applies to this
        analysis in particular. If no value is set or the analysis service
        doesn't allow manual input of detection limits, returns the value set
        by default in the Analysis Service
        """
        if self.isUpperDetectionLimit():
            result = self.getResult()
            if api.is_floatable(result):
                return result

            logger.warn("The result for the analysis %s is an upper detection "
                        "limit, but not floatable: '%s'. Returning AS's "
                        "default UDL." % (self.id, result))
        return AbstractBaseAnalysis.getUpperDetectionLimit(self)

    @security.public
    def isBelowLowerDetectionLimit(self):
        """Returns True if the result is below the Lower Detection Limit or
        if Lower Detection Limit has been manually set
        """
        if self.isLowerDetectionLimit():
            return True

        result = self.getResult()
        if result and str(result).strip().startswith(LDL):
            return True

        if api.is_floatable(result):
            ldl = self.getLowerDetectionLimit()
            return api.to_float(result) < api.to_float(ldl, 0.0)

        return False

    @security.public
    def isAboveUpperDetectionLimit(self):
        """Returns True if the result is above the Upper Detection Limit or
        if Upper Detection Limit has been manually set
        """
        if self.isUpperDetectionLimit():
            return True

        result = self.getResult()
        if result and str(result).strip().startswith(UDL):
            return True

        if api.is_floatable(result):
            udl = self.getUpperDetectionLimit()
            return api.to_float(result) > api.to_float(udl, 0.0)

        return False

    # TODO: REMOVE: nowhere used
    @deprecated("This Method will be removed in version 2.5")
    @security.public
    def getDetectionLimits(self):
        """Returns a two-value array with the limits of detection (LDL and
        UDL) that apply to this analysis in particular. If no value is set or
        the analysis service doesn't allow manual input of detection limits,
        returns the values set by default in the Analysis Service
        """
        ldl = self.getLowerDetectionLimit()
        udl = self.getUpperDetectionLimit()
        return [api.to_float(ldl, 0.0), api.to_float(udl, 0.0)]

    @security.public
    def isLowerDetectionLimit(self):
        """Returns True if the result for this analysis represents a Lower
        Detection Limit. Otherwise, returns False
        """
        return self.getDetectionLimitOperand() == LDL

    @security.public
    def isUpperDetectionLimit(self):
        """Returns True if the result for this analysis represents an Upper
        Detection Limit. Otherwise, returns False
        """
        return self.getDetectionLimitOperand() == UDL

    @security.public
    def getDependents(self):
        """Return a list of analyses that depend on us to calculate their result
        """
        raise NotImplementedError("getDependents is not implemented.")

    @security.public
    def getDependencies(self, with_retests=False):
        """Return a list of siblings we depend on to calculate our result.
        :param with_retests: If false, siblings with retests are dismissed
        :type with_retests: bool
        :return: Analyses the current analysis depends on
        :rtype: list of IAnalysis
        """
        raise NotImplementedError("getDependencies is not implemented.")

    @security.public
    def setResult(self, value):
        """Validate and set a value into the Result field, taking into
        account the Detection Limits.
        :param value: is expected to be a string.
        """
        # Convert to a list if the analysis has result options set with multi
        if self.getResultOptions() and "multi" in self.getResultOptionsType():
            if not isinstance(value, (list, tuple)):
                value = filter(None, [value])

        # Handle list results
        if isinstance(value, (list, tuple)):
            value = json.dumps(value)

        # Ensure result integrity with regard to None, empty and 0 values
        val = str("" if not value and value != 0 else value).strip()

        # Check if a string result is expected
        string_result = self.getStringResult()

        # UDL/LDL directly entered in the results field
        if not string_result and val[:1] in [LDL, UDL]:
            # Strip off the detection limit operand from the result
            operand = val[0]
            val = val.replace(operand, "", 1).strip()

            # Result becomes the detection limit
            selector = self.getDetectionLimitSelector()
            allow_manual = self.getAllowManualDetectionLimit()
            if any([selector, allow_manual]):

                # Set the detection limit operand
                self.setDetectionLimitOperand(operand)

                if not allow_manual:
                    # Manual introduction of DL is not permitted
                    if operand == LDL:
                        # Result is default LDL
                        val = self.getLowerDetectionLimit()
                    else:
                        # Result is default UDL
                        val = self.getUpperDetectionLimit()

        elif not self.getDetectionLimitSelector():
            # User cannot choose the detection limit from a selection list,
            # but might be allowed to manually enter the dl with the result.
            # If so, reset the detection limit operand, because the previously
            # entered result might be a DL, but the current one isn't
            self.setDetectionLimitOperand("")

        # Set the result field
        self.getField("Result").set(self, val)

    @security.public
    def calculateResult(self, override=False, cascade=False):
        """Calculates the result for the current analysis if it depends on
        other analyses/interim fields. Otherwise, does nothing
        """
        if self.getResult() and override is False:
            return False

        calc = self.getCalculation()
        if not calc:
            return False

        # get the formula from the calculation
        formula = calc.getMinifiedFormula()

        # Include the current context UID in the mapping, so it can be passed
        # as a param in built-in functions, like 'get_result(%(context_uid)s)'
        mapping = {"context_uid": '"{}"'.format(self.UID())}

        # Interims' priority order (from low to high):
        # Calculation < Analysis
        interims = calc.getInterimFields() + self.getInterimFields()

        # Add interims to mapping
        for i in interims:

            interim_keyword = i.get("keyword")
            if not interim_keyword:
                continue

            # skip unset values
            interim_value = i.get("value", "")
            if interim_value == "":
                continue

            # Convert to floatable if necessary
            if api.is_floatable(interim_value):
                interim_value = float(interim_value)
            else:
                # If the interim value is a string, since the formula is also
                # a string, the string interim values need to be wrapped in
                # quotes (inverted commas).
                #
                # E.g. formula = '"ok" if %(var)s == "example_value" else "not ok"'
                #
                # if interim_value = "example_value" after
                # formula = eval("'%s'%%mapping" % formula, {'mapping': {'var': interim_value}})
                # print(formula)
                # > '"ok" if example_value == "example_value" else "not ok"' -> Error
                #
                # else if interim_value = '"example_value"' after
                # formula = eval("'%s'%%mapping" % formula, {'mapping': {'var': interim_value}})
                # print(formula)
                # > '"ok" if "example_value" == "example_value" else "not ok"' -> Correct
                interim_value = '"{}"'.format(interim_value)

            # Convert 'Numeric' interim values using `float`. Convert the rest
            # using `str`
            converter = "s" if i.get("result_type") else "f"
            formula = formula.replace(
                "[" + interim_keyword + "]", "%(" + interim_keyword + ")" + converter
            )

            mapping[interim_keyword] = interim_value

        # Add dependencies results to mapping
        dependencies = self.getDependencies()
        for dependency in dependencies:
            result = dependency.getResult()
            # check if the dependency is a string result
            str_result = dependency.getStringResult()
            keyword = dependency.getKeyword()
            if not result:
                # Dependency without results found
                if cascade:
                    # Try to calculate the dependency result
                    dependency.calculateResult(override, cascade)
                    result = dependency.getResult()
                else:
                    return False
            if result:
                try:
                    # we need to quote a string result because of the `eval` below
                    result = '"%s"' % result if str_result else float(str(result))
                    key = dependency.getKeyword()
                    ldl = dependency.getLowerDetectionLimit()
                    udl = dependency.getUpperDetectionLimit()
                    bdl = dependency.isBelowLowerDetectionLimit()
                    adl = dependency.isAboveUpperDetectionLimit()
                    mapping[key] = result
                    mapping['%s.%s' % (key, 'RESULT')] = result
                    mapping['%s.%s' % (key, 'LDL')] = api.to_float(ldl, 0.0)
                    mapping['%s.%s' % (key, 'UDL')] = api.to_float(udl, 0.0)
                    mapping['%s.%s' % (key, 'BELOWLDL')] = int(bdl)
                    mapping['%s.%s' % (key, 'ABOVEUDL')] = int(adl)
                except (TypeError, ValueError):
                    return False

            # replace placeholder -> formatting string
            # https://docs.python.org/2.7/library/stdtypes.html?highlight=built#string-formatting-operations
            converter = "s" if str_result else "f"
            formula = formula.replace("[" + keyword + "]", "%(" + keyword + ")" + converter)

        # convert any remaining placeholders, e.g. from interims etc.
        # NOTE: we assume remaining values are all floatable!
        formula = formula.replace("[", "%(").replace("]", ")f")

        # Calculate
        try:
            formula = eval("'%s'%%mapping" % formula,
                           {"__builtins__": None,
                            'math': math,
                            'context': self},
                           {'mapping': mapping})
            result = eval(formula, calc._getGlobals())
        except ZeroDivisionError:
            self.setResult('0/0')
            return True
        except (KeyError, TypeError, ImportError) as e:
            msg = "Cannot eval formula ({}): {}".format(e.message, formula)
            logger.error(msg)
            self.setResult("NA")
            return True

        self.setResult(str(result))
        return True
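
    # Illustrative walk-through for calculateResult() above (keywords and
    # results are assumptions, not from the original code): for a calculation
    # formula "[Ca] + [Mg]" with dependencies "Ca" = "5" and "Mg" = "3", the
    # placeholder substitution yields "%(Ca)f + %(Mg)f", the interpolation
    # against the mapping produces the string "5.000000 + 3.000000", and the
    # final eval() returns 8.0, which is stored via setResult("8.0").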

    @security.public
    def getVATAmount(self):
        """Compute the VAT amount without member discount.
        :return: the VAT amount as a Decimal
        """
        vat = self.getVAT()
        price = self.getPrice()
        return Decimal(price) * Decimal(vat) / 100
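
    # Worked example (illustrative, assumed figures): with a price of "100.00"
    # and a VAT rate of "21", getVATAmount() evaluates
    # Decimal("100.00") * Decimal("21") / 100, i.e. a Decimal of value 21.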

    @security.public
    def getTotalPrice(self):
        """Obtain the total price without the client's member discount. The
        function takes the client's bulk discount into account.
        :return: the total price as a Decimal
        """
        return Decimal(self.getPrice()) + Decimal(self.getVATAmount())

    @security.public
    def getDuration(self):
        """Returns the time in minutes taken for this analysis.
        If the analysis is not yet 'ready to process', returns 0
        If the analysis has already been verified:
        duration = date_verified - date_start_process
        Otherwise (still in progress):
        duration = current_datetime - date_start_process
        :return: time in minutes taken for this analysis
        :rtype: int
        """
        starttime = self.getStartProcessDate()
        if not starttime:
            # The analysis is not yet ready to be processed
            return 0
        endtime = self.getDateVerified() or DateTime()

        # Duration in minutes
        duration = (endtime - starttime) * 24 * 60
        return duration
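
    # Illustrative example for getDuration() above (assumed dates): the
    # subtraction of two Zope DateTime objects yields a difference in days,
    # so a start at 2024/01/01 08:00 and a verification at 2024/01/01 10:30
    # gives 2.5 hours = 0.104166... days, returned as
    # 0.104166... * 24 * 60 = 150 minutes.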

    @security.public
    def getEarliness(self):
        """The remaining time in minutes for this analysis to be completed.
        Returns zero if the analysis is not yet 'ready to process' or no
        turnaround time is set.
        earliness = max_turnaround_time - duration
        The analysis is late if the earliness is negative
        :return: the remaining time in minutes before the analysis reaches TAT
        :rtype: int
        """
        maxtime = self.getMaxTimeAllowed()
        if not maxtime:
            # No Turnaround time is set for this analysis
            return 0
        return api.to_minutes(**maxtime) - self.getDuration()
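
    # Illustrative example for getEarliness() above (assumed turnaround time):
    # with a maximum turnaround of {"days": 1, "hours": 0, "minutes": 0}
    # (1440 minutes) and a duration of 1500 minutes, getEarliness() returns
    # 1440 - 1500 = -60, so isLateAnalysis() is True and getLateness()
    # reports 60 minutes.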

    @security.public
    def isLateAnalysis(self):
        """Returns true if the analysis is late in accordance with the maximum
        turnaround time. If no maximum turnaround time is set for this analysis
        or it is not yet ready to be processed, or there is still time
        remaining (earliness), returns False.
        :return: true if the analysis is late
        :rtype: bool
        """
        return self.getEarliness() < 0

    @security.public
    def getLateness(self):
        """The time in minutes that exceeds the maximum turnaround set for this
        analysis. If the analysis has no turnaround time set or is not ready
        for processing yet, returns 0. The analysis is not late if the lateness
        is negative
        :return: the time in minutes that exceeds the maximum turnaround time
        :rtype: int
        """
        return -self.getEarliness()

    @security.public
    def isInstrumentAllowed(self, instrument):
        """Checks if the specified instrument can be set for this analysis.

        :param instrument: UID string or Instrument object
        :return: True if the assignment of the passed in instrument is allowed
        :rtype: bool
        """
        uid = api.get_uid(instrument)
        return uid in self.getRawAllowedInstruments()

    @security.public
    def isMethodAllowed(self, method):
        """Checks if the analysis can follow the method specified

        :param method: UID string or Method object
        :return: True if the analysis can follow the method specified
        :rtype: bool
        """
        uid = api.get_uid(method)
        return uid in self.getRawAllowedMethods()

    @security.public
    def getAllowedMethods(self):
        """Returns the allowed methods for this analysis, whether the method
        was assigned directly (by using "Allows manual entry of results") or
        indirectly via Instrument ("Allows instrument entry of results") in
        Analysis Service Edit View.
        :return: A list with the methods allowed for this analysis
        :rtype: list of Methods
        """
        service = self.getAnalysisService()
        if not service:
            return []
        # get the available methods of the service
        return service.getMethods()

    @security.public
    def getRawAllowedMethods(self):
        """Returns the UIDs of the allowed methods for this analysis
        """
        service = self.getAnalysisService()
        if not service:
            return []
        return service.getRawMethods()

    @security.public
    def getAllowedInstruments(self):
        """Returns the allowed instruments from the service

        :return: A list of instruments allowed for this Analysis
        :rtype: list of instruments
        """
        service = self.getAnalysisService()
        if not service:
            return []
        return service.getInstruments()

    @security.public
    def getRawAllowedInstruments(self):
        """Returns the UIDs of the allowed instruments from the service
        """
        service = self.getAnalysisService()
        if not service:
            return []
        return service.getRawInstruments()

    @security.public
    def getExponentialFormatPrecision(self, result=None):
        """Returns the precision for the Analysis Service and result
        provided. Results with a precision value above this exponential
        format precision should be formatted as scientific notation.

        If the Calculate Precision according to Uncertainty is not set,
        the method will return the exponential precision value set in the
        Schema. Otherwise, will calculate the precision value according to
        the Uncertainty and the result.

        If Calculate Precision from the Uncertainty is set but no result is
        provided or no uncertainty values are set, returns the fixed
        exponential precision.

        Will return negative values if the result is below 1 and will return
        0 or positive values if the result is 1 or above.

        Given an analysis service with fixed exponential format
        precision of 4:
        Result       Uncertainty     Returns
        5.234        0.22            0
        13.5         1.34            1
        0.0077       0.008          -3
        32092        0.81            4
        456021       423             5

        For further details, visit https://jira.bikalabs.com/browse/LIMS-1334

        :param result: if provided and "Calculate Precision according to the
        Uncertainty" is set, the result will be used to retrieve the
        uncertainty from which the precision must be calculated. Otherwise,
        the fixed-precision will be used.
        :returns: the precision
        """
        if not result or self.getPrecisionFromUncertainty() is False:
            return self._getExponentialFormatPrecision()
        else:
            uncertainty = self.getUncertainty(result)
            if uncertainty is None:
                return self._getExponentialFormatPrecision()

            try:
                float(result)
            except ValueError:
                # if analysis result is not a number, then we assume in range
                return self._getExponentialFormatPrecision()

            return get_significant_digits(uncertainty)

    def _getExponentialFormatPrecision(self):
        field = self.getField('ExponentialFormatPrecision')
        value = field.get(self)
        if value is None:
            # https://github.com/bikalims/bika.lims/issues/2004
            # We require the field, because None values make no sense at all.
            value = self.Schema().getField(
                'ExponentialFormatPrecision').getDefault(self)
        return value

    @security.public
    def getFormattedResult(self, specs=None, decimalmark='.', sciformat=1,
                           html=True):
        """Formatted result:
        0. If the result type is StringResult, return it without being
           formatted
        1. If the result is a detection limit, returns '< LDL' or '> UDL'
        2. Print ResultText of matching ResultOptions
        3. If the result is not floatable, return it without being formatted
        4. If the analysis specs have hidemin or hidemax enabled and the
           result is out of range, render result as '<min' or '>max'
        5. If the result is below Lower Detection Limit, show '<LDL'
        6. If the result is above Upper Detection Limit, show '>UDL'
        7. Otherwise, render numerical value
        :param specs: Optional result specifications, a dictionary as follows:
            {'min': <min_val>,
             'max': <max_val>,
             'error': <error>,
             'hidemin': <hidemin_val>,
             'hidemax': <hidemax_val>}
        :param decimalmark: The string to be used as a decimal separator.
            default is '.'
        :param sciformat: 1. The sci notation has to be formatted as aE^+b
            2. The sci notation has to be formatted as ax10^b
            3. As 2, but with super html entity for exp
            4. The sci notation has to be formatted as a·10^b
            5. As 4, but with super html entity for exp
            By default 1
        :param html: if true, returns a string with the special characters
            escaped: e.g. '<' and '>' (LDL and UDL for results like < 23.4).
        """
        result = self.getResult()

        # If result options, return text of matching option
        choices = self.getResultOptions()
        if choices:
            # Create a dict for easy mapping of result options
            values_texts = dict(map(
                lambda c: (str(c["ResultValue"]), c["ResultText"]), choices
            ))

            # Result might contain a single result option
            match = values_texts.get(str(result))
            if match:
                return match

            # Result might be a string with multiple options e.g. "['2', '1']"
            try:
                raw_result = json.loads(result)
                texts = map(lambda r: values_texts.get(str(r)), raw_result)
                texts = filter(None, texts)
                return "<br/>".join(texts)
            except (ValueError, TypeError):
                pass

        # If string result, return without any formatting
        if self.getStringResult():
            return cgi.escape(result) if html else result

        # If a detection limit, return '< LDL' or '> UDL'
        dl = self.getDetectionLimitOperand()
        if dl:
            try:
                res = api.float_to_string(float(result))
                fdm = formatDecimalMark(res, decimalmark)
                hdl = cgi.escape(dl) if html else dl
                return '%s %s' % (hdl, fdm)
            except (TypeError, ValueError):
                logger.warn(
                    "The result for the analysis %s is a detection limit, "
                    "but not floatable: %s" % (self.id, result))
                return formatDecimalMark(result, decimalmark=decimalmark)

        # If not floatable, return without any formatting
        try:
            result = float(result)
        except (TypeError, ValueError):
            return formatDecimalMark(result, decimalmark=decimalmark)

        # If specs are set, evaluate if out of range
        specs = specs if specs else self.getResultsRange()
        hidemin = specs.get('hidemin', '')
        hidemax = specs.get('hidemax', '')
        try:
            belowmin = hidemin and result < float(hidemin) or False
        except (TypeError, ValueError):
            belowmin = False
        try:
            abovemax = hidemax and result > float(hidemax) or False
        except (TypeError, ValueError):
            abovemax = False

        # If below min and hidemin enabled, return '<min'
        if belowmin:
            fdm = formatDecimalMark('< %s' % hidemin, decimalmark)
            return fdm.replace('< ', '&lt; ', 1) if html else fdm

        # If above max and hidemax enabled, return '>max'
        if abovemax:
            fdm = formatDecimalMark('> %s' % hidemax, decimalmark)
            return fdm.replace('> ', '&gt; ', 1) if html else fdm

        # If below LDL, return '< LDL'
        ldl = self.getLowerDetectionLimit()
        ldl = api.to_float(ldl, 0.0)
        if result < ldl:
            # LDL must not be formatted according to precision, etc.
            ldl = api.float_to_string(ldl)
            fdm = formatDecimalMark('< %s' % ldl, decimalmark)
            return fdm.replace('< ', '&lt; ', 1) if html else fdm

        # If above UDL, return '> UDL'
        udl = self.getUpperDetectionLimit()
        udl = api.to_float(udl, 0.0)
        if result > udl:
            # UDL must not be formatted according to precision, etc.
            udl = api.float_to_string(udl)
            fdm = formatDecimalMark('> %s' % udl, decimalmark)
            return fdm.replace('> ', '&gt; ', 1) if html else fdm

        # Render numerical values
        return format_numeric_result(self, self.getResult(),
                                     decimalmark=decimalmark,
                                     sciformat=sciformat)
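
    # Illustrative examples for getFormattedResult() above (assumed values):
    # a result option with ResultValue "1" and ResultText "Positive" renders
    # a stored result of "1" as "Positive"; a detection limit result with
    # operand "<" and value "0.5" renders as "&lt; 0.5" when html=True and as
    # "< 0.5" otherwise.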

    @security.public
    def getPrecision(self, result=None):
        """Returns the precision for the Analysis.

        - If ManualUncertainty is set, calculates the precision of the result
          in accordance with the manual uncertainty set.

        - If Calculate Precision from Uncertainty is set in Analysis Service,
          calculates the precision in accordance with the uncertainty inferred
          from uncertainties ranges.

        - If neither Manual Uncertainty nor Calculate Precision from
          Uncertainty are set, returns the precision from the Analysis Service

        - If you have a number with zero uncertainty: If you roll a pair of
          dice and observe five spots, the number of spots is 5. This is a raw
          data point, with no uncertainty whatsoever. So just write down the
          number. Similarly, the number of centimeters per inch is 2.54,
          by definition, with no uncertainty whatsoever. Again: just write
          down the number.

        Further information at AbstractBaseAnalysis.getPrecision()
        """
        allow_manual = self.getAllowManualUncertainty()
        precision_unc = self.getPrecisionFromUncertainty()
        if allow_manual or precision_unc:
            uncertainty = self.getUncertainty(result)
            if uncertainty is None:
                return self.getField("Precision").get(self)
            if api.to_float(uncertainty) == 0 and result is None:
                return self.getField("Precision").get(self)
            if api.to_float(uncertainty) == 0:
                strres = str(result)
                numdecimals = strres[::-1].find('.')
                return numdecimals

            uncertainty = api.to_float(uncertainty)
            # Get the 'raw' significant digits from uncertainty
            sig_digits = get_significant_digits(uncertainty)
            # Round the uncertainty to its significant digit.
            # Needed because the precision for the result has to be based on
            # the *rounded* uncertainty. Note the following for a given
            # uncertainty value:
            #   >>> round(0.09404, 2)
            #   0.09
            #   >>> round(0.09504, 2)
            #   0.1
            # The precision when the uncertainty is 0.09504 is not 2, but 1
            uncertainty = abs(round(uncertainty, sig_digits))
            # Return the significant digit to apply
            return get_significant_digits(uncertainty)

        return self.getField('Precision').get(self)
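
    # Illustrative example for getPrecision() above, following the rounding
    # note in the code: for an uncertainty of 0.09504,
    # get_significant_digits() first yields 2, round(0.09504, 2) gives 0.1,
    # and get_significant_digits(0.1) returns 1, so results are rendered with
    # one decimal place.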

    @security.public
    def getAnalyst(self):
        """Returns the stored Analyst or the user who submitted the result
        """
        analyst = self.getField("Analyst").get(self) or self.getAssignedAnalyst()
        if not analyst:
            analyst = self.getSubmittedBy()
        return analyst or ""

    @security.public
    def getAssignedAnalyst(self):
        """Returns the Analyst assigned to the worksheet this
        analysis is assigned to
        """
        worksheet = self.getWorksheet()
        if not worksheet:
            return ""
        return worksheet.getAnalyst() or ""

    @security.public
    def getSubmittedBy(self):
        """
        Returns the identifier of the user who submitted the result if the
        state of the current analysis is "to_be_verified" or "verified"
        :return: the user_id of the user who did the last submission of result
        """
        return getTransitionActor(self, 'submit')

    @security.public
    def getDateSubmitted(self):
        """Returns the time the result was submitted.
        :return: a DateTime object.
        :rtype: DateTime
        """
        return getTransitionDate(self, 'submit', return_as_datetime=True)

    @security.public
    def getDateVerified(self):
        """Returns the time the analysis was verified. If the analysis hasn't
        been verified yet, returns None
        :return: the time the analysis was verified or None
        :rtype: DateTime
        """
        return getTransitionDate(self, 'verify', return_as_datetime=True)

    @security.public
    def getStartProcessDate(self):
        """Returns the date time when the analysis is ready to be processed.
        It returns the datetime when the object was created, but might be
        different depending on the type of analysis (e.g. "Date Received" for
        routine analyses): see overridden functions.
        :return: Date time when the analysis is ready to be processed.
        :rtype: DateTime
        """
        return self.created()

    @security.public
    def getParentURL(self):
        """This method is used to populate catalog values
        This function returns the analysis' parent URL
        """
        parent = self.aq_parent
        if parent:
            return parent.absolute_url_path()

    @security.public
    def getWorksheetUID(self):
        """This method is used to populate catalog values
        Returns WS UID if this analysis is assigned to a worksheet, or None.
        """
        uids = get_backreferences(self, relationship="WorksheetAnalysis")
        if not uids:
            return None

        if len(uids) > 1:
            path = api.get_path(self)
            logger.error("More than one worksheet: {}".format(path))
            return None

        return uids[0]

    @security.public
    def getWorksheet(self):
        """Returns the Worksheet this analysis belongs to, or None
        """
        worksheet_uid = self.getWorksheetUID()
        return api.get_object_by_uid(worksheet_uid, None)

    @security.public
    def remove_duplicates(self, ws):
        """When this analysis is unassigned from a worksheet, this function
        is responsible for deleting DuplicateAnalysis objects from the ws.
        """
        for analysis in ws.objectValues():
            if IDuplicateAnalysis.providedBy(analysis) \
                    and analysis.getAnalysis().UID() == self.UID():
                ws.removeAnalysis(analysis)

    def setInterimValue(self, keyword, value):
        """Sets a value to an interim of this analysis
        :param keyword: the keyword of the interim
        :param value: the value for the interim
        """
        # Ensure value format integrity
        if value is None:
            value = ""
        elif isinstance(value, string_types):
            value = value.strip()
        elif isinstance(value, (list, tuple, set, dict)):
            value = json.dumps(value)

        # Update the matching interim and store the interims back to the field
        interims = copy.deepcopy(self.getInterimFields())
        for interim in interims:
            if interim.get("keyword") == keyword:
                interim["value"] = str(value)
        self.setInterimFields(interims)

    def getInterimValue(self, keyword):
        """Returns the value of an interim of this analysis
        """
        interims = filter(lambda item: item["keyword"] == keyword,
                          self.getInterimFields())
        if not interims:
            logger.warning("Interim '{}' for analysis '{}' not found"
                           .format(keyword, self.getKeyword()))
            return None
        if len(interims) > 1:
            logger.error("More than one interim '{}' found for '{}'"
                         .format(keyword, self.getKeyword()))
            return None
        return interims[0].get('value', '')
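
    # Illustrative example for the interim helpers above (the keyword is an
    # assumption): if the InterimFields of this analysis define the keyword
    # "dilution_factor", setInterimValue("dilution_factor", 10) stores the
    # value as the string "10" and getInterimValue("dilution_factor") returns
    # "10"; getInterimValue() on an undefined keyword logs a warning and
    # returns None.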

    def isRetest(self):
        """Returns whether this analysis is a retest or not
        """
        if self.getRawRetestOf():
            return True
        return False

    def getRetestOfUID(self):
        """Returns the UID of the retracted analysis this is a retest of
        """
        return self.getRawRetestOf()

    def getRawRetest(self):
        """Returns the UID of the retest that comes from this analysis, if any
        """
        relationship = self.getField("RetestOf").relationship
        uids = get_backreferences(self, relationship)
        if not uids:
            return None
        if len(uids) > 1:
            logger.warn("Analysis {} with multiple retests".format(self.id))
        return uids[0]

    def getRetest(self):
        """Returns the retest that comes from this analysis, if any
        """
        retest_uid = self.getRawRetest()
        return api.get_object(retest_uid, default=None)

    def isRetested(self):
        """Returns whether this analysis has been retested or not
        """
        if self.getRawRetest():
            return True
        return False