# -*- coding: utf-8 -*-
#
# This file is part of SENAITE.CORE.
#
# SENAITE.CORE is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright 2018-2019 by its authors.
# Some rights reserved, see README and LICENSE.

import cgi
import math
from decimal import Decimal

from AccessControl import ClassSecurityInfo
from DateTime import DateTime
from Products.Archetypes.Field import DateTimeField
from Products.Archetypes.Field import FixedPointField
from Products.Archetypes.Field import IntegerField
from Products.Archetypes.Field import StringField
from Products.Archetypes.Schema import Schema
from Products.Archetypes.references import HoldingReference
from Products.CMFCore.permissions import View
from Products.CMFCore.utils import getToolByName
from bika.lims import api
from bika.lims import bikaMessageFactory as _
from bika.lims import deprecated
from bika.lims import logger
from bika.lims import workflow as wf
from bika.lims.browser.fields import HistoryAwareReferenceField
from bika.lims.browser.fields import InterimFieldsField
from bika.lims.browser.fields import UIDReferenceField
from bika.lims.browser.fields.uidreferencefield import get_backreferences
from bika.lims.browser.widgets import RecordsWidget
from bika.lims.config import LDL
from bika.lims.config import UDL
from bika.lims.content.abstractbaseanalysis import AbstractBaseAnalysis
from bika.lims.content.abstractbaseanalysis import schema
from bika.lims.interfaces import IDuplicateAnalysis
from bika.lims.permissions import FieldEditAnalysisResult
from bika.lims.utils import drop_trailing_zeros_decimal
from bika.lims.utils import formatDecimalMark
from bika.lims.utils.analysis import format_numeric_result
from bika.lims.utils.analysis import get_significant_digits
from bika.lims.workflow import getTransitionActor
from bika.lims.workflow import getTransitionDate

# A link directly to the AnalysisService object used to create the analysis
AnalysisService = UIDReferenceField(
    'AnalysisService'
)

# Attachments which are added manually in the UI, or automatically when
# results are imported from a file supplied by an instrument.
Attachment = UIDReferenceField(
    'Attachment',
    multiValued=1,
    allowed_types=('Attachment',)
)

# The final result of the analysis is stored here. The field contains a
# String value, but the result itself is required to be numeric. If
# a non-numeric result is needed, ResultOptions can be used.
Result = StringField(
    'Result',
    read_permission=View,
    write_permission=FieldEditAnalysisResult,
)

# When the result is changed, this value is updated to the current time.
# Only the most recent result capture date is recorded here and used to
# populate catalog values, however the workflow review_history can be
# used to get all dates of result capture
ResultCaptureDate = DateTimeField(
    'ResultCaptureDate'
)

# Returns the retracted analysis this analysis is a retest of
RetestOf = UIDReferenceField(
    'RetestOf'
)

# If the result is outside of the detection limits of the method or instrument,
# the operand (< or >) is stored here. For routine analyses this is taken
# from the Result, if the result entered explicitly startswith "<" or ">"
DetectionLimitOperand = StringField(
    'DetectionLimitOperand',
    read_permission=View,
    write_permission=FieldEditAnalysisResult,
)

# The ID of the logged in user who submitted the result for this Analysis.
Analyst = StringField(
    'Analyst'
)

# The actual uncertainty for this analysis' result, populated from the ranges
# specified in the analysis service when the result is submitted.
Uncertainty = FixedPointField(
    'Uncertainty',
    read_permission=View,
    write_permission="Field: Edit Result",
    precision=10,
)

# The number of verifications required before this analysis can be
# transitioned to a 'verified' state. This value is set automatically
# when the analysis is created, based on the value set for the property
# NumberOfRequiredVerifications from the Analysis Service
NumberOfRequiredVerifications = IntegerField(
    'NumberOfRequiredVerifications',
    default=1
)

# Routine Analyses and Reference Analysis have a versioned link to
# the calculation at creation time.
Calculation = HistoryAwareReferenceField(
    'Calculation',
    allowed_types=('Calculation',),
    relationship='AnalysisCalculation',
    referenceClass=HoldingReference
)

# InterimFields are defined in Calculations, Services, and Analyses.
# In Analysis Services, the default values are taken from Calculation.
# In Analyses, the default values are taken from the Analysis Service.
# When instrument results are imported, the values in analysis are overridden
# before the calculation is performed.
InterimFields = InterimFieldsField(
    'InterimFields',
    read_permission=View,
    write_permission=FieldEditAnalysisResult,
    schemata='Method',
    widget=RecordsWidget(
        label=_("Calculation Interim Fields"),
        description=_(
            "Values can be entered here which will override the defaults "
            "specified in the Calculation Interim Fields."),
    )
)

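# The full analysis schema: the base schema imported from
# abstractbaseanalysis, extended with the fields defined above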
schema = schema.copy() + Schema((
    AnalysisService,
    Analyst,
    Attachment,
    DetectionLimitOperand,
    # NumberOfRequiredVerifications overrides AbstractBaseClass
    NumberOfRequiredVerifications,
    Result,
    ResultCaptureDate,
    RetestOf,
    Uncertainty,
    Calculation,
    InterimFields
))


class AbstractAnalysis(AbstractBaseAnalysis):
    security = ClassSecurityInfo()
    displayContentsTab = False
    schema = schema

    @deprecated('[1705] Currently returns the Analysis object itself. If you '
                'need to get the service, use getAnalysisService instead')
    @security.public
    def getService(self):
        return self

    def getServiceUID(self):
        """Return the UID of the associated service.
        """
        service = self.getAnalysisService()
        if service:
            return service.UID()

    @security.public
    def getNumberOfVerifications(self):
        return len(self.getVerificators())

    @security.public
    def getNumberOfRemainingVerifications(self):
        required = self.getNumberOfRequiredVerifications()
        done = self.getNumberOfVerifications()
        if done >= required:
            return 0
        return required - done

    # TODO Workflow - analysis. Remove?
    @security.public
    def getLastVerificator(self):
        verifiers = self.getVerificators()
        return verifiers and verifiers[-1] or None

    @security.public
    def getVerificators(self):
        """Returns the user ids of the users that verified this analysis
        """
        verifiers = list()
        actions = ["verify", "multi_verify"]
        for event in wf.getReviewHistory(self):
            if event['action'] in actions:
                verifiers.append(event['actor'])
        return verifiers

    @security.public
    def getDefaultUncertainty(self, result=None):
        """Return the uncertainty value, if the result falls within
        specified ranges for the service from which this analysis was derived.
        """

        if result is None:
            result = self.getResult()

        uncertainties = self.getUncertainties()
        if uncertainties:
            try:
                res = float(result)
            except (TypeError, ValueError):
                # if analysis result is not a number, then we assume in range
                return None

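            # Each range returned by getUncertainties() is expected to be a
            # dict with 'intercept_min', 'intercept_max' and 'errorvalue'
            # keys; the error value is either an absolute uncertainty or a
            # percentage of the result (a string ending with '%')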
            for d in uncertainties:
                _min = float(d['intercept_min'])
                _max = float(d['intercept_max'])
                if _min <= res and res <= _max:
                    if str(d['errorvalue']).strip().endswith('%'):
                        try:
                            percvalue = float(d['errorvalue'].replace('%', ''))
                        except ValueError:
                            return None
                        uncertainty = res / 100 * percvalue
                    else:
                        uncertainty = float(d['errorvalue'])

                    return uncertainty
        return None

    @security.public
    def getUncertainty(self, result=None):
        """Returns the uncertainty for this analysis and result.
        Returns the value from Schema's Uncertainty field if the Service has
        the option 'Allow manual uncertainty'. Otherwise, do a callback to
        getDefaultUncertainty(). Returns None if no result specified and the
        current result for this analysis is below or above detection limits.
        """
        uncertainty = self.getField('Uncertainty').get(self)
        if result is None and (self.isAboveUpperDetectionLimit() or
                               self.isBelowLowerDetectionLimit()):
            return None

        if uncertainty and self.getAllowManualUncertainty() is True:
            try:
                uncertainty = float(uncertainty)
                return uncertainty
            except (TypeError, ValueError):
                # if uncertainty is not a number, return default value
                pass
        return self.getDefaultUncertainty(result)

    @security.public
    def setUncertainty(self, unc):
        """Sets the uncertainty for this analysis. If the result is a
        Detection Limit or the value is below the LDL or above the UDL,
        the uncertainty is set to None
        """
        # Uncertainty calculation on DL
        # https://jira.bikalabs.com/browse/LIMS-1808
        if self.isAboveUpperDetectionLimit() or \
                self.isBelowLowerDetectionLimit():
            self.getField('Uncertainty').set(self, None)
        else:
            self.getField('Uncertainty').set(self, unc)

    @security.public
    def setDetectionLimitOperand(self, value):
        """Set detection limit operand for this analysis
        Allowed detection limit operands are `<` and `>`.
        """
        manual_dl = self.getAllowManualDetectionLimit()
        selector = self.getDetectionLimitSelector()
        if not manual_dl and not selector:
            # Don't allow the user to set the limit operand if manual
            # assignment is not allowed and the selector is not visible
            return

        # Changing the detection limit operand has a side effect on the result
        result = self.getResult()
        if value in [LDL, UDL]:
            # flush uncertainty
            self.setUncertainty("")

            # If there is no previous result or the user is not allowed to
            # manually set the detection limit, override the result with the
            # default LDL/UDL
            has_result = api.is_floatable(result)
            if not has_result or not manual_dl:
                # set the result according to the system default UDL/LDL values
                if value == LDL:
                    result = self.getLowerDetectionLimit()
                else:
                    result = self.getUpperDetectionLimit()

        else:
            value = ""
            # Restore the DetectionLimitSelector, because its visibility may
            # have changed if manual detection limits are allowed and the
            # user entered a result with "<" or ">"
            if manual_dl:
                service = self.getAnalysisService()
                selector = service.getDetectionLimitSelector()
                self.setDetectionLimitSelector(selector)

        # Set the result
        self.getField("Result").set(self, result)

        # Set the detection limit to the field
        self.getField("DetectionLimitOperand").set(self, value)

    # Method getLowerDetectionLimit overrides method of class BaseAnalysis
    @security.public
    def getLowerDetectionLimit(self):
        """Returns the Lower Detection Limit (LDL) that applies to this
        analysis in particular. If no value set or the analysis service
        doesn't allow manual input of detection limits, returns the value set
        by default in the Analysis Service
        """
        if self.isLowerDetectionLimit():
            result = self.getResult()
            try:
                # in this case, the result itself is the LDL.
                return float(result)
            except (TypeError, ValueError):
                logger.warn("The result for the analysis %s is a lower "
                            "detection limit, but not floatable: '%s'. "
                            "Returning AS's default LDL." %
                            (self.id, result))
        return AbstractBaseAnalysis.getLowerDetectionLimit(self)

    # Method getUpperDetectionLimit overrides method of class BaseAnalysis
    @security.public
    def getUpperDetectionLimit(self):
        """Returns the Upper Detection Limit (UDL) that applies to this
        analysis in particular. If no value set or the analysis service
        doesn't allow manual input of detection limits, returns the value set
        by default in the Analysis Service
        """
        if self.isUpperDetectionLimit():
            result = self.getResult()
            try:
                # in this case, the result itself is the UDL.
                return float(result)
            except (TypeError, ValueError):
                logger.warn("The result for the analysis %s is an upper "
                            "detection limit, but not floatable: '%s'. "
                            "Returning AS's default UDL." %
                            (self.id, result))
        return AbstractBaseAnalysis.getUpperDetectionLimit(self)

    @security.public
    def isBelowLowerDetectionLimit(self):
        """Returns True if the result is below the Lower Detection Limit or
        if the Lower Detection Limit has been manually set
        """
        if self.isLowerDetectionLimit():
            return True

        result = self.getResult()
        if result and str(result).strip().startswith(LDL):
            return True

        if api.is_floatable(result):
            return api.to_float(result) < self.getLowerDetectionLimit()

        return False

    @security.public
    def isAboveUpperDetectionLimit(self):
        """Returns True if the result is above the Upper Detection Limit or
        if the Upper Detection Limit has been manually set
        """
        if self.isUpperDetectionLimit():
            return True

        result = self.getResult()
        if result and str(result).strip().startswith(UDL):
            return True

        if api.is_floatable(result):
            return api.to_float(result) > self.getUpperDetectionLimit()

        return False

    @security.public
    def getDetectionLimits(self):
        """Returns a two-value array with the limits of detection (LDL and
        UDL) that applies to this analysis in particular. If no value set or
        the analysis service doesn't allow manual input of detection limits,
        returns the value set by default in the Analysis Service
        """
        return [self.getLowerDetectionLimit(), self.getUpperDetectionLimit()]

    @security.public
    def isLowerDetectionLimit(self):
        """Returns True if the result for this analysis represents a Lower
        Detection Limit. Otherwise, returns False
        """
        return self.getDetectionLimitOperand() == LDL

    @security.public
    def isUpperDetectionLimit(self):
        """Returns True if the result for this analysis represents an Upper
        Detection Limit. Otherwise, returns False
        """
        return self.getDetectionLimitOperand() == UDL

    @security.public
    def getDependents(self):
        """Return a list of analyses who depend on us to calculate their result
        """
        raise NotImplementedError("getDependents is not implemented.")

    @security.public
    def getDependencies(self, retracted=False):
        """Return a list of siblings who we depend on to calculate our result.
        :param retracted: If false, retracted/rejected analyses are dismissed
        :type retracted: bool
        :return: Analyses the current analysis depends on
        :rtype: list of IAnalysis
        """
        raise NotImplementedError("getDependencies is not implemented.")

    @security.public
    def setResult(self, value):
        """Validate and set a value into the Result field, taking into
        account the Detection Limits.
        :param value: is expected to be a string.
        """
        # Always update ResultCapture date when this field is modified
        self.setResultCaptureDate(DateTime())

        # Ensure result integrity regards to None, empty and 0 values
        val = str("" if not value and value != 0 else value).strip()

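        # LDL and UDL are the single-character detection limit operands
        # ("<" and ">", from bika.lims.config), so a result entered as e.g.
        # "<0.5" is interpreted as a detection limit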
        # UDL/LDL directly entered in the results field
        if val and val[0] in [LDL, UDL]:
            # Result prefixed with LDL/UDL
            oper = val[0]
            # Strip off LDL/UDL from the result
            val = val.replace(oper, "", 1)
            # Check if the value is indeterminate / non-floatable
            try:
                val = float(val)
            except (ValueError, TypeError):
                val = value

            # We dismiss the operand and the selector visibility unless the
            # user is allowed to manually set the detection limit or the DL
            # selector is visible.
            allow_manual = self.getAllowManualDetectionLimit()
            selector = self.getDetectionLimitSelector()
            if allow_manual or selector:
                # Ensure visibility of the detection limit selector
                self.setDetectionLimitSelector(True)

                # Set the detection limit operand
                self.setDetectionLimitOperand(oper)

                if not allow_manual:
                    # Override value by default DL
                    if oper == LDL:
                        val = self.getLowerDetectionLimit()
                    else:
                        val = self.getUpperDetectionLimit()

        # Set the result field
        self.getField("Result").set(self, val)

    @security.public
    def getResultsRange(self):
        raise NotImplementedError("getResultsRange is not implemented.")

    @security.public
    def calculateResult(self, override=False, cascade=False):
        """Calculates the result for the current analysis if it depends on
        other analyses or interim fields. Otherwise, do nothing
        """
        if self.getResult() and override is False:
            return False

        calc = self.getCalculation()
        if not calc:
            return False

        mapping = {}

        # Interims' priority order (from low to high):
        # Calculation < Analysis
        interims = calc.getInterimFields() + self.getInterimFields()

        # Add interims to mapping
        for i in interims:
            if 'keyword' not in i:
                continue
            # skip unset values
            if i['value'] == '':
                continue
            try:
                ivalue = float(i['value'])
                mapping[i['keyword']] = ivalue
            except (TypeError, ValueError):
                # Interim not float, abort
                return False

        # Add dependencies results to mapping
        dependencies = self.getDependencies()
        for dependency in dependencies:
            result = dependency.getResult()
            if not result:
                # Dependency without results found
                if cascade:
                    # Try to calculate the dependency result
                    dependency.calculateResult(override, cascade)
                    result = dependency.getResult()
                else:
                    return False
            if result:
                try:
                    result = float(str(result))
                    key = dependency.getKeyword()
                    ldl = dependency.getLowerDetectionLimit()
                    udl = dependency.getUpperDetectionLimit()
                    bdl = dependency.isBelowLowerDetectionLimit()
                    adl = dependency.isAboveUpperDetectionLimit()
                    mapping[key] = result
                    mapping['%s.%s' % (key, 'RESULT')] = result
                    mapping['%s.%s' % (key, 'LDL')] = ldl
                    mapping['%s.%s' % (key, 'UDL')] = udl
                    mapping['%s.%s' % (key, 'BELOWLDL')] = int(bdl)
                    mapping['%s.%s' % (key, 'ABOVEUDL')] = int(adl)
                except (TypeError, ValueError):
                    return False

        # Calculate
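        # Dependencies appear in the formula as "[keyword]" placeholders;
        # they are converted into "%(keyword)f" tokens and interpolated with
        # the mapping of interim values and dependency results built above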
        formula = calc.getMinifiedFormula()
        formula = formula.replace('[', '%(').replace(']', ')f')
        try:
            formula = eval("'%s'%%mapping" % formula,
                           {"__builtins__": None,
                            'math': math,
                            'context': self},
                           {'mapping': mapping})
            result = eval(formula, calc._getGlobals())
        except TypeError:
            self.setResult("NA")
            return True
        except ZeroDivisionError:
            self.setResult('0/0')
            return True
        except KeyError:
            self.setResult("NA")
            return True
        except ImportError:
            self.setResult("NA")
            return True

        self.setResult(str(result))
        return True

    @security.public
    def getPrice(self):
        """The function obtains the analysis' price without VAT and without
        member discount
        :return: the price (without VAT or Member Discount) in decimal format
        """
        analysis_request = self.aq_parent
        client = analysis_request.aq_parent
        if client.getBulkDiscount():
            price = self.getBulkPrice()
        else:
            price = self.getField('Price').get(self)
        return price

    @security.public
    def getVATAmount(self):
        """Compute the VAT amount without member discount.
        :return: the result as a Decimal
        """
        vat = self.getVAT()
        price = self.getPrice()
        return Decimal(price) * Decimal(vat) / 100

    @security.public
    def getTotalPrice(self):
        """Obtain the total price without the client's member discount. The
        client's bulk discount is taken into account.
        :return: the result as a Decimal
        """
        return Decimal(self.getPrice()) + Decimal(self.getVATAmount())

    @security.public
    def getDuration(self):
        """Returns the time in minutes taken for this analysis.
        If the analysis is not yet 'ready to process', returns 0
        If the analysis has already been verified,
            duration = date_verified - date_start_process
        Otherwise:
            duration = current_datetime - date_start_process
        :return: time in minutes taken for this analysis
        :rtype: int
        """
        starttime = self.getStartProcessDate()
        if not starttime:
            # The analysis is not yet ready to be processed
            return 0
        endtime = self.getDateVerified() or DateTime()

        # Duration in minutes
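        # (subtracting two DateTime objects yields a float number of days,
        # hence the conversion factor of 24 * 60)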
        duration = (endtime - starttime) * 24 * 60
        return duration

    @security.public
    def getEarliness(self):
        """The remaining time in minutes for this analysis to be completed.
        Returns zero if the analysis is not yet 'ready to process' or no
        turnaround time is set.
            earliness = duration - max_turnaround_time
        The analysis is late if the earliness is negative
        :return: the remaining time in minutes before the analysis reaches TAT
        :rtype: int
        """
        maxtime = self.getMaxTimeAllowed()
        if not maxtime:
            # No Turnaround time is set for this analysis
            return 0
        return api.to_minutes(**maxtime) - self.getDuration()

    @security.public
    def isLateAnalysis(self):
        """Returns true if the analysis is late in accordance with the maximum
        turnaround time. If no maximum turnaround time is set for this analysis
        or it is not yet ready to be processed, or there is still time
        remaining (earliness), returns False.
        :return: true if the analysis is late
        :rtype: bool
        """
        return self.getEarliness() < 0

    @security.public
    def getLateness(self):
        """The time in minutes that exceeds the maximum turnaround set for this
        analysis. If the analysis has no turnaround time set or is not ready
        for process yet, returns 0. The analysis is not late if the lateness is
        negative
        :return: the time in minutes that exceeds the maximum turnaround time
        :rtype: int
        """
        return -self.getEarliness()

    @security.public
    def isInstrumentValid(self):
        """Checks if the instrument selected for this analysis is valid.
        Returns false if an out-of-date or uncalibrated instrument is
        assigned.
        :return: True if the Analysis has no instrument assigned or is valid
        :rtype: bool
        """
        if self.getInstrument():
            return self.getInstrument().isValid()
        return True

    @security.public
    def isInstrumentAllowed(self, instrument):
        """Checks if the specified instrument can be set for this analysis,
        either if the instrument was assigned directly (by using "Allows
        instrument entry of results") or indirectly via Method ("Allows manual
        entry of results") in Analysis Service Edit view.
        Param instrument can be either a UID or an object
        :param instrument: string,Instrument
        :return: True if the assignment of the passed in instrument is allowed
        :rtype: bool
        """
        if isinstance(instrument, str):
            uid = instrument
        else:
            uid = instrument.UID()

        return uid in self.getAllowedInstrumentUIDs()

    @security.public
    def isMethodAllowed(self, method):
        """Checks if the analysis can follow the method specified, either if
        the method was assigned directly (by using "Allows manual entry of
        results") or indirectly via Instrument ("Allows instrument entry of
        results") in Analysis Service Edit view.
        Param method can be either a UID or an object
        :param method: string,Method
        :return: True if the analysis can follow the method specified
        :rtype: bool
        """
        if isinstance(method, str):
            uid = method
        else:
            uid = method.UID()

        return uid in self.getAllowedMethodUIDs()

    @security.public
    def getAllowedMethods(self):
        """Returns the allowed methods for this analysis, either if the method
        was assigned directly (by using "Allows manual entry of results") or
        indirectly via Instrument ("Allows instrument entry of results") in
        Analysis Service Edit View.
        :return: A list with the methods allowed for this analysis
        :rtype: list of Methods
        """
        service = self.getAnalysisService()
        if not service:
            return []

        methods = []
        if self.getManualEntryOfResults():
            methods = service.getMethods()
        if self.getInstrumentEntryOfResults():
            for instrument in service.getInstruments():
                methods.extend(instrument.getMethods())

        return list(set(methods))

    @security.public
    def getAllowedMethodUIDs(self):
        """Used to populate getAllowedMethodUIDs metadata. Delegates to
        method getAllowedMethods() for the retrieval of the methods allowed.
        :return: A list with the UIDs of the methods allowed for this analysis
        :rtype: list of strings
        """
        return [m.UID() for m in self.getAllowedMethods()]

    @security.public
    def getAllowedInstruments(self):
        """Returns the allowed instruments for this analysis, either if the
        instrument was assigned directly (by using "Allows instrument entry of
        results") or indirectly via Method (by using "Allows manual entry of
        results") in Analysis Service edit view.
        :return: A list of instruments allowed for this Analysis
        :rtype: list of instruments
        """
        service = self.getAnalysisService()
        if not service:
            return []

        instruments = []
        if self.getInstrumentEntryOfResults():
            instruments = service.getInstruments()
        if self.getManualEntryOfResults():
            for meth in self.getAllowedMethods():
                instruments += meth.getInstruments()

        return list(set(instruments))

    @security.public
    def getAllowedInstrumentUIDs(self):
        """Used to populate getAllowedInstrumentUIDs metadata. Delegates to
        getAllowedInstruments() for the retrieval of the instruments allowed.
        :return: List of instruments' UIDs allowed for this analysis
        :rtype: list of strings
        """
        return [i.UID() for i in self.getAllowedInstruments()]

    @security.public
    def getExponentialFormatPrecision(self, result=None):
        """Returns the precision for the Analysis Service and result
        provided. Results with a precision value above this exponential
        format precision should be formatted as scientific notation.

        If the Calculate Precision according to Uncertainty is not set,
        the method will return the exponential precision value set in the
        Schema. Otherwise, will calculate the precision value according to
        the Uncertainty and the result.

        If Calculate Precision from the Uncertainty is set but no result is
        provided or no uncertainty values are set, returns the fixed
        exponential precision.

        Will return positive values if the result is below 0 and will return
        0 or positive values if the result is above 0.

        Given an analysis service with fixed exponential format
        precision of 4:
        Result      Uncertainty     Returns
        5.234       0.22            0
        13.5        1.34            1
        0.0077      0.008          -3
        32092       0.81            4
        456021      423             5

        For further details, visit https://jira.bikalabs.com/browse/LIMS-1334

        :param result: if provided and "Calculate Precision according to the
            Uncertainty" is set, the result will be used to retrieve the
            uncertainty from which the precision must be calculated.
            Otherwise, the fixed-precision will be used.
        :returns: the precision
        """
        if not result or self.getPrecisionFromUncertainty() is False:
            return self._getExponentialFormatPrecision()
        else:
            uncertainty = self.getUncertainty(result)
            if uncertainty is None:
                return self._getExponentialFormatPrecision()

            try:
                float(result)
            except ValueError:
                # if analysis result is not a number, then we assume in range
                return self._getExponentialFormatPrecision()

            return get_significant_digits(uncertainty)

    def _getExponentialFormatPrecision(self):
        field = self.getField('ExponentialFormatPrecision')
        value = field.get(self)
        if value is None:
            # https://github.com/bikalims/bika.lims/issues/2004
            # We require the field, because None values make no sense at all.
            value = self.Schema().getField(
                'ExponentialFormatPrecision').getDefault(self)
        return value

    @security.public
    def getFormattedResult(self, specs=None, decimalmark='.', sciformat=1,
                           html=True):
        """Formatted result:
        1. If the result is a detection limit, returns '< LDL' or '> UDL'
        2. Print ResultText of matching ResultOptions
        3. If the result is not floatable, return it without being formatted
        4. If the analysis specs has hidemin or hidemax enabled and the
           result is out of range, render result as '<min' or '>max'
        5. If the result is below Lower Detection Limit, show '<LDL'
        6. If the result is above Upper Detection Limit, show '>UDL'
        7. Otherwise, render numerical value
        :param specs: Optional result specifications, a dictionary as follows:
            {'min': <min_val>,
             'max': <max_val>,
             'error': <error>,
             'hidemin': <hidemin_val>,
             'hidemax': <hidemax_val>}
        :param decimalmark: The string to be used as a decimal separator.
            default is '.'
        :param sciformat: 1. The sci notation has to be formatted as aE^+b
                          2. The sci notation has to be formatted as ax10^b
                          3. As 2, but with super html entity for exp
                          4. The sci notation has to be formatted as a·10^b
                          5. As 4, but with super html entity for exp
                          By default 1
        :param html: if true, returns a string with the special characters
            escaped, e.g. '<' and '>' (LDL and UDL for results like < 23.4).
        """
        result = self.getResult()

        # 1. The result is a detection limit, return '< LDL' or '> UDL'
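        # When html is True the operand is escaped (e.g. "<" becomes "&lt;")
        # so that it renders literally in HTML listings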
        dl = self.getDetectionLimitOperand()
        if dl:
            try:
                res = float(result)  # required, check if floatable
                res = drop_trailing_zeros_decimal(res)
                fdm = formatDecimalMark(res, decimalmark)
                hdl = cgi.escape(dl) if html else dl
                return '%s %s' % (hdl, fdm)
            except (TypeError, ValueError):
                logger.warn(
                    "The result for the analysis %s is a detection limit, "
                    "but not floatable: %s" % (self.id, result))
                return formatDecimalMark(result, decimalmark=decimalmark)

        choices = self.getResultOptions()

        # 2. Print ResultText of matching ResultOptions
        match = [x['ResultText'] for x in choices
                 if str(x['ResultValue']) == str(result)]
        if match:
            return match[0]

        # 3. If the result is not floatable, return it without being formatted
        try:
            result = float(result)
        except (TypeError, ValueError):
            return formatDecimalMark(result, decimalmark=decimalmark)

        # 4. If the analysis specs has enabled hidemin or hidemax and the
        #    result is out of range, render result as '<min' or '>max'
        specs = specs if specs else self.getResultsRange()
        hidemin = specs.get('hidemin', '')
        hidemax = specs.get('hidemax', '')
        try:
            belowmin = hidemin and result < float(hidemin) or False
        except (TypeError, ValueError):
            belowmin = False
        try:
            abovemax = hidemax and result > float(hidemax) or False
        except (TypeError, ValueError):
            abovemax = False

        # 4.1. If result is below min and hidemin enabled, return '<min'
        if belowmin:
            fdm = formatDecimalMark('< %s' % hidemin, decimalmark)
            return fdm.replace('< ', '&lt; ', 1) if html else fdm

        # 4.2. If result is above max and hidemax enabled, return '>max'
        if abovemax:
            fdm = formatDecimalMark('> %s' % hidemax, decimalmark)
            return fdm.replace('> ', '&gt; ', 1) if html else fdm

        # Below Lower Detection Limit (LDL)?
        ldl = self.getLowerDetectionLimit()
        if result < ldl:
            # LDL must not be formatted according to precision, etc.
            # Drop trailing zeros from decimal
            ldl = drop_trailing_zeros_decimal(ldl)
            fdm = formatDecimalMark('< %s' % ldl, decimalmark)
            return fdm.replace('< ', '&lt; ', 1) if html else fdm

        # Above Upper Detection Limit (UDL)?
        udl = self.getUpperDetectionLimit()
        if result > udl:
            # UDL must not be formatted according to precision, etc.
            # Drop trailing zeros from decimal
            udl = drop_trailing_zeros_decimal(udl)
            fdm = formatDecimalMark('> %s' % udl, decimalmark)
            return fdm.replace('> ', '&gt; ', 1) if html else fdm

        # Render numerical values
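        # Precision and scientific-notation formatting are delegated to
        # format_numeric_result from bika.lims.utils.analysis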
        return format_numeric_result(self, self.getResult(),
                                     decimalmark=decimalmark,
                                     sciformat=sciformat)

    @security.public
    def getPrecision(self, result=None):
        """Returns the precision for the Analysis.

        - If ManualUncertainty is set, calculates the precision of the result
          in accordance with the manual uncertainty set.

        - If Calculate Precision from Uncertainty is set in Analysis Service,
          calculates the precision in accordance with the uncertainty inferred
          from uncertainties ranges.

        - If neither Manual Uncertainty nor Calculate Precision from
          Uncertainty are set, returns the precision from the Analysis Service

        - If you have a number with zero uncertainty: If you roll a pair of
          dice and observe five spots, the number of spots is 5. This is a raw
          data point, with no uncertainty whatsoever. So just write down the
          number. Similarly, the number of centimeters per inch is 2.54,
          by definition, with no uncertainty whatsoever. Again: just write
          down the number.

        Further information at AbstractBaseAnalysis.getPrecision()
        """
        allow_manual = self.getAllowManualUncertainty()
        precision_unc = self.getPrecisionFromUncertainty()
        if allow_manual or precision_unc:
            uncertainty = self.getUncertainty(result)
            if uncertainty is None:
                return self.getField('Precision').get(self)
            if uncertainty == 0 and result is None:
                return self.getField('Precision').get(self)
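            # A zero uncertainty means the result is regarded as exact, so
            # the precision is the number of decimals entered in the result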
            if uncertainty == 0:
                strres = str(result)
                numdecimals = strres[::-1].find('.')
                return numdecimals
            return get_significant_digits(uncertainty)
        return self.getField('Precision').get(self)

    @security.public
    def getAnalyst(self):
        """Returns the stored Analyst or the user who submitted the result
        """
        analyst = self.getField("Analyst").get(self)
        if not analyst:
            analyst = self.getSubmittedBy()
        return analyst or ""

    @security.public
    def getAssignedAnalyst(self):
        """Returns the Analyst assigned to the worksheet this
        analysis is assigned to
        """
        worksheet = self.getWorksheet()
        if not worksheet:
            return ""
        return worksheet.getAnalyst() or ""

    @security.public
    def getAnalystName(self):
        """Returns the name of the currently assigned analyst
        """
        analyst = self.getAnalyst()
        if not analyst:
            return ""
        user = api.get_user(analyst.strip())
        return user and user.getProperty("fullname") or analyst

    @security.public
    def getObjectWorkflowStates(self):
        """This method is used to populate catalog values
        Returns a dictionary with the workflow id as key and workflow state as
        value.
        :return: {'review_state':'active',...}
        """
        workflow = getToolByName(self, 'portal_workflow')
        states = {}
        for w in workflow.getWorkflowsFor(self):
            state = api.get_workflow_status_of(self, w.state_var)
            states[w.state_var] = state
        return states

    @security.public
    def getSubmittedBy(self):
        """
        Returns the identifier of the user who submitted the result if the
        state of the current analysis is "to_be_verified" or "verified"
        :return: the user_id of the user who did the last submission of result
        """
        return getTransitionActor(self, 'submit')

    @security.public
    def getDateSubmitted(self):
        """Returns the time the result was submitted.
        :return: a DateTime object.
        :rtype: DateTime
        """
        return getTransitionDate(self, 'submit', return_as_datetime=True)

    @security.public
    def getDateVerified(self):
        """Returns the time the analysis was verified. If the analysis hasn't
        been verified yet, returns None
        :return: the time the analysis was verified or None
        :rtype: DateTime
        """
        return getTransitionDate(self, 'verify', return_as_datetime=True)

    @security.public
    def getStartProcessDate(self):
        """Returns the date time when the analysis is ready to be processed.
        It returns the datetime when the object was created, but might be
        different depending on the type of analysis (e.g. "Date Received" for
        routine analyses): see overridden functions.
        :return: Date time when the analysis is ready to be processed.
        :rtype: DateTime
        """
        return self.created()

    @security.public
    def getParentUID(self):
        """This method is used to populate catalog values
        This function returns the analysis' parent UID
        """
        parent = self.aq_parent
        if parent:
            return parent.UID()

    @security.public
    def getParentURL(self):
        """This method is used to populate catalog values
        This function returns the analysis' parent URL
        """
        parent = self.aq_parent
        if parent:
            return parent.absolute_url_path()

    @security.public
    def getParentTitle(self):
        """This method is used to populate catalog values
        This function returns the analysis' parent Title
        """
        parent = self.aq_parent
        if parent:
            return parent.Title()

    @security.public
    def getWorksheetUID(self):
        """This method is used to populate catalog values
        Returns WS UID if this analysis is assigned to a worksheet, or None.
        """
        worksheet = self.getWorksheet()
        if worksheet:
            return worksheet.UID()

    @security.public
    def getWorksheet(self):
        """Returns the Worksheet to which this analysis belongs, or None
        """
        worksheet = self.getBackReferences('WorksheetAnalysis')
        if not worksheet:
            return None
        if len(worksheet) > 1:
            logger.error(
                "Analysis %s is assigned to more than one worksheet."
                % self.getId())
        return worksheet[0]

    @security.public
    def getInstrumentValid(self):
        """Used to populate catalog values. Delegates to isInstrumentValid()
        Returns false if an out-of-date or uncalibrated instrument is
        assigned.
        :return: True if the Analysis has no instrument assigned or is valid
        :rtype: bool
        """
        return self.isInstrumentValid()

    @security.public
    def getAttachmentUIDs(self):
        """Used to populate metadata, so that we don't need full objects of
        analyses when working with their attachments.
        """
        attachments = self.getAttachment()
        uids = [att.UID() for att in attachments]
        return uids

    @security.public
    def getCalculationTitle(self):
        """Used to populate catalog values
        """
        calculation = self.getCalculation()
        if calculation:
            return calculation.Title()

    @security.public
    def getCalculationUID(self):
        """Used to populate catalog values
        """
        calculation = self.getCalculation()
        if calculation:
            return calculation.UID()

    @security.public
    def remove_duplicates(self, ws):
        """When this analysis is unassigned from a worksheet, this function
        is responsible for deleting DuplicateAnalysis objects from the ws.
        """
        for analysis in ws.objectValues():
            if IDuplicateAnalysis.providedBy(analysis) \
                    and analysis.getAnalysis().UID() == self.UID():
                ws.removeAnalysis(analysis)

    def setInterimValue(self, keyword, value):
        """Sets a value to an interim of this analysis
        :param keyword: the keyword of the interim
        :param value: the value for the interim
        """
        # Ensure result integrity regards to None, empty and 0 values
        val = str('' if not value and value != 0 else value).strip()
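        # Interim fields are stored as a list of dicts, each with (at least)
        # 'keyword' and 'value' entries; update the matching record and
        # persist the whole list again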
        interims = self.getInterimFields()
        for interim in interims:
            if interim['keyword'] == keyword:
                interim['value'] = val
                self.setInterimFields(interims)
                return

        logger.warning("Interim '{}' for analysis '{}' not found"
                       .format(keyword, self.getKeyword()))

    def getInterimValue(self, keyword):
        """Returns the value of an interim of this analysis
        """
        interims = filter(lambda item: item["keyword"] == keyword,
                          self.getInterimFields())
        if not interims:
            logger.warning("Interim '{}' for analysis '{}' not found"
                           .format(keyword, self.getKeyword()))
            return None
        if len(interims) > 1:
            logger.error("More than one interim '{}' found for '{}'"
                         .format(keyword, self.getKeyword()))
            return None
        return interims[0].get('value', '')

    def isRetest(self):
        """Returns whether this analysis is a retest or not
        """
        return self.getRetestOf() and True or False

    def getRetestOfUID(self):
        """Returns the UID of the retracted analysis this is a retest of
        """
        retest_of = self.getRetestOf()
        if retest_of:
            return api.get_uid(retest_of)

    def getRetest(self):
        """Returns the retest that comes from this analysis, if any
        """
        relationship = "{}RetestOf".format(self.portal_type)
        back_refs = get_backreferences(self, relationship)
        if not back_refs:
            return None
        if len(back_refs) > 1:
            logger.warn("Analysis {} with multiple retests".format(self.id))
        return api.get_object_by_uid(back_refs[0])