|
1
|
|
|
# -*- coding: utf-8 -*- |
|
2
|
|
|
# |
|
3
|
|
|
# This file is part of SENAITE.CORE |
|
4
|
|
|
# |
|
5
|
|
|
# Copyright 2018 by its authors.
|
6
|
|
|
# Some rights reserved. See LICENSE.rst, CONTRIBUTORS.rst. |
|
7
|
|
|
|
|
8
|
|
|
import json |
|
9
|
|
|
|
|
10
|
|
|
from DateTime import DateTime |
|
11
|
|
|
from Products.CMFPlone.i18nl10n import ulocalized_time |
|
12
|
|
|
from bika.lims import PMF |
|
13
|
|
|
from bika.lims import api |
|
14
|
|
|
from bika.lims import bikaMessageFactory as _ |
|
15
|
|
|
from bika.lims.api import is_active |
|
16
|
|
|
from bika.lims.api.analysis import is_out_of_range |
|
17
|
|
|
from bika.lims.browser.bika_listing import WorkflowAction |
|
18
|
|
|
from bika.lims.browser.referenceanalysis import AnalysesRetractedListReport |
|
19
|
|
|
from bika.lims.catalog.analysis_catalog import CATALOG_ANALYSIS_LISTING |
|
20
|
|
|
from bika.lims.catalog.analysisrequest_catalog import \ |
|
21
|
|
|
CATALOG_ANALYSIS_REQUEST_LISTING |
|
22
|
|
|
from bika.lims.catalog.worksheet_catalog import CATALOG_WORKSHEET_LISTING |
|
23
|
|
|
from bika.lims.interfaces import IReferenceAnalysis |
|
24
|
|
|
from bika.lims.interfaces.analysis import IRequestAnalysis |
|
25
|
|
|
from bika.lims.subscribers import doActionFor |
|
26
|
|
|
from bika.lims.workflow import in_state, isTransitionAllowed |
|
27
|
|
|
|
|
28
|
|
|
|
|
29
|
|
|
class AnalysesWorkflowAction(WorkflowAction):
    """Workflow actions taken in lists that contains analyses"""

    def workflow_action_submit(self):
        """Submit the results of the analyses selected in the listing form.

        Reads per-analysis values (remarks, results, method, instrument,
        analyst, uncertainty, detection limit, interim fields) from the
        request form, stores them on each analysis and triggers the 'submit'
        transition where possible. Afterwards it:

        - retracts 'to_be_verified' analyses bound to an instrument whose
          reference analysis came out of range, and attaches/emails a PDF
          report of the retractions;
        - cascades the 'submit' transition (or a reindex) to the affected
          Analysis Requests and Worksheets;
        - redirects back to the referring page with a status message.
        """
        uids = self.get_selected_uids()
        if not uids:
            message = _('No items selected.')
            self.context.plone_utils.addPortalMessage(message, 'info')
            self.request.response.redirect(self.context.absolute_url())
            return

        if not is_active(self.context):
            message = _('Item is inactive.')
            self.context.plone_utils.addPortalMessage(message, 'info')
            self.request.response.redirect(self.context.absolute_url())
            return

        # Each of these form entries is a one-element list holding a mapping
        # of analysis-uid -> value (bika listing convention).
        form = self.request.form
        remarks = form.get('Remarks', [{}])[0]
        results = form.get('Result', [{}])[0]
        retested = form.get('retested', {})
        methods = form.get('Method', [{}])[0]
        instruments = form.get('Instrument', [{}])[0]
        analysts = self.request.form.get('Analyst', [{}])[0]
        uncertainties = self.request.form.get('Uncertainty', [{}])[0]
        dlimits = self.request.form.get('DetectionLimit', [{}])[0]

        # XXX combine data from multiple bika listing tables.
        # TODO: Is this necessary?
        item_data = {}
        if 'item_data' in form:
            if isinstance(form['item_data'], list):
                for i_d in form['item_data']:
                    for i, d in json.loads(i_d).items():
                        item_data[i] = d
            else:
                item_data = json.loads(form['item_data'])

        # Store affected Analysis Requests (by UID), to cascade later
        affected_ars = set()

        # Store affected Worksheets (by UID), to cascade later
        affected_ws = set()

        # Maps instrument-reference-analysis uid -> set of out-of-range
        # reference analyses, used to retract routine analyses afterwards
        invalid_instrument_refs = dict()

        # We manually query by all analyses uids at once here instead of using
        # _get_selected_items from the base class, cause that function fetches
        # the objects by uid, but sequentially one by one
        query = dict(UID=uids)
        for brain in api.search(query, CATALOG_ANALYSIS_LISTING):
            uid = api.get_uid(brain)
            analysis = api.get_object(brain)

            # If not active, do nothing
            if not is_active(brain):
                continue

            # Need to save remarks?
            if uid in remarks:
                analysis.setRemarks(remarks[uid])

            # Retested?
            if uid in retested:
                analysis.setRetested(retested[uid])

            # Need to save the instrument?
            if uid in instruments:
                # "" in the form means "no instrument" -> store None
                instrument = instruments[uid] or None
                analysis.setInstrument(instrument)
                if instrument and IReferenceAnalysis.providedBy(analysis):
                    if is_out_of_range(analysis):
                        # This reference analysis is out of range, so we have
                        # to retract all analyses assigned to this same
                        # instrument that are awaiting for verification
                        invalid_instrument_refs.setdefault(uid, set())
                        invalid_instrument_refs[uid].add(analysis)
                    else:
                        # The reference result is valid, so make the instrument
                        # available again for further analyses
                        instrument.setDisposeUntilNextCalibrationTest(False)

            # Need to save the method?
            if uid in methods:
                method = methods[uid] or None
                analysis.setMethod(method)

            # Need to save the analyst?
            if uid in analysts:
                analysis.setAnalyst(analysts[uid])

            # Need to save the uncertainty?
            if uid in uncertainties:
                analysis.setUncertainty(uncertainties[uid])

            # Need to save the detection limit?
            analysis.setDetectionLimitOperand(dlimits.get(uid, ""))

            # Need to save results?
            submitted = False
            if uid in results and results[uid]:
                interims = item_data.get(uid, [])
                analysis.setInterimFields(interims)
                analysis.setResult(results[uid])

                # Can the analysis be submitted?
                # An analysis can only be submitted if all its dependencies
                # are valid and have been submitted already
                can_submit = True
                invalid_states = ['to_be_sampled', 'to_be_preserved',
                                  'sample_due', 'sample_received']
                for dependency in analysis.getDependencies():
                    if in_state(dependency, invalid_states):
                        can_submit = False
                        break
                if can_submit:
                    # doActionFor transitions the analysis to verif pending,
                    # so must only be done when results are submitted.
                    doActionFor(analysis, 'submit')
                    submitted = True
                    if IRequestAnalysis.providedBy(analysis):
                        # Store the AR uids to be reindexed later.
                        affected_ars.add(brain.getParentUID)

                    if brain.worksheetanalysis_review_state == 'assigned':
                        worksheet_uid = analysis.getWorksheetUID()
                        if worksheet_uid:
                            affected_ws.add(worksheet_uid)

            if not submitted:
                # Analysis has not been submitted, so we need to reindex the
                # object manually, to update catalog's metadata.
                analysis.reindexObject()

        # If a reference analysis with an out-of-range result and instrument
        # assigned has been submitted, retract then routine analyses that are
        # awaiting for verification and with same instrument associated
        retracted = list()
        for invalid_instrument_uid in invalid_instrument_refs.keys():
            query = dict(getInstrumentUID=invalid_instrument_uid,
                         portal_type=['Analysis', 'DuplicateAnalysis'],
                         review_state='to_be_verified',
                         cancellation_state='active', )
            brains = api.search(query, CATALOG_ANALYSIS_LISTING)
            for brain in brains:
                analysis = api.get_object(brain)
                failed_msg = '{0}: {1}'.format(
                    ulocalized_time(DateTime(), long_format=1),
                    _("Instrument failed reference test"))
                # getRemarks may return None; guard so the join cannot raise
                an_remarks = analysis.getRemarks() or ''
                analysis.setRemarks('. '.join([an_remarks, failed_msg]))
                doActionFor(analysis, 'retract')
                retracted.append(analysis)

        # If some analyses have been retracted because instrument failed a
        # reference test, then generate a pdf report
        if retracted:
            # Create the Retracted Analyses List
            report = AnalysesRetractedListReport(self.context, self.request,
                                                 self.portal_url,
                                                 'Retracted analyses',
                                                 retracted)

            # Attach the pdf to all ReferenceAnalysis that failed (accessible
            # from Instrument's Internal Calibration Tests list
            pdf = report.toPdf()
            # Note: each value of invalid_instrument_refs is a *set* of
            # reference analyses, so iterate the members of each set
            # (iterating only the values raised AttributeError before).
            for failed_refs in invalid_instrument_refs.values():
                for ref_analysis in failed_refs:
                    ref_analysis.setRetractedAnalysesPdfReport(pdf)

            # Send the email (best-effort: a mail failure must not abort the
            # submission, but don't swallow SystemExit/KeyboardInterrupt)
            try:
                report.sendEmail()
            except Exception:
                pass

        # Finally, when we are done processing all applicable analyses, we must
        # attempt to initiate the submit transition on the ARs and Worksheets
        # the processed analyses belong to.
        # We stick only to affected_ars, and affected_ws

        # Reindex the Analysis Requests for which at least one Analysis has
        # been submitted. We do this here because one AR can contain multiple
        # Analyses, so better to just reindex the AR once instead of each time.
        # AR Catalog contains some metadata that that rely on the Analyses an
        # Analysis Request contains.
        if affected_ars:
            query = dict(UID=list(affected_ars), portal_type="AnalysisRequest")
            for ar_brain in api.search(query,
                                       CATALOG_ANALYSIS_REQUEST_LISTING):
                if ar_brain.review_state == 'to_be_verified':
                    continue
                ar = api.get_object(ar_brain)
                if isTransitionAllowed(ar, "submit"):
                    doActionFor(ar, "submit")
                else:
                    ar.reindexObject()

        if affected_ws:
            query = dict(UID=list(affected_ws), portal_type="Worksheet")
            for ws_brain in api.search(query, CATALOG_WORKSHEET_LISTING):
                if ws_brain.review_state == 'to_be_verified':
                    continue
                ws = api.get_object(ws_brain)
                if isTransitionAllowed(ws, "submit"):
                    doActionFor(ws, "submit")

        message = PMF("Changes saved.")
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.destination_url = self.request.get_header("referer",
                                                       self.context.absolute_url())
        self.request.response.redirect(self.destination_url)