|
1
|
|
|
# -*- coding: utf-8 -*- |
|
2
|
|
|
# |
|
3
|
|
|
# This file is part of SENAITE.CORE |
|
4
|
|
|
# |
|
5
|
|
|
# Copyright 2018 by its authors. |
|
6
|
|
|
# Some rights reserved. See LICENSE.rst, CONTRIBUTORS.rst. |
|
7
|
|
|
|
|
8
|
|
|
import os |
|
9
|
|
|
import tempfile |
|
10
|
|
|
from email.mime.multipart import MIMEMultipart |
|
11
|
|
|
from email.mime.text import MIMEText |
|
12
|
|
|
|
|
13
|
|
|
from Products.CMFCore.utils import getToolByName |
|
14
|
|
|
from Products.CMFPlone.utils import _createObjectByType |
|
15
|
|
|
from Products.CMFPlone.utils import safe_unicode |
|
16
|
|
|
from bika.lims import bikaMessageFactory as _ |
|
17
|
|
|
from bika.lims import logger |
|
18
|
|
|
from bika.lims.idserver import renameAfterCreation |
|
19
|
|
|
from bika.lims.interfaces import ISample, IAnalysisService, IRoutineAnalysis, \ |
|
20
|
|
|
IAnalysisRequest |
|
21
|
|
|
from bika.lims.utils import attachPdf |
|
22
|
|
|
from bika.lims.utils import changeWorkflowState |
|
23
|
|
|
from bika.lims.utils import createPdf |
|
24
|
|
|
from bika.lims.utils import encode_header |
|
25
|
|
|
from bika.lims.utils import tmpID, copy_field_values |
|
26
|
|
|
from bika.lims.utils import to_utf8 |
|
27
|
|
|
from bika.lims.utils.sample import create_sample |
|
28
|
|
|
from bika.lims.utils.samplepartition import create_samplepartition |
|
29
|
|
|
from bika.lims.workflow import doActionFor |
|
30
|
|
|
from bika.lims.workflow import doActionsFor |
|
31
|
|
|
from bika.lims.workflow import getReviewHistoryActionsList |
|
32
|
|
|
from email.Utils import formataddr |
|
33
|
|
|
from plone import api |
|
34
|
|
|
|
|
35
|
|
|
|
|
36
|
|
|
def create_analysisrequest(client, request, values, analyses=None,
                           partitions=None, specifications=None, prices=None):
    """This is meant for general use and should do everything necessary to
    create and initialise an AR and any other required auxilliary objects
    (Sample, SamplePartition, Analysis...)

    :param client:
        The container (Client) in which the ARs will be created.
    :param request:
        The current Request object.
    :param values:
        a dict, where keys are AR|Sample schema field names.
    :param analyses:
        Analysis services list. If specified, augments the values in
        values['Analyses']. May consist of service objects, UIDs, or Keywords.
    :param partitions:
        A list of dictionaries, if specific partitions are required. If not
        specified, AR's sample is created with a single partition.
    :param specifications:
        These values augment those found in values['Specifications']
    :param prices:
        Allow different prices to be set for analyses. If not set, prices
        are read from the associated analysis service.
    :returns: the newly created AnalysisRequest object
    """
    # Don't pollute the dict param passed in
    values = dict(values.items())

    # Create new sample or locate the existing for secondary AR.
    # A "secondary" AR is one created against a Sample that already exists
    # (values['Sample'] carries the Sample object or its UID).
    secondary = False
    sample = None
    if not values.get('Sample', False):
        sample = create_sample(client, request, values)
    else:
        sample = get_sample_from_values(client, values)
        secondary = True

    # Create the Analysis Request
    ar = _createObjectByType('AnalysisRequest', client, tmpID())

    # Set some required fields manually before processForm is called
    ar.setSample(sample)
    values['Sample'] = sample
    # processForm fires Archetypes creation machinery (renames the temp id,
    # creates analyses from values, etc.)
    ar.processForm(REQUEST=request, values=values)
    ar.edit(RequestID=ar.getId())

    # Set analysis request analyses. 'Analyses' param are analyses services
    analyses = analyses if analyses else []
    service_uids = get_services_uids(
        context=client, analyses_serv=analyses, values=values)
    # processForm already has created the analyses, but here we create the
    # analyses with specs and prices. This function, even it is called 'set',
    # deletes the old analyses, so eventually we obtain the desired analyses.
    ar.setAnalyses(service_uids, prices=prices, specs=specifications)
    analyses = ar.getAnalyses(full_objects=True)

    # Create sample partitions. If no explicit partitions were requested,
    # a single partition holding all services is created.
    if not partitions:
        partitions = values.get('Partitions',
                                [{'services': service_uids}])

    part_num = 0
    prefix = sample.getId() + "-P"
    if secondary:
        # Always create new partitions if is a Secondary AR, cause it does
        # not make sense to reuse the partitions used in a previous AR!
        # Scan existing partition ids so new ones continue the numbering.
        sparts = sample.getSamplePartitions()
        for spart in sparts:
            spartnum = int(spart.getId().split(prefix)[1])
            if spartnum > part_num:
                part_num = spartnum

    for n, partition in enumerate(partitions):
        # Calculate partition id
        partition_id = '%s%s' % (prefix, part_num + 1)
        partition['part_id'] = partition_id
        # Point to or create sample partition
        if partition_id in sample.objectIds():
            partition['object'] = sample[partition_id]
        else:
            partition['object'] = create_samplepartition(
                sample,
                partition,
                analyses
            )
        part_num += 1

    # At this point, we have a fully created AR, with a Sample, Partitions and
    # Analyses, but the state of all them is the initial ("sample_registered").
    # We can now transition the whole thing (instead of doing it manually for
    # each object we created). After and Before transitions will take care of
    # cascading and promoting the transitions in all the objects "associated"
    # to this Analysis Request.
    sampling_workflow_enabled = sample.getSamplingWorkflowEnabled()
    action = 'no_sampling_workflow'
    if sampling_workflow_enabled:
        action = 'sampling_workflow'
    # Transition the Analysis Request and related objects to "sampled" (if
    # sampling workflow not enabled) or to "to_be_sampled" statuses.
    doActionFor(ar, action)

    if secondary:
        # If secondary AR, then we need to manually transition the AR (and its
        # children) to fit with the Sample Partition's current state
        sampleactions = getReviewHistoryActionsList(sample)
        doActionsFor(ar, sampleactions)
        # We need a workaround here in order to transition partitions.
        # auto_no_preservation_required and auto_preservation_required are
        # auto transitions applied to analysis requests, but partitions don't
        # have them, so we need to replace them by the sample_workflow
        # equivalent.
        if 'auto_no_preservation_required' in sampleactions:
            index = sampleactions.index('auto_no_preservation_required')
            sampleactions[index] = 'sample_due'
        elif 'auto_preservation_required' in sampleactions:
            index = sampleactions.index('auto_preservation_required')
            sampleactions[index] = 'to_be_preserved'
        # We need to transition the partition manually
        # (replay the sample's review-history actions on each partition)
        for partition in partitions:
            part = partition['object']
            doActionsFor(part, sampleactions)

    # Transition pre-preserved partitions
    for p in partitions:
        if 'prepreserved' in p and p['prepreserved']:
            part = p['object']
            doActionFor(part, 'preserve')

    # Once the ar is fully created, check if there are rejection reasons
    reject_field = values.get('RejectionReasons', '')
    if reject_field and reject_field.get('checkbox', False):
        doActionFor(ar, 'reject')

    return ar
|
170
|
|
|
|
|
171
|
|
|
|
|
172
|
|
|
def get_sample_from_values(context, values):
    """Resolve values['Sample'] to a Sample object.

    The entry may already be a Sample object, or a UID to be looked up
    through the 'bika_catalog' tool.

    :param context: acquisition context used to fetch the catalog tool
    :param values: a dict whose 'Sample' key holds a Sample object or a UID
    :returns: the resolved Sample object
    :raises RuntimeError: if no Sample can be resolved from the given value
    """
    candidate = values['Sample']
    if ISample.providedBy(candidate):
        sample = candidate
    else:
        # Treat the value as a UID and resolve it through the catalog
        catalog = getToolByName(context, 'bika_catalog')
        matches = catalog(UID=candidate)
        if not matches:
            raise RuntimeError("create_analysisrequest: invalid sample "
                               "value provided. values=%s" % values)
        sample = matches[0].getObject()
    if not sample:
        raise RuntimeError("create_analysisrequest: invalid sample "
                           "value provided. values=%s" % values)
    return sample
|
189
|
|
|
|
|
190
|
|
|
|
|
191
|
|
|
def get_services_uids(context=None, analyses_serv=None, values=None):
    """
    This function returns a list of UIDs from analyses services from its
    parameters.
    :param analyses_serv: A list (or one object) of service-related info items.
        see _resolve_items_to_service_uids() docstring.
    :type analyses_serv: list
    :param values: a dict, where keys are AR|Sample schema field names.
    :type values: dict
    :returns: a list of analyses services UIDs
    :raises RuntimeError: if parameters are missing, or neither services nor
        profiles can be gathered from them
    """
    if analyses_serv is None:
        analyses_serv = []
    if values is None:
        values = {}

    if not context or (not analyses_serv and not values):
        raise RuntimeError(
            "get_services_uids: Missing or wrong parameters.")

    uid_catalog = getToolByName(context, 'uid_catalog')

    # Start from the services passed in explicitly plus whatever the
    # 'Analyses' field of values carries.
    explicit = values.get('Analyses', None)
    collected = (explicit if explicit else []) + analyses_serv

    # It is possible to create analysis requests
    # by JSON petitions and services, profiles or types aren't allways send.
    # Sometimes we can get analyses and profiles that doesn't match and we
    # should act in consequence.
    # Getting the analyses profiles
    profiles = values.get('Profiles', [])
    if not isinstance(profiles, (list, tuple)):
        # Plone converts the incoming form value to a list, if there are
        # multiple values; but if not, it will send a string (a single UID).
        profiles = [profiles]

    if not collected and not profiles:
        raise RuntimeError(
            "create_analysisrequest: no analyses services or analysis"
            " profile provided")

    # Add analysis services UIDs from profiles to the collected services.
    for profile_uid in profiles:
        # When creating an AR, JS builds a query from selected fields.
        # Although it doesn't set empty values to any
        # Field, somehow 'Profiles' field can have an empty value in the set.
        # Thus, we should avoid querying by empty UID through 'uid_catalog'.
        if not profile_uid:
            continue
        brain = uid_catalog(UID=profile_uid)[0]
        profile = brain.getObject()
        # Only services UIDs
        # _resolve_items_to_service_uids() will remove duplicates
        collected += profile.getRawService()

    return _resolve_items_to_service_uids(collected)
|
241
|
|
|
|
|
242
|
|
|
|
|
243
|
|
|
def _resolve_items_to_service_uids(items):
    """ Returns a list of service uids without duplicates based on the items
    :param items:
        A list (or one object) of service-related info items. The list can be
        heterogeneous and each item can be:
        - Analysis Service instance
        - Analysis instance
        - Analysis Service title
        - Analysis Service UID
        - Analysis Service Keyword
        If an item that doesn't match any of the criterias above is found, the
        function will raise a RuntimeError
    :returns: a de-duplicated list of Analysis Service UIDs
    :raises RuntimeError: if an item cannot be resolved to a service
    """
    portal = None
    bsc = None
    service_uids = []

    # Maybe only a single item was passed
    if not isinstance(items, (list, tuple)):
        items = [items, ]
    for item in items:
        # service objects
        if IAnalysisService.providedBy(item):
            service_uids.append(item.UID())
            continue

        # Analysis objects (shortcut for eg copying analyses from other AR)
        if IRoutineAnalysis.providedBy(item):
            service_uids.append(item.getServiceUID())
            continue

        # An object UID already there?
        if item in service_uids:
            continue

        # Acquire the portal and setup catalog lazily: they are only needed
        # for string-like items (UID, title or keyword).
        portal = portal if portal else api.portal.get()
        bsc = bsc if bsc else getToolByName(portal, 'bika_setup_catalog')

        # Maybe object UID.
        brains = bsc(UID=item)
        if brains:
            service_uids.append(brains[0].UID)
            continue

        # Maybe service Title
        brains = bsc(portal_type='AnalysisService', title=item)
        if brains:
            service_uids.append(brains[0].UID)
            continue

        # Maybe service Keyword
        brains = bsc(portal_type='AnalysisService', getKeyword=item)
        if brains:
            service_uids.append(brains[0].UID)
            continue

        # Fixed error message: the original read "UID, title, keyword
        #  or title" ("title" duplicated, plus a double space from the
        # literal concatenation).
        raise RuntimeError(
            str(item) + " should be the UID, title or keyword "
            "of an AnalysisService.")
    return list(set(service_uids))
|
306
|
|
|
|
|
307
|
|
|
|
|
308
|
|
|
def notify_rejection(analysisrequest):
    """
    Notifies via email that a given Analysis Request has been rejected. The
    notification is sent to the Client contacts assigned to the Analysis
    Request.

    :param analysisrequest: Analysis Request to which the notification refers
    :returns: true if success (False if no recipient email could be resolved)
    """

    # We do this imports here to avoid circular dependencies until we deal
    # better with this notify_rejection thing.
    from bika.lims.browser.analysisrequest.reject import \
        AnalysisRequestRejectPdfView, AnalysisRequestRejectEmailView

    arid = analysisrequest.getId()

    # This is the template to render for the pdf that will be either attached
    # to the email and attached the the Analysis Request for further access
    tpl = AnalysisRequestRejectPdfView(analysisrequest, analysisrequest.REQUEST)
    html = tpl.template()
    html = safe_unicode(html).encode('utf-8')
    filename = '%s-rejected' % arid
    pdf_fn = tempfile.mktemp(suffix=".pdf")
    pdf = createPdf(htmlreport=html, outfile=pdf_fn)
    if pdf:
        # Attach the pdf to the Analysis Request
        attid = analysisrequest.aq_parent.generateUniqueId('Attachment')
        att = _createObjectByType(
            "Attachment", analysisrequest.aq_parent, attid)
        # Open the generated file in binary mode and close the handle when
        # done: the previous text-mode handle was never closed (resource
        # leak) and text mode can corrupt the PDF payload on some platforms.
        pdf_file = open(pdf_fn, 'rb')
        try:
            att.setAttachmentFile(pdf_file)
        finally:
            pdf_file.close()
        # Awkward workaround to rename the file
        attf = att.getAttachmentFile()
        attf.filename = '%s.pdf' % filename
        att.setAttachmentFile(attf)
        att.unmarkCreationFlag()
        renameAfterCreation(att)
        atts = analysisrequest.getAttachment() + [att] if \
            analysisrequest.getAttachment() else [att]
        atts = [a.UID() for a in atts]
        analysisrequest.setAttachment(atts)
        os.remove(pdf_fn)

    # This is the message for the email's body
    tpl = AnalysisRequestRejectEmailView(
        analysisrequest, analysisrequest.REQUEST)
    html = tpl.template()
    html = safe_unicode(html).encode('utf-8')

    # compose and send email.
    mailto = []
    lab = analysisrequest.bika_setup.laboratory
    mailfrom = formataddr((encode_header(lab.getName()), lab.getEmailAddress()))
    mailsubject = _('%s has been rejected') % arid
    contacts = [analysisrequest.getContact()] + analysisrequest.getCCContact()
    for contact in contacts:
        name = to_utf8(contact.getFullname())
        email = to_utf8(contact.getEmailAddress())
        if email:
            mailto.append(formataddr((encode_header(name), email)))
    if not mailto:
        # Nobody to notify
        return False
    mime_msg = MIMEMultipart('related')
    mime_msg['Subject'] = mailsubject
    mime_msg['From'] = mailfrom
    mime_msg['To'] = ','.join(mailto)
    mime_msg.preamble = 'This is a multi-part MIME message.'
    msg_txt = MIMEText(html, _subtype='html')
    mime_msg.attach(msg_txt)
    if pdf:
        attachPdf(mime_msg, pdf, filename)

    try:
        host = getToolByName(analysisrequest, 'MailHost')
        host.send(mime_msg.as_string(), immediate=True)
    except Exception:
        # Narrowed from a bare 'except:' so that system-exiting exceptions
        # (KeyboardInterrupt, SystemExit) are not swallowed. Sending the
        # notification is best-effort: log and carry on.
        logger.warning(
            "Email with subject %s was not sent (SMTP connection error)" % mailsubject)

    return True
|
388
|
|
|
|
|
389
|
|
|
|
|
390
|
|
|
def create_retest(ar):
    """Creates a retest (Analysis Request) from an invalidated Analysis Request

    :param ar: The invalidated Analysis Request
    :type ar: IAnalysisRequest
    :returns: the newly created retest, transitioned to 'to_be_verified'
    :rtype: IAnalysisRequest
    :raises ValueError: if ar is None, not an Analysis Request, already has
        a retest assigned, or is not in an invalid state
    """
    if not ar:
        raise ValueError("Source Analysis Request cannot be None")

    if not IAnalysisRequest.providedBy(ar):
        raise ValueError("Type not supported: {}".format(repr(type(ar))))

    if ar.getRetest():
        # Do not allow the creation of another retest!
        raise ValueError("Retest already set")

    if not ar.isInvalid():
        # Analysis Request must be in 'invalid' state.
        # Fixed: the original called .format(repr(ar)) on a literal with no
        # replacement field, so the AR was silently omitted from the message.
        raise ValueError("Cannot create a retest from a valid Analysis "
                         "Request {}".format(repr(ar)))

    # 1. Create the Retest (Analysis Request)
    ignore = ['Analyses', 'DatePublished', 'Invalidated', 'Sample']
    retest = _createObjectByType("AnalysisRequest", ar.aq_parent, tmpID())
    retest.setSample(ar.getSample())
    copy_field_values(ar, retest, ignore_fieldnames=ignore)
    renameAfterCreation(retest)

    # 2. Copy the analyses from the source
    criteria = dict(full_objects=True, retracted=False, reflexed=False)
    for an in ar.getAnalyses(**criteria):
        nan = _createObjectByType("Analysis", retest, an.getKeyword())

        # Make a copy
        ignore_fieldnames = ['Verificators', 'DataAnalysisPublished']
        copy_field_values(an, nan, ignore_fieldnames=ignore_fieldnames)
        nan.unmarkCreationFlag()

        # Set the workflow state of the analysis to 'to_be_verified', cause it
        # already has a result in place
        # TODO: We loose here the info about who submitted the result!
        changeWorkflowState(nan, 'bika_analysis_workflow', 'to_be_verified')
        nan.reindexObject()

    # 3. Assign the source to retest
    retest.setInvalidated(ar)

    # 4. Transition the retest to "to_be_verified"!
    changeWorkflowState(retest, 'bika_ar_workflow', 'to_be_verified')

    # 5. Reindex and other stuff
    retest.reindexObject()
    retest.aq_parent.reindexObject()
    return retest
|
444
|
|
|
|