# -*- coding: utf-8 -*-
#
# This file is part of SENAITE.CORE.
#
# SENAITE.CORE is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright 2018-2019 by its authors.
# Some rights reserved, see README and LICENSE.

from AccessControl import ClassSecurityInfo
import csv
from copy import deepcopy
from DateTime.DateTime import DateTime
from Products.Archetypes.event import ObjectInitializedEvent
from Products.CMFCore.WorkflowCore import WorkflowException
from bika.lims import bikaMessageFactory as _
from bika.lims.browser import ulocalized_time
from bika.lims.config import PROJECTNAME
from bika.lims.content.bikaschema import BikaSchema
from bika.lims.content.analysisrequest import schema as ar_schema
from bika.lims.content.sample import schema as sample_schema
from bika.lims.idserver import renameAfterCreation
from bika.lims.interfaces import IARImport, IClient
from bika.lims.utils import tmpID
from bika.lims.utils.analysisrequest import create_analysisrequest
from bika.lims.vocabularies import CatalogVocabulary
from bika.lims.workflow import doActionFor
from collective.progressbar.events import InitialiseProgressBar
from collective.progressbar.events import ProgressBar
from collective.progressbar.events import ProgressState
from collective.progressbar.events import UpdateProgressEvent
from Products.Archetypes import atapi
from Products.Archetypes.public import *
from plone.app.blob.field import FileField as BlobFileField
from Products.Archetypes.references import HoldingReference
from Products.Archetypes.utils import addStatusMessage
from Products.CMFCore.utils import getToolByName
from Products.CMFPlone.utils import _createObjectByType
from Products.DataGridField import CheckboxColumn
from Products.DataGridField import Column
from Products.DataGridField import DataGridField
from Products.DataGridField import DataGridWidget
from Products.DataGridField import DateColumn
from Products.DataGridField import LinesColumn
from Products.DataGridField import SelectColumn
from zope import event
from zope.event import notify
from zope.i18nmessageid import MessageFactory
from zope.interface import implements

from bika.lims.browser.widgets import ReferenceWidget as bReferenceWidget

import sys
import transaction

_p = MessageFactory(u"plone")

OriginalFile = BlobFileField(
    'OriginalFile',
    widget=ComputedWidget(
        visible=False
    ),
)

Filename = StringField(
    'Filename',
    widget=StringWidget(
        label=_('Original Filename'),
        visible=True
    ),
)

NrSamples = StringField(
    'NrSamples',
    widget=StringWidget(
        label=_('Number of samples'),
        visible=True
    ),
)

ClientName = StringField(
    'ClientName',
    searchable=True,
    widget=StringWidget(
        label=_("Client Name"),
    ),
)

ClientID = StringField(
    'ClientID',
    searchable=True,
    widget=StringWidget(
        label=_('Client ID'),
    ),
)

ClientOrderNumber = StringField(
    'ClientOrderNumber',
    searchable=True,
    widget=StringWidget(
        label=_('Client Order Number'),
    ),
)

ClientReference = StringField(
    'ClientReference',
    searchable=True,
    widget=StringWidget(
        label=_('Client Reference'),
    ),
)

Contact = ReferenceField(
    'Contact',
    allowed_types=('Contact',),
    relationship='ARImportContact',
    default_method='getContactUIDForUser',
    referenceClass=HoldingReference,
    vocabulary_display_path_bound=sys.maxint,
    widget=ReferenceWidget(
        label=_('Primary Contact'),
        size=20,
        visible=True,
        base_query={'is_active': True},
        showOn=True,
        popup_width='300px',
        colModel=[{'columnName': 'UID', 'hidden': True},
                  {'columnName': 'Fullname', 'width': '100',
                   'label': _('Name')}],
    ),
)

Batch = ReferenceField(
    'Batch',
    allowed_types=('Batch',),
    relationship='ARImportBatch',
    widget=bReferenceWidget(
        label=_('Batch'),
        visible=True,
        catalog_name='bika_catalog',
        base_query={'review_state': 'open'},
        showOn=True,
    ),
)

CCContacts = DataGridField(
    'CCContacts',
    allow_insert=False,
    allow_delete=False,
    allow_reorder=False,
    allow_empty_rows=False,
    columns=('CCNamesReport',
             'CCEmailsReport',
             'CCNamesInvoice',
             'CCEmailsInvoice'),
    default=[{'CCNamesReport': [],
              'CCEmailsReport': [],
              'CCNamesInvoice': [],
              'CCEmailsInvoice': []
              }],
    widget=DataGridWidget(
        columns={
            'CCNamesReport': LinesColumn('Report CC Contacts'),
            'CCEmailsReport': LinesColumn('Report CC Emails'),
            'CCNamesInvoice': LinesColumn('Invoice CC Contacts'),
            'CCEmailsInvoice': LinesColumn('Invoice CC Emails')
        }
    )
)

SampleData = DataGridField(
    'SampleData',
    allow_insert=True,
    allow_delete=True,
    allow_reorder=False,
    allow_empty_rows=False,
    allow_oddeven=True,
    columns=('ClientSampleID',
             'SamplingDate',
             'DateSampled',
             'SamplePoint',
             'SampleMatrix',
             'SampleType',  # not a schema field!
             'ContainerType',  # not a schema field!
             'Analyses',  # not a schema field!
             'Profiles'  # not a schema field!
             ),
    widget=DataGridWidget(
        label=_('Samples'),
        columns={
            'ClientSampleID': Column('Sample ID'),
            'SamplingDate': DateColumn('Sampling Date'),
            'DateSampled': DateColumn('Date Sampled'),
            'SamplePoint': SelectColumn(
                'Sample Point', vocabulary='Vocabulary_SamplePoint'),
            'SampleMatrix': SelectColumn(
                'Sample Matrix', vocabulary='Vocabulary_SampleMatrix'),
            'SampleType': SelectColumn(
                'Sample Type', vocabulary='Vocabulary_SampleType'),
            'ContainerType': SelectColumn(
                'Container', vocabulary='Vocabulary_ContainerType'),
            'Analyses': LinesColumn('Analyses'),
            'Profiles': LinesColumn('Profiles'),
        }
    )
)

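# The SampleType, ContainerType, Analyses and Profiles columns of SampleData
# above are marked "not a schema field!" because they are not stored on the
# Sample/AR schemas; save_sample_data() resolves them to catalog UIDs,
# analysis keywords and profile titles/keys, and workflow_script_import()
# later expands them into the analysis services requested for each AR.
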
Errors = LinesField(
    'Errors',
    widget=LinesWidget(
        label=_('Errors'),
        rows=10,
    )
)

schema = BikaSchema.copy() + Schema((
    OriginalFile,
    Filename,
    NrSamples,
    ClientName,
    ClientID,
    ClientOrderNumber,
    ClientReference,
    Contact,
    CCContacts,
    Batch,
    SampleData,
    Errors,
))

schema['title'].validators = ()
# Update the validation layer after changing the validator at runtime
schema['title']._validationLayer()


class ARImport(BaseFolder):
    security = ClassSecurityInfo()
    schema = schema
    displayContentsTab = False
    implements(IARImport)

    _at_rename_after_creation = True

    def _renameAfterCreation(self, check_auto_id=False):
        renameAfterCreation(self)

    def guard_validate_transition(self):
        """We may only attempt validation if file data has been uploaded.
        """
        data = self.getOriginalFile()
        if data and len(data):
            return True

    # TODO Workflow - ARImport - Remove
    def workflow_before_validate(self):
        """This function transposes values from the provided file into the
        ARImport object's fields, and checks for invalid values.

        If errors are found:
            - Validation transition is aborted.
            - Errors are stored on object and displayed to user.
        """
        # Re-set the errors on this ARImport each time validation is attempted.
        # When errors are detected they are immediately appended to this field.
        self.setErrors([])

        self.validate_headers()
        self.validate_samples()

        if self.getErrors():
            addStatusMessage(self.REQUEST, _p('Validation errors.'), 'error')
            transaction.commit()
            self.REQUEST.response.write(
                '<script>document.location.href="%s/edit"</script>' % (
                    self.absolute_url()))
        self.REQUEST.response.write(
            '<script>document.location.href="%s/view"</script>' % (
                self.absolute_url()))

    def at_post_edit_script(self):
        workflow = getToolByName(self, 'portal_workflow')
        trans_ids = [t['id'] for t in workflow.getTransitionsFor(self)]
        if 'validate' in trans_ids:
            workflow.doActionFor(self, 'validate')

    def workflow_script_import(self):
        """Create objects from valid ARImport
        """
        bsc = getToolByName(self, 'bika_setup_catalog')
        client = self.aq_parent

        title = _('Submitting Sample Import')
        description = _('Creating and initialising objects')
        bar = ProgressBar(self, self.REQUEST, title, description)
        notify(InitialiseProgressBar(bar))

        profiles = [x.getObject() for x in bsc(portal_type='AnalysisProfile')]

        gridrows = self.schema['SampleData'].get(self)
        row_cnt = 0
        for therow in gridrows:
            row = deepcopy(therow)
            row_cnt += 1

            # Profiles are titles, profile keys, or UIDs: convert them to UIDs.
            newprofiles = []
            for title in row['Profiles']:
                objects = [x for x in profiles
                           if title in (x.getProfileKey(), x.UID(), x.Title())]
                for obj in objects:
                    newprofiles.append(obj.UID())
            row['Profiles'] = newprofiles

            # Same for analyses
            newanalyses = set(self.get_row_services(row) +
                              self.get_row_profile_services(row))
            # get batch
            batch = self.schema['Batch'].get(self)
            if batch:
                row['Batch'] = batch.UID()
            # Add AR fields from schema into this row's data
            row['ClientReference'] = self.getClientReference()
            row['ClientOrderNumber'] = self.getClientOrderNumber()
            contact_uid = \
                self.getContact().UID() if self.getContact() else None
            row['Contact'] = contact_uid
            # Creating analysis request from gathered data
            ar = create_analysisrequest(
                client,
                self.REQUEST,
                row,
                analyses=list(newanalyses),)

            # progress marker update
            progress_index = float(row_cnt) / len(gridrows) * 100
            progress = ProgressState(self.REQUEST, progress_index)
            notify(UpdateProgressEvent(progress))

        # document has been written to, and redirect() fails here
        self.REQUEST.response.write(
            '<script>document.location.href="%s"</script>' % (
                self.absolute_url()))

    def get_header_values(self):
        """Scrape the "Header" values from the original input file
        """
        lines = self.getOriginalFile().data.splitlines()
        reader = csv.reader(lines)
        header_fields = header_data = []
        for row in reader:
            if not any(row):
                continue
            if row[0].strip().lower() == 'header':
                header_fields = [x.strip() for x in row][1:]
                continue
            if row[0].strip().lower() == 'header data':
                header_data = [x.strip() for x in row][1:]
                break
        if not (header_data or header_fields):
            return None
        if not (header_data and header_fields):
            self.error("File is missing header row or header data")
            return None
        # Map the "Header" field names onto the "Header Data" values
        values = dict(zip(header_fields, header_data))
        # blank cell from sheet will probably make it in here:
        if '' in values:
            del (values[''])
        return values

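    # A rough sketch of the "Header" section that get_header_values() reads
    # and save_header_data() maps onto schema fields (values are hypothetical;
    # the recognised field names are the ones listed in save_header_data()):
    #
    #   Header,File name,No of Samples,Client name,Client ID,Contact
    #   Header Data,import1.csv,2,Example Client,EC,Rita Mohale
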
    def save_header_data(self):
        """Save values from the file's header row into their schema fields.
        """
        client = self.aq_parent

        headers = self.get_header_values()
        if not headers:
            return False

        # Plain header fields that can be set into plain schema fields:
        for h, f in [
            ('File name', 'Filename'),
            ('No of Samples', 'NrSamples'),
            ('Client name', 'ClientName'),
            ('Client ID', 'ClientID'),
            ('Client Order Number', 'ClientOrderNumber'),
            ('Client Reference', 'ClientReference')
        ]:
            v = headers.get(h, None)
            if v:
                field = self.schema[f]
                field.set(self, v)
            del (headers[h])

        # Primary Contact
        v = headers.get('Contact', None)
        contacts = [x for x in client.objectValues('Contact')]
        contact = [c for c in contacts if c.Title() == v]
        if contact:
            self.schema['Contact'].set(self, contact)
        else:
            self.error("Specified contact '%s' does not exist; using '%s'" %
                       (v, contacts[0].Title()))
            self.schema['Contact'].set(self, contacts[0])
        del (headers['Contact'])

        # CCContacts
        field_value = {
            'CCNamesReport': '',
            'CCEmailsReport': '',
            'CCNamesInvoice': '',
            'CCEmailsInvoice': ''
        }
        for h, f in [
            # csv header name        DataGrid column ID
            ('CC Names - Report', 'CCNamesReport'),
            ('CC Emails - Report', 'CCEmailsReport'),
            ('CC Names - Invoice', 'CCNamesInvoice'),
            ('CC Emails - Invoice', 'CCEmailsInvoice'),
        ]:
            if h in headers:
                values = [x.strip() for x in headers.get(h, '').split(",")]
                field_value[f] = values if values else ''
                del (headers[h])
        self.schema['CCContacts'].set(self, [field_value])

        if headers:
            unexpected = ','.join(headers.keys())
            self.error("Unexpected header fields: %s" % unexpected)

    def get_sample_values(self):
        """Read the rows specifying Samples and return a dictionary with
        related data.

        keys are:
            headers - row with "Samples" in column 0.  These headers are
                used as dictionary keys in the rows below.
            prices - row with "Analysis Price" in column 0.
            total_analyses - row with "Total analyses" in column 0.
            price_totals - row with "Total price excl Tax" in column 0.
            samples - all other sample rows.
        """
        res = {'samples': []}
        lines = self.getOriginalFile().data.splitlines()
        reader = csv.reader(lines)
        next_rows_are_sample_rows = False
        for row in reader:
            if not any(row):
                continue
            if next_rows_are_sample_rows:
                vals = [x.strip() for x in row]
                if not any(vals):
                    continue
                res['samples'].append(zip(res['headers'], vals))
            elif row[0].strip().lower() == 'samples':
                res['headers'] = [x.strip() for x in row]
            elif row[0].strip().lower() == 'analysis price':
                res['prices'] = \
                    zip(res['headers'], [x.strip() for x in row])
            elif row[0].strip().lower() == 'total analyses':
                res['total_analyses'] = \
                    zip(res['headers'], [x.strip() for x in row])
            elif row[0].strip().lower() == 'total price excl tax':
                res['price_totals'] = \
                    zip(res['headers'], [x.strip() for x in row])
                next_rows_are_sample_rows = True
        return res

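    # A rough sketch of the "Samples" section parsed by get_sample_values()
    # (column names and values are hypothetical; the first cell of each
    # special row is the marker matched above, and analysis keyword columns
    # carry a truthy marker such as "1" when selected):
    #
    #   Samples,ClientSampleID,DateSampled,SampleType,Ca,Mg
    #   Analysis Price,,,,1.00,2.50
    #   Total Analyses,,,,,
    #   Total price excl Tax,,,,,
    #   ,S-001,2019-01-25,Water,1,1
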
    def save_sample_data(self):
        """Save values from the file's sample rows into the SampleData
        DataGrid, after doing some very basic validation.
        """
        bsc = getToolByName(self, 'bika_setup_catalog')
        keywords = self.bika_setup_catalog.uniqueValuesFor('getKeyword')
        profiles = []
        for p in bsc(portal_type='AnalysisProfile'):
            p = p.getObject()
            profiles.append(p.Title())
            profiles.append(p.getProfileKey())

        sample_data = self.get_sample_values()
        if not sample_data:
            return False

        # Columns that we expect, but do not find, are listed here.
        # We report on them only once, after looping through sample rows.
        missing = set()

        # This contains all sample header rows that were not handled
        # by this code
        unexpected = set()

        # Save other errors here instead of sticking them directly into
        # the field, so that they show up after MISSING and before EXPECTED
        errors = []

        # This will be the new sample-data field value, when we are done.
        grid_rows = []

        row_nr = 0
        for row in sample_data['samples']:
            row = dict(row)
            row_nr += 1

            # sid is just for referring the user back to row X in their
            # input spreadsheet
            gridrow = {'sid': row['Samples']}
            del (row['Samples'])

            # We'll use this later to verify the number against selections
            if 'Total number of Analyses or Profiles' in row:
                nr_an = row['Total number of Analyses or Profiles']
                del (row['Total number of Analyses or Profiles'])
            else:
                nr_an = 0
            try:
                nr_an = int(nr_an)
            except ValueError:
                nr_an = 0

            # TODO this is ignored and is probably meant to serve some purpose.
            del (row['Price excl Tax'])

            # ContainerType - not part of sample or AR schema
            if 'ContainerType' in row:
                title = row['ContainerType']
                if title:
                    obj = self.lookup(('ContainerType',),
                                      Title=row['ContainerType'])
                    if obj:
                        gridrow['ContainerType'] = obj[0].UID
                del (row['ContainerType'])

            if 'SampleMatrix' in row:
                # SampleMatrix - not part of sample or AR schema
                title = row['SampleMatrix']
                if title:
                    obj = self.lookup(('SampleMatrix',),
                                      Title=row['SampleMatrix'])
                    if obj:
                        gridrow['SampleMatrix'] = obj[0].UID
                del (row['SampleMatrix'])

            # match against sample schema
            for k, v in row.items():
                if k in ['Analyses', 'Profiles']:
                    continue
                if k in sample_schema:
                    del (row[k])
                    if v:
                        try:
                            value = self.munge_field_value(
                                sample_schema, row_nr, k, v)
                            gridrow[k] = value
                        except ValueError as e:
                            errors.append(e.message)

            # match against ar schema
            for k, v in row.items():
                if k in ['Analyses', 'Profiles']:
                    continue
                if k in ar_schema:
                    del (row[k])
                    if v:
                        try:
                            value = self.munge_field_value(
                                ar_schema, row_nr, k, v)
                            gridrow[k] = value
                        except ValueError as e:
                            errors.append(e.message)

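            # At this point any column still left in `row` should be an
            # analysis keyword or a profile title/key; e.g. a hypothetical
            # "Ca" column containing "1" selects that analysis for this
            # sample, while "", "0" or "false" leaves it unselected.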
            # Count and remove Keywords and Profiles from the list
            gridrow['Analyses'] = []
            for k, v in row.items():
                if k in keywords:
                    del (row[k])
                    if str(v).strip().lower() not in ('', '0', 'false'):
                        gridrow['Analyses'].append(k)
            gridrow['Profiles'] = []
            for k, v in row.items():
                if k in profiles:
                    del (row[k])
                    if str(v).strip().lower() not in ('', '0', 'false'):
                        gridrow['Profiles'].append(k)
            if len(gridrow['Analyses']) + len(gridrow['Profiles']) != nr_an:
                errors.append(
                    "Row %s: Number of analyses does not match provided value" %
                    row_nr)

            grid_rows.append(gridrow)

        self.setSampleData(grid_rows)

        if missing:
            self.error("SAMPLES: Missing expected fields: %s" %
                       ','.join(missing))

        for thing in errors:
            self.error(thing)

        if unexpected:
            self.error("Unexpected header fields: %s" %
                       ','.join(unexpected))

    def get_batch_header_values(self):
        """Scrape the "Batch Header" values from the original input file
        """
        lines = self.getOriginalFile().data.splitlines()
        reader = csv.reader(lines)
        batch_headers = batch_data = []
        for row in reader:
            if not any(row):
                continue
            if row[0].strip().lower() == 'batch header':
                batch_headers = [x.strip() for x in row][1:]
                continue
            if row[0].strip().lower() == 'batch data':
                batch_data = [x.strip() for x in row][1:]
                break
        if not (batch_data or batch_headers):
            return None
        if not (batch_data and batch_headers):
            self.error("Missing batch headers or data")
            return None
        # Map the "Batch Header" field names onto the "Batch Data" values
        values = dict(zip(batch_headers, batch_data))
        return values

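    # A rough sketch of the "Batch Header" section read above and consumed by
    # create_or_reference_batch() below (field names and values hypothetical;
    # a "title" matching an existing Batch of the client reuses it, otherwise
    # a new Batch is created from these values):
    #
    #   Batch Header,title,description,ClientBatchID
    #   Batch Data,Batch 2019-01,January deliveries,CB-001
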
    def create_or_reference_batch(self):
        """Save a reference to the Batch, if an existing batch is specified;
        otherwise create a new Batch, if possible, with the specified values.
        """
        client = self.aq_parent
        batch_headers = self.get_batch_header_values()
        if not batch_headers:
            return False
        # if the Batch's Title is specified and exists, no further
        # action is required. We will just set the Batch field to
        # use the existing object.
        batch_title = batch_headers.get('title', False)
        if batch_title:
            existing_batch = [x for x in client.objectValues('Batch')
                              if x.title == batch_title]
            if existing_batch:
                self.setBatch(existing_batch[0])
                return existing_batch[0]
        # If the batch title is specified but does not exist,
        # we will attempt to create the batch now.
        if 'title' in batch_headers:
            if 'id' in batch_headers:
                del (batch_headers['id'])
            if '' in batch_headers:
                del (batch_headers[''])
            batch = _createObjectByType('Batch', client, tmpID())
            batch.processForm()
            batch.edit(**batch_headers)
            self.setBatch(batch)

    def munge_field_value(self, schema, row_nr, fieldname, value):
        """Convert a spreadsheet value into a field value that fits in
        the corresponding schema field.

        - boolean: all values are true except '', 'false', or '0'.
        - reference: the title of an object in field.allowed_types;
          returns a UID or a list of UIDs.
        - datetime: returns a string value from ulocalized_time.

        Though this is only used during "saving" of csv data into schema
        fields, it will flag 'validation' errors, as this is the only chance
        we will get to complain about these field values.
        """
        field = schema[fieldname]
        if field.type == 'boolean':
            value = str(value).strip().lower()
            value = '' if value in ['0', 'no', 'false', 'none'] else '1'
            return value
        if field.type == 'reference':
            value = str(value).strip()
            brains = self.lookup(field.allowed_types, Title=value)
            if not brains:
                brains = self.lookup(field.allowed_types, UID=value)
            if not brains:
                raise ValueError('Row %s: value is invalid (%s=%s)' % (
                    row_nr, fieldname, value))
            if field.multiValued:
                return [b.UID for b in brains] if brains else []
            else:
                return brains[0].UID if brains else None
        if field.type == 'datetime':
            try:
                value = DateTime(value)
                return ulocalized_time(
                    value, long_format=True, time_only=False, context=self)
            except:
                raise ValueError('Row %s: value is invalid (%s=%s)' % (
                    row_nr, fieldname, value))
        return str(value)

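    # For example (hypothetical values): a boolean cell of "No" becomes '',
    # "Yes" becomes '1'; a reference cell of "Borehole 12" is looked up by
    # Title (then UID) and stored as that object's UID; a datetime cell of
    # "2019-01-25 10:30" is parsed with DateTime and returned as the
    # localized string from ulocalized_time.
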
    def validate_headers(self):
        """Validate the header fields against the parent Client and
        existing ARImports.
        """

        pc = getToolByName(self, 'portal_catalog')
        pu = getToolByName(self, "plone_utils")

        client = self.aq_parent

        # Verify Client Name
        if self.getClientName() != client.Title():
            self.error("%s: value is invalid (%s)." % (
                'Client name', self.getClientName()))

        # Verify Client ID
        if self.getClientID() != client.getClientID():
            self.error("%s: value is invalid (%s)." % (
                'Client ID', self.getClientID()))

        existing_arimports = pc(portal_type='ARImport',
                                review_state=['valid', 'imported'])
        # Verify Client Order Number
        for arimport in existing_arimports:
            if arimport.UID == self.UID() \
                    or not arimport.getClientOrderNumber():
                continue
            arimport = arimport.getObject()

            if arimport.getClientOrderNumber() == self.getClientOrderNumber():
                self.error('%s: already used by existing ARImport.' %
                           'ClientOrderNumber')
                break

        # Verify Client Reference
        for arimport in existing_arimports:
            if arimport.UID == self.UID() \
                    or not arimport.getClientReference():
                continue
            arimport = arimport.getObject()
            if arimport.getClientReference() == self.getClientReference():
                self.error('%s: already used by existing ARImport.' %
                           'ClientReference')
                break

        # getCCContacts has no value if object is not complete (eg during test)
        if self.getCCContacts():
            cc_contacts = self.getCCContacts()[0]
            contacts = [x for x in client.objectValues('Contact')]
            contact_names = [c.Title() for c in contacts]
            # validate Contact existence in this Client
            for k in ['CCNamesReport', 'CCNamesInvoice']:
                for val in cc_contacts[k]:
                    if val and val not in contact_names:
                        self.error('%s: value is invalid (%s)' % (k, val))
        else:
            cc_contacts = {'CCNamesReport': [],
                           'CCEmailsReport': [],
                           'CCNamesInvoice': [],
                           'CCEmailsInvoice': []
                           }
        # validate CC email addresses
        for k in ['CCEmailsReport', 'CCEmailsInvoice']:
            for val in cc_contacts.get(k, []):
                if val and not pu.validateSingleNormalizedEmailAddress(val):
                    self.error('%s: value is invalid (%s)' % (k, val))

    def validate_samples(self):
        """Scan through the SampleData values and make sure
        that each one is correct
        """

        bsc = getToolByName(self, 'bika_setup_catalog')
        keywords = bsc.uniqueValuesFor('getKeyword')
        profiles = []
        for p in bsc(portal_type='AnalysisProfile'):
            p = p.getObject()
            profiles.append(p.Title())
            profiles.append(p.getProfileKey())

        row_nr = 0
        for gridrow in self.getSampleData():
            row_nr += 1

            # validate against sample and ar schemas
            for k, v in gridrow.items():
                if k in ['Analyses', 'Profiles']:
                    continue
                if k in sample_schema:
                    try:
                        self.validate_against_schema(
                            sample_schema, row_nr, k, v)
                        continue
                    except ValueError as e:
                        self.error(e.message)
                        break
                if k in ar_schema:
                    try:
                        self.validate_against_schema(
                            ar_schema, row_nr, k, v)
                    except ValueError as e:
                        self.error(e.message)

            an_cnt = 0
            for v in gridrow['Analyses']:
                if v and v not in keywords:
                    self.error("Row %s: value is invalid (%s=%s)" %
                               (row_nr, 'Analysis keyword', v))
                else:
                    an_cnt += 1
            for v in gridrow['Profiles']:
                if v and v not in profiles:
                    self.error("Row %s: value is invalid (%s=%s)" %
                               (row_nr, 'Profile Title', v))
                else:
                    an_cnt += 1
            if not an_cnt:
                self.error("Row %s: No valid analyses or profiles" % row_nr)

    def validate_against_schema(self, schema, row_nr, fieldname, value):
        """Validate a single value against the corresponding schema field,
        raising ValueError with a row-numbered message if it does not fit.
        """
        field = schema[fieldname]
        if field.type == 'boolean':
            value = str(value).strip().lower()
            return value
        if field.type == 'reference':
            value = str(value).strip()
            if field.required and not value:
                raise ValueError("Row %s: %s field requires a value" % (
                    row_nr, fieldname))
            if not value:
                return value
            brains = self.lookup(field.allowed_types, UID=value)
            if not brains:
                raise ValueError("Row %s: value is invalid (%s=%s)" % (
                    row_nr, fieldname, value))
            if field.multiValued:
                return [b.UID for b in brains] if brains else []
            else:
                return brains[0].UID if brains else None
        if field.type == 'datetime':
            try:
                ulocalized_time(DateTime(value), long_format=True,
                                time_only=False, context=self)
            except:
                raise ValueError('Row %s: value is invalid (%s=%s)' % (
                    row_nr, fieldname, value))
        return value

    def lookup(self, allowed_types, **kwargs):
        """Lookup an object of type (allowed_types). kwargs is sent
        directly to the catalog.
        """
        at = getToolByName(self, 'archetype_tool')
        for portal_type in allowed_types:
            catalog = at.catalog_map.get(portal_type, [None])[0]
            catalog = getToolByName(self, catalog)
            kwargs['portal_type'] = portal_type
            brains = catalog(**kwargs)
            if brains:
                return brains

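    # Typical call (hypothetical title), as used in save_sample_data() and
    # munge_field_value():
    #
    #   brains = self.lookup(('ContainerType',), Title='500ml Bottle')
    #
    # The first allowed type whose catalog returns matching brains wins;
    # callers treat an empty result (None) as "not found".
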
    def get_row_services(self, row):
        """Return a list of services which are referenced in Analyses.
        Values may be UID, Title or Keyword.
        """
        bsc = getToolByName(self, 'bika_setup_catalog')
        services = set()
        for val in row.get('Analyses', []):
            brains = bsc(portal_type='AnalysisService', getKeyword=val)
            if not brains:
                brains = bsc(portal_type='AnalysisService', title=val)
            if not brains:
                brains = bsc(portal_type='AnalysisService', UID=val)
            if brains:
                services.add(brains[0].UID)
            else:
                self.error("Invalid analysis specified: %s" % val)
        return list(services)

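    # For example (hypothetical values), row.get('Analyses') == ['Ca', 'Mg']
    # resolves each entry by keyword first, then title, then UID, and returns
    # the UIDs of the matching AnalysisService objects.
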
    def get_row_profile_services(self, row):
        """Return a list of services which are referenced in Profiles.
        Values may be UID, Title or ProfileKey.
        """
        bsc = getToolByName(self, 'bika_setup_catalog')
        services = set()
        profiles = [x.getObject() for x in bsc(portal_type='AnalysisProfile')]
        for val in row.get('Profiles', []):
            objects = [x for x in profiles
                       if val in (x.getProfileKey(), x.UID(), x.Title())]
            if objects:
                for service in objects[0].getService():
                    services.add(service.UID())
            else:
                self.error("Invalid profile specified: %s" % val)
        return list(services)

    def get_row_container(self, row):
        """Return a sample container
        """
        bsc = getToolByName(self, 'bika_setup_catalog')
        val = row.get('Container', False)
        if val:
            brains = bsc(portal_type='Container', UID=row['Container'])
            if brains:
                return brains[0].getObject()
            brains = bsc(portal_type='ContainerType', UID=row['Container'])
            if brains:
                # XXX Cheating. The calculation of capacity vs. volume is not done.
                return brains[0].getObject()
        return None

    def get_row_profiles(self, row):
        bsc = getToolByName(self, 'bika_setup_catalog')
        profiles = []
        for profile_title in row.get('Profiles', []):
            profile = bsc(portal_type='AnalysisProfile', title=profile_title)
            profiles.append(profile)
        return profiles

    def Vocabulary_SamplePoint(self):
        vocabulary = CatalogVocabulary(self)
        vocabulary.catalog = 'bika_setup_catalog'
        folders = [self.bika_setup.bika_samplepoints]
        if IClient.providedBy(self.aq_parent):
            folders.append(self.aq_parent)
        return vocabulary(allow_blank=True, portal_type='SamplePoint')

    def Vocabulary_SampleMatrix(self):
        vocabulary = CatalogVocabulary(self)
        vocabulary.catalog = 'bika_setup_catalog'
        return vocabulary(allow_blank=True, portal_type='SampleMatrix')

    def Vocabulary_SampleType(self):
        vocabulary = CatalogVocabulary(self)
        vocabulary.catalog = 'bika_setup_catalog'
        folders = [self.bika_setup.bika_sampletypes]
        if IClient.providedBy(self.aq_parent):
            folders.append(self.aq_parent)
        return vocabulary(allow_blank=True, portal_type='SampleType')

    def Vocabulary_ContainerType(self):
        vocabulary = CatalogVocabulary(self)
        vocabulary.catalog = 'bika_setup_catalog'
        return vocabulary(allow_blank=True, portal_type='ContainerType')

    def error(self, msg):
        errors = list(self.getErrors())
        errors.append(msg)
        self.setErrors(errors)


atapi.registerType(ARImport, PROJECTNAME)