# -*- coding: utf-8 -*-
#
# This file is part of SENAITE.CORE
#
# Copyright 2018 by its authors.
# Some rights reserved. See LICENSE.rst, CONTRIBUTORS.rst.

import os
import glob
import traceback

from DateTime import DateTime
from bika.lims.api.analysis import is_out_of_range
from bika.lims.interfaces import IReferenceSample, IReferenceAnalysis
from plone.resource.utils import iterDirectoriesOfType, queryResourceDirectory
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile

from bika.lims import api
from bika.lims import bikaMessageFactory as _
from bika.lims import logger
from bika.lims.browser import BrowserView
from bika.lims.config import POINTS_OF_CAPTURE
from bika.lims.utils import formatDecimalMark
from bika.lims.utils import format_supsub
from bika.lims.utils import to_utf8
from bika.lims.utils.analysis import format_uncertainty


class PrintView(BrowserView):
    """ Print view for a worksheet. This view acts as a placeholder, so
    the user can select the preferred options (AR by columns, AR by
    rows, etc.) for printing. Both a print button and a PDF button
    are shown.
    """

    template = ViewPageTemplateFile("../templates/print.pt")
    _DEFAULT_TEMPLATE = 'ar_by_column.pt'
    _DEFAULT_NUMCOLS = 4
    _TEMPLATES_DIR = '../templates/print'
    # Add-on folder to look for templates
    _TEMPLATES_ADDON_DIR = 'worksheets'
    _current_ws_index = 0
    _worksheets = []

    def __init__(self, context, request):
        super(PrintView, self).__init__(context, request)
        self._worksheets = [self.context]

    def __call__(self):
        """ Entry point of PrintView.
        If context.portal_type is a Worksheet, the PrintView is
        initialized to manage only that worksheet. If the
        context.portal_type is a WorksheetFolder and there are items
        selected in the request ('items' param), the PrintView shows
        the preview for all the selected worksheets.
        By default, an HTML-rendered template is returned, but if the
        request contains a param 'pdf' with value 1, a PDF for the
        worksheet is flushed instead.
        """

        if self.context.portal_type == 'Worksheet':
            self._worksheets = [self.context]

        elif self.context.portal_type == 'WorksheetFolder' and \
                self.request.get('items', ''):
            uids = self.request.get('items').split(',')
            uc = getToolByName(self.context, 'uid_catalog')
            self._worksheets = [obj.getObject() for obj in uc(UID=uids)]

        else:
            # Warn and redirect to referer
            logger.warning('PrintView: type not allowed: %s' %
                           self.context.portal_type)
            self.destination_url = self.request.get_header(
                "referer", self.context.absolute_url())

        # Generate PDF?
        if self.request.form.get('pdf', '0') == '1':
            return self._flush_pdf()
        else:
            return self.template()

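    # Note (added for illustration, not part of the original module): the
    # view is driven by request parameters -- 'items' (comma-separated
    # worksheet UIDs, only honoured when called on a WorksheetFolder),
    # 'pdf' ('1' to flush a PDF instead of the HTML preview), 'template'
    # (see renderWSTemplate and getCSS below) and 'numcols' (see
    # getNumColumns).
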
    def get_analysis_data_by_title(self, ar_data, title):
        """A template helper to pick an Analysis identified by the name of
        the current Analysis Service.

        ar_data is the dictionary structure which is returned by _ws_data
        """
        analyses = ar_data.get("analyses", [])
        for analysis in analyses:
            if analysis.get("title") == title:
                return analysis
        return None
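    # Example (hypothetical service title, added for illustration): inside a
    # print template one could use
    #   python:view.get_analysis_data_by_title(ar, 'Calcium')
    # to fetch the analysis dict whose 'title' matches the given Analysis
    # Service name, or None if the AR carries no such analysis.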

    def getWSTemplates(self):
        """ Returns a list of dicts with the templates available for
        worksheet printing: the .pt files found in templates/print plus
        those provided by add-on 'worksheets' resource directories.
        Each dict has the keys 'id' and 'title'.
        """
        this_dir = os.path.dirname(os.path.abspath(__file__))
        templates_dir = os.path.join(this_dir, self._TEMPLATES_DIR)
        tempath = '%s/%s' % (templates_dir, '*.pt')
        templates = [t.split('/')[-1] for t in glob.glob(tempath)]
        out = []
        for template in templates:
            out.append({'id': template, 'title': template[:-3]})
        for templates_resource in iterDirectoriesOfType(self._TEMPLATES_ADDON_DIR):
            prefix = templates_resource.__name__
            templates = [tpl for tpl in templates_resource.listDirectory()
                         if tpl.endswith('.pt')]
            for template in templates:
                out.append({
                    'id': '{0}:{1}'.format(prefix, template),
                    'title': '{0} ({1})'.format(template[:-3], prefix),
                })
        return out
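    # For illustration only (the add-on name below is hypothetical): with an
    # add-on resource directory registered as "my.addon", the returned list
    # would look roughly like
    #   [{'id': 'ar_by_column.pt', 'title': 'ar_by_column'},
    #    {'id': 'my.addon:custom.pt', 'title': 'custom (my.addon)'}]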

    def renderWSTemplate(self):
        """ Returns the current worksheet rendered with the template
        specified in the request (param 'template').
        Moves the iterator to the next worksheet available.
        """
        templates_dir = self._TEMPLATES_DIR
        embedt = self.request.get('template', self._DEFAULT_TEMPLATE)
        if embedt.find(':') >= 0:
            prefix, embedt = embedt.split(':')
            templates_dir = queryResourceDirectory(
                self._TEMPLATES_ADDON_DIR, prefix).directory
        embed = ViewPageTemplateFile(os.path.join(templates_dir, embedt))
        reptemplate = ""
        try:
            reptemplate = embed(self)
        except:
            tbex = traceback.format_exc()
            wsid = self._worksheets[self._current_ws_index].id
            reptemplate = (
                "<div class='error-print'>%s - %s '%s':<pre>%s</pre></div>"
                % (wsid, _("Unable to load the template"), embedt, tbex))
        if self._current_ws_index < len(self._worksheets):
            self._current_ws_index += 1
        return reptemplate
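    # Note (assumption about the calling template, stated for clarity): the
    # main print.pt template is expected to call renderWSTemplate() once per
    # worksheet returned by getWorksheets(); each call advances
    # _current_ws_index, so getWorksheet() refers to the worksheet currently
    # being rendered.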

    def getCSS(self):
        """ Returns the CSS to be used for the current template.
        If the selected template is 'default.pt', this method returns
        the content of 'default.css'. If no css file is found for the
        current template, an empty string is returned.
        """
        template = self.request.get('template', self._DEFAULT_TEMPLATE)
        content = ''
        if template.find(':') >= 0:
            prefix, template = template.split(':')
            resource = queryResourceDirectory(self._TEMPLATES_ADDON_DIR, prefix)
            css = '{0}.css'.format(template[:-3])
            if css in resource.listDirectory():
                content = resource.readFile(css)
        else:
            this_dir = os.path.dirname(os.path.abspath(__file__))
            templates_dir = os.path.join(this_dir, self._TEMPLATES_DIR)
            path = '%s/%s.css' % (templates_dir, template[:-3])
            with open(path, 'r') as content_file:
                content = content_file.read()
        return content
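    # Lookup convention (derived from the code above): the stylesheet name is
    # the template name with '.pt' replaced by '.css', taken either from the
    # add-on resource directory named by the 'prefix:' part of the template
    # id, or from the bundled templates/print folder.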

    def getNumColumns(self):
        """ Returns the number of columns to display
        """
        return int(self.request.get('numcols', self._DEFAULT_NUMCOLS))

    def getWorksheets(self):
        """ Returns the list of worksheets to be printed
        """
        return self._worksheets

    def getWorksheet(self):
        """ Returns the current worksheet from the list. Returns None when
        the iterator reaches the end of the array.
        """
        ws = None
        if self._current_ws_index < len(self._worksheets):
            ws = self._ws_data(self._worksheets[self._current_ws_index])
        return ws

    def splitList(self, elements, chunksnum):
        """ Splits a list into groups of chunksnum elements each; the last
        group holds the remainder.
        For the list [3,4,5,6,7,8,9] with chunksnum 4, the method will
        return the following list of groups:
        [[3,4,5,6],[7,8,9]]
        """
        if len(elements) < chunksnum:
            return [elements]
        groups = zip(*[elements[i::chunksnum] for i in range(chunksnum)])
        if len(groups) * chunksnum < len(elements):
            groups.extend([elements[-(len(elements) - len(groups) * chunksnum):]])
        return groups
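    # For illustration (not part of the original module): full chunks come
    # from zip() and only the remainder is appended as a list, so the return
    # value actually mixes tuples and lists, e.g.
    #   view.splitList([3, 4, 5, 6, 7, 8, 9], 4)  ->  [(3, 4, 5, 6), [7, 8, 9]]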

    def _lab_data(self):
        """ Returns a dictionary that represents the lab object
        Keys: obj, title, url, address, confidence, accredited,
        accreditation_body, accreditation_logo, logo
        """
        portal = self.context.portal_url.getPortalObject()
        lab = self.context.bika_setup.laboratory
        lab_address = lab.getPostalAddress() \
            or lab.getBillingAddress() \
            or lab.getPhysicalAddress()
        if lab_address:
            _keys = ['address', 'city', 'state', 'zip', 'country']
            _list = ["<div>%s</div>" % lab_address.get(v) for v in _keys
                     if lab_address.get(v)]
            lab_address = "".join(_list)
        else:
            lab_address = ''

        return {'obj': lab,
                'title': to_utf8(lab.Title()),
                'url': to_utf8(lab.getLabURL()),
                'address': to_utf8(lab_address),
                'confidence': lab.getConfidence(),
                'accredited': lab.getLaboratoryAccredited(),
                'accreditation_body': to_utf8(lab.getAccreditationBody()),
                'accreditation_logo': lab.getAccreditationBodyLogo(),
                'logo': "%s/logo_print.png" % portal.absolute_url()}

    def _ws_data(self, ws):
        """ Creates a ws dict, accessible from the view and from each
        specific template.
        Keys: obj, id, url, template_title, remarks, date_printed,
        date_created, ars, createdby, analyst, printedby,
        analyses_titles, portal, laboratory
        """
        data = {
            'obj': ws,
            'id': ws.id,
            'url': ws.absolute_url(),
            'template_title': ws.getWorksheetTemplateTitle(),
            'remarks': ws.getRemarks(),
            'date_printed': self.ulocalized_time(DateTime(), long_format=1),
            'date_created': self.ulocalized_time(ws.created(), long_format=1),
        }

        # Sub-objects
        data['ars'] = self._analyses_data(ws)
        data['createdby'] = self._createdby_data(ws)
        data['analyst'] = self._analyst_data(ws)
        data['printedby'] = self._printedby_data(ws)

        # Unify the analyses titles for the template
        # N.B. The analyses come in already sorted, so don't use a set() to
        # unify them: a set would not preserve that order
        ans = []
        for ar in data['ars']:
            for an in ar['analyses']:
                title = an["title"]
                if title in ans:
                    continue
                ans.append(title)
        data['analyses_titles'] = ans

        portal = self.context.portal_url.getPortalObject()
        data['portal'] = {'obj': portal,
                          'url': portal.absolute_url()}
        data['laboratory'] = self._lab_data()

        return data

    def _createdby_data(self, ws):
        """ Returns a dict that represents the user who created the ws
        Keys: username, fullname, email
        """
        username = ws.getOwner().getUserName()
        return {'username': username,
                'fullname': to_utf8(self.user_fullname(username)),
                'email': to_utf8(self.user_email(username))}

273
|
|
|
def _analyst_data(self, ws): |
274
|
|
|
""" Returns a dict that represent the analyst assigned to the |
275
|
|
|
worksheet. |
276
|
|
|
Keys: username, fullname, email |
277
|
|
|
""" |
278
|
|
|
username = ws.getAnalyst() |
279
|
|
|
return {'username': username, |
280
|
|
|
'fullname': to_utf8(self.user_fullname(username)), |
281
|
|
|
'email': to_utf8(self.user_email(username))} |
282
|
|
|
|
    def _printedby_data(self, ws):
        """ Returns a dict that represents the user who prints the ws
        Keys: username, fullname, email and, if the user is a lab contact
        with a signature, signature
        """
        data = {}
        member = self.context.portal_membership.getAuthenticatedMember()
        if member:
            username = member.getUserName()
            data['username'] = username
            data['fullname'] = to_utf8(self.user_fullname(username))
            data['email'] = to_utf8(self.user_email(username))

            c = [x for x in self.bika_setup_catalog(portal_type='LabContact')
                 if x.getObject().getUsername() == username]
            if c:
                sf = c[0].getObject().getSignature()
                if sf:
                    data['signature'] = sf.absolute_url() + "/Signature"

        return data

    def _analyses_data(self, ws):
        """ Returns a list of dicts. Each dict represents an analysis
        assigned to the worksheet
        """
        ans = ws.getAnalyses()
        layout = ws.getLayout()
        pos_count = 0
        prev_pos = 0
        ars = {}

        # mapping of analysis UID -> position in layout
        uid_to_pos_mapping = dict(
            map(lambda row: (row["analysis_uid"], row["position"]), layout))

        for an in ans:
            # Build the analysis-specific dict
            if an.portal_type == "DuplicateAnalysis":
                andict = self._analysis_data(an.getAnalysis())
                andict['id'] = an.getReferenceAnalysesGroupID()
                andict['obj'] = an
                andict['type'] = "DuplicateAnalysis"
                andict['reftype'] = 'd'
            else:
                andict = self._analysis_data(an)

            # Analysis position
            pos = uid_to_pos_mapping.get(an.UID(), 0)

            # compensate for possible bad data (dbw#104)
            if isinstance(pos, (list, tuple)) and pos[0] == 'new':
                pos = prev_pos

            pos = int(pos)
            prev_pos = pos

            # This allows all the analyses to be sorted automatically, even
            # if they share the same initial position.
            andict['tmp_position'] = (pos * 100) + pos_count
            andict['position'] = pos
            pos_count += 1

            # Look for the analysis request, client and sample info and
            # group the analyses per Analysis Request
            reqid = andict['request_id']
            if an.portal_type in ("ReferenceAnalysis", "DuplicateAnalysis"):
                reqid = an.getReferenceAnalysesGroupID()

            if reqid not in ars:
                arobj = an.aq_parent
                if an.portal_type == "DuplicateAnalysis":
                    arobj = an.getAnalysis().aq_parent

                ar = self._ar_data(arobj)
                ar['client'] = self._client_data(arobj.aq_parent)
                ar["sample"] = dict()
                if IReferenceSample.providedBy(an):
                    ar['sample'] = self._sample_data(an.getSample())
                else:
                    ar['sample'] = self._sample_data(an.getRequest())
                ar['analyses'] = []
                ar['tmp_position'] = andict['tmp_position']
                ar['position'] = andict['position']
                if an.portal_type in ("ReferenceAnalysis", "DuplicateAnalysis"):
                    ar['id'] = an.getReferenceAnalysesGroupID()
                    ar['url'] = an.absolute_url()
            else:
                ar = ars[reqid]
                if (andict['tmp_position'] < ar['tmp_position']):
                    ar['tmp_position'] = andict['tmp_position']
                    ar['position'] = andict['position']

            # Sort analyses by position
            ans = ar['analyses']
            ans.append(andict)
            ans.sort(
                lambda x, y: cmp(x.get('tmp_position'), y.get('tmp_position')))
            ar['analyses'] = ans
            ars[reqid] = ar

        ars = [a for a in ars.itervalues()]

        # Sort analysis requests by position
        ars.sort(
            lambda x, y: cmp(x.get('tmp_position'), y.get('tmp_position')))
        return ars
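    # Note on the ordering scheme above (derived from the code, stated for
    # clarity): tmp_position packs the layout slot and a running counter as
    # slot * 100 + counter (e.g. slot 3, counter 4 -> 304), so analyses that
    # share a slot keep a stable relative order, and ARs end up sorted by the
    # smallest tmp_position among their analyses.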

    def _analysis_data(self, analysis):
        """ Returns a dict that represents the analysis
        """
        decimalmark = analysis.aq_parent.aq_parent.getDecimalMark()
        keyword = analysis.getKeyword()
        andict = {
            'obj': analysis,
            'id': analysis.id,
            'title': analysis.Title(),
            'keyword': keyword,
            'scientific_name': analysis.getScientificName(),
            'accredited': analysis.getAccredited(),
            'point_of_capture': to_utf8(
                POINTS_OF_CAPTURE.getValue(analysis.getPointOfCapture())),
            'category': to_utf8(analysis.getCategoryTitle()),
            'result': analysis.getResult(),
            'unit': to_utf8(analysis.getUnit()),
            'formatted_unit': format_supsub(to_utf8(analysis.getUnit())),
            'capture_date': analysis.getResultCaptureDate(),
            'request_id': analysis.aq_parent.getId(),
            'formatted_result': '',
            'uncertainty': analysis.getUncertainty(),
            'formatted_uncertainty': '',
            'retested': analysis.isRetest(),
            'remarks': to_utf8(analysis.getRemarks()),
            'outofrange': False,
            'type': analysis.portal_type,
            'reftype': analysis.getReferenceType() if hasattr(
                analysis, 'getReferenceType') else None,
            'worksheet': None,
            'specs': {},
            'formatted_specs': '',
            'review_state': api.get_workflow_status_of(analysis),
        }

        andict['refsample'] = analysis.getSample().id \
            if IReferenceAnalysis.providedBy(analysis) \
            else analysis.getRequestID()

        specs = analysis.getResultsRange()
        andict['specs'] = specs
        scinot = self.context.bika_setup.getScientificNotationReport()
        andict['formatted_result'] = analysis.getFormattedResult(
            specs=specs, sciformat=int(scinot), decimalmark=decimalmark)

        fs = ''
        if specs.get('min', None) and specs.get('max', None):
            fs = '%s - %s' % (specs['min'], specs['max'])
        elif specs.get('min', None):
            fs = '> %s' % specs['min']
        elif specs.get('max', None):
            fs = '< %s' % specs['max']
        andict['formatted_specs'] = formatDecimalMark(fs, decimalmark)
        andict['formatted_uncertainty'] = format_uncertainty(
            analysis, analysis.getResult(), decimalmark=decimalmark,
            sciformat=int(scinot))

        # Out of range?
        andict['outofrange'] = is_out_of_range(analysis)[0]
        return andict

    def _sample_data(self, sample):
        """ Returns a dict that represents the sample
        Keys: obj, id, url, date_sampled, date_received, sample_type,
        sample_point
        """
        data = {}
        if sample:
            data = {'obj': sample,
                    'id': sample.id,
                    'url': sample.absolute_url(),
                    'date_sampled': self.ulocalized_time(
                        sample.getDateSampled(), long_format=True),
                    'date_received': self.ulocalized_time(
                        sample.getDateReceived(), long_format=0),
                    }

            if sample.portal_type == "ReferenceSample":
                data['sample_type'] = None
                data['sample_point'] = None
            else:
                data['sample_type'] = self._sample_type(sample)
                data['sample_point'] = self._sample_point(sample)
        return data

    def _sample_type(self, sample=None):
        """ Returns a dict that represents the sample type assigned to
        the sample specified
        Keys: obj, id, title, url
        """
        data = {}
        sampletype = sample.getSampleType() if sample else None
        if sampletype:
            data = {'obj': sampletype,
                    'id': sampletype.id,
                    'title': sampletype.Title(),
                    'url': sampletype.absolute_url()}
        return data

    def _sample_point(self, sample=None):
        """ Returns a dict that represents the sample point assigned to
        the sample specified
        Keys: obj, id, title, url
        """
        samplepoint = sample.getSamplePoint() if sample else None
        data = {}
        if samplepoint:
            data = {'obj': samplepoint,
                    'id': samplepoint.id,
                    'title': samplepoint.Title(),
                    'url': samplepoint.absolute_url()}
        return data

    def _ar_data(self, ar):
        """ Returns a dict that represents the analysis request
        """
        if not ar:
            return {}

        if ar.portal_type == "AnalysisRequest":
            return {'obj': ar,
                    'id': ar.getId(),
                    'date_received': self.ulocalized_time(
                        ar.getDateReceived(), long_format=0),
                    'date_sampled': self.ulocalized_time(
                        ar.getDateSampled(), long_format=True),
                    'url': ar.absolute_url(), }
        elif ar.portal_type == "ReferenceSample":
            return {'obj': ar,
                    'id': ar.id,
                    'date_received': self.ulocalized_time(
                        ar.getDateReceived(), long_format=0),
                    'date_sampled': self.ulocalized_time(
                        ar.getDateSampled(), long_format=True),
                    'url': ar.absolute_url(), }
        else:
            return {'obj': ar,
                    'id': ar.id,
                    'date_received': "",
                    'date_sampled': "",
                    'url': ar.absolute_url(), }

    def _client_data(self, client):
        """ Returns a dict that represents the client specified
        Keys: obj, id, url, name
        """
        data = {}
        if client:
            data['obj'] = client
            data['id'] = client.id
            data['url'] = client.absolute_url()
            data['name'] = to_utf8(client.getName())
        return data

    def _flush_pdf(self):
        """ Generates a PDF using the current layout as the template and
        returns the chunk of bytes.
        """
        return ""