Passed
Push — master ( d8e2ec...90ae0b )
by Jordi
10:07 queued 04:19
created

InstrumentResultsFileParser.getAnalysesTotalCount()   A

Complexity

Conditions 1

Size

Total Lines 4
Code Lines 2

Duplication

Lines 0
Ratio 0 %

Importance

Changes 0
Metric Value
eloc 2
dl 0
loc 4
rs 10
c 0
b 0
f 0
cc 1
nop 1
1
# -*- coding: utf-8 -*-
2
#
3
# This file is part of SENAITE.CORE
4
#
5
# Copyright 2018 by its authors.
6
# Some rights reserved. See LICENSE.rst, CONTRIBUTORS.rst.
7
8
import codecs
9
from datetime import datetime
10
from DateTime import DateTime
11
from Products.CMFCore.utils import getToolByName
12
from Products.CMFPlone.utils import _createObjectByType
13
from bika.lims import api
14
from bika.lims import bikaMessageFactory as _, logger
15
from bika.lims.catalog import CATALOG_ANALYSIS_REQUEST_LISTING
16
from bika.lims.exportimport.instruments.logger import Logger
17
from bika.lims.idserver import renameAfterCreation
18
from bika.lims.utils import t
19
from bika.lims.utils import tmpID
20
from bika.lims.workflow import doActionFor
21
22
23
class InstrumentResultsFileParser(Logger):
    """Base class for instrument results file parsers.

    Subclasses must implement parse() and populate the raw results
    dictionary, preferably through _addRawResult(). See getRawResults()
    for the expected structure of the parsed data.
    """

    def __init__(self, infile, mimetype):
        """
        :param infile: results input file to be parsed
        :param mimetype: results file type name (e.g. 'CSV', 'TXT')
        """
        Logger.__init__(self)
        self._infile = infile
        self._header = {}
        self._rawresults = {}
        self._mimetype = mimetype
        self._numline = 0

    def getInputFile(self):
        """ Returns the results input file
        """
        return self._infile

    def parse(self):
        """ Parses the input results file and populates the rawresults dict.
            See getRawResults() method for more info about rawresults format
            Returns True if the file has been parsed successfully.
            Is highly recommended to use _addRawResult method when adding
            raw results.
            IMPORTANT: To be implemented by child classes
        """
        raise NotImplementedError

    def getAttachmentFileType(self):
        """ Returns the file type name that will be used when creating the
            AttachmentType used by the importer for saving the results file as
            an attachment in each Analysis matched.
            By default returns self.getFileMimeType()
        """
        return self.getFileMimeType()

    def getFileMimeType(self):
        """ Returns the results file type
        """
        return self._mimetype

    def getHeader(self):
        """ Returns a dictionary with custom key, values
        """
        return self._header

    def _addRawResult(self, resid, values=None, override=False):
        """ Adds a set of raw results for an object with id=resid
            resid is usually an Analysis Request ID or Worksheet's Reference
            Analysis ID. The values are a dictionary in which the keys are
            analysis service keywords and the values, another dictionary with
            the key,value results.
            The column 'DefaultResult' must be provided, because it is used to
            map to the column from which the default result must be retrieved.

            Example:
            resid  = 'DU13162-001-R1'
            values = {
                'D2': {'DefaultResult': 'Final Conc',
                       'Remarks':       '',
                       'Resp':          '5816',
                       'ISTD Resp':     '274638',
                       'Resp Ratio':    '0.0212',
                       'Final Conc':    '0.9145',
                       'Exp Conc':      '1.9531',
                       'Accuracy':      '98.19' },

                'D3': {'DefaultResult': 'Final Conc',
                       'Remarks':       '',
                       'Resp':          '5816',
                       'ISTD Resp':     '274638',
                       'Resp Ratio':    '0.0212',
                       'Final Conc':    '0.9145',
                       'Exp Conc':      '1.9531',
                       'Accuracy':      '98.19' }
                }
        """
        # Bugfix: `values` previously defaulted to a mutable {} shared
        # between calls; default to None and create a fresh dict instead.
        if values is None:
            values = {}
        # Direct membership test on the dict (no .keys() materialization)
        if override or resid not in self._rawresults:
            self._rawresults[resid] = [values]
        else:
            self._rawresults[resid].append(values)

    def _emptyRawResults(self):
        """ Remove all grabbed raw results
        """
        self._rawresults = {}

    def getObjectsTotalCount(self):
        """ The total number of objects (ARs, ReferenceSamples, etc.) parsed
        """
        return len(self.getRawResults())

    def getResultsTotalCount(self):
        """ The total number of analysis results parsed
        """
        count = 0
        for val in self.getRawResults().values():
            count += len(val)
        return count

    def getAnalysesTotalCount(self):
        """ The total number of different analyses parsed
        """
        return len(self.getAnalysisKeywords())

    def getAnalysisKeywords(self):
        """ The distinct analysis service keywords found
        """
        # Accumulate into a set instead of repeatedly rebuilding
        # list(set(list + dict.keys())), which was quadratic and breaks on
        # Python 3 where dict.keys() returns a view
        keywords = set()
        for rows in self.getRawResults().values():
            for row in rows:
                keywords.update(row)
        return list(keywords)

    def getRawResults(self):
        """ Returns a dictionary containing the parsed results data
            Each dict key is the results row ID (usually AR ID or Worksheet's
            Reference Sample ID). Each item is another dictionary, in which
            the key is the AS Keyword.
            Inside the AS dict, the column 'DefaultResult' must be
            provided, that maps to the column from which the default
            result must be retrieved.
            If 'Remarks' column is found, its value will be set in Analysis
            Remarks field when using the default Importer.

            Example:
            raw_results['DU13162-001-R1'] = [{

                'D2': {'DefaultResult': 'Final Conc',
                       'Remarks':       '',
                       'Resp':          '5816',
                       'ISTD Resp':     '274638',
                       'Resp Ratio':    '0.0212',
                       'Final Conc':    '0.9145',
                       'Exp Conc':      '1.9531',
                       'Accuracy':      '98.19' },

                'D3': {'DefaultResult': 'Final Conc',
                       'Remarks':       '',
                       'Resp':          '5816',
                       'ISTD Resp':     '274638',
                       'Resp Ratio':    '0.0212',
                       'Final Conc':    '0.9145',
                       'Exp Conc':      '1.9531',
                       'Accuracy':      '98.19' }]

            in which:
            - 'DU13162-001-R1' is the Analysis Request ID,
            - 'D2' column is an analysis service keyword,
            - 'DefaultResult' column maps to the column with default result
            - 'Remarks' column with Remarks results for that Analysis
            - The rest of the dict columns are results (or additional info)
              that can be set to the analysis if needed (the default importer
              will look for them if the analysis has Interim fields).

            In the case of reference samples:
            Control/Blank:
            raw_results['QC13-0001-0002'] = {...}

            Duplicate of sample DU13162-009 (from AR DU13162-009-R1)
            raw_results['QC-DU13162-009-002'] = {...}

        """
        return self._rawresults

    def resume(self):
        """ Resumes the parse process
            Called by the Results Importer after parse() call
        """
        if len(self.getRawResults()) == 0:
            self.warn("No results found")
            return False
        return True
193
194
195
class InstrumentCSVResultsFileParser(InstrumentResultsFileParser):
    """Parser base class for CSV instrument results files.

    Drives the line-by-line parse loop; subclasses implement _parseline().
    """

    def __init__(self, infile, encoding=None):
        InstrumentResultsFileParser.__init__(self, infile, 'CSV')
        # Some Instruments can generate files with different encodings, so we
        # may need this parameter
        self._encoding = encoding

    def parse(self):
        """Read the input file and feed each non-empty, stripped line to
        _parseline().

        :return: False when a subclass signals a critical error
            (_parseline() returned -1), True otherwise.
        """
        infile = self.getInputFile()
        self.log("Parsing file ${file_name}",
                 mapping={"file_name": infile.filename})
        # We test in import functions if the file was uploaded
        f = None
        try:
            if self._encoding:
                f = codecs.open(infile.name, 'r', encoding=self._encoding)
            else:
                f = open(infile.name, 'rU')
            lines = f.readlines()
        except AttributeError:
            # `infile` has no usable `name`: it is already a file-like
            # object, read it directly
            lines = infile.readlines()
        finally:
            # Bugfix: close the handle we opened ourselves (it was
            # previously leaked)
            if f is not None:
                f.close()
        jump = 0
        for line in lines:
            self._numline += 1
            if jump == -1:
                # Something went wrong. Finish
                self.err("File processing finished due to critical errors")
                return False
            if jump > 0:
                # Jump some lines
                jump -= 1
                continue

            if not line or not line.strip():
                continue

            line = line.strip()
            jump = 0
            if line:
                jump = self._parseline(line)

        self.log(
            "End of file reached successfully: ${total_objects} objects, "
            "${total_analyses} analyses, ${total_results} results",
            mapping={"total_objects": self.getObjectsTotalCount(),
                     "total_analyses": self.getAnalysesTotalCount(),
                     "total_results": self.getResultsTotalCount()}
        )
        return True

    def splitLine(self, line):
        """Split a CSV line on commas and strip whitespace from each token.
        """
        sline = line.split(',')
        return [token.strip() for token in sline]

    def _parseline(self, line):
        """ Parses a line from the input CSV file and populates rawresults
            (look at getRawResults comment)
            returns -1 if critical error found and parser must end
            returns the number of lines to be jumped in next read. If 0, the
            parser reads the next line as usual
        """
        raise NotImplementedError
256
257
258
class InstrumentTXTResultsFileParser(InstrumentResultsFileParser):
    """Parser base class for TXT instrument results files with a custom
    field separator. Subclasses implement _parseline().
    """

    def __init__(self, infile, separator, encoding=None):
        """
        :param infile: results input file to be parsed
        :param separator: field separator used by split_line()
        :param encoding: optional file encoding
        """
        InstrumentResultsFileParser.__init__(self, infile, 'TXT')
        # Some Instruments can generate files with different encodings, so we
        # may need this parameter
        self._separator = separator
        self._encoding = encoding

    def parse(self):
        """Read the input file and feed each non-empty line to
        _parseline().

        :return: False when a subclass signals a critical error
            (_parseline() returned -1), True otherwise.
        """
        infile = self.getInputFile()
        self.log("Parsing file ${file_name}", mapping={"file_name": infile.filename})
        jump = 0
        lines = self.read_file(infile)
        for line in lines:
            self._numline += 1
            if jump == -1:
                # Something went wrong. Finish
                self.err("File processing finished due to critical errors")
                return False
            if jump > 0:
                # Jump some lines
                jump -= 1
                continue

            if not line:
                continue

            # Note: lines were already stripped by read_file()
            jump = self._parseline(line)

        self.log(
            "End of file reached successfully: ${total_objects} objects, "
            "${total_analyses} analyses, ${total_results} results",
            mapping={"total_objects": self.getObjectsTotalCount(),
                     "total_analyses": self.getAnalysesTotalCount(),
                     "total_results": self.getResultsTotalCount()}
        )
        return True

    def read_file(self, infile):
        """Given an input file read its contents, strip whitespace from the
         beginning and end of each line and return a list of the preprocessed
         lines read.

        :param infile: file that contains the data to be read
        :return: list of the read lines with stripped whitespace
        """
        try:
            encoding = self._encoding if self._encoding else None
            mode = 'r' if self._encoding else 'rU'
            with codecs.open(infile.name, mode, encoding=encoding) as f:
                lines = f.readlines()
        except AttributeError:
            # `infile` has no usable `name`: it is already a file-like
            # object, read it directly
            lines = infile.readlines()
        lines = [line.strip() for line in lines]
        return lines

    def split_line(self, line):
        """Split a line on the configured separator and strip whitespace
        from each token.
        """
        sline = line.split(self._separator)
        return [token.strip() for token in sline]

    def _parseline(self, line):
        """ Parses a line from the input TXT file and populates rawresults
            (look at getRawResults comment)
            returns -1 if critical error found and parser must end
            returns the number of lines to be jumped in next read. If 0, the
            parser reads the next line as usual
        """
        raise NotImplementedError
329
330
331
class AnalysisResultsImporter(Logger):
332
333
    def __init__(self, parser, context,
                 override=None,
                 allowed_ar_states=None,
                 allowed_analysis_states=None,
                 instrument_uid=None):
        """
        :param parser: InstrumentResultsFileParser providing the raw results
        :param context: acquisition context used to look up the portal tools
        :param override: two-element list, see getOverride(). Defaults to
            [False, False] (never override previously entered results).
        :param allowed_ar_states: Sample review states the importer may
            update; defaults to sample_received/attachment_due/to_be_verified
        :param allowed_analysis_states: analysis review states the importer
            may update; defaults to unassigned/assigned/to_be_verified
        :param instrument_uid: UID of the Instrument, used for calibration
            test imports
        """
        Logger.__init__(self)
        self._parser = parser
        self.context = context
        self._allowed_ar_states = allowed_ar_states
        self._allowed_analysis_states = allowed_analysis_states
        # Bugfix: the signature previously used a mutable default
        # (override=[False, False]) that would be shared across instances
        self._override = override if override is not None else [False, False]
        self._idsearch = ['getId', 'getClientSampleID']
        self._priorizedsearchcriteria = ''
        self.bsc = getToolByName(self.context, 'bika_setup_catalog')
        self.bac = getToolByName(self.context, 'bika_analysis_catalog')
        self.ar_catalog = getToolByName(
            self.context, CATALOG_ANALYSIS_REQUEST_LISTING)
        self.pc = getToolByName(self.context, 'portal_catalog')
        self.bc = getToolByName(self.context, 'bika_catalog')
        self.wf = getToolByName(self.context, 'portal_workflow')
        if not self._allowed_ar_states:
            self._allowed_ar_states = ['sample_received',
                                       'attachment_due',
                                       'to_be_verified']
        if not self._allowed_analysis_states:
            self._allowed_analysis_states = [
                'unassigned', 'assigned', 'to_be_verified'
            ]
        if not self._idsearch:
            self._idsearch = ['getId']
        self.instrument_uid = instrument_uid
364
365
    def getParser(self):
        """Return the results file parser this importer works with."""
        return self._parser
369
370
    def getAllowedARStates(self):
        """Return the allowed Analysis Request review states.

        Only analyses contained in an Analysis Request whose current
        state is one of these will be taken into account by the import.
        """
        return self._allowed_ar_states
377
378
    def getAllowedAnalysisStates(self):
        """Return the allowed analysis review states.

        Only analyses whose current state is one of these will be taken
        into account by the import.
        """
        return self._allowed_analysis_states
384
385
    def getOverride(self):
        """Return the policy for overriding previously entered results.

        Possible values:
          [False, False] -- results are never overridden
          [True, False]  -- results are overridden only when no result
                            has been entered yet
          [True, True]   -- results are always overridden, even when the
                            parsed result is empty
        """
        return self._override
394
395
    def getKeywordsToBeExcluded(self):
        """Return the analysis codes/keywords the importer must skip.

        The base implementation excludes nothing; subclasses may override.
        """
        return []
400
401
    def process(self):
        """Run the parser and import every parsed result into its matching
        analysis.

        Workflow:
        1. parse() the input file and copy the parser's errors/warnings/logs.
        2. Discard analysis keywords that match no Analysis Service.
        3. For every object id in the raw results, find its ZODB analyses
           (creating reference analyses on the Instrument for calibration
           tests when `instrument_uid` is set), set the result values and
           link the results file as an attachment on the worksheet.
        4. Log a summary of the imported Samples/Instruments/results.

        :return: False when the parser reported no results; None otherwise.
        """
        self._parser.parse()
        parsed = self._parser.resume()
        self._errors = self._parser.errors
        self._warns = self._parser.warns
        self._logs = self._parser.logs
        self._priorizedsearchcriteria = ''

        if parsed is False:
            return False

        # Allowed analysis states
        allowed_ar_states_msg = [t(_(s)) for s in self.getAllowedARStates()]
        allowed_an_states_msg = [
                t(_(s)) for s in self.getAllowedAnalysisStates()]
        self.log("Allowed Sample states: ${allowed_states}",
                 mapping={'allowed_states': ', '.join(allowed_ar_states_msg)})
        self.log("Allowed analysis states: ${allowed_states}",
                 mapping={'allowed_states': ', '.join(allowed_an_states_msg)})

        # Exclude non existing ACODEs
        acodes = []
        ancount = 0
        instprocessed = []
        importedars = {}
        importedinsts = {}
        rawacodes = self._parser.getAnalysisKeywords()
        exclude = self.getKeywordsToBeExcluded()
        for acode in rawacodes:
            if acode in exclude or not acode:
                continue
            service = self.bsc(getKeyword=acode)
            if not service:
                self.warn('Service keyword ${analysis_keyword} not found',
                          mapping={"analysis_keyword": acode})
            else:
                acodes.append(acode)
        if len(acodes) == 0:
            self.warn("Service keywords: no matches found")

        # Attachments will be created in any worksheet that contains
        # analyses that are updated by this import
        attachments = {}
        infile = self._parser.getInputFile()

        for objid, results in self._parser.getRawResults().iteritems():
            # Allowed more than one result for the same sample and analysis.
            # Needed for calibration tests
            for result in results:
                analyses = self._getZODBAnalyses(objid)
                inst = None
                if len(analyses) == 0 and self.instrument_uid:
                    # No registered analyses found, but maybe we need to
                    # create them first if an instruemnt id has been set in
                    insts = self.bsc(portal_type='Instrument',
                                     UID=self.instrument_uid)
                    if len(insts) == 0:
                        # No instrument found
                        self.warn("No Sample with "
                                  "'${allowed_ar_states}' "
                                  "states found, And no QC"
                                  "analyses found for ${object_id}",
                                  mapping={"allowed_ar_states": ', '.join(
                                      allowed_ar_states_msg),
                                          "object_id": objid})
                        self.warn("Instrument not found")
                        continue

                    inst = insts[0].getObject()

                    # Create a new ReferenceAnalysis and link it to
                    # the Instrument
                    # Here we have an objid (i.e. R01200012) and
                    # a dict with results (the key is the AS keyword).
                    # How can we create a ReferenceAnalysis if we don't know
                    # which ReferenceSample we might use?
                    # Ok. The objid HAS to be the ReferenceSample code.
                    refsample = self.bc(portal_type='ReferenceSample', id=objid)
                    if refsample and len(refsample) == 1:
                        refsample = refsample[0].getObject()

                    elif refsample and len(refsample) > 1:
                        # More than one reference sample found!
                        self.warn(
                            "More than one reference sample found for"
                            "'${object_id}'",
                            mapping={"object_id": objid})
                        continue

                    else:
                        # No reference sample found!
                        self.warn("No Reference Sample found for ${object_id}",
                                  mapping={"object_id": objid})
                        continue

                    # For each acode, create a ReferenceAnalysis and attach it
                    # to the Reference Sample
                    services = self.bsc(portal_type='AnalysisService')
                    service_uids = [service.UID for service in services
                                    if service.getObject().getKeyword()
                                    in result.keys()]
                    analyses = inst.addReferences(refsample, service_uids)

                elif len(analyses) == 0:
                    # No analyses found
                    self.warn("No Sample with "
                              "'${allowed_ar_states}' "
                              "states neither QC analyses found "
                              "for ${object_id}",
                              mapping={
                                 "allowed_ar_states": ', '.join(
                                     allowed_ar_states_msg),
                                 "object_id": objid})
                    continue

                # Look for timestamp
                # A 'DateTime' pseudo-keyword carries the capture date for
                # the whole row; pop it so it is not treated as an analysis
                capturedate = result.get('DateTime', {}).get('DateTime', None)
                if capturedate:
                    del result['DateTime']
                for acode, values in result.iteritems():
                    if acode not in acodes:
                        # Analysis keyword doesn't exist
                        continue

                    ans = [analysis for analysis in analyses
                           if analysis.getKeyword() == acode]

                    if len(ans) > 1:
                        self.warn("More than one analysis found for "
                                  "${object_id} and ${analysis_keyword}",
                                  mapping={"object_id": objid,
                                           "analysis_keyword": acode})
                        continue

                    elif len(ans) == 0:
                        self.warn("No analyses found for ${object_id} "
                                  "and ${analysis_keyword}",
                                  mapping={"object_id": objid,
                                           "analysis_keyword": acode})
                        continue

                    analysis = ans[0]

                    # Create attachment in worksheet linked to this analysis.
                    # Only if this import has not already created the
                    # attachment
                    # And only if the filename of the attachment is unique in
                    # this worksheet.  Otherwise we will attempt to use
                    # existing attachment.
                    ws = analysis.getWorksheet()
                    if ws:
                        if ws.getId() not in attachments:
                            fn = infile.filename
                            fn_attachments = self.get_attachment_filenames(ws)
                            if fn in fn_attachments.keys():
                                attachments[ws.getId()] = fn_attachments[fn]
                            else:
                                attachments[ws.getId()] = \
                                    self.create_attachment(ws, infile)

                    if capturedate:
                        values['DateTime'] = capturedate
                    processed = self._process_analysis(objid, analysis, values)
                    if processed:
                        ancount += 1
                        if inst:
                            # Calibration Test (import to Instrument)
                            instprocessed.append(inst.UID())
                            importedinst = inst.title in importedinsts.keys() \
                                and importedinsts[inst.title] or []
                            if acode not in importedinst:
                                importedinst.append(acode)
                            importedinsts[inst.title] = importedinst
                        else:
                            ar = analysis.portal_type == 'Analysis' \
                                and analysis.aq_parent or None
                            if ar and ar.UID:
                                importedar = ar.getId() in importedars.keys() \
                                            and importedars[ar.getId()] or []
                                if acode not in importedar:
                                    importedar.append(acode)
                                importedars[ar.getId()] = importedar

                        if ws:
                            self.attach_attachment(
                                analysis, attachments[ws.getId()])
                        else:
                            self.warn(
                                "Attachment cannot be linked to analysis as "
                                "it is not assigned to a worksheet (%s)" %
                                analysis)

        # Summary logging: per-AR and per-Instrument imported keywords
        for arid, acodes in importedars.iteritems():
            acodesmsg = ["Analysis %s" % acod for acod in acodes]
            self.log(
                    "${request_id}: ${analysis_keywords} imported sucessfully",
                    mapping={"request_id": arid,
                             "analysis_keywords": acodesmsg})

        for instid, acodes in importedinsts.iteritems():
            acodesmsg = ["Analysis %s" % acod for acod in acodes]
            msg = "%s: %s %s" % (instid,
                                 ", ".join(acodesmsg),
                                 "imported sucessfully")
            self.log(msg)

        if self.instrument_uid:
            self.log(
                "Import finished successfully: ${nr_updated_ars} Samples, "
                "${nr_updated_instruments} Instruments and "
                "${nr_updated_results} "
                "results updated",
                mapping={"nr_updated_ars": str(len(importedars)),
                         "nr_updated_instruments": str(len(importedinsts)),
                         "nr_updated_results": str(ancount)})
        else:
            self.log(
                "Import finished successfully: ${nr_updated_ars} Samples and "
                "${nr_updated_results} results updated",
                mapping={"nr_updated_ars": str(len(importedars)),
                         "nr_updated_results": str(ancount)})
622
623
    def create_mime_attachmenttype(self):
        """Return the UID of the AttachmentType matching the parser's
        attachment file type, creating it on the fly when none exists yet.
        """
        file_type = self._parser.getAttachmentFileType()
        brains = self.bsc(portal_type="AttachmentType", title=file_type)
        if brains:
            return brains[0].UID
        # No matching AttachmentType: autogenerate one in the setup folder
        folder = self.context.bika_setup.bika_attachmenttypes
        obj = _createObjectByType("AttachmentType", folder, tmpID())
        obj.edit(title=file_type,
                 description="Autogenerated file type")
        obj.unmarkCreationFlag()
        renameAfterCreation(obj)
        return obj.UID()
638
639
    def create_attachment(self, ws, infile):
        """Create an Attachment object inside worksheet `ws` holding the
        raw results file `infile`.

        :return: the created Attachment, or None when either the
            AttachmentType UID or the input file is missing.
        """
        attuid = self.create_mime_attachmenttype()
        if not (attuid and infile):
            return None
        attachment = _createObjectByType("Attachment", ws, tmpID())
        logger.info("Creating %s in %s" % (attachment, ws))
        attachment.edit(
            AttachmentFile=infile,
            AttachmentType=attuid,
            AttachmentKeys='Results, Automatic import')
        attachment.reindexObject()
        return attachment
651
652
    def attach_attachment(self, analysis, attachment):
        """
        Attach a file or a given set of files to an analysis

        :param analysis: analysis where the files are to be attached
        :param attachment: files to be attached. This can be either a
        single file or a list of files
        :return: None
        """
        if not attachment:
            return
        # A list of attachments: recurse over each one
        if isinstance(attachment, list):
            for item in attachment:
                self.attach_attachment(analysis, item)
            return
        # Skip the link when a file with the same name is already attached
        existing = analysis.getAttachment()
        known_filenames = [a.getAttachmentFile().filename for a in existing]
        if attachment.getAttachmentFile().filename in known_filenames:
            self.warn("Attachment %s was not linked to analysis %s" %
                      (attachment.UID(), analysis))
            return
        existing.append(attachment)
        logger.info(
            "Attaching %s to %s" % (attachment.UID(), analysis))
        analysis.setAttachment([a.UID() for a in existing])
        analysis.reindexObject()
679
680
    def get_attachment_filenames(self, ws):
        """Map each attachment filename found in worksheet `ws` to the
        list of Attachment objects carrying that filename.
        """
        filenames = {}
        for attachment in ws.objectValues('Attachment'):
            name = attachment.getAttachmentFile().filename
            filenames.setdefault(name, []).append(attachment)
        return filenames
688
689
    def _getObjects(self, objid, criteria, states):
        """Query the catalogs for objects matching `objid` under the given
        search `criteria`, restricted to the given review `states`.

        Remembers `criteria` as the prioritized search criteria whenever
        the query yields results, so later searches can try it first.
        """
        found = []
        if criteria == 'arid':
            found = self.ar_catalog(getId=objid, review_state=states)
        elif criteria == 'csid':
            found = self.ar_catalog(getClientSampleID=objid,
                                    review_state=states)
        elif criteria == 'aruid':
            found = self.ar_catalog(UID=objid, review_state=states)
        elif criteria == 'rgid':
            found = self.bac(portal_type=['ReferenceAnalysis',
                                          'DuplicateAnalysis'],
                             getReferenceAnalysesGroupID=objid)
        elif criteria == 'rid':
            found = self.bac(portal_type=['ReferenceAnalysis',
                                          'DuplicateAnalysis'], id=objid)
        elif criteria == 'ruid':
            found = self.bac(portal_type=['ReferenceAnalysis',
                                          'DuplicateAnalysis'], UID=objid)
        if found and len(found) > 0:
            self._priorizedsearchcriteria = criteria
        return found
717
718
    def _getZODBAnalyses(self, objid):
        """Search the ZODB for the analyses to be filled with results.

        objid can be either an AR ID or a Worksheet's Reference Sample
        ID. Only analyses whose review state matches
        getAllowedAnalysisStates() are returned; for regular analyses
        the AR states from getAllowedARStates() are honoured as well.

        :param objid: AR ID or Worksheet's Reference Sample ID
        :return: list of analysis objects; empty list if none found
        """
        allowed_ar_states = self.getAllowedARStates()
        allowed_an_states = self.getAllowedAnalysisStates()
        allowed_an_states_msg = [_(state) for state in allowed_an_states]

        # Acceleration of searches using priorization: if the last
        # successful search was reference-based, try that path first
        analyses = []
        if self._priorizedsearchcriteria in ('rgid', 'rid', 'ruid'):
            # Look from reference analyses
            analyses = self._getZODBAnalysesFromReferenceAnalyses(
                objid, self._priorizedsearchcriteria)
        if len(analyses) == 0:
            # Look from ar and derived
            analyses = self._getZODBAnalysesFromAR(
                objid, '', ['getId', 'getClientSampleID'],
                allowed_ar_states)

        # Discard regular analyses whose review state is not allowed;
        # reference/duplicate analyses are kept unconditionally
        def _state_ok(analysis):
            if analysis.portal_type != 'Analysis':
                return True
            state = self.wf.getInfoFor(analysis, 'review_state')
            return state in allowed_an_states

        analyses = [analysis for analysis in analyses if _state_ok(analysis)]

        if len(analyses) == 0:
            self.warn(
                "No analyses '${allowed_analysis_states}' "
                "states found for ${object_id}",
                mapping={"allowed_analysis_states": ', '.join(
                    allowed_an_states_msg),
                         "object_id": objid})

        return analyses
761
762
    def _getZODBAnalysesFromAR(self, objid, criteria,
                               allowedsearches, arstates):
        """Resolve objid to a single AR and return its analyses.

        When a criteria is given it is tried first; on a miss the
        search is retried without one, walking 'arid', 'csid' and
        'aruid' in order, gated by ``allowedsearches``. When no AR is
        found at all, the search falls through to reference analyses.

        :param objid: AR identifier (id, client sample id or UID)
        :param criteria: explicit search criteria, or falsy to auto-try
        :param allowedsearches: index names the caller allows searching
        :param arstates: allowed AR review states
        :return: list of analysis objects; empty list on ambiguity
        """
        ars = []
        if criteria:
            ars = self._getObjects(objid, criteria, arstates)
            if not ars or len(ars) == 0:
                # Miss with the explicit criteria: retry auto-detection
                return self._getZODBAnalysesFromAR(objid, None,
                                                   allowedsearches, arstates)
        else:
            # Each criteria is only tried if its index is allowed
            gates = {'arid': 'getId',
                     'csid': 'getClientSampleID',
                     'aruid': 'getId'}
            for crit in ('arid', 'csid', 'aruid'):
                if gates[crit] not in allowedsearches:
                    continue
                ars = self._getObjects(objid, crit, arstates)
                if ars and len(ars) > 0:
                    break

        if not ars or len(ars) == 0:
            return self._getZODBAnalysesFromReferenceAnalyses(objid, None)

        if len(ars) > 1:
            self.err("More than one Sample found for ${object_id}",
                     mapping={"object_id": objid})
            return []

        ar = ars[0].getObject()
        return [brain.getObject() for brain in ar.getAnalyses()]
794
795
    def _getZODBAnalysesFromReferenceAnalyses(self, objid, criteria):
        """Search reference/duplicate analyses matching objid.

        ``criteria`` is one of 'rgid' (Reference Analyses Group ID),
        'rid' (analysis id) or 'ruid' (analysis UID). When no criteria
        is given, each one is tried in that order until something is
        found.

        :param objid: identifier of the reference analysis (group)
        :param criteria: search criteria, or falsy to try all of them
        :return: list of ReferenceAnalysis/DuplicateAnalysis objects
        """
        analyses = []
        if criteria:
            refans = self._getObjects(objid, criteria, [])
            if len(refans) == 0:
                return []

            elif criteria == 'rgid':
                return [an.getObject() for an in refans]

            elif len(refans) == 1:
                # The search has been made using the internal identifier
                # from a Reference Analysis (id or uid). That is not usual.
                an = refans[0].getObject()
                worksheet = an.getWorksheet()
                if worksheet:
                    # A regular QC test (assigned to a Worksheet)
                    return [an, ]
                elif an.getInstrument():
                    # An Internal Calibration Test
                    return [an, ]
                else:
                    # Oops. This should never happen!
                    # A ReferenceAnalysis must be always assigned to
                    # a Worksheet (Regular QC) or to an Instrument
                    # (Internal Calibration Test)
                    self.err("The Reference Analysis ${object_id} has neither "
                             "instrument nor worksheet assigned",
                             mapping={"object_id": objid})
                    return []
            else:
                # This should never happen!
                # Fetching ReferenceAnalysis for its id or uid should
                # *always* return a unique result
                # NOTE: placeholder fixed from '${obect_id}' so the
                # mapping key 'object_id' actually interpolates
                self.err(
                    "More than one Reference Analysis found for ${object_id}",
                    mapping={"object_id": objid})
                return []

        else:
            sortorder = ['rgid', 'rid', 'ruid']
            for crit in sortorder:
                analyses = self._getZODBAnalysesFromReferenceAnalyses(objid,
                                                                      crit)
                if len(analyses) > 0:
                    return analyses

        return analyses
843
844
    def calculateTotalResults(self, objid, analysis):
        """Recompute calculated analyses that depend on this analysis.

        If the AR (objid) has an analysis with a calculation, check
        whether the given analysis' keyword appears in that calculation's
        formula; if so, recalculate and submit the calculated analysis.
        Two types of analysis are involved here:
        1. Calculated Analysis - Results are calculated.
        2. Analysis - Results are captured and not calculated
        :param objid: AR ID or Worksheet's Reference Sample IDs
        :param analysis: Analysis Object
        """
        dependent_keyword = analysis.getKeyword()
        for candidate in self._getZODBAnalyses(objid):
            calculation = candidate.getCalculation()
            if not calculation:
                # Captured analysis, nothing to recompute
                continue
            # Skip calculations whose formula does not reference the
            # keyword of the analysis we are currently on
            if dependent_keyword not in calculation.getMinifiedFormula():
                continue
            # The keyword appears in the formula, so this analysis is a
            # dependency of the calculated one: recompute and submit
            if candidate.calculateResult(override=self._override[1]):
                api.do_transition_for(candidate, "submit")
                self.log(
                    "${request_id}: calculated result for "
                    "'${analysis_keyword}': '${analysis_result}'",
                    mapping={"request_id": objid,
                             "analysis_keyword": candidate.getKeyword(),
                             "analysis_result": str(candidate.getResult())}
                )
880
881
882
    def _process_analysis(self, objid, analysis, values):
        """Write the raw results from ``values`` into the analysis.

        ``values`` maps interim keywords/titles and the default result
        key to raw results, and may also carry a 'DateTime' timestamp
        (format '%Y%m%d %H:%M:%S') and a 'Remarks' entry.

        Interim fields are filled first (matched by keyword, then by
        title) and the result recalculated from them; then the default
        result, capture date and remarks are set. When anything was
        saved, the analysis is submitted and dependent calculated
        analyses are recomputed via calculateTotalResults().

        :param objid: AR ID or Worksheet's Reference Sample ID
        :param analysis: the Analysis/ReferenceAnalysis object to update
        :param values: dict of raw values parsed from the results file
        :return: True if at least one result (interim or final) was saved
        """
        resultsaved = False
        acode = analysis.getKeyword()
        defresultkey = values.get("DefaultResult", "")
        capturedate = None
        # Look for timestamp
        if "DateTime" in values:
            try:
                dt = values.get('DateTime')
                capturedate = DateTime(datetime.strptime(dt,
                                                         '%Y%m%d %H:%M:%S'))
            except (ValueError, TypeError):
                # Unparseable/missing timestamp: import without a
                # capture date instead of aborting (was a bare except)
                capturedate = None
            del values['DateTime']

        fields_to_reindex = []
        # get interims
        interimsout = []
        interims = hasattr(analysis, 'getInterimFields') \
                   and analysis.getInterimFields() or []
        for interim in interims:
            keyword = interim['keyword']
            title = interim['title']
            # An explicit 0 is a valid result, hence the extra '== 0'
            if values.get(keyword, '') or values.get(keyword, '') == 0:
                res = values.get(keyword)
                self.log("${request_id} result for "
                         "'${analysis_keyword}:${interim_keyword}': "
                         "'${result}'",
                         mapping={"request_id": objid,
                                  "analysis_keyword": acode,
                                  "interim_keyword": keyword,
                                  "result": str(res)}
                         )
                ninterim = interim.copy()
                ninterim['value'] = res
                interimsout.append(ninterim)
                resultsaved = True
            elif values.get(title, '') or values.get(title, '') == 0:
                # Fall back to matching the interim by its title
                res = values.get(title)
                self.log("%s/'%s:%s': '%s'" % (objid, acode, title, str(res)))
                ninterim = interim.copy()
                ninterim['value'] = res
                interimsout.append(ninterim)
                resultsaved = True
            else:
                # Keep the interim untouched
                interimsout.append(interim)
        # write interims
        if len(interimsout) > 0:
            analysis.setInterimFields(interimsout)
            analysis.calculateResult(override=self._override[1])

        # Set result if present (or when override is forced).
        res = values.get(defresultkey, '')
        if res or res == 0 or self._override[1] == True:
            # TODO incorporate this into the import detail view
            analysis.setResult(res)
            if capturedate:
                analysis.setResultCaptureDate(capturedate)
            resultsaved = True

        if not resultsaved:
            self.log(
                "${request_id} result for '${analysis_keyword}': '${result}'",
                mapping={"request_id": objid,
                         "analysis_keyword": acode,
                         "result": ""})

        if resultsaved:
            doActionFor(analysis, 'submit')
            self.calculateTotalResults(objid, analysis)
            fields_to_reindex.append('Result')

        # NOTE(review): remarks are only written when the analysis
        # already has remarks or override is on — looks inverted, but
        # behavior kept as-is; confirm the intended condition
        if (resultsaved) \
            and values.get('Remarks', '') \
            and analysis.portal_type == 'Analysis' \
            and (analysis.getRemarks() != '' or self._override[1] == True):
            analysis.setRemarks(values['Remarks'])
            fields_to_reindex.append('Remarks')

        if len(fields_to_reindex):
            analysis.reindexObject(idxs=fields_to_reindex)
        return resultsaved
969