Passed
Push — master ( d8e2ec...90ae0b ) by Jordi
10:07 (queued 04:19)

MasshunterQuantCSVParser._parseline(), rated A

Complexity: Conditions 3
Size: Total Lines 7, Code Lines 6
Duplication: Lines 0, Ratio 0 %
Importance: Changes 0

Metric  Value
eloc    6
dl      0
loc     7
rs      10
c       0
b       0
f       0
cc      3
nop     2
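For orientation, the figures above (Conditions/cc, loc, nop and the rest) can be approximated on a local checkout. The sketch below is a reviewer-side illustration only, not part of the reviewed file: it assumes the radon library, which is not a project dependency, and quotes just the _parseline() body from the file that follows.

# Reviewer-side illustration (assumes `pip install radon`; radon is not a
# SENAITE.CORE dependency). It computes the kind of numbers reported above:
# cyclomatic complexity ("Conditions"/"cc") and raw line counts ("loc").
from radon.complexity import cc_visit
from radon.raw import analyze

SNIPPET = '''\
def _parseline(self, line):
    if not self._end_header:
        return self.parse_headerline(line)
    elif not self._end_sequencetable:
        return self.parse_sequencetableline(line)
    else:
        return self.parse_quantitationresultsline(line)
'''

for block in cc_visit(SNIPPET):        # one entry per function found in SNIPPET
    print("%s: cc=%d" % (block.name, block.complexity))   # _parseline: cc=3

raw = analyze(SNIPPET)                 # namedtuple with loc, lloc, sloc, ...
print("loc=%d, sloc=%d" % (raw.loc, raw.sloc))            # loc=7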
# -*- coding: utf-8 -*-
#
# This file is part of SENAITE.CORE
#
# Copyright 2018 by its authors.
# Some rights reserved. See LICENSE.rst, CONTRIBUTORS.rst.

""" Agilent's 'Masshunter Quant'
"""
from DateTime import DateTime
from Products.Archetypes.event import ObjectInitializedEvent
from Products.CMFCore.utils import getToolByName
from Products.Five.browser.pagetemplatefile import ViewPageTemplateFile
from bika.lims import bikaMessageFactory as _
from bika.lims.utils import t
from bika.lims import logger
from bika.lims.browser import BrowserView
from bika.lims.idserver import renameAfterCreation
from bika.lims.utils import changeWorkflowState
from bika.lims.utils import tmpID
from cStringIO import StringIO
from datetime import datetime
from operator import itemgetter
from plone.i18n.normalizer.interfaces import IIDNormalizer
from zope.component import getUtility
import csv
import json
import plone
import zope
import zope.event
from bika.lims.exportimport.instruments.resultsimport import InstrumentCSVResultsFileParser,\
    AnalysisResultsImporter
import traceback

title = "Agilent - Masshunter Quantitative"


def Import(context, request):
    """ Read Agilent's Masshunter Quant analysis results
    """
    infile = request.form['amhq_file']
    fileformat = request.form['amhq_format']
    artoapply = request.form['amhq_artoapply']
    override = request.form['amhq_override']
    instrument = request.form.get('instrument', None)
    errors = []
    logs = []
    warns = []  # ensure warns exists even if no parser is loaded

    # Load the most suitable parser according to file extension/options/etc...
    parser = None
    if not hasattr(infile, 'filename'):
        errors.append(_("No file selected"))
    elif fileformat == 'csv':
        parser = MasshunterQuantCSVParser(infile)
    else:
        errors.append(t(_("Unrecognized file format ${fileformat}",
                          mapping={"fileformat": fileformat})))

    if parser:
        # Load the importer
        status = ['sample_received', 'attachment_due', 'to_be_verified']
        if artoapply == 'received':
            status = ['sample_received']
        elif artoapply == 'received_tobeverified':
            status = ['sample_received', 'attachment_due', 'to_be_verified']

        over = [False, False]
        if override == 'nooverride':
            over = [False, False]
        elif override == 'override':
            over = [True, False]
        elif override == 'overrideempty':
            over = [True, True]

        importer = MasshunterQuantImporter(parser=parser,
                                           context=context,
                                           allowed_ar_states=status,
                                           allowed_analysis_states=None,
                                           override=over,
                                           instrument_uid=instrument)
        tbex = ''
        try:
            importer.process()
        except:
            tbex = traceback.format_exc()
        errors = importer.errors
        logs = importer.logs
        warns = importer.warns
        if tbex:
            errors.append(tbex)

    results = {'errors': errors, 'log': logs, 'warns': warns}

    return json.dumps(results)

class MasshunterQuantCSVParser(InstrumentCSVResultsFileParser):

    HEADERKEY_BATCHINFO = 'Batch Info'
    HEADERKEY_BATCHDATAPATH = 'Batch Data Path'
    HEADERKEY_ANALYSISTIME = 'Analysis Time'
    HEADERKEY_ANALYSTNAME = 'Analyst Name'
    HEADERKEY_REPORTTIME = 'Report Time'
    HEADERKEY_REPORTERNAME = 'Reporter Name'
    HEADERKEY_LASTCALIBRATION = 'Last Calib Update'
    HEADERKEY_BATCHSTATE = 'Batch State'
    SEQUENCETABLE_KEY = 'Sequence Table'
    SEQUENCETABLE_HEADER_DATAFILE = 'Data File'
    SEQUENCETABLE_HEADER_SAMPLENAME = 'Sample Name'
    SEQUENCETABLE_PRERUN = 'prerunrespchk.d'
    SEQUENCETABLE_MIDRUN = 'mid_respchk.d'
    SEQUENCETABLE_POSTRUN = 'post_respchk.d'
    SEQUENCETABLE_NUMERICHEADERS = ('Inj Vol',)
    QUANTITATIONRESULTS_KEY = 'Quantification Results'
    QUANTITATIONRESULTS_TARGETCOMPOUND = 'Target Compound'
    QUANTITATIONRESULTS_HEADER_DATAFILE = 'Data File'
    QUANTITATIONRESULTS_PRERUN = 'prerunrespchk.d'
    QUANTITATIONRESULTS_MIDRUN = 'mid_respchk.d'
    QUANTITATIONRESULTS_POSTRUN = 'post_respchk.d'
    QUANTITATIONRESULTS_NUMERICHEADERS = ('Resp', 'ISTD Resp', 'Resp Ratio',
                                          'Final Conc', 'Exp Conc', 'Accuracy')
    QUANTITATIONRESULTS_COMPOUNDCOLUMN = 'Compound'
    COMMAS = ','

    def __init__(self, csv):
        InstrumentCSVResultsFileParser.__init__(self, csv)
        self._end_header = False
        self._end_sequencetable = False
        self._sequences = []
        self._sequencesheader = []
        self._quantitationresultsheader = []
        self._numline = 0

    def getAttachmentFileType(self):
        return "Agilent's Masshunter Quant CSV"

    def _parseline(self, line):
        if not self._end_header:
            return self.parse_headerline(line)
        elif not self._end_sequencetable:
            return self.parse_sequencetableline(line)
        else:
            return self.parse_quantitationresultsline(line)

    def parse_headerline(self, line):
        """ Parses header lines

            Header example:
            Batch Info,2013-03-20T07:11:09.9053262-07:00,2013-03-20T07:12:55.5280967-07:00,2013-03-20T07:11:07.1047817-07:00,,,,,,,,,,,,,,
            Batch Data Path,D:\MassHunter\Data\130129\QuantResults\130129LS.batch.bin,,,,,,,,,,,,,,,,
            Analysis Time,3/20/2013 7:11 AM,Analyst Name,Administrator,,,,,,,,,,,,,,
            Report Time,3/20/2013 7:12 AM,Reporter Name,Administrator,,,,,,,,,,,,,,
            Last Calib Update,3/20/2013 7:11 AM,Batch State,Processed,,,,,,,,,,,,,,
            ,,,,,,,,,,,,,,,,,
        """
        if self._end_header:
            # Header already processed
            return 0

        if line.startswith(self.SEQUENCETABLE_KEY):
            self._end_header = True
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1
            return 0

        splitted = [token.strip() for token in line.split(',')]

        # Batch Info,2013-03-20T07:11:09.9053262-07:00,2013-03-20T07:12:55.5280967-07:00,2013-03-20T07:11:07.1047817-07:00,,,,,,,,,,,,,,
        if splitted[0] == self.HEADERKEY_BATCHINFO:
            if self.HEADERKEY_BATCHINFO in self._header:
                self.warn("Header Batch Info already found. Discarding",
                          numline=self._numline, line=line)
                return 0

            self._header[self.HEADERKEY_BATCHINFO] = []
            for i in range(len(splitted) - 1):
                if splitted[i + 1]:
                    self._header[self.HEADERKEY_BATCHINFO].append(splitted[i + 1])

        # Batch Data Path,D:\MassHunter\Data\130129\QuantResults\130129LS.batch.bin,,,,,,,,,,,,,,,,
        elif splitted[0] == self.HEADERKEY_BATCHDATAPATH:
            if self.HEADERKEY_BATCHDATAPATH in self._header:
                self.warn("Header Batch Data Path already found. Discarding",
                          numline=self._numline, line=line)
                return 0

            if splitted[1]:
                self._header[self.HEADERKEY_BATCHDATAPATH] = splitted[1]
            else:
                self.warn("Batch Data Path not found or empty",
                          numline=self._numline, line=line)

        # Analysis Time,3/20/2013 7:11 AM,Analyst Name,Administrator,,,,,,,,,,,,,,
        elif splitted[0] == self.HEADERKEY_ANALYSISTIME:
            if splitted[1]:
                try:
                    d = datetime.strptime(splitted[1], "%m/%d/%Y %I:%M %p")
                    self._header[self.HEADERKEY_ANALYSISTIME] = d
                except ValueError:
                    self.err("Invalid Analysis Time format",
                             numline=self._numline, line=line)
            else:
                self.warn("Analysis Time not found or empty",
                          numline=self._numline, line=line)

            if splitted[2] and splitted[2] == self.HEADERKEY_ANALYSTNAME:
                if splitted[3]:
                    self._header[self.HEADERKEY_ANALYSTNAME] = splitted[3]
                else:
                    self.warn("Analyst Name not found or empty",
                              numline=self._numline, line=line)
            else:
                self.err("Analyst Name not found",
                         numline=self._numline, line=line)

        # Report Time,3/20/2013 7:12 AM,Reporter Name,Administrator,,,,,,,,,,,,,,
        elif splitted[0] == self.HEADERKEY_REPORTTIME:
            if splitted[1]:
                try:
                    d = datetime.strptime(splitted[1], "%m/%d/%Y %I:%M %p")
                    self._header[self.HEADERKEY_REPORTTIME] = d
                except ValueError:
                    self.err("Invalid Report Time format",
                             numline=self._numline, line=line)
            else:
                self.warn("Report time not found or empty",
                          numline=self._numline, line=line)

            if splitted[2] and splitted[2] == self.HEADERKEY_REPORTERNAME:
                if splitted[3]:
                    self._header[self.HEADERKEY_REPORTERNAME] = splitted[3]
                else:
                    self.warn("Reporter Name not found or empty",
                              numline=self._numline, line=line)
            else:
                self.err("Reporter Name not found",
                         numline=self._numline, line=line)

        # Last Calib Update,3/20/2013 7:11 AM,Batch State,Processed,,,,,,,,,,,,,,
        elif splitted[0] == self.HEADERKEY_LASTCALIBRATION:
            if splitted[1]:
                try:
                    d = datetime.strptime(splitted[1], "%m/%d/%Y %I:%M %p")
                    self._header[self.HEADERKEY_LASTCALIBRATION] = d
                except ValueError:
                    self.err("Invalid Last Calibration time format",
                             numline=self._numline, line=line)
            else:
                self.warn("Last Calibration time not found or empty",
                          numline=self._numline, line=line)

            if splitted[2] and splitted[2] == self.HEADERKEY_BATCHSTATE:
                if splitted[3]:
                    self._header[self.HEADERKEY_BATCHSTATE] = splitted[3]
                else:
                    self.warn("Batch state not found or empty",
                              numline=self._numline, line=line)
            else:
                self.err("Batch state not found",
                         numline=self._numline, line=line)

        return 0

    def parse_sequencetableline(self, line):
        """ Parses sequence table lines

            Sequence Table example:
            Sequence Table,,,,,,,,,,,,,,,,,
            Data File,Sample Name,Position,Inj Vol,Level,Sample Type,Acq Method File,,,,,,,,,,,
            prerunrespchk.d,prerunrespchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
            DSS_Nist_L1.d,DSS_Nist_L1,P1-A2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
            DSS_Nist_L2.d,DSS_Nist_L2,P1-B2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
            DSS_Nist_L3.d,DSS_Nist_L3,P1-C2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
            UTAK_DS_L1.d,UTAK_DS_L1,P1-D2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
            UTAK_DS_L2.d,UTAK_DS_L2,P1-E2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
            mid_respchk.d,mid_respchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
            UTAK_DS_low.d,UTAK_DS_Low,P1-F2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
            FDBS_31.d,FDBS_31,P1-G2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
            FDBS_32.d,FDBS_32,P1-H2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
            LS_60-r001.d,LS_60,P1-G12,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
            LS_60-r002.d,LS_60,P1-G12,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
            LS_61-r001.d,LS_61,P1-H12,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
            LS_61-r002.d,LS_61,P1-H12,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
            post_respchk.d,post_respchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
            ,,,,,,,,,,,,,,,,,
        """

        # Sequence Table,,,,,,,,,,,,,,,,,
        # prerunrespchk.d,prerunrespchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
        # mid_respchk.d,mid_respchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
        # ,,,,,,,,,,,,,,,,,
        if line.startswith(self.SEQUENCETABLE_KEY) \
            or line.startswith(self.SEQUENCETABLE_PRERUN) \
            or line.startswith(self.SEQUENCETABLE_MIDRUN) \
            or self._end_sequencetable:

            # Nothing to do, continue
            return 0

        # Data File,Sample Name,Position,Inj Vol,Level,Sample Type,Acq Method File,,,,,,,,,,,
        if line.startswith(self.SEQUENCETABLE_HEADER_DATAFILE):
            self._sequencesheader = [token.strip() for token in line.split(',') if token.strip()]
            return 0

        # post_respchk.d,post_respchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
        # Quantitation Results,,,,,,,,,,,,,,,,,
        if line.startswith(self.SEQUENCETABLE_POSTRUN) \
            or line.startswith(self.QUANTITATIONRESULTS_KEY) \
            or line.startswith(self.COMMAS):
            self._end_sequencetable = True
            if len(self._sequences) == 0:
                self.err("No Sequence Table found", numline=self._numline)
                return -1

            # Jumps 2 lines:
            # Data File,Sample Name,Position,Inj Vol,Level,Sample Type,Acq Method File,,,,,,,,,,,
            # prerunrespchk.d,prerunrespchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
            return 2

        # DSS_Nist_L1.d,DSS_Nist_L1,P1-A2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,
        splitted = [token.strip() for token in line.split(',')]
        sequence = {}
        for colname in self._sequencesheader:
            sequence[colname] = ''

        for i in range(len(splitted)):
            token = splitted[i]
            if i < len(self._sequencesheader):
                colname = self._sequencesheader[i]
                if token and colname in self.SEQUENCETABLE_NUMERICHEADERS:
                    try:
                        sequence[colname] = float(token)
                    except ValueError:
                        self.warn(
                            "No valid number ${token} in column ${index} (${column_name})",
                            mapping={"token": token,
                                     "index": str(i + 1),
                                     "column_name": colname},
                            numline=self._numline, line=line)
                        sequence[colname] = token
                else:
                    sequence[colname] = token
            elif token:
                self.err("Orphan value in column ${index} (${token})",
                         mapping={"index": str(i+1),
                                  "token": token},
                         numline=self._numline, line=line)
        self._sequences.append(sequence)

    def parse_quantitationresultsline(self, line):
        """ Parses quantitation result lines

            Quantitation results example:
            Quantitation Results,,,,,,,,,,,,,,,,,
            Target Compound,25-OH D3+PTAD+MA,,,,,,,,,,,,,,,,
            Data File,Compound,ISTD,Resp,ISTD Resp,Resp Ratio, Final Conc,Exp Conc,Accuracy,,,,,,,,,
            prerunrespchk.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,5816,274638,0.0212,0.9145,,,,,,,,,,,
            DSS_Nist_L1.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,6103,139562,0.0437,1.6912,,,,,,,,,,,
            DSS_Nist_L2.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,11339,135726,0.0835,3.0510,,,,,,,,,,,
            DSS_Nist_L3.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,15871,141710,0.1120,4.0144,,,,,,,,,,,
            mid_respchk.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,4699,242798,0.0194,0.8514,,,,,,,,,,,
            DSS_Nist_L3-r002.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,15659,129490,0.1209,4.3157,,,,,,,,,,,
            UTAK_DS_L1-r001.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,29846,132264,0.2257,7.7965,,,,,,,,,,,
            UTAK_DS_L1-r002.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,28696,141614,0.2026,7.0387,,,,,,,,,,,
            post_respchk.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,5022,231748,0.0217,0.9315,,,,,,,,,,,
            ,,,,,,,,,,,,,,,,,
            Target Compound,25-OH D2+PTAD+MA,,,,,,,,,,,,,,,,
            Data File,Compound,ISTD,Resp,ISTD Resp,Resp Ratio, Final Conc,Exp Conc,Accuracy,,,,,,,,,
            prerunrespchk.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,6222,274638,0.0227,0.8835,,,,,,,,,,,
            DSS_Nist_L1.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,1252,139562,0.0090,0.7909,,,,,,,,,,,
            DSS_Nist_L2.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,3937,135726,0.0290,0.9265,,,,,,,,,,,
            DSS_Nist_L3.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,826,141710,0.0058,0.7697,,,,,,,,,,,
            mid_respchk.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,7864,242798,0.0324,0.9493,,,,,,,,,,,
            DSS_Nist_L3-r002.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,853,129490,0.0066,0.7748,,,,,,,,,,,
            UTAK_DS_L1-r001.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,127496,132264,0.9639,7.1558,,,,,,,,,,,
            UTAK_DS_L1-r002.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,135738,141614,0.9585,7.1201,,,,,,,,,,,
            post_respchk.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,6567,231748,0.0283,0.9219,,,,,,,,,,,
            ,,,,,,,,,,,,,,,,,
        """

        # Quantitation Results,,,,,,,,,,,,,,,,,
        # prerunrespchk.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,5816,274638,0.0212,0.9145,,,,,,,,,,,
        # mid_respchk.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,4699,242798,0.0194,0.8514,,,,,,,,,,,
        # post_respchk.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,6567,231748,0.0283,0.9219,,,,,,,,,,,
        # ,,,,,,,,,,,,,,,,,
        if line.startswith(self.QUANTITATIONRESULTS_KEY) \
            or line.startswith(self.QUANTITATIONRESULTS_PRERUN) \
            or line.startswith(self.QUANTITATIONRESULTS_MIDRUN) \
            or line.startswith(self.QUANTITATIONRESULTS_POSTRUN) \
            or line.startswith(self.COMMAS):

            # Nothing to do, continue
            return 0

        # Data File,Compound,ISTD,Resp,ISTD Resp,Resp Ratio, Final Conc,Exp Conc,Accuracy,,,,,,,,,
        if line.startswith(self.QUANTITATIONRESULTS_HEADER_DATAFILE):
            self._quantitationresultsheader = [token.strip() for token in line.split(',') if token.strip()]
            return 0

        # Target Compound,25-OH D3+PTAD+MA,,,,,,,,,,,,,,,,
        if line.startswith(self.QUANTITATIONRESULTS_TARGETCOMPOUND):
            # New set of Quantitation Results
            splitted = [token.strip() for token in line.split(',')]
            if not splitted[1]:
                self.warn("No Target Compound found",
                          numline=self._numline, line=line)
            return 0

        # DSS_Nist_L1.d,25-OH D2+PTAD+MA,25-OH D3d3+PTAD+MA,1252,139562,0.0090,0.7909,,,,,,,,,,,
        splitted = [token.strip() for token in line.split(',')]
        quantitation = {}
        for colname in self._quantitationresultsheader:
            quantitation[colname] = ''

        for i in range(len(splitted)):
            token = splitted[i]
            if i < len(self._quantitationresultsheader):
                colname = self._quantitationresultsheader[i]
                if token and colname in self.QUANTITATIONRESULTS_NUMERICHEADERS:
                    try:
                        quantitation[colname] = float(token)
                    except ValueError:
                        self.warn(
                            "No valid number ${token} in column ${index} (${column_name})",
                            mapping={"token": token,
                                     "index": str(i + 1),
                                     "column_name": colname},
                            numline=self._numline, line=line)
                        quantitation[colname] = token
                else:
                    quantitation[colname] = token
            elif token:
                self.err("Orphan value in column ${index} (${token})",
                         mapping={"index": str(i+1),
                                  "token": token},
                         numline=self._numline, line=line)

        if self.QUANTITATIONRESULTS_COMPOUNDCOLUMN in quantitation:
            compound = quantitation[self.QUANTITATIONRESULTS_COMPOUNDCOLUMN]

            # Look for sequence matches and populate rawdata
            datafile = quantitation.get(self.QUANTITATIONRESULTS_HEADER_DATAFILE, '')
            if not datafile:
                self.err("No Data File found for quantitation result",
                         numline=self._numline, line=line)
            else:
                seqs = [sequence for sequence in self._sequences
                        if sequence.get('Data File', '') == datafile]
                if len(seqs) == 0:
                    self.err("No sample found for quantitative result ${data_file}",
                             mapping={"data_file": datafile},
                             numline=self._numline, line=line)
                elif len(seqs) > 1:
                    self.err("More than one sequence found for quantitative result: ${data_file}",
                             mapping={"data_file": datafile},
                             numline=self._numline, line=line)
                else:
                    objid = seqs[0].get(self.SEQUENCETABLE_HEADER_SAMPLENAME, '')
                    if objid:
                        quantitation['DefaultResult'] = 'Final Conc'
                        quantitation['Remarks'] = _("Autoimport")
                        rows = self.getRawResults().get(objid, [])
                        raw = rows[0] if len(rows) > 0 else {}
                        raw[compound] = quantitation
                        self._addRawResult(objid, raw, True)
                    else:
                        self.err("No valid sequence for ${data_file}",
                                 mapping={"data_file": datafile},
                                 numline=self._numline, line=line)
        else:
            self.err("Value for column '${column}' not found",
                     mapping={"column": self.QUANTITATIONRESULTS_COMPOUNDCOLUMN},
                     numline=self._numline, line=line)

class MasshunterQuantImporter(AnalysisResultsImporter):

    def __init__(self, parser, context, override,
                 allowed_ar_states=None, allowed_analysis_states=None,
                 instrument_uid=''):
        AnalysisResultsImporter.__init__(self, parser, context,
                                         override, allowed_ar_states,
                                         allowed_analysis_states,
                                         instrument_uid)
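As a closing note for reviewers, the parser above can be exercised in isolation, outside a Plone request. The sketch below is illustrative only and not part of the reviewed file: it assumes the parse() and getRawResults() entry points inherited from InstrumentCSVResultsFileParser, feeds a file-like object carrying the filename attribute that Import() checks for, and trims its sample CSV down from the docstring examples.

# Reviewer-side sketch, not part of the reviewed file. Assumes the
# parse()/getRawResults() API inherited from InstrumentCSVResultsFileParser.
# StringIO (not cStringIO) is used so a filename attribute can be attached.
import StringIO

SAMPLE = "\n".join([
    "Batch Info,2013-03-20T07:11:09.9053262-07:00,,,,,,,,,,,,,,,,",
    "Analysis Time,3/20/2013 7:11 AM,Analyst Name,Administrator,,,,,,,,,,,,,,",
    "Report Time,3/20/2013 7:12 AM,Reporter Name,Administrator,,,,,,,,,,,,,,",
    "Last Calib Update,3/20/2013 7:11 AM,Batch State,Processed,,,,,,,,,,,,,,",
    "Sequence Table,,,,,,,,,,,,,,,,,",
    "Data File,Sample Name,Position,Inj Vol,Level,Sample Type,Acq Method File,,,,,,,,,,,",
    "DSS_Nist_L1.d,DSS_Nist_L1,P1-A2,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,",
    "post_respchk.d,post_respchk,Vial 3,-1.00,,Sample,120824_VitD_MAPTAD_1D_MRM_practice.m,,,,,,,,,,,",
    ",,,,,,,,,,,,,,,,,",
    "Quantitation Results,,,,,,,,,,,,,,,,,",
    "Target Compound,25-OH D3+PTAD+MA,,,,,,,,,,,,,,,,",
    "Data File,Compound,ISTD,Resp,ISTD Resp,Resp Ratio, Final Conc,Exp Conc,Accuracy,,,,,,,,,",
    "DSS_Nist_L1.d,25-OH D3+PTAD+MA,25-OH D3d3+PTAD+MA,6103,139562,0.0437,1.6912,,,,,,,,,,,",
])

infile = StringIO.StringIO(SAMPLE)
infile.filename = "example.csv"   # the only attribute Import() insists on

parser = MasshunterQuantCSVParser(infile)
parser.parse()                    # assumption: the base class drives _parseline()
print parser.getRawResults()      # raw results keyed by sample id, e.g. 'DSS_Nist_L1'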