| Total Complexity | 50 |
| Total Lines | 309 |
| Duplicated Lines | 26.54 % |
| Changes | 0 |
Duplicate code is one of the most pungent code smells. A commonly used rule of thumb is to restructure code once it is duplicated in three or more places.
Common duplication problems and their corresponding solutions are:
Complex classes like bika.lims.exportimport.instruments.shimadzu.gcms.qp2010se often do a lot of different things. To break such a class down, we need to identify a cohesive component within that class. A common approach to find such a component is to look for fields/methods that share the same prefixes, or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.
| 1 | # -*- coding: utf-8 -*- |
||
| 2 | # |
||
| 3 | # This file is part of SENAITE.CORE |
||
| 4 | # |
||
| 5 | # Copyright 2018 by its authors. |
||
| 6 | # Some rights reserved. See LICENSE.rst, CONTRIBUTORS.rst. |
||
| 7 | |||
| 8 | """ Shimadzu's 'GCMS QP2010 SE' |
||
| 9 | """ |
||
| 10 | from bika.lims import bikaMessageFactory as _ |
||
| 11 | from datetime import datetime |
||
| 12 | import json |
||
| 13 | import re |
||
| 14 | from bika.lims.exportimport.instruments.resultsimport import \ |
||
| 15 | InstrumentCSVResultsFileParser, AnalysisResultsImporter |
||
| 16 | import traceback |
||
| 17 | |||
| 18 | title = "Shimadzu - GCMS-QP2010 SE" |
||
| 19 | |||
| 20 | |||
def Import(context, request):
    """ Read Shimadzu GCMS-QP2010 SE analysis results.

    Reads the uploaded instrument results file from the request form,
    parses it with GCMSQP2010SECSVParser and imports the parsed results
    with GCMSQP2010SEImporter.

    :param context: the context the import is run in (portal/instrument)
    :param request: the HTTP request carrying the import form
    :returns: a JSON string with 'errors', 'log' and 'warns' lists
    """
    form = request.form
    # TODO form['file'] sometimes returns a list
    infile = form['instrument_results_file'][0] if \
        isinstance(form['instrument_results_file'], list) \
        else form['instrument_results_file']
    override = form['results_override']
    artoapply = form['artoapply']
    instrument = form.get('instrument', None)
    errors = []
    logs = []
    # warns must be initialized here: it is only assigned inside the
    # 'if parser' branch and the no-file path would otherwise raise a
    # NameError when building the results dict below.
    warns = []

    # Load the most suitable parser according to file extension/options/etc...
    parser = None
    if not hasattr(infile, 'filename'):
        errors.append(_("No file selected"))
    else:
        # BUGFIX: the original instantiated the parser unconditionally,
        # so "No file selected" was reported but parsing went ahead
        # anyway. Only parse when a real file upload is present.
        parser = GCMSQP2010SECSVParser(infile)

    if parser:
        # Load the importer
        # Map the 'apply to' form option to allowed AR workflow states.
        status = ['sample_received', 'attachment_due', 'to_be_verified']
        if artoapply == 'received':
            status = ['sample_received']
        elif artoapply == 'received_tobeverified':
            status = ['sample_received', 'attachment_due', 'to_be_verified']

        # over = [override existing results?, override with empty values?]
        over = [False, False]
        if override == 'nooverride':
            over = [False, False]
        elif override == 'override':
            over = [True, False]
        elif override == 'overrideempty':
            over = [True, True]

        importer = GCMSQP2010SEImporter(parser=parser,
                                        context=context,
                                        allowed_ar_states=status,
                                        allowed_analysis_states=None,
                                        override=over,
                                        instrument_uid=instrument)
        tbex = ''
        try:
            importer.process()
        except Exception:
            # Keep the traceback so it can be reported back to the user
            # instead of being silently discarded.
            tbex = traceback.format_exc()
        errors = importer.errors
        logs = importer.logs
        warns = importer.warns
        if tbex:
            errors.append(tbex)

    results = {'errors': errors, 'log': logs, 'warns': warns}

    return json.dumps(results)
| 77 | |||
| 78 | |||
class GCMSQP2010SECSVParser(InstrumentCSVResultsFileParser):
    """ Parser for Shimadzu GCMS-QP2010 SE exported result files.

    The export is a tab-separated text file starting with a [Header]
    section (data file name, output date and time) followed by a
    [MS Quantitative Results] section. Each quantitation row is turned
    into a raw result dict; the AR id is derived from the base name of
    the 'Data File Name' header entry.
    """

    HEADERTABLE_KEY = '[Header]'
    HEADERKEY_FILENAME = 'Data File Name'
    HEADERKEY_OUTPUTDATE = 'Output Date'
    HEADERKEY_OUTPUTTIME = 'Output Time'
    QUANTITATIONRESULTS_KEY = '[MS Quantitative Results]'
    QUANTITATIONRESULTS_NUMBEROFIDS = '# of IDs'
    QUANTITATIONRESULTS_HEADER_ID_NUMBER = 'ID#'
    # Columns whose values are parsed as floats.
    # BUGFIX: the original tuple was missing a comma between 'Height'
    # and 'Conc.', so Python's implicit string-literal concatenation
    # produced the bogus entry 'HeightConc.' and neither column was
    # ever treated as numeric.
    QUANTITATIONRESULTS_NUMERICHEADERS = (
        'Mass', 'Height', 'Conc.',
        'Std.Ret.Time', '3rd', '2nd', '1st',
        'Constant', 'Ref.Ion Area', 'Ref.Ion Height',
        'Ref.Ion Set Ratio', 'Ref.Ion Ratio', 'Recovery',
        'SI',
        'Ref.Ion1 m/z', 'Ref.Ion1 Area', 'Ref.Ion1 Height',
        'Ref.Ion1 Set Ratio', 'Ref.Ion1 Ratio',
        'Ref.Ion2 m/z', 'Ref.Ion2 Area', 'Ref.Ion2 Height',
        'Ref.Ion2 Set Ratio', 'Ref.Ion2 Ratio',
        'Ref.Ion3 m/z', 'Ref.Ion3 Area', 'Ref.Ion3 Height',
        'Ref.Ion3 Set Ratio', 'Ref.Ion3 Ratio',
        'Ref.Ion4 m/z', 'Ref.Ion4 Area', 'Ref.Ion4 Height',
        'Ref.Ion4 Set Ratio', 'Ref.Ion4 Ratio',
        'Ref.Ion5 m/z', 'Ref.Ion5 Area', 'Ref.Ion5 Height',
        'Ref.Ion5 Set Ratio', 'Ref.Ion5 Ratio',
        'S/N', 'Threshold',
    )
    SIMILARITYSEARCHRESULTS_KEY = \
        '[MS Similarity Search Results for Identified Results]'
    PEAK_TABLE_KEY = '[MC Peak Table]'
    COMMAS = ','

    def __init__(self, csv):
        InstrumentCSVResultsFileParser.__init__(self, csv)
        # True once the [MS Quantitative Results] marker has been seen
        self._end_header = False
        # Column names taken from the 'ID#' header row of the results
        self._quantitationresultsheader = []
        self._numline = 0

    def _parseline(self, line):
        # Dispatch: header lines first, quantitation rows afterwards.
        if self._end_header is False:
            return self.parse_headerline(line)
        else:
            return self.parse_quantitationesultsline(line)

    def parse_headerline(self, line):
        """ Parses header lines

        Header example:
        [Header]
        Data File Name,C:\GCMSsolution\Data\October\
        1-16-02249-001_CD_10172016_2.qgd
        Output Date,10/18/2016
        Output Time,12:04:11 PM
        """
        if self._end_header is True:
            # Header already processed
            return 0

        splitted = [token.strip() for token in line.split('\t')]

        # [Header]
        if splitted[0] == self.HEADERTABLE_KEY:
            if self.HEADERTABLE_KEY in self._header:
                self.warn("Header [Header] Info already found. Discarding",
                          numline=self._numline, line=line)
                return 0

            self._header[self.HEADERTABLE_KEY] = []
            for i in range(len(splitted) - 1):
                if splitted[i + 1]:
                    self._header[self.HEADERTABLE_KEY].append(splitted[i + 1])

        # Data File Name, C:\GCMSsolution\Data\October\
        # 1-16-02249-001_CD_10172016_2.qgd
        elif splitted[0] == self.HEADERKEY_FILENAME:
            if self.HEADERKEY_FILENAME in self._header:
                self.warn("Header File Data Name already found. Discarding",
                          numline=self._numline, line=line)
                return 0

            if splitted[1]:
                self._header[self.HEADERKEY_FILENAME] = splitted[1]
            else:
                self.warn("File Data Name not found or empty",
                          numline=self._numline, line=line)

        # Output Date 10/18/2016
        elif splitted[0] == self.HEADERKEY_OUTPUTDATE:
            if splitted[1]:
                try:
                    d = datetime.strptime(splitted[1], "%m/%d/%Y")
                    self._header[self.HEADERKEY_OUTPUTDATE] = d
                except ValueError:
                    self.err("Invalid Output Date format",
                             numline=self._numline, line=line)
            else:
                self.warn("Output Date not found or empty",
                          numline=self._numline, line=line)
                # BUGFIX: the original re-ran strptime on the empty
                # value here, raising an unhandled ValueError right
                # after warning about the missing date.

        # Output Time 12:04:11 PM
        elif splitted[0] == self.HEADERKEY_OUTPUTTIME:
            if splitted[1]:
                try:
                    d = datetime.strptime(splitted[1], "%I:%M:%S %p")
                    self._header[self.HEADERKEY_OUTPUTTIME] = d
                except ValueError:
                    self.err("Invalid Output Time format",
                             numline=self._numline, line=line)
            else:
                self.warn("Output Time not found or empty",
                          numline=self._numline, line=line)
                # BUGFIX: same unguarded strptime on an empty value
                # removed here as well.

        if line.startswith(self.QUANTITATIONRESULTS_KEY):
            self._end_header = True
            if len(self._header) == 0:
                self.err("No header found", numline=self._numline)
                return -1
            return 0

        return 0

    def parse_quantitationesultsline(self, line):
        """ Parses quantitation result lines
        Please see samples/GC-MS output.txt
        [MS Quantitative Results] section

        NOTE: the method name keeps its historical typo ('esults') so
        any external callers remain compatible.
        """

        # Section markers and the '# of IDs' counter carry no result data
        if line.startswith(self.QUANTITATIONRESULTS_KEY) \
                or line.startswith(self.QUANTITATIONRESULTS_NUMBEROFIDS) \
                or line.startswith(self.SIMILARITYSEARCHRESULTS_KEY) \
                or line.startswith(self.PEAK_TABLE_KEY):

            # Nothing to do, continue
            return 0

        # The 'ID#' row holds the column names for all subsequent rows
        if line.startswith(self.QUANTITATIONRESULTS_HEADER_ID_NUMBER):
            self._quantitationresultsheader = [token.strip() for token
                                               in line.split('\t')
                                               if token.strip()]
            return 0

        # Example row:
        # 1 \talpha-Pinene \tTarget \t0 \t93.00 \t7.738 \t7.680 \t7.795
        # \t2.480 \t344488 \t138926 \t0.02604 \tAuto \t2 ... \tmg \t0.000
        splitted = [token.strip() for token in line.split('\t')]
        # AR id is the data file base name, e.g. '1-16-02249-001_CD_...'
        # minus drive/path and extension
        ar_id = self._header['Data File Name'].split('\\')[-1].split('.')[0]
        quantitation = {'DefaultResult': 'Conc.', 'AR': ar_id}
        for colname in self._quantitationresultsheader:
            quantitation[colname] = ''

        for i, token in enumerate(splitted):
            if i < len(self._quantitationresultsheader):
                colname = self._quantitationresultsheader[i]
                if colname in self.QUANTITATIONRESULTS_NUMERICHEADERS:
                    try:
                        quantitation[colname] = float(token)
                    except ValueError:
                        self.warn(
                            "No valid number ${token} in column "
                            "${index} (${column_name})",
                            mapping={"token": token,
                                     "index": str(i + 1),
                                     "column_name": colname},
                            numline=self._numline, line=line)
                        quantitation[colname] = token
                else:
                    quantitation[colname] = token
            elif token:
                self.err("Orphan value in column ${index} (${token})",
                         mapping={"index": str(i + 1),
                                  "token": token},
                         numline=self._numline, line=line)

        # Normalize the default result ('Conc.'): '--', '' and 'ND'
        # readings become 0.0 and negatives are clamped to 0.0
        column_name = quantitation['DefaultResult']
        result = quantitation[column_name]
        result = self.zeroValueDefaultInstrumentResults(column_name,
                                                        result, line)
        quantitation[column_name] = result

        # Key the raw result on the compound name (second column)
        # stripped of non-alphanumeric characters
        val = re.sub(r"\W", "", splitted[1])
        self._addRawResult(quantitation['AR'],
                           values={val: quantitation},
                           override=False)

    def zeroValueDefaultInstrumentResults(self, column_name, result, line):
        """ Coerces a default instrument reading to a float.

        '--...' / empty / 'ND' (not detected) readings map to 0.0 and
        negative readings are clamped to 0.0. Returns None (after
        logging an error) when the value is not a number at all.
        """
        result = str(result)
        if result.startswith('--') or result == '' or result == 'ND':
            return 0.0

        try:
            result = float(result)
            if result < 0.0:
                result = 0.0
        except ValueError:
            self.err(
                "No valid number ${result} in column (${column_name})",
                mapping={"result": result,
                         "column_name": column_name},
                numline=self._numline, line=line)
            return
        return result
| 298 | |||
| 299 | |||
class GCMSQP2010SEImporter(AnalysisResultsImporter):
    """ Results importer for the Shimadzu GCMS-QP2010 SE.

    Thin wrapper around AnalysisResultsImporter: all import logic lives
    in the base class; this subclass only forwards the constructor
    arguments so the Import() entry point can use keyword arguments.
    """

    def __init__(self, parser, context, override,
                 allowed_ar_states=None, allowed_analysis_states=None,
                 instrument_uid=''):
        # Delegate everything to the generic importer base class.
        AnalysisResultsImporter.__init__(self, parser, context,
                                         override, allowed_ar_states,
                                         allowed_analysis_states,
                                         instrument_uid)
| 309 |