1
|
|
|
from __future__ import division |
2
|
|
|
import logging |
3
|
|
|
import re |
4
|
|
|
from collections import namedtuple |
5
|
|
|
from datetime import datetime |
6
|
|
|
|
7
|
|
|
from ..package_tools import Exporter |
8
|
|
|
from .text import ParseError, RegexParser, WMOTextProduct, parse_wmo_time |
9
|
|
|
from ..units import units |
10
|
|
|
|
11
|
|
|
# Register this module's public names for export
exporter = Exporter(globals())

# Module-level logger used for parse diagnostics throughout this module
log = logging.getLogger('metpy.io.metar')
log.addHandler(logging.StreamHandler())  # Python 2.7 needs a handler set
log.setLevel(logging.WARNING)
16
|
|
|
|
17
|
|
|
|
18
|
|
|
class MetarProduct(WMOTextProduct):
    """A WMO text product containing one or more METAR/SPECI reports.

    Parsed reports are accumulated in ``self.reports`` as the dictionaries
    returned by :class:`MetarParser`.
    """

    def _parse(self, it):
        """Parse the product body, populating ``self.reports``.

        Parameters
        ----------
        it : iterator
            Peekable iterator over the product's lines, as supplied by
            ``WMOTextProduct``.
        """
        # Handle NWS style where it's just specified once at the top rather than per METAR
        if it.peek() in ('METAR', 'SPECI'):
            def_kind = next(it)
        else:
            def_kind = 'METAR'

        # Individual reports are terminated by '=' plus up to two newlines
        it.linesep = '=[\n]{0,2}'
        self.reports = []

        parser = MetarParser(default_kind=def_kind, ref_time=self.datetime)
        for l in it:
            # Skip SAOs
            if l[3:7] != ' SA ':
                try:
                    report = parser.parse(l)
                    # Only add the report if it's not empty
                    if report:
                        self.reports.append(report)
                except ParseError as e:
                    if self.strict:
                        raise
                    else:
                        # BUG FIX: exceptions have no .message attribute on
                        # Python 3; log the exception object itself instead.
                        log.warning('Error parsing report: %s (%s)', l, e)

    def __str__(self):
        return (super(MetarProduct, self).__str__() + '\n\tReports:' +
                '\n\t\t'.join(map(str, self.reports)))
47
|
|
|
|
48
|
|
|
|
49
|
|
|
def as_value(val, units):
    """Parse a value from a METAR report, attaching units.

    Parameters
    ----------
    val : str or None
        The raw string from the report. ``None`` passes through unchanged; a
        group of all slashes means "missing" and yields NaN.
    units : unit-like
        The units to attach (the parsed float is multiplied by this).

    Returns
    -------
    The parsed quantity, or ``None`` when ``val`` is ``None``.

    Raises
    ------
    ParseError
        If ``val`` cannot be interpreted as a numeric value.
    """
    try:
        if val is None:
            return None
        elif val[0] in 'MP':
            # M (less than) / P (greater than) qualifiers are currently dropped
            log.warning('Got unhandled M/P value: %s', val)
            val = val[1:]
        elif val == '/' * len(val):
            # All-slash group means the value is missing
            val = 'NaN'
        return float(val) * units
    # BUG FIX: include IndexError so an empty string raises ParseError like
    # every other unparseable value, instead of escaping as IndexError
    except (AttributeError, IndexError, TypeError, ValueError):
        raise ParseError('Could not parse "%s" as a value' % val)
62
|
|
|
|
63
|
|
|
|
64
|
|
|
# Helper for parsing. Generates a function to grab a given group from the matches,
# optionally applying a converter
def grab_group(group, conv=None):
    """Build a processor that extracts ``group`` from a match dict.

    When ``conv`` is given, it is applied to the extracted value.
    """
    if conv is None:
        def process(matches, *args):
            return matches[group]
    else:
        def process(matches, *args):
            return conv(matches[group])
    return process
74
|
|
|
|
75
|
|
|
|
76
|
|
|
class MetarParser(object):
    """Parse a single METAR report into a dictionary of decoded groups.

    The parser walks an ordered list of group parsers over the main body of
    the report, then applies the remarks parsers to anything after ``RMK``.
    """

    def __init__(self, default_kind='METAR', ref_time=None):
        """Initialize the parser.

        Parameters
        ----------
        default_kind : str
            Report type to assume when the report itself has none
            ('METAR' or 'SPECI').
        ref_time : datetime, optional
            Reference time supplying the year/month for the report's
            day/hour/minute time stamp. Defaults to the current UTC time.
        """
        # Reports should start with METAR/SPECI, but of course NWS doesn't actually
        # do this...
        self.default_kind = default_kind

        # Can specify the appropriate date for year/month. Defaults to using current
        self.ref_time = ref_time if ref_time else datetime.utcnow()

        # Main expected groups in the report, tried in this order
        self.main_groups = [('kind', kind(default_kind)), ('stid', stid),
                            ('datetime', dt(ref_time)), ('null', null), ('auto', auto),
                            ('corrected', corrected),
                            ('wind', wind), ('visibility', vis), ('runway_range', rvr),
                            ('present_wx', wx), ('sky_coverage', sky_cover),
                            ('temperature', basic_temp), ('altimeter', altimeter),
                            ('runway_state', runway_state)]

        # Complete set of possible groups in the remarks section
        self.remarks = [('volcano', volcano), ('automated', automated_type),
                        ('peak_wind', peak_wind), ('wind_shift', wind_shift),
                        ('sfc_vis', sfc_vis),
                        ('variable_vis', var_vis), ('sector_vis', sector_vis),
                        ('lightning', lightning),
                        ('precip_times', precip_times), ('thunderstorm', thunderstorm),
                        ('virga', virga),
                        ('variable_ceiling', var_ceiling), ('variable_sky_cover', var_sky),
                        ('significant_clouds', sig_cloud), ('mountains', mountains),
                        ('pressure_change', pressure_change),
                        ('sea_level_pressure', slp), ('no_speci', nospeci),
                        ('report_sequence', report_sequence),
                        ('hourly_precip', hourly_precip), ('period_precip', period_precip),
                        ('snow_6hr', snow_6hr), ('snow_depth', snow_depth),
                        ('snow_liquid_equivalent', snow_liquid_equivalent),
                        ('hourly_ice', hourly_ice), ('ice_3hr', ice_3hr), ('ice_6hr', ice_6hr),
                        ('daily_precip', daily_precip), ('cloud_types', cloud_types),
                        ('hourly_temperature', hourly_temp), ('max_temp_6hr', max_temp_6hr),
                        ('min_temp_6hr', min_temp_6hr),
                        ('daily_temperature', daily_temp_range),
                        ('pressure_tendency_3hr', press_tend),
                        ('non-operational sensors', non_op_sensors),
                        ('pilot', pilot_remark), ('needs_maintenance', maint), ('null', null)]

        # BUG FIX: use a raw string so '\s' is not treated as a string escape
        # (DeprecationWarning/SyntaxWarning on modern Python 3)
        self.clean_whitespace = re.compile(r'\s+')

    def parse(self, report):
        """Parse the report and return a dictionary of parsed results.

        Returns an empty dict when the report is garbage -- i.e. it has no
        station id plus datetime and is not an explicit NIL report.
        """
        report = self.clean_whitespace.sub(' ', report)
        ob = dict(report=report, null=False)

        # Split into main and remark sections so we can treat slightly differently
        if 'RMK' in report:
            main, remark = report.split('RMK', 1)
        else:
            main = report
            remark = ''

        # Handle badly formatted report where there is no main section
        if not main.strip():
            return dict()

        # Need to split out the trend forecast, otherwise will break parsing
        split = trend_forecast_regex.split(main, 1)
        if len(split) > 1:
            # IMPROVED: reuse the split computed above rather than splitting
            # the same string a second time
            main, match, trend = split
            trend = trend.strip()
            if trend:
                # Parse the trend section using the same main-body groups
                trend_store = dict()
                trend = self._look_for_groups(trend, self.main_groups, trend_store)
                trend_store['unparsed'] = trend
                ob['trend_forecast'] = (match, trend_store)
            else:
                ob['trend_forecast'] = match

        # Start with the main groups. Get back what remains of the report
        main = self._look_for_groups(main, self.main_groups, ob)

        # If we have anything left now, it's un-parsed data and we should flag it. We check
        # to make sure it's actually useful leftovers
        if main and set(main) - set(' /'):
            ob['unparsed'] = main.strip()

        # If we have a remarks section, try to parse it
        if remark:
            # The groups in the remarks rely upon information from earlier in the report,
            # like the current time or units
            speed_units = ob['wind']['speed'].units if 'wind' in ob else units.knot
            context = dict(datetime=ob.get('datetime', self.ref_time),
                           speed_units=units.Quantity(1.0, speed_units))

            remark = self._look_for_groups_reduce(remark, self.remarks, ob, context)
            if remark:
                ob['remarks'] = remark

        # Handle parsing garbage by checking for either datetime or null report
        if ob['null'] or ('datetime' in ob and 'stid' in ob):
            return ob
        else:
            return dict()

    def _look_for_groups(self, string, groups, store, *context):
        # Walk through the list of (name, group) and try parsing the report with the group.
        # This will return the string that was parsed, so that we can keep track of where
        # we are in the string. We use a while loop so that we can repeat a group if
        # appropriate.
        string = string.strip()
        cursor = 0
        leftover = []
        groups = iter(groups)
        name, group = next(groups)
        while True:
            # Skip spaces and newlines, won't exceed end because no trailing whitespace
            while string[cursor] == ' ':
                cursor += 1

            # Try to parse using the group.
            try:
                rng, data = group.parse(string, cursor, *context)
            except ParseError as e:
                # BUG FIX: exceptions have no .message attribute on Python 3
                log.warning('Error while parsing: %s (%s)', string, e)
                rng = data = None

            # If we got back a range, that means the group succeeded in parsing
            if rng:
                start, end = rng
                log.debug('%s parsed %s', name, string[start:end])

                # If the match didn't start at the cursor, that means we skipped some
                # data and should flag as necessary
                if start > cursor:
                    leftover.append(string[cursor:start].strip())

                # Update the cursor in the string to where the group finished parsing
                cursor = end

            # If we got back some data, we should store. Possible to get back a default
            # value even if no parsing done.
            if data is not None:
                log.debug('%s returned %s', name, data)

                # If it's a repeated group, we store in a list regardless
                if group.repeat and group.keepall:
                    store.setdefault(name, []).append(data)
                else:
                    store[name] = data

            # If we've finished the string, get out
            if cursor >= len(string):
                break

            # If we shouldn't repeat the group, get the next one
            if not group.repeat or data is None:
                try:
                    name, group = next(groups)
                except StopIteration:
                    break

        # Return what remains of the string (removing whitespace)
        leftover.append(string[cursor:].strip())
        return ' '.join(leftover)

    def _look_for_groups_reduce(self, string, groups, store, *context):
        # Like _look_for_groups, but each successful match is cut out of the
        # string rather than tracked with a cursor, since the remark groups
        # can appear in any order.
        string = string.strip()
        groups = iter(groups)
        name, group = next(groups)
        while True:
            # Try to parse using the group.
            rng, data = group.parse(string, 0, *context)

            # If we got back a range, that means the group succeeded in parsing
            if rng:
                start, end = rng
                log.debug('%s parsed %s', name, string[start:end])

                # Remove the matched text from the working string
                string = string[:start].strip() + ' ' + string[end:].strip()

            # If we got back some data, we should store. Possible to get back a default
            # value even if no parsing done.
            if data is not None:
                log.debug('%s returned %s', name, data)

                # If it's a repeated group, we store in a list regardless
                if group.repeat and group.keepall:
                    store.setdefault(name, []).append(data)
                else:
                    store[name] = data

            # If we shouldn't repeat the group, get the next one
            if not group.repeat or data is None:
                try:
                    name, group = next(groups)
                except StopIteration:
                    break

        # Return what remains of the string (removing whitespace)
        return string.strip()
277
|
|
|
|
278
|
|
|
# |
279
|
|
|
# Parsers for METAR groups -- main report |
280
|
|
|
# |
281
|
|
|
|
282
|
|
|
|
283
|
|
|
# Parse out METAR/SPECI
def kind(default):
    """Return a parser for the report type, falling back to ``default``."""
    return RegexParser(r'\b(?P<kind>METAR|SPECI)\b', grab_group('kind'), default=default)

# Grab STID (CCCC)
stid = RegexParser(r'\b(?P<stid>[0-9A-Z]{4})\b', grab_group('stid'))


# Process the datetime in METAR to a full datetime (YYGGggZ)
def dt(ref_time):
    """Return a parser converting the day/hour/minute stamp to a datetime."""
    return RegexParser(r'\b(?P<datetime>[0-3]\d[0-5]\d[0-5]\dZ)',
                       lambda matches: parse_wmo_time(matches['datetime'], ref_time))

# Look for AUTO (fully automated report)
auto = RegexParser(r'\b(?P<auto>AUTO)', grab_group('auto', bool), default=False)

# Look for COR (corrected report)
corrected = RegexParser(r'\b(?P<cor>COR)\b', grab_group('cor', bool), default=False)

# Look for NIL (null/missing) reports
null = RegexParser(r'\b(?P<null>NIL)', grab_group('null', bool), default=False)
304
|
|
|
|
305
|
|
|
|
306
|
|
|
# Process the full wind group (dddfffGfffKT dddVddd)
def process_wind(matches):
    """Attach units to the pieces of the wind group and return the dict."""
    # MPS (meters per second) is used internationally; otherwise assume knots
    speed_unit = units('m/s') if matches.pop('units') == 'MPS' else units.knot
    # VRB (variable) direction is kept as the string
    if matches['direction'] != 'VRB':
        matches['direction'] = as_value(matches['direction'], units.deg)
    matches['speed'] = as_value(matches['speed'], speed_unit)
    matches['gust'] = as_value(matches['gust'], speed_unit)
    # Optional variable-direction range (dddVddd)
    matches['dir1'] = as_value(matches['dir1'], units.deg)
    matches['dir2'] = as_value(matches['dir2'], units.deg)
    return matches

wind = RegexParser(r'''(?P<direction>VRB|///|[0-3]\d{2})
                       (?P<speed>P?[\d]{2,3}|//)
                       (G(?P<gust>P?\d{2,3}))?
                       ((?P<units>KT|MPS)|\b|\ )
                       (\ (?P<dir1>\d{3})V(?P<dir2>\d{3}))?''', process_wind)


# The visibility group (VVVVV) -- lookup of valid fraction strings to floats
frac_conv = {'1/4': 1 / 4, '1/2': 1 / 2, '3/4': 3 / 4,
             '1/8': 1 / 8, '3/8': 3 / 8, '5/8': 5 / 8, '7/8': 7 / 8,
             '1/16': 1 / 16, '3/16': 3 / 16, '5/16': 5 / 16, '7/16': 7 / 16,
             '9/16': 9 / 16, '11/16': 11 / 16, '13/16': 13 / 16, '15/16': 15 / 16}
329
|
|
|
|
330
|
|
|
|
331
|
|
|
def frac_to_float(frac):
    """Look up a visibility fraction string, raising ParseError if unknown."""
    if frac in frac_conv:
        return frac_conv[frac]
    raise ParseError('%s is not a valid visibility fraction' % frac)
336
|
|
|
|
337
|
|
|
|
338
|
|
|
def vis_to_float(dist, units):
    """Convert visibility, including fraction, to a value with units.

    Parameters
    ----------
    dist : str
        Raw visibility string, e.g. '10', '1 1/2', 'M1/4', or '////'.
    units : unit-like
        Units to attach to the result.

    Returns
    -------
    The visibility with units attached (NaN for an all-slash group).
    """
    if dist[0] == 'M':
        # Leading M means "less than"; the qualifier is dropped
        dist = dist[1:]
    dist = dist.strip()

    if '/' in dist:
        # Handle the case where the entire group is all '////'
        if dist[0] == '/' and all(c == '/' for c in dist):
            return float('nan') * units
        # BUG FIX: str.split() rejects the maxsplit keyword on Python 2,
        # which this module supports -- pass it positionally instead
        parts = dist.split(None, 1)
        if len(parts) > 1:
            # Mixed number like '1 1/2': whole part plus fraction
            return as_value(parts[0], units) + frac_to_float(parts[1]) * units
        else:
            return frac_to_float(dist) * units
    else:
        return as_value(dist, units)
355
|
|
|
|
356
|
|
|
|
357
|
|
|
def process_vis(matches):
    """Convert the visibility group to a value, or the string 'CAVOK'."""
    if matches['cavok']:
        # Ceiling And Visibility OK -- no numeric value
        return 'CAVOK'
    elif matches['vismiles']:
        # US-style statute miles, possibly including a fraction
        return vis_to_float(matches['vismiles'], units.mile)
    elif matches['vismetric']:
        # International 4-digit visibility in meters
        return as_value(matches['vismetric'], units.meter)

vis = RegexParser(r'''(?P<cavok>CAVOK)|
                      ((?P<vismiles>M?(([1-9]\d?)|(([12][ ]?)?1?[13579]/1?[2468])|////))SM\b)|
                      (?P<vismetric>\b\d{4}\b)''', process_vis)
368
|
|
|
|
369
|
|
|
|
370
|
|
|
# Runway visual range (RDD/VVVV(VVVVV)FT)
def to_rvr_value(dist, units):
    """Convert an RVR distance, dropping an M (less than)/P (greater than) prefix."""
    if dist[0] in ('M', 'P'):
        dist = dist[1:]
    return as_value(dist, units)


def process_rvr(matches):
    """Build a dict mapping runway id to its visual range (and trend, if present)."""
    dist_units = units(matches.pop('units').lower())
    ret = dict()
    ret[matches['runway']] = to_rvr_value(matches['distance'], dist_units)
    # Variable range reported as min V max
    if matches['max_dist']:
        ret[matches['runway']] = (ret[matches['runway']],
                                  to_rvr_value(matches['max_dist'], dist_units))
    # Trailing letter gives the trend of the range
    if matches['change']:
        change_map = dict(D='down', U='up', N='no change')
        ret[matches['runway']] = (ret[matches['runway']], change_map[matches['change']])

    return ret

rvr = RegexParser(r'''R(?P<runway>\d{2}[RLC]?)
                      /(?P<distance>[MP]?\d{4})
                      (V(?P<max_dist>[MP]?\d{4}))?
                      (?P<units>FT)/?(?P<change>[UDN])?''', process_rvr)
394
|
|
|
|
395
|
|
|
|
396
|
|
|
# Present weather (w'w') -- precipitation codes to human-readable names
precip_abbr = {'DZ': 'Drizzle', 'RA': 'Rain', 'SN': 'Snow', 'SG': 'Snow Grains',
               'IC': 'Ice Crystals', 'PL': 'Ice Pellets', 'GR': 'Hail',
               'GS': 'Small Hail or Snow Pellets', 'UP': 'Unknown Precipitation',
               'RASN': 'Rain and Snow'}
401
|
|
|
|
402
|
|
|
|
403
|
|
|
class Weather(namedtuple('WxBase', 'mod desc precip obscur other')):
    """A decoded present-weather group, one field per METAR weather element."""

    # Human-readable translation tables, one per field, in field order
    lookups = [{'-': 'Light', '+': 'Heavy', 'VC': 'In the vicinity'},
               {'MI': 'Shallow', 'PR': 'Partial', 'BC': 'Patches', 'DR': 'Low Drifting',
                'BL': 'Blowing', 'SH': 'Showers', 'TS': 'Thunderstorm', 'FZ': 'Freezing'},
               precip_abbr,
               {'BR': 'Mist', 'FG': 'Fog', 'FU': 'Smoke', 'VA': 'Volcanic Ash',
                'DU': 'Widespread Dust', 'SA': 'Sand', 'HZ': 'Haze', 'PY': 'Spray'},
               {'PO': 'Well-developed Dust/Sand Whirls', 'SQ': 'Squalls', 'FC': 'Funnel Cloud',
                'SS': 'Sandstorm', 'DS': 'Duststorm'}]

    @classmethod
    def fillin(cls, **kwargs):
        """Create an instance with only ``kwargs`` set; other fields are None."""
        empty = cls(None, None, None, None, None)
        return empty._replace(**kwargs)

    def __str__(self):
        # Special case: a heavy funnel cloud is reported as a tornado
        if self.mod == '+' and self.other == 'FC':
            return 'Tornado'

        return ' '.join(table[field] for field, table in zip(self, self.lookups) if field)
424
|
|
|
|
425
|
|
|
|
426
|
|
|
def process_wx(matches):
    """Normalize a present-weather match into a Weather namedtuple."""
    if matches['vdesc']:
        # Standalone descriptor (e.g. VCSH): move into the normal mod/desc slots
        matches['mod'] = matches.pop('vicinity')
        matches['desc'] = matches.pop('vdesc')
        # ST appears as a typo for TS in some reports
        if matches['desc'] == 'ST':
            matches['desc'] = 'TS'
    else:
        matches.pop('vdesc')
        matches.pop('vicinity')

    return Weather(**matches)

wx = RegexParser(r'''(((?P<mod>[-+])|\b) # Begin with one of these mods or nothing
                     (?P<desc>MI|PR|BC|DR|BL|SH|TS|FZ)?
                     ((?P<precip>(DZ|RA|SN|SG|IC|PL|GR|GS|UP){1,3})
                     |(?P<obscur>BR|FG|FU|VA|DU|SA|HZ|PY)
                     |(?P<other>PO|SQ|FC|SS|DS)))
                     |((?P<vicinity>VC)?(?P<vdesc>SH|TS|ST))''', process_wx, repeat=True)
444
|
|
|
|
445
|
|
|
|
446
|
|
|
# Sky condition (NNNhhh or VVhhh or SKC/CLR)
def process_sky(matches):
    """Return (height, coverage in oktas, cumulus type) for one cloud layer."""
    # Coverage codes mapped to oktas; BKM is a common typo for BKN
    coverage_to_value = dict(VV=8, FEW=2, SCT=4, BKN=6, BKM=6, OVC=8)
    if matches.pop('clear'):
        return 0, 0, None
    # Heights are reported in hundreds of feet
    hgt = as_value(matches['height'], 100 * units.feet)
    return hgt, coverage_to_value[matches['coverage']], matches['cumulus']

sky_cover = RegexParser(r'''\b(?P<clear>SKC|CLR|NSC|NCD)\b|
                            ((?P<coverage>VV|FEW|SCT|BK[MN]|OVC)
                            \ ?(?P<height>\d{3})
                            (?P<cumulus>CB|TCU)?)''', process_sky, repeat=True)
458
|
|
|
|
459
|
|
|
|
460
|
|
|
# Temperature/Dewpoint group -- whole values (TT/TdTd)
def parse_whole_temp(temp):
    """Convert a whole-degree temperature token to degC; '//' or 'MM' gives NaN."""
    if temp in ('//', 'MM'):
        # Missing value
        return float('NaN') * units.degC
    if temp.startswith('M'):
        # Leading M marks a below-zero temperature
        return -as_value(temp[1:], units.degC)
    return as_value(temp, units.degC)
468
|
|
|
|
469
|
|
|
|
470
|
|
|
def process_temp(matches):
    """Return (temperature, dewpoint); dewpoint is NaN when not reported."""
    temp = parse_whole_temp(matches['temperature'])
    if matches['dewpoint']:
        dewpt = parse_whole_temp(matches['dewpoint'])
    else:
        dewpt = float('NaN') * units.degC

    return temp, dewpt

basic_temp = RegexParser(r'''(?P<temperature>(M?\d{2})|MM)/
                             (?P<dewpoint>(M?[\d]{1,2})|//|MM)?''', process_temp)


# Altimeter setting (APPPP)
def process_altimeter(matches):
    """Attach the proper units based on the A (US) or Q (international) prefix."""
    if matches['unit'] == 'A':
        # A prefix: hundredths of inches of mercury
        alt_unit = 0.01 * units.inHg
    else:
        # Q prefix: whole millibars
        alt_unit = units('mbar')
    return as_value(matches['altimeter'], alt_unit)

altimeter = RegexParser(r'\b(?P<unit>[AQ])(?P<altimeter>\d{4})', process_altimeter,
                        repeat=True, keepall=False)
493
|
|
|
|
494
|
|
|
#
# Extended International groups
#

# Runway conditions: fraction of runway contaminated, keyed by code digit
runway_extent = {'1': 0.1, '2': 0.25, '5': 0.5, '9': 1.0, '/': float('NaN')}
# Runway contaminant type, keyed by code digit
runway_contaminant = {'0': 'Clear and dry', '1': 'Damp', '2': 'Wet and water patches',
                      '3': 'Rime and frost covered', '4': 'Dry snow', '5': 'Wet snow',
                      '6': 'Slush', '7': 'Ice', '8': 'Compacted or rolled snow',
                      '9': 'Frozen ruts or ridges', '/': 'No Report'}


def runway_code_to_depth(code):
    """Decode the two-digit deposit depth code to a depth (or 'Inoperable')."""
    if code == '//':
        # Not reported
        return float('NaN') * units.mm
    code = int(code)
    if code < 91:
        # 00-90: depth directly in millimeters
        return code * units.mm
    elif code < 99:
        # Coded depths in 5 cm steps
        # NOTE(review): this also maps code 91 to 5 cm -- confirm that is intended
        return (code - 90) * 5 * units.cm
    else:
        return 'Inoperable'
516
|
|
|
|
517
|
|
|
|
518
|
|
|
def runway_code_to_braking(code):
    """Decode the braking action/friction code.

    Codes below 91 are a friction coefficient; 91-95 are descriptive
    categories; anything else is 'unknown'. '//' yields NaN.
    """
    if code == '//':
        return float('NaN')
    value = int(code)
    if value < 91:
        return float(value) / 100
    descriptions = {91: 'poor', 92: 'medium/poor', 93: 'medium', 94: 'medium/good',
                    95: 'good'}
    return descriptions.get(value, 'unknown')
527
|
|
|
|
528
|
|
|
|
529
|
|
|
def process_runway_state(matches):
    """Decode the international runway state group into readable values."""
    if matches['deposit']:
        matches['deposit'] = runway_contaminant.get(matches['deposit'], 'Unknown')
    if matches['extent']:
        matches['extent'] = runway_extent.get(matches['extent'], 'Unknown')
    if matches['depth']:
        matches['depth'] = runway_code_to_depth(matches['depth'])

    matches['cleared'] = bool(matches['cleared'])
    matches['braking'] = runway_code_to_braking(matches['braking'])

    return matches


runway_state = RegexParser(r'''\bR(?P<runway>\d{2})
                               /((?P<deposit>[\d/])(?P<extent>[\d/])(?P<depth>\d{2}|//)|(?P<cleared>CLRD))?
                               (?P<braking>\d{2}|//)''', process_runway_state)
546
|
|
|
|
547
|
|
|
# Trend forecast (mostly international): NOSIG/BECMG/TEMPO marks its start
trend_forecast_regex = re.compile(r'\b(?P<trend>NOSIG|BECMG|TEMPO)')
549
|
|
|
|
550
|
|
|
# |
551
|
|
|
# Parsers for METAR groups -- remarks |
552
|
|
|
# |
553
|
|
|
|
554
|
|
|
|
555
|
|
|
# Combine time in the remark with the report datetime to make a proper datetime object
def process_time(matches, context):
    """Merge an (optional hour, minute) remark time into the report's datetime."""
    fields = {'minute': int(matches['minute'])}
    if matches['hour']:
        fields['hour'] = int(matches['hour'])

    return context['datetime'].replace(**fields)
562
|
|
|
|
563
|
|
|
# Volcanic eruption, first in NWS reports -- grab the whole free-text remark
volcano = RegexParser(r'[A-Z0-9 .]*VOLCANO[A-Z0-9 .]*')

# Type of automatic station (AO1/AO2; zero tolerated as a typo for the letter O)
automated_type = RegexParser(r'\bA[O0][12]A?')
568
|
|
|
|
569
|
|
|
|
570
|
|
|
# Peak wind remark (PK WND dddfff/hhmm)
def process_peak_wind(matches, context):
    """Decode peak wind direction/speed and its time of occurrence."""
    peak_time = process_time(matches, context)
    # Speed units come from the main wind group, carried in the parsing context
    return dict(time=peak_time, speed=as_value(matches['speed'], context['speed_units']),
                direction=as_value(matches['direction'], units.deg))

peak_wind = RegexParser(r'''\bPK\ WND\ ?(?P<direction>\d{3})
                            (?P<speed>\d{2,3})/
                            (?P<hour>\d{2})?
                            (?P<minute>\d{2})''', process_peak_wind)


# Wind shift (WSHFT hhmm)
def process_shift(matches, context):
    """Decode the wind shift time and whether it was due to a frontal passage."""
    time = process_time(matches, context)
    front = bool(matches['frontal'])
    return dict(time=time, frontal=front)

wind_shift = RegexParser(r'''\bWSHFT\ (?P<hour>\d{2})?
                             (?P<minute>\d{2})
                             \ (?P<frontal>FROPA)?''', process_shift)
591
|
|
|
|
592
|
|
|
|
593
|
|
|
# Tower/surface visibility (TWR(SFC) VIS vvvvv)
def process_twrsfc_vis(matches, *args):
    """Return {'tower'|'surface': visibility} for the TWR/SFC VIS remark."""
    abbr_to_kind = dict(TWR='tower', SFC='surface')
    return {abbr_to_kind[matches['kind']]: vis_to_float(matches['vis'], units.mile)}

sfc_vis = RegexParser(r'''(?P<kind>TWR|SFC)\ VIS
                          \ (?P<vis>[0-9 /]{1,5})''', process_twrsfc_vis)


# Variable prevailing visibility (VIS vvvvvVvvvvv)
def process_var_vis(matches, *args):
    """Return the (low, high) pair of variable visibilities in miles."""
    vis1 = vis_to_float(matches['vis1'], units.mile)
    vis2 = vis_to_float(matches['vis2'], units.mile)
    return vis1, vis2

# (([1-9]\d?)|(([12][ ]?)?1?[13579]/1?[2468]))
var_vis = RegexParser(r'''VIS\ (?P<vis1>M?((([12][ ]?)?1?[13579]/1?[2468])|([1-9]\d?)))
                          V(?P<vis2>((([12][ ]?)?1?[13579]/1?[2468])|([1-9]\d?)))''', process_var_vis)


# Sector visibility (VIS DIR vvvvv)
def process_sector_vis(matches, *args):
    """Return {compass direction: visibility} for a sector visibility remark."""
    # compass_to_float = dict(N=0, NE=45, E=90, SE=135, S=180, SW=225, W=270, NW=315)
    vis = vis_to_float(matches['vis'], units.mile)
    return {matches['direc']: vis}

sector_vis = RegexParser(r'''VIS\ (?P<direc>[NSEW]{1,2})
                             \ (?P<vis>[0-9 /]{1,5})''', process_sector_vis)
621
|
|
|
|
622
|
|
|
|
623
|
|
|
# Lightning
def process_lightning(matches, *args):
    """Tidy a lightning remark match: drop empty fields, split the type codes.

    The combined type string (e.g. 'ICCG') becomes a list of two-letter
    codes. The (mutated) match dict is returned.
    """
    # Drop any optional fields that didn't match
    for key in ('dist', 'loc', 'frequency'):
        if not matches[key]:
            matches.pop(key)

    type_str = matches['type']
    if not type_str:
        matches.pop('type')
    else:
        # Chop the run of codes into two-character chunks
        matches['type'] = [type_str[i:i + 2] for i in range(0, len(type_str), 2)]

    return matches
644
|
|
|
|
645
|
|
|
# Lightning remark: optional frequency, LTG plus type codes, distance, and location
lightning = RegexParser(r'''((?P<frequency>OCNL|FRQ|CONS)\ )?
                            \bLTG(?P<type>(IC|CG|CC|CA)+)?
                            \ ((?P<dist>OHD|VC|DSNT)\ )?
                            (?P<loc>([NSEW\-]|ALQD?S|\ AND\ |\ THRU\ )+)?\b''',
                        process_lightning)
650
|
|
|
|
651
|
|
|
# Precipitation/Thunderstorm begin and end
precip_times_regex = re.compile(r'([BE])(\d{2,4})')


def process_precip_times(matches, context):
    """Decode begin/end markers into a list of (start, end) datetime pairs.

    An end with no recorded beginning (or vice versa) stores None in the
    missing slot.
    """
    ref_time = context['datetime']
    kind = matches['precip']
    times = []
    start = None
    for be, time in precip_times_regex.findall(matches['times']):
        # Two digits are minutes only; four digits are hhmm
        if len(time) == 2:
            time = ref_time.replace(minute=int(time))
        else:
            time = ref_time.replace(hour=int(time[:2]), minute=int(time[2:4]))

        if be == 'B':
            start = time
        else:
            if start:
                times.append((start, time))
                start = None
            else:
                # End time with no recorded beginning
                times.append((None, time))

    # Beginning with no recorded end
    if start:
        times.append((start, None))

    return kind, times

precip_times = RegexParser(r'''(SH)?(?P<precip>TS|DZ|FZRA|RA|SN|SG|IC|PL|GR|GS|UP)
                               (?P<times>([BE]([0-2]\d)?[0-5]\d)+)''',
                           process_precip_times, repeat=True)
683
|
|
|
|
684
|
|
|
|
685
|
|
|
# Thunderstorm (TS LOC MOV DIR)
def process_thunderstorm(matches, *args):
    """Return the raw location/movement match dict unchanged."""
    return matches

thunderstorm = RegexParser(r'''\bTS\ (?P<loc>[NSEW\-]+)(\ MOV\ (?P<mov>[NSEW\-]+))?''',
                           process_thunderstorm)

# Virga, with direction from the station
virga = RegexParser(r'''\bVIRGA\ (?P<direction>[NSEW\-])''', grab_group('direction'))
694
|
|
|
|
695
|
|
|
|
696
|
|
|
# Variable Ceiling (CIG hhhVhhh)
def process_var_ceiling(matches, *args):
    """Return the (low, high) ceiling pair, heights in hundreds of feet."""
    return (as_value(matches['ceil1'], 100 * units.feet),
            as_value(matches['ceil2'], 100 * units.feet))

var_ceiling = RegexParser(r'\bCIG\ (?P<ceil1>\d{3})V(?P<ceil2>\d{3})\b', process_var_ceiling)


# Variable sky cover
def process_var_sky(matches, *args):
    """Combine the two coverage codes; attach units to the layer height."""
    matches['height'] = as_value(matches['height'], 100 * units.feet)
    matches['cover'] = (matches.pop('cover1'), matches.pop('cover2'))
    return matches

var_sky = RegexParser(r'''\b(?P<cover1>CLR|FEW|SCT|BKN|OVC)
                          (?P<height>\d{3})?\ V
                          \ (?P<cover2>CLR|FEW|SCT|BKN|OVC)''', process_var_sky)

# Mountains obscured
mountains = RegexParser(r'''\bMTNS?(\ PTLY)?(\ OBSCD?)?(\ DSNT)?(\ [NSEW\-]+)?''')

# Significant cloud types (CLD DIR (MOV DIR))
sig_cloud = RegexParser(r'''(?P<cloudtype>CB(MAM)?|TCU|ACC|[ACS]CSL|(APRNT\ ROTOR\ CLD))
                            \ (?P<dir>VC\ ALQD?S|[NSEW-]+)(\ MOV\ (?P<movdir>[NSEW]{1,2}))?''')
720
|
|
|
|
721
|
|
|
|
722
|
|
|
# Cloud Types (8/ClCmCh)
def process_cloud_types(matches, *args):
    """Convert each low/middle/high cloud-type code to an int ('/' becomes None)."""
    return {key: None if code == '/' else int(code)
            for key, code in matches.items()}
731
|
|
|
|
732
|
|
|
# Cloud type remark: 8/ followed by low/middle/high code digits (or '/')
cloud_types = RegexParser(r'''\b8/(?P<low>[\d/])(?P<middle>[\d/])(?P<high>[\d/])''',
                          process_cloud_types)
734
|
|
|
|
735
|
|
|
|
736
|
|
|
# Pressure changes (PRESRR/PRESFR)
def process_pressure_change(matches, *args):
    """Translate the R/F tendency letter into a descriptive string."""
    return 'rising rapidly' if matches['tend'] == 'R' else 'falling rapidly'
742
|
|
|
|
743
|
|
|
# Matches PRESRR (rising) / PRESFR (falling)
pressure_change = RegexParser(r'\bPRES(?P<tend>[FR])R\b', process_pressure_change)
744
|
|
|
|
745
|
|
|
|
746
|
|
|
# Sea-level pressure (SLPppp)
def process_slp(matches, *args):
    """Decode the 3-digit SLP (tenths of mb) to a full pressure; 'NO' gives NaN."""
    if matches['slp'] == 'NO':
        matches['slp'] = 'NaN'

    slp = as_value(matches['slp'], 0.1 * units('mbar'))
    # Only the final three digits are reported; restore the leading digits
    # that give a realistic sea-level pressure
    if slp < 50 * units('mbar'):
        slp += 1000 * units('mbar')
    else:
        slp += 900 * units('mbar')
    return slp

slp = RegexParser(r'SLP(?P<slp>\d{3}|NO)', process_slp)
759
|
|
|
|
760
|
|
|
|
761
|
|
|
# No SPECI |
762
|
|
|
nospeci = RegexParser(r'\bNO(\ )?SPECI') |
763
|
|
|
|
764
|
|
|
# First/last report |
765
|
|
|
report_sequence = RegexParser(r'''\b(FIRST|LAST)''') |
766
|
|
|
|
767
|
|
|
|
768
|
|
|
# Parse precip report |
769
|
|
|
def parse_rmk_precip(precip): |
770
|
|
|
return as_value(precip, 0.01 * units.inch) |
771
|
|
|
|
772
|
|
|
|
773
|
|
|
# Hourly Precip (Prrrr) |
774
|
|
|
hourly_precip = RegexParser(r'\bP(?P<precip>\d{4})\b', grab_group('precip', parse_rmk_precip)) |
775
|
|
|
|
776
|
|
|
# 3/6-hour precip (6RRRR) |
777
|
|
|
period_precip = RegexParser(r'\b6(?P<precip>\d{4}|////)', |
778
|
|
|
grab_group('precip', parse_rmk_precip)) |
779
|
|
|
|
780
|
|
|
|
781
|
|
|
# Parse snow report
def parse_rmk_snow(snow):
    """Convert a snowfall remarks group (tenths of an inch) to a quantity."""
    tenths_inch = 0.1 * units.inch
    return as_value(snow, tenths_inch)
784
|
|
|
|
785
|
|
|
# 6-hour snow (931RRR), tenths of an inch
snow_6hr = RegexParser(r'\b931(?P<snow>\d{3})\b', grab_group('snow', parse_rmk_snow))
787
|
|
|
|
788
|
|
|
|
789
|
|
|
def parse_rmk_snow_depth(snow):
    """Convert a snow-depth remarks group (whole inches) to a quantity."""
    depth_unit = units.inch
    return as_value(snow, depth_unit)
791
|
|
|
|
792
|
|
|
# Snow depth (4/sss), whole inches
snow_depth = RegexParser(r'\b4/(?P<snow>\d{3})\b', grab_group('snow', parse_rmk_snow_depth))

# Snow liquid equivalent (933RRR), tenths of an inch
snow_liquid_equivalent = RegexParser(r'\b933(?P<snow>\d{3})\b',
                                     grab_group('snow', parse_rmk_snow))

# 24-hour precip (7RRRR); '////' for a missing amount, like period_precip
daily_precip = RegexParser(r'\b7(?P<precip>\d{4}|////)',
                           grab_group('precip', parse_rmk_precip))

# Ice accretion groups reuse the precip parser, so the three digits are
# interpreted as hundredths of an inch.

# Hourly ice accretion (I1RRR)
hourly_ice = RegexParser(r'\bI1(?P<ice>\d{3})', grab_group('ice', parse_rmk_precip))

# 3-hour ice accretion (I3RRR)
ice_3hr = RegexParser(r'\bI3(?P<ice>\d{3})', grab_group('ice', parse_rmk_precip))

# 6-hour ice accretion (I6RRR)
ice_6hr = RegexParser(r'\bI6(?P<ice>\d{3})', grab_group('ice', parse_rmk_precip))
811
|
|
|
|
812
|
|
|
|
813
|
|
|
# Handles parsing temperature format from remarks
def parse_rmk_temp(temp):
    """Decode an sTTT remarks temperature group (tenths of degC).

    A leading '1' is the sign digit for a negative temperature; anything
    else (typically '0') is parsed as-is.
    """
    if not temp.startswith('1'):
        return as_value(temp, 0.1 * units.degC)
    return -as_value(temp[1:], 0.1 * units.degC)
819
|
|
|
|
820
|
|
|
|
821
|
|
|
# Hourly temperature (TsTTTsTdTdTd)
def process_hourly_temp(matches, *args):
    """Decode the hourly temperature/dewpoint group.

    Returns a (temperature, dewpoint) pair; the dewpoint is NaN degC when
    its group is absent from the report.
    """
    dew_group = matches['dewpoint']
    dewpt = parse_rmk_temp(dew_group) if dew_group else float('NaN') * units.degC
    return parse_rmk_temp(matches['temperature']), dewpt
829
|
|
|
|
830
|
|
|
# NOTE(review): the embedded newline/indent inside the pattern assumes
# RegexParser compiles with re.VERBOSE so that whitespace is ignored -- confirm
hourly_temp = RegexParser(r'''\bT(?P<temperature>[01]\d{3})
                          (?P<dewpoint>[01]\d{3})?''', process_hourly_temp)
832
|
|
|
|
833
|
|
|
|
834
|
|
|
# 6-hour max temp (1sTTT)
max_temp_6hr = RegexParser(r'\b1(?P<temperature>[01]\d{3})\b',
                           grab_group('temperature', parse_rmk_temp))

# 6-hour min temp (2sTTT) -- the original comment said "max", a copy-paste slip
min_temp_6hr = RegexParser(r'\b2(?P<temperature>[01]\d{3})\b',
                           grab_group('temperature', parse_rmk_temp))
841
|
|
|
|
842
|
|
|
|
843
|
|
|
# 24-hour temp (4sTTTsTTT)
def process_daily_temp(matches, *args):
    """Return the (min, max) pair decoded from the 24-hour temperature group."""
    tmin = parse_rmk_temp(matches['min'])
    tmax = parse_rmk_temp(matches['max'])
    return tmin, tmax
846
|
|
|
|
847
|
|
|
# 24-hour temperature range (4sTTTsTTT): max group comes first in the report,
# but the handler returns (min, max)
daily_temp_range = RegexParser(r'\b4(?P<max>[01]\d{3})\ ?(?P<min>[01]\d{3})\b',
                               process_daily_temp)
849
|
|
|
|
850
|
|
|
|
851
|
|
|
# 3-hour pressure tendency (5appp)
def process_press_tend(matches, *args):
    """Return (tendency character code, pressure change in tenths of mbar)."""
    character = int(matches['character'])
    amount = as_value(matches['amount'], 0.1 * units.mbar)
    return character, amount
854
|
|
|
|
855
|
|
|
# 3-hour pressure tendency: character digit 0-8 plus a three-digit amount
press_tend = RegexParser(r'5(?P<character>[0-8])(?P<amount>\d{3})\b', process_press_tend)
856
|
|
|
|
857
|
|
|
|
858
|
|
|
# Parse non-operational sensors
def process_nonop_sensors(matches, *args):
    """Translate a non-operational sensor code into a human-readable name.

    Bare codes return just the name; VISNO/CHINO return (name, location).
    Unknown codes are passed through unchanged; returns None when neither
    group matched.
    """
    names = {
        'RVRNO': 'Runway Visual Range',
        'PWINO': 'Present Weather Identifier',
        'PNO': 'Precipitation',
        'FZRANO': 'Freezing Rain Sensor',
        'TSNO': 'Lightning Detection System',
        'VISNO': 'Secondary Visibility Sensor',
        'CHINO': 'Secondary Ceiling Height Indicator',
    }
    code = matches['nonop']
    if code:
        return names.get(code, code)
    code = matches['nonop2']
    if code:
        return names.get(code, code), matches['loc']
868
|
|
|
|
869
|
|
|
# Non-operational sensors: either a bare code (RVRNO etc.) or VISNO/CHINO
# followed by a location word; repeat=True collects every occurrence.
# NOTE(review): the pattern's internal whitespace assumes re.VERBOSE -- confirm
non_op_sensors = RegexParser(r'''\b(?P<nonop>RVRNO|PWINO|PNO|FZRANO|TSNO)
                             |((?P<nonop2>VISNO|CHINO)\ (?P<loc>\w+))''',
                             process_nonop_sensors, repeat=True)

# Some free-text remarks (ATIS notes and QFE altimeter values)
pilot_remark = RegexParser(r'([\w\ ;\.]*ATIS\ \w[\w\ ;\.]*)|(QFE[\d\.\ ]+)')

# Parse maintenance flag: presence of '$' maps to True, otherwise False
maint = RegexParser(r'(?P<maint>\$)', grab_group('maint', bool), default=False)
878
|
|
|
|