| Total Complexity | 104 |
| Total Lines | 540 |
| Duplicated Lines | 11.11 % |
| Changes | 0 | ||
Duplicate code is one of the most common code smells. A widely used rule of thumb is to restructure code once it is duplicated in three or more places.
Common duplication problems, and corresponding solutions are:
Complex classes like reports.powerquality often do a lot of different things. To break such a class down, we need to identify a cohesive component within that class. A common approach to find such a component is to look for fields/methods that share the same prefixes, or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.
| 1 | import re |
||
| 2 | from datetime import datetime, timedelta, timezone |
||
| 3 | import falcon |
||
| 4 | import mysql.connector |
||
| 5 | import simplejson as json |
||
| 6 | import config |
||
| 7 | import excelexporters.powerquality |
||
| 8 | from core import utilities |
||
| 9 | from core.useractivity import access_control, api_key_control |
||
| 10 | |||
| 11 | |||
class Reporting:
    """Falcon resource that builds the power quality report for one meter.

    PROCEDURES
    Step 1: validate parameters
    Step 2: query the meter and energy category
    Step 3: query associated points
    Step 4: query reporting period point trends
    Step 4.1: analyze power quality by unit (A, V, Hz)
    Step 5: removed — tariff/parameters are not part of this API
    Step 6: construct the report
    """

    # Maps a point's object_type to the historical table that stores its
    # trend values. Keys come from tbl_points.object_type; the table names
    # are fixed constants, so interpolating them into SQL below is safe.
    _TREND_TABLES = {
        'ENERGY_VALUE': 'tbl_energy_value',
        'ANALOG_VALUE': 'tbl_analog_value',
        'DIGITAL_VALUE': 'tbl_digital_value',
    }

    def __init__(self):
        """Initializes Reporting"""
        # NOTE: fixed malformed docstring quoting ("""" -> """).
        pass

    @staticmethod
    def on_options(req, resp):
        # CORS / preflight: nothing to do but acknowledge.
        _ = req
        resp.status = falcon.HTTP_200

    @staticmethod
    def _safe_float_list(values):
        """Return *values* converted to float, dropping None entries."""
        return [float(v) for v in values if v is not None]

    @staticmethod
    def _calc_basic_stats(values):
        """Return basic statistics of *values*, or None for an empty list.

        The result dict has keys: count, min, max, mean, std (population
        standard deviation), p5 and p95 (linear interpolation between
        closest ranks).
        """
        n = len(values)
        if n == 0:
            return None
        mean = sum(values) / n
        # population standard deviation
        variance = sum((x - mean) ** 2 for x in values) / n
        sorted_vals = sorted(values)

        def _percentile(p):
            if n == 1:
                return sorted_vals[0]
            k = (n - 1) * p
            f = int(k)
            c = f + 1
            if c >= n:
                return sorted_vals[-1]
            d0 = sorted_vals[f] * (c - k)
            d1 = sorted_vals[c] * (k - f)
            return d0 + d1

        return {
            'count': n,
            'min': min(values),
            'max': max(values),
            'mean': mean,
            'std': variance ** 0.5,
            'p5': _percentile(0.05),
            'p95': _percentile(0.95)
        }

    @staticmethod
    def _query_point_trend(cursor_historical, table_name, point_id,
                           start_datetime_utc, end_datetime_utc, timezone_offset):
        """Query one point's trend in [start, end] from *table_name*.

        Returns (timestamps, values) where timestamps are local ISO-8601
        strings truncated to seconds precision. This helper replaces the
        three duplicated per-table query branches.
        """
        # table_name comes only from _TREND_TABLES, never from user input.
        query = (" SELECT utc_date_time, actual_value "
                 " FROM " + table_name + " "
                 " WHERE point_id = %s "
                 " AND utc_date_time BETWEEN %s AND %s "
                 " ORDER BY utc_date_time ")
        cursor_historical.execute(query, (point_id,
                                          start_datetime_utc,
                                          end_datetime_utc))
        rows = cursor_historical.fetchall()
        point_timestamp_list = list()
        point_value_list = list()
        if rows is not None and len(rows) > 0:
            for row in rows:
                current_datetime_local = row[0].replace(tzinfo=timezone.utc) + \
                    timedelta(minutes=timezone_offset)
                point_timestamp_list.append(current_datetime_local.isoformat()[0:19])
                point_value_list.append(row[1])
        return point_timestamp_list, point_value_list

    @staticmethod
    def _analyze_three_phase_unbalance(timestamps, s0, s1, s2, limit_pct):
        """Compute per-sample three-phase unbalance rates for one triplet.

        unbalance = max deviation of a phase from the three-phase average,
        divided by the average, in percent. Samples with a non-positive
        average are skipped. Returns (worst, worst_time, compliance, stats).
        This helper replaces the duplicated voltage/current unbalance loops.
        """
        unbalance_rates = []
        worst = -1.0
        worst_time = None
        for i in range(len(s0)):
            avg = (s0[i] + s1[i] + s2[i]) / 3.0
            if avg <= 0:
                continue
            max_dev = max(abs(s0[i] - avg), abs(s1[i] - avg), abs(s2[i] - avg))
            rate_pct = max_dev / avg * 100.0
            unbalance_rates.append(rate_pct)
            if rate_pct > worst:
                worst = rate_pct
                worst_time = timestamps[i]
        within = sum(1 for r in unbalance_rates if r <= limit_pct)
        compliance = (within / len(unbalance_rates) * 100.0) if unbalance_rates else None
        stats = Reporting._calc_basic_stats(unbalance_rates) if unbalance_rates else None
        return worst, worst_time, compliance, stats

    @staticmethod
    def on_get(req, resp):
        """Handle GET: build and return the power quality report.

        Query parameters:
            meterid / meteruuid: meter identifier (exactly one is required)
            reportingperiodstartdatetime / reportingperiodenddatetime:
                local datetimes in '%Y-%m-%dT%H:%M:%S' format (required)
            language: translation language for the Excel export
            quickmode: truthy ('true', 't', 'on', 'yes', 'y') skips
                non-energy points and the Excel export

        Raises:
            falcon.HTTPError: 400 on invalid parameters, 404 if the meter
                does not exist.
        """
        # With no API key header, fall back to session-based access control.
        if 'API-KEY' not in req.headers or \
                not isinstance(req.headers['API-KEY'], str) or \
                len(str.strip(req.headers['API-KEY'])) == 0:
            access_control(req)
        else:
            api_key_control(req)
        # NOTE: removed leftover debug print(req.params).
        meter_id = req.params.get('meterid')
        meter_uuid = req.params.get('meteruuid')
        reporting_period_start_datetime_local = req.params.get('reportingperiodstartdatetime')
        reporting_period_end_datetime_local = req.params.get('reportingperiodenddatetime')
        language = req.params.get('language')
        quick_mode = req.params.get('quickmode')

        ################################################################################################################
        # Step 1: valid parameters
        ################################################################################################################
        if meter_id is None and meter_uuid is None:
            raise falcon.HTTPError(status=falcon.HTTP_400, title='API.BAD_REQUEST', description='API.INVALID_METER_ID')

        if meter_id is not None:
            meter_id = str.strip(meter_id)
            if not meter_id.isdigit() or int(meter_id) <= 0:
                raise falcon.HTTPError(status=falcon.HTTP_400, title='API.BAD_REQUEST',
                                       description='API.INVALID_METER_ID')

        if meter_uuid is not None:
            regex = re.compile(r'^[a-f0-9]{8}-?[a-f0-9]{4}-?4[a-f0-9]{3}-?[89ab][a-f0-9]{3}-?[a-f0-9]{12}\Z', re.I)
            match = regex.match(str.strip(meter_uuid))
            if not bool(match):
                raise falcon.HTTPError(status=falcon.HTTP_400, title='API.BAD_REQUEST',
                                       description='API.INVALID_METER_UUID')

        # config.utc_offset is '+HH:MM' / '-HH:MM'; convert to signed minutes.
        timezone_offset = int(config.utc_offset[1:3]) * 60 + int(config.utc_offset[4:6])
        if config.utc_offset[0] == '-':
            timezone_offset = -timezone_offset

        if reporting_period_start_datetime_local is None:
            raise falcon.HTTPError(status=falcon.HTTP_400, title='API.BAD_REQUEST',
                                   description="API.INVALID_REPORTING_PERIOD_START_DATETIME")
        else:
            reporting_period_start_datetime_local = str.strip(reporting_period_start_datetime_local)
            try:
                reporting_start_datetime_utc = datetime.strptime(reporting_period_start_datetime_local,
                                                                 '%Y-%m-%dT%H:%M:%S')
            except ValueError:
                raise falcon.HTTPError(status=falcon.HTTP_400, title='API.BAD_REQUEST',
                                       description="API.INVALID_REPORTING_PERIOD_START_DATETIME")
            reporting_start_datetime_utc = \
                reporting_start_datetime_utc.replace(tzinfo=timezone.utc) - timedelta(minutes=timezone_offset)
            # normalize the start datetime to the counting interval boundary
            if config.minutes_to_count == 30 and reporting_start_datetime_utc.minute >= 30:
                reporting_start_datetime_utc = reporting_start_datetime_utc.replace(minute=30, second=0, microsecond=0)
            else:
                reporting_start_datetime_utc = reporting_start_datetime_utc.replace(minute=0, second=0, microsecond=0)

        if reporting_period_end_datetime_local is None:
            raise falcon.HTTPError(status=falcon.HTTP_400, title='API.BAD_REQUEST',
                                   description="API.INVALID_REPORTING_PERIOD_END_DATETIME")
        else:
            reporting_period_end_datetime_local = str.strip(reporting_period_end_datetime_local)
            try:
                reporting_end_datetime_utc = datetime.strptime(reporting_period_end_datetime_local,
                                                               '%Y-%m-%dT%H:%M:%S')
            except ValueError:
                raise falcon.HTTPError(status=falcon.HTTP_400, title='API.BAD_REQUEST',
                                       description="API.INVALID_REPORTING_PERIOD_END_DATETIME")
            reporting_end_datetime_utc = reporting_end_datetime_utc.replace(tzinfo=timezone.utc) - \
                timedelta(minutes=timezone_offset)

        if reporting_start_datetime_utc >= reporting_end_datetime_utc:
            raise falcon.HTTPError(status=falcon.HTTP_400, title='API.BAD_REQUEST',
                                   description='API.INVALID_REPORTING_PERIOD_END_DATETIME')

        # if turn quick mode on, do not return parameters data and excel file
        is_quick_mode = False
        if quick_mode is not None and \
                len(str.strip(quick_mode)) > 0 and \
                str.lower(str.strip(quick_mode)) in ('true', 't', 'on', 'yes', 'y'):
            is_quick_mode = True

        trans = utilities.get_translation(language)
        trans.install()
        _ = trans.gettext

        ################################################################################################################
        # Step 2: query the meter and energy category
        ################################################################################################################
        cnx_system = mysql.connector.connect(**config.myems_system_db)
        cursor_system = cnx_system.cursor()

        cnx_historical = mysql.connector.connect(**config.myems_historical_db)
        cursor_historical = cnx_historical.cursor()
        row_meter = None  # defensive default; one of the branches below always runs
        if meter_id is not None:
            cursor_system.execute(" SELECT m.id, m.name, m.cost_center_id, m.energy_category_id, "
                                  " ec.name, ec.unit_of_measure, ec.kgce, ec.kgco2e "
                                  " FROM tbl_meters m, tbl_energy_categories ec "
                                  " WHERE m.id = %s AND m.energy_category_id = ec.id ", (meter_id,))
            row_meter = cursor_system.fetchone()
        elif meter_uuid is not None:
            cursor_system.execute(" SELECT m.id, m.name, m.cost_center_id, m.energy_category_id, "
                                  " ec.name, ec.unit_of_measure, ec.kgce, ec.kgco2e "
                                  " FROM tbl_meters m, tbl_energy_categories ec "
                                  " WHERE m.uuid = %s AND m.energy_category_id = ec.id ", (meter_uuid,))
            row_meter = cursor_system.fetchone()

        if row_meter is None:
            # release both database connections before reporting NOT FOUND
            if cursor_system:
                cursor_system.close()
            if cnx_system:
                cnx_system.close()

            if cursor_historical:
                cursor_historical.close()
            if cnx_historical:
                cnx_historical.close()
            raise falcon.HTTPError(status=falcon.HTTP_404, title='API.NOT_FOUND', description='API.METER_NOT_FOUND')
        meter = dict()
        meter['id'] = row_meter[0]
        meter['name'] = row_meter[1]
        meter['cost_center_id'] = row_meter[2]
        meter['energy_category_id'] = row_meter[3]
        meter['energy_category_name'] = row_meter[4]
        meter['unit_of_measure'] = row_meter[5]
        meter['kgce'] = row_meter[6]
        meter['kgco2e'] = row_meter[7]

        ################################################################################################################
        # Step 3: query associated points
        ################################################################################################################
        point_list = list()
        cursor_system.execute(" SELECT p.id, p.name, p.units, p.object_type "
                              " FROM tbl_meters m, tbl_meters_points mp, tbl_points p "
                              " WHERE m.id = %s AND m.id = mp.meter_id AND mp.point_id = p.id "
                              " ORDER BY p.id ", (meter['id'],))
        rows_points = cursor_system.fetchall()
        if rows_points is not None and len(rows_points) > 0:
            for row in rows_points:
                point_list.append({"id": row[0], "name": row[1], "units": row[2], "object_type": row[3]})

        ################################################################################################################
        # Step 4: query reporting period points trends
        ################################################################################################################
        reporting = dict()
        reporting['names'] = list()
        reporting['timestamps'] = list()
        reporting['values'] = list()
        reporting['units'] = list()

        for point in point_list:
            # quick mode only keeps cumulative energy points
            if is_quick_mode and point['object_type'] != 'ENERGY_VALUE':
                continue

            table_name = Reporting._TREND_TABLES.get(point['object_type'])
            if table_name is not None:
                point_timestamp_list, point_value_list = \
                    Reporting._query_point_trend(cursor_historical,
                                                 table_name,
                                                 point['id'],
                                                 reporting_start_datetime_utc,
                                                 reporting_end_datetime_utc,
                                                 timezone_offset)
            else:
                # unknown object types still contribute an empty series,
                # matching the original fall-through behavior
                point_timestamp_list = list()
                point_value_list = list()

            reporting['names'].append(point['name'] + ' (' + point['units'] + ')')
            reporting['timestamps'].append(point_timestamp_list)
            reporting['values'].append(point_value_list)
            reporting['units'].append(point['units'])

        ################################################################################################################
        # Step 4.1: analyze power quality by unit (A, V, HZ)
        ################################################################################################################
        analysis = list()
        # nominal values come from config when present, else sensible defaults
        nominal_voltage = getattr(config, 'nominal_voltage', 220.0)
        nominal_frequency = getattr(config, 'nominal_frequency', 50.0)

        # Collect indices by unit for grouping
        voltage_indices = [i for i, u in enumerate(reporting['units']) if (u or '').upper() == 'V']
        current_indices = [i for i, u in enumerate(reporting['units']) if (u or '').upper() == 'A']
        freq_indices = [i for i, u in enumerate(reporting['units']) if (u or '').upper() in ('HZ', 'HERTZ')]

        # 1) Voltage deviation (GB/T 12325): compute deviation and compliance within ±7% by default
        voltage_deviation_limit_pct = getattr(config, 'voltage_deviation_limit_pct', 7.0)
        for vidx in voltage_indices:
            name = reporting['names'][vidx]
            timestamps = reporting['timestamps'][vidx]
            values = Reporting._safe_float_list(reporting['values'][vidx])
            if len(values) == 0 or len(values) != len(timestamps):
                continue
            deviations_pct = []
            worst_abs_dev = -1.0
            worst_time = None
            within = 0
            for i, v in enumerate(values):
                dev_pct = (v - nominal_voltage) / nominal_voltage * 100.0
                deviations_pct.append(dev_pct)
                abs_dev = abs(dev_pct)
                if abs_dev <= voltage_deviation_limit_pct:
                    within += 1
                if abs_dev > worst_abs_dev:
                    worst_abs_dev = abs_dev
                    worst_time = timestamps[i]
            compliance = within / len(values) * 100.0
            stats = Reporting._calc_basic_stats(deviations_pct)
            analysis.append({
                'point_name': name,
                'unit': 'V',
                'category': 'voltage',
                'type': 'voltage_deviation',
                'limit_pct': voltage_deviation_limit_pct,
                'compliance_pct': compliance,
                'worst_abs_deviation_pct': worst_abs_dev,
                'worst_time': worst_time,
                'metrics': [
                    {'name': 'mean_deviation_pct', 'value': stats['mean'] if stats else None},
                    {'name': 'p95_abs_deviation_pct', 'value': abs(stats['p95']) if stats else None}
                ]
            })

        # 2) Voltage unbalance (GB/T 15543): if we have 3-phase voltage series, compute unbalance rate
        if len(voltage_indices) >= 3:
            # pick the first three voltage series as the three phases
            triplet = voltage_indices[:3]
            ts0 = reporting['timestamps'][triplet[0]]
            ts1 = reporting['timestamps'][triplet[1]]
            ts2 = reporting['timestamps'][triplet[2]]
            v0 = Reporting._safe_float_list(reporting['values'][triplet[0]])
            v1 = Reporting._safe_float_list(reporting['values'][triplet[1]])
            v2 = Reporting._safe_float_list(reporting['values'][triplet[2]])
            # the three series must be non-empty and perfectly aligned
            if len(v0) and len(v0) == len(v1) == len(v2) and ts0 == ts1 == ts2:
                limit_pct = getattr(config, 'voltage_unbalance_limit_pct', 2.0)
                worst, worst_time, compliance, stats = \
                    Reporting._analyze_three_phase_unbalance(ts0, v0, v1, v2, limit_pct)
                analysis.append({
                    'point_name': 'Three-phase voltage',
                    'unit': 'V',
                    'category': 'voltage',
                    'type': 'voltage_unbalance',
                    'limit_pct': limit_pct,
                    'compliance_pct': compliance,
                    'worst_unbalance_pct': worst,
                    'worst_time': worst_time,
                    'metrics': [
                        {'name': 'mean_unbalance_pct', 'value': stats['mean'] if stats else None},
                        {'name': 'p95_unbalance_pct', 'value': stats['p95'] if stats else None}
                    ]
                })

        # 3) Frequency deviation (GB/T 15945): compliance within ±0.2 Hz; severe > 0.5 Hz
        freq_normal_limit_hz = getattr(config, 'frequency_normal_limit_hz', 0.2)
        freq_severe_limit_hz = getattr(config, 'frequency_severe_limit_hz', 0.5)
        for fidx in freq_indices:
            name = reporting['names'][fidx]
            timestamps = reporting['timestamps'][fidx]
            values = Reporting._safe_float_list(reporting['values'][fidx])
            if len(values) == 0 or len(values) != len(timestamps):
                continue
            abs_devs = []
            within = 0
            severe = 0
            worst = -1.0
            worst_time = None
            for i, hz in enumerate(values):
                dev = abs(hz - nominal_frequency)
                abs_devs.append(dev)
                if dev <= freq_normal_limit_hz:
                    within += 1
                if dev > freq_severe_limit_hz:
                    severe += 1
                if dev > worst:
                    worst = dev
                    worst_time = timestamps[i]
            compliance = within / len(values) * 100.0
            stats = Reporting._calc_basic_stats(abs_devs)
            analysis.append({
                'point_name': name,
                'unit': 'Hz',
                'category': 'frequency',
                'type': 'frequency_deviation',
                'limit_normal_hz': freq_normal_limit_hz,
                'limit_severe_hz': freq_severe_limit_hz,
                'compliance_pct': compliance,
                'severe_exceed_count': severe,
                'worst_deviation_hz': worst,
                'worst_time': worst_time,
                'metrics': [
                    {'name': 'mean_abs_deviation_hz', 'value': stats['mean'] if stats else None},
                    {'name': 'p95_abs_deviation_hz', 'value': stats['p95'] if stats else None}
                ]
            })

        # 4) Current unbalance (reference similar method): compute 3-phase current unbalance if available
        if len(current_indices) >= 3:
            triplet = current_indices[:3]
            ts0 = reporting['timestamps'][triplet[0]]
            ts1 = reporting['timestamps'][triplet[1]]
            ts2 = reporting['timestamps'][triplet[2]]
            i0 = Reporting._safe_float_list(reporting['values'][triplet[0]])
            i1 = Reporting._safe_float_list(reporting['values'][triplet[1]])
            i2 = Reporting._safe_float_list(reporting['values'][triplet[2]])
            if len(i0) and len(i0) == len(i1) == len(i2) and ts0 == ts1 == ts2:
                limit_pct = getattr(config, 'current_unbalance_limit_pct', 10.0)
                worst, worst_time, compliance, stats = \
                    Reporting._analyze_three_phase_unbalance(ts0, i0, i1, i2, limit_pct)
                analysis.append({
                    'point_name': 'Three-phase current',
                    'unit': 'A',
                    'category': 'current',
                    'type': 'current_unbalance',
                    'limit_pct': limit_pct,
                    'compliance_pct': compliance,
                    'worst_unbalance_pct': worst,
                    'worst_time': worst_time,
                    'metrics': [
                        {'name': 'mean_unbalance_pct', 'value': stats['mean'] if stats else None},
                        {'name': 'p95_unbalance_pct', 'value': stats['p95'] if stats else None}
                    ]
                })
        # 5) Per-point summary statistics for every A/V/Hz series
        for idx, name in enumerate(reporting['names']):
            unit = (reporting['units'][idx] or '').upper()
            values = Reporting._safe_float_list(reporting['values'][idx])
            if len(values) == 0:
                continue
            stats = Reporting._calc_basic_stats(values)
            if stats is None:
                continue
            category = None
            if 'A' == unit:
                category = 'current'
            elif 'V' == unit:
                category = 'voltage'
            elif 'HZ' == unit or 'HERTZ' == unit:
                category = 'frequency'
            # only include targeted categories
            if category is None:
                continue
            analysis.append({
                'point_name': name,
                'unit': reporting['units'][idx],
                'category': category,
                'metrics': [
                    {'name': 'count', 'value': stats['count']},
                    {'name': 'min', 'value': stats['min']},
                    {'name': 'max', 'value': stats['max']},
                    {'name': 'mean', 'value': stats['mean']},
                    {'name': 'std', 'value': stats['std']},
                    {'name': 'p5', 'value': stats['p5']},
                    {'name': 'p95', 'value': stats['p95']}
                ]
            })

        # Step 5 removed: drop tariff/parameters in power quality API

        ################################################################################################################
        # Step 6: construct the report
        ################################################################################################################
        if cursor_system:
            cursor_system.close()
        if cnx_system:
            cnx_system.close()

        if cursor_historical:
            cursor_historical.close()
        if cnx_historical:
            cnx_historical.close()

        result = {
            "meter": {
                "cost_center_id": meter['cost_center_id'],
                "energy_category_id": meter['energy_category_id'],
                "energy_category_name": meter['energy_category_name'],
                "unit_of_measure": meter['unit_of_measure'],
                "kgce": meter['kgce'],
                "kgco2e": meter['kgco2e'],
            },
            "reporting_period": {
                "names": reporting['names'],
                "timestamps": reporting['timestamps'],
                "values": reporting['values'],
            },
            "parameters": None,
            "analysis": analysis,
            "excel_bytes_base64": None
        }
        # export result to Excel file and then encode the file to base64 string
        if not is_quick_mode:
            # BUGFIX: was excelexporters.powerqulity (typo) — the module is
            # imported as excelexporters.powerquality, so the old name raised
            # AttributeError on every non-quick-mode request.
            result['excel_bytes_base64'] = excelexporters.powerquality.export(result,
                                                                              meter['name'],
                                                                              reporting_period_start_datetime_local,
                                                                              reporting_period_end_datetime_local,
                                                                              None,
                                                                              language)

        resp.text = json.dumps(result)