| Total Complexity | 56 |
| Total Lines | 1394 |
| Duplicated Lines | 5.02 % |
| Changes | 0 |
Duplicate code is one of the most pungent code smells. A common rule of thumb is to restructure code once it is duplicated in three or more places.
Common duplication problems have well-known corresponding solutions; one such solution for the duplication flagged in this module is sketched below.
Complex classes like data.datasets.sanity_checks often do many different things at once. To break such a class down, we need to identify a cohesive component within it. A common approach to finding such a component is to look for fields and methods that share the same prefixes or suffixes.
Once you have determined which fields belong together, you can apply the Extract Class refactoring. If the component makes sense as a subclass, Extract Subclass is also a candidate, and is often the faster option.
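As an illustration (this sketch is the editor's, not part of the module under review), the input/output capacity comparison that the report flags as duplicated in the listing below could be pulled into a single shared helper, which is an Extract Method-style refactoring. The helper name compare_capacities and its exact signature are hypothetical; the logic mirrors the duplicated blocks.

    # Hypothetical helper (editor's sketch); not part of data.datasets.sanity_checks.
    from egon.data import logger

    def compare_capacities(carrier, input_mw, output_mw):
        """Log the deviation between scenario input and distributed output capacity."""
        if input_mw == 0 and output_mw == 0:
            logger.info(
                f"No capacity for carrier '{carrier}' needed to be distributed. "
                "Everything is fine."
            )
        elif input_mw > 0 and output_mw == 0:
            logger.info(
                f"Error: Capacity for carrier '{carrier}' was not distributed at all!"
            )
        elif output_mw > 0 and input_mw == 0:
            logger.info(
                "Error: Even though no input capacity was provided for carrier "
                f"'{carrier}', a capacity got distributed!"
            )
        else:
            deviation = (output_mw - input_mw) / input_mw * 100
            logger.info(f"{carrier}: {round(deviation, 2)} %")

    # Both the generator loop and the storage loop could then call, for example:
    # compare_capacities(carrier, sum_input.input_capacity_mw.sum(),
    #                    sum_output.output_capacity_mw.sum())

With a helper like this, the near-identical if/elif chains in the generator and storage sections collapse to a single call, which would address both the flagged duplication and part of the complexity score.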
| 1 | """ |
||
| 2 | This module performs sanity checks for both the eGon2035 and the eGon100RE |
||
| 3 | scenarios separately; a percentage error is reported to show the deviation |
||
| 4 | between output and input values. Please note that some input technologies are |
||
| 5 | missing from the supply tables. |
||
| 6 | Authors: @ALonso, @dana, @nailend, @nesnoj, @khelfen |
||
| 7 | """ |
||
| 8 | from math import isclose |
||
| 9 | from pathlib import Path |
||
| 10 | |||
| 11 | from sqlalchemy import Numeric |
||
| 12 | from sqlalchemy.sql import and_, cast, func, or_ |
||
| 13 | import matplotlib.pyplot as plt |
||
| 14 | import numpy as np |
||
| 15 | import pandas as pd |
||
| 16 | import seaborn as sns |
||
| 17 | |||
| 18 | from egon.data import config, db, logger |
||
| 19 | from egon.data.datasets import Dataset |
||
| 20 | from egon.data.datasets.electricity_demand_timeseries.cts_buildings import ( |
||
| 21 | EgonCtsElectricityDemandBuildingShare, |
||
| 22 | EgonCtsHeatDemandBuildingShare, |
||
| 23 | ) |
||
| 24 | from egon.data.datasets.emobility.motorized_individual_travel.db_classes import ( |
||
| 25 | EgonEvCountMunicipality, |
||
| 26 | EgonEvCountMvGridDistrict, |
||
| 27 | EgonEvCountRegistrationDistrict, |
||
| 28 | EgonEvMvGridDistrict, |
||
| 29 | EgonEvPool, |
||
| 30 | EgonEvTrip, |
||
| 31 | ) |
||
| 32 | from egon.data.datasets.emobility.motorized_individual_travel.helpers import ( |
||
| 33 | DATASET_CFG, |
||
| 34 | read_simbev_metadata_file, |
||
| 35 | ) |
||
| 36 | from egon.data.datasets.etrago_setup import ( |
||
| 37 | EgonPfHvLink, |
||
| 38 | EgonPfHvLinkTimeseries, |
||
| 39 | EgonPfHvLoad, |
||
| 40 | EgonPfHvLoadTimeseries, |
||
| 41 | EgonPfHvStore, |
||
| 42 | EgonPfHvStoreTimeseries, |
||
| 43 | ) |
||
| 44 | from egon.data.datasets.power_plants.pv_rooftop_buildings import ( |
||
| 45 | PV_CAP_PER_SQ_M, |
||
| 46 | ROOF_FACTOR, |
||
| 47 | SCENARIOS, |
||
| 48 | load_building_data, |
||
| 49 | scenario_data, |
||
| 50 | ) |
||
| 51 | from egon.data.datasets.scenario_parameters import get_sector_parameters |
||
| 52 | from egon.data.datasets.storages.home_batteries import get_cbat_pbat_ratio |
||
| 53 | import egon.data |
||
| 54 | |||
| 55 | TESTMODE_OFF = ( |
||
| 56 | config.settings()["egon-data"]["--dataset-boundary"] == "Everything" |
||
| 57 | ) |
||
| 58 | |||
| 59 | |||
| 60 | class SanityChecks(Dataset): |
||
| 61 | def __init__(self, dependencies): |
||
| 62 | super().__init__( |
||
| 63 | name="SanityChecks", |
||
| 64 | version="0.0.6", |
||
| 65 | dependencies=dependencies, |
||
| 66 | tasks={ |
||
| 67 | etrago_eGon2035_electricity, |
||
| 68 | etrago_eGon2035_heat, |
||
| 69 | residential_electricity_annual_sum, |
||
| 70 | residential_electricity_hh_refinement, |
||
| 71 | cts_electricity_demand_share, |
||
| 72 | cts_heat_demand_share, |
||
| 73 | sanitycheck_emobility_mit, |
||
| 74 | sanitycheck_pv_rooftop_buildings, |
||
| 75 | sanitycheck_home_batteries, |
||
| 76 | }, |
||
| 77 | ) |
||
| 78 | |||
| 79 | |||
| 80 | def etrago_eGon2035_electricity(): |
||
| 81 | """Execute basic sanity checks. |
||
| 82 | |||
| 83 | Prints the results of basic sanity checks for the electricity sector in |
||
| 84 | the eGon2035 scenario. |
||
| 85 | |||
| 86 | Parameters |
||
| 87 | ---------- |
||
| 88 | None |
||
| 89 | |||
| 90 | Returns |
||
| 91 | ------- |
||
| 92 | None |
||
| 93 | """ |
||
| 94 | |||
| 95 | scn = "eGon2035" |
||
| 96 | |||
| 97 | # Section to check generator capacities |
||
| 98 | logger.info(f"Sanity checks for scenario {scn}") |
||
| 99 | logger.info( |
||
| 100 | "For German electricity generators the following deviations between " |
||
| 101 | "the inputs and outputs can be observed:" |
||
| 102 | ) |
||
| 103 | |||
| 104 | carriers_electricity = [ |
||
| 105 | "others", |
||
| 106 | "reservoir", |
||
| 107 | "run_of_river", |
||
| 108 | "oil", |
||
| 109 | "wind_onshore", |
||
| 110 | "wind_offshore", |
||
| 111 | "solar", |
||
| 112 | "solar_rooftop", |
||
| 113 | "biomass", |
||
| 114 | ] |
||
| 115 | |||
| 116 | for carrier in carriers_electricity: |
||
| 117 | |||
| 118 | if carrier == "biomass": |
||
| 119 | sum_output = db.select_dataframe( |
||
| 120 | """SELECT scn_name, SUM(p_nom::numeric) as output_capacity_mw |
||
| 121 | FROM grid.egon_etrago_generator |
||
| 122 | WHERE bus IN ( |
||
| 123 | SELECT bus_id FROM grid.egon_etrago_bus |
||
| 124 | WHERE scn_name = 'eGon2035' |
||
| 125 | AND country = 'DE') |
||
| 126 | AND carrier IN ('biomass', 'industrial_biomass_CHP', |
||
| 127 | 'central_biomass_CHP') |
||
| 128 | GROUP BY (scn_name); |
||
| 129 | """, |
||
| 130 | warning=False, |
||
| 131 | ) |
||
| 132 | |||
| 133 | else: |
||
| 134 | sum_output = db.select_dataframe( |
||
| 135 | f"""SELECT scn_name, |
||
| 136 | SUM(p_nom::numeric) as output_capacity_mw |
||
| 137 | FROM grid.egon_etrago_generator |
||
| 138 | WHERE scn_name = '{scn}' |
||
| 139 | AND carrier IN ('{carrier}') |
||
| 140 | AND bus IN |
||
| 141 | (SELECT bus_id |
||
| 142 | FROM grid.egon_etrago_bus |
||
| 143 | WHERE scn_name = 'eGon2035' |
||
| 144 | AND country = 'DE') |
||
| 145 | GROUP BY (scn_name); |
||
| 146 | """, |
||
| 147 | warning=False, |
||
| 148 | ) |
||
| 149 | |||
| 150 | sum_input = db.select_dataframe( |
||
| 151 | f"""SELECT carrier, SUM(capacity::numeric) as input_capacity_mw |
||
| 152 | FROM supply.egon_scenario_capacities |
||
| 153 | WHERE carrier= '{carrier}' |
||
| 154 | AND scenario_name ='{scn}' |
||
| 155 | GROUP BY (carrier); |
||
| 156 | """, |
||
| 157 | warning=False, |
||
| 158 | ) |
||
| 159 | |||
| 160 | if ( |
||
| 161 | sum_output.output_capacity_mw.sum() == 0 |
||
| 162 | and sum_input.input_capacity_mw.sum() == 0 |
||
| 163 | ): |
||
| 164 | logger.info( |
||
| 165 | f"No capacity for carrier '{carrier}' needed to be" |
||
| 166 | f" distributed. Everything is fine" |
||
| 167 | ) |
||
| 168 | |||
| 169 | elif ( |
||
| 170 | sum_input.input_capacity_mw.sum() > 0 |
||
| 171 | and sum_output.output_capacity_mw.sum() == 0 |
||
| 172 | ): |
||
| 173 | logger.info( |
||
| 174 | f"Error: Capacity for carrier '{carrier}' was not distributed " |
||
| 175 | f"at all!" |
||
| 176 | ) |
||
| 177 | |||
| 178 | elif ( |
||
| 179 | sum_output.output_capacity_mw.sum() > 0 |
||
| 180 | and sum_input.input_capacity_mw.sum() == 0 |
||
| 181 | ): |
||
| 182 | logger.info( |
||
| 183 | f"Error: Eventhough no input capacity was provided for carrier" |
||
| 184 | f"'{carrier}' a capacity got distributed!" |
||
| 185 | ) |
||
| 186 | |||
| 187 | else: |
||
| 188 | sum_input["error"] = ( |
||
| 189 | (sum_output.output_capacity_mw - sum_input.input_capacity_mw) |
||
| 190 | / sum_input.input_capacity_mw |
||
| 191 | ) * 100 |
||
| 192 | g = sum_input["error"].values[0] |
||
| 193 | |||
| 194 | logger.info(f"{carrier}: " + str(round(g, 2)) + " %") |
||
| 195 | |||
| 196 | # Section to check storage units |
||
| 197 | |||
| 198 | logger.info(f"Sanity checks for scenario {scn}") |
||
| 199 | logger.info( |
||
| 200 | "For German electrical storage units the following deviations between" |
||
| 201 | "the inputs and outputs can be observed:" |
||
| 202 | ) |
||
| 203 | |||
| 204 | carriers_electricity = ["pumped_hydro"] |
||
| 205 | |||
| 206 | for carrier in carriers_electricity: |
||
| 207 | |||
| 208 | sum_output = db.select_dataframe( |
||
| 209 | f"""SELECT scn_name, SUM(p_nom::numeric) as output_capacity_mw |
||
| 210 | FROM grid.egon_etrago_storage |
||
| 211 | WHERE scn_name = '{scn}' |
||
| 212 | AND carrier IN ('{carrier}') |
||
| 213 | AND bus IN |
||
| 214 | (SELECT bus_id |
||
| 215 | FROM grid.egon_etrago_bus |
||
| 216 | WHERE scn_name = 'eGon2035' |
||
| 217 | AND country = 'DE') |
||
| 218 | GROUP BY (scn_name); |
||
| 219 | """, |
||
| 220 | warning=False, |
||
| 221 | ) |
||
| 222 | |||
| 223 | sum_input = db.select_dataframe( |
||
| 224 | f"""SELECT carrier, SUM(capacity::numeric) as input_capacity_mw |
||
| 225 | FROM supply.egon_scenario_capacities |
||
| 226 | WHERE carrier= '{carrier}' |
||
| 227 | AND scenario_name ='{scn}' |
||
| 228 | GROUP BY (carrier); |
||
| 229 | """, |
||
| 230 | warning=False, |
||
| 231 | ) |
||
| 232 | |||
| 233 | if ( |
||
| 234 | sum_output.output_capacity_mw.sum() == 0 |
||
| 235 | and sum_input.input_capacity_mw.sum() == 0 |
||
| 236 | ): |
||
| 237 | print( |
||
| 238 | f"No capacity for carrier '{carrier}' needed to be " |
||
| 239 | f"distributed. Everything is fine" |
||
| 240 | ) |
||
| 241 | |||
| 242 | elif ( |
||
| 243 | sum_input.input_capacity_mw.sum() > 0 |
||
| 244 | and sum_output.output_capacity_mw.sum() == 0 |
||
| 245 | ): |
||
| 246 | print( |
||
| 247 | f"Error: Capacity for carrier '{carrier}' was not distributed" |
||
| 248 | f" at all!" |
||
| 249 | ) |
||
| 250 | |||
| 251 | elif ( |
||
| 252 | sum_output.output_capacity_mw.sum() > 0 |
||
| 253 | and sum_input.input_capacity_mw.sum() == 0 |
||
| 254 | ): |
||
| 255 | print( |
||
| 256 | f"Error: Eventhough no input capacity was provided for carrier" |
||
| 257 | f" '{carrier}' a capacity got distributed!" |
||
| 258 | ) |
||
| 259 | |||
| 260 | else: |
||
| 261 | sum_input["error"] = ( |
||
| 262 | (sum_output.output_capacity_mw - sum_input.input_capacity_mw) |
||
| 263 | / sum_input.input_capacity_mw |
||
| 264 | ) * 100 |
||
| 265 | g = sum_input["error"].values[0] |
||
| 266 | |||
| 267 | print(f"{carrier}: " + str(round(g, 2)) + " %") |
||
| 268 | |||
| 269 | # Section to check loads |
||
| 270 | |||
| 271 | print( |
||
| 272 | "For German electricity loads the following deviations between the" |
||
| 273 | " input and output can be observed:" |
||
| 274 | ) |
||
| 275 | |||
| 276 | output_demand = db.select_dataframe( |
||
| 277 | """SELECT a.scn_name, a.carrier, SUM((SELECT SUM(p) |
||
| 278 | FROM UNNEST(b.p_set) p))/1000000::numeric as load_twh |
||
| 279 | FROM grid.egon_etrago_load a |
||
| 280 | JOIN grid.egon_etrago_load_timeseries b |
||
| 281 | ON (a.load_id = b.load_id) |
||
| 282 | JOIN grid.egon_etrago_bus c |
||
| 283 | ON (a.bus=c.bus_id) |
||
| 284 | AND b.scn_name = 'eGon2035' |
||
| 285 | AND a.scn_name = 'eGon2035' |
||
| 286 | AND a.carrier = 'AC' |
||
| 287 | AND c.scn_name= 'eGon2035' |
||
| 288 | AND c.country='DE' |
||
| 289 | GROUP BY (a.scn_name, a.carrier); |
||
| 290 | |||
| 291 | """, |
||
| 292 | warning=False, |
||
| 293 | )["load_twh"].values[0] |
||
| 294 | |||
| 295 | input_cts_ind = db.select_dataframe( |
||
| 296 | """SELECT scenario, |
||
| 297 | SUM(demand::numeric/1000000) as demand_mw_regio_cts_ind |
||
| 298 | FROM demand.egon_demandregio_cts_ind |
||
| 299 | WHERE scenario= 'eGon2035' |
||
| 300 | AND year IN ('2035') |
||
| 301 | GROUP BY (scenario); |
||
| 302 | |||
| 303 | """, |
||
| 304 | warning=False, |
||
| 305 | )["demand_mw_regio_cts_ind"].values[0] |
||
| 306 | |||
| 307 | input_hh = db.select_dataframe( |
||
| 308 | """SELECT scenario, SUM(demand::numeric/1000000) as demand_mw_regio_hh |
||
| 309 | FROM demand.egon_demandregio_hh |
||
| 310 | WHERE scenario= 'eGon2035' |
||
| 311 | AND year IN ('2035') |
||
| 312 | GROUP BY (scenario); |
||
| 313 | """, |
||
| 314 | warning=False, |
||
| 315 | )["demand_mw_regio_hh"].values[0] |
||
| 316 | |||
| 317 | input_demand = input_hh + input_cts_ind |
||
| 318 | |||
| 319 | e = round((output_demand - input_demand) / input_demand, 2) * 100 |
||
| 320 | |||
| 321 | print(f"electricity demand: {e} %") |
||
| 322 | |||
| 323 | |||
| 324 | def etrago_eGon2035_heat(): |
||
| 325 | """Execute basic sanity checks. |
||
| 326 | |||
| 327 | Prints the results of basic sanity checks for the heat sector in |
||
| 328 | the eGon2035 scenario. |
||
| 329 | |||
| 330 | Parameters |
||
| 331 | ---------- |
||
| 332 | None |
||
| 333 | |||
| 334 | Returns |
||
| 335 | ------- |
||
| 336 | None |
||
| 337 | """ |
||
| 338 | |||
| 339 | # Check input and output values for heat demand and heat supply |
||
| 340 | # technologies in the eGon2035 scenario |
||
| 341 | |||
| 342 | scn = "eGon2035" |
||
| 343 | |||
| 344 | # Section to check heat demand and supply |
||
| 345 | print(f"Sanity checks for scenario {scn}") |
||
| 346 | print( |
||
| 347 | "For German heat demands the following deviations between the inputs" |
||
| 348 | " and outputs can be observed:" |
||
| 349 | ) |
||
| 350 | |||
| 351 | # Sanity checks for heat demand |
||
| 352 | |||
| 353 | output_heat_demand = db.select_dataframe( |
||
| 354 | """SELECT a.scn_name, |
||
| 355 | (SUM( |
||
| 356 | (SELECT SUM(p) FROM UNNEST(b.p_set) p))/1000000)::numeric as load_twh |
||
| 357 | FROM grid.egon_etrago_load a |
||
| 358 | JOIN grid.egon_etrago_load_timeseries b |
||
| 359 | ON (a.load_id = b.load_id) |
||
| 360 | JOIN grid.egon_etrago_bus c |
||
| 361 | ON (a.bus=c.bus_id) |
||
| 362 | AND b.scn_name = 'eGon2035' |
||
| 363 | AND a.scn_name = 'eGon2035' |
||
| 364 | AND c.scn_name= 'eGon2035' |
||
| 365 | AND c.country='DE' |
||
| 366 | AND a.carrier IN ('rural_heat', 'central_heat') |
||
| 367 | GROUP BY (a.scn_name); |
||
| 368 | """, |
||
| 369 | warning=False, |
||
| 370 | )["load_twh"].values[0] |
||
| 371 | |||
| 372 | input_heat_demand = db.select_dataframe( |
||
| 373 | """SELECT scenario, SUM(demand::numeric/1000000) as demand_mw_peta_heat |
||
| 374 | FROM demand.egon_peta_heat |
||
| 375 | WHERE scenario= 'eGon2035' |
||
| 376 | GROUP BY (scenario); |
||
| 377 | """, |
||
| 378 | warning=False, |
||
| 379 | )["demand_mw_peta_heat"].values[0] |
||
| 380 | |||
| 381 | e_demand = ( |
||
| 382 | round((output_heat_demand - input_heat_demand) / input_heat_demand, 2) |
||
| 383 | * 100 |
||
| 384 | ) |
||
| 385 | |||
| 386 | logger.info(f"heat demand: {e_demand} %") |
||
| 387 | |||
| 388 | # Sanity checks for heat supply |
||
| 389 | |||
| 390 | logger.info( |
||
| 391 | "For German heat supplies the following deviations between the inputs " |
||
| 392 | "and outputs can be observed:" |
||
| 393 | ) |
||
| 394 | |||
| 395 | # Comparison for central heat pumps |
||
| 396 | heat_pump_input = db.select_dataframe( |
||
| 397 | """SELECT carrier, SUM(capacity::numeric) as Urban_central_heat_pump_mw |
||
| 398 | FROM supply.egon_scenario_capacities |
||
| 399 | WHERE carrier= 'urban_central_heat_pump' |
||
| 400 | AND scenario_name IN ('eGon2035') |
||
| 401 | GROUP BY (carrier); |
||
| 402 | """, |
||
| 403 | warning=False, |
||
| 404 | )["urban_central_heat_pump_mw"].values[0] |
||
| 405 | |||
| 406 | heat_pump_output = db.select_dataframe( |
||
| 407 | """SELECT carrier, SUM(p_nom::numeric) as Central_heat_pump_mw |
||
| 408 | FROM grid.egon_etrago_link |
||
| 409 | WHERE carrier= 'central_heat_pump' |
||
| 410 | AND scn_name IN ('eGon2035') |
||
| 411 | GROUP BY (carrier); |
||
| 412 | """, |
||
| 413 | warning=False, |
||
| 414 | )["central_heat_pump_mw"].values[0] |
||
| 415 | |||
| 416 | e_heat_pump = ( |
||
| 417 | round((heat_pump_output - heat_pump_input) / heat_pump_input, 2) * 100 |
||
| 418 | ) |
||
| 419 | |||
| 420 | logger.info(f"'central_heat_pump': {e_heat_pump} % ") |
||
| 421 | |||
| 422 | # Comparison for residential heat pumps |
||
| 423 | |||
| 424 | input_residential_heat_pump = db.select_dataframe( |
||
| 425 | """SELECT carrier, SUM(capacity::numeric) as residential_heat_pump_mw |
||
| 426 | FROM supply.egon_scenario_capacities |
||
| 427 | WHERE carrier= 'residential_rural_heat_pump' |
||
| 428 | AND scenario_name IN ('eGon2035') |
||
| 429 | GROUP BY (carrier); |
||
| 430 | """, |
||
| 431 | warning=False, |
||
| 432 | )["residential_heat_pump_mw"].values[0] |
||
| 433 | |||
| 434 | output_residential_heat_pump = db.select_dataframe( |
||
| 435 | """SELECT carrier, SUM(p_nom::numeric) as rural_heat_pump_mw |
||
| 436 | FROM grid.egon_etrago_link |
||
| 437 | WHERE carrier= 'rural_heat_pump' |
||
| 438 | AND scn_name IN ('eGon2035') |
||
| 439 | GROUP BY (carrier); |
||
| 440 | """, |
||
| 441 | warning=False, |
||
| 442 | )["rural_heat_pump_mw"].values[0] |
||
| 443 | |||
| 444 | e_residential_heat_pump = ( |
||
| 445 | round( |
||
| 446 | (output_residential_heat_pump - input_residential_heat_pump) |
||
| 447 | / input_residential_heat_pump, |
||
| 448 | 2, |
||
| 449 | ) |
||
| 450 | * 100 |
||
| 451 | ) |
||
| 452 | logger.info(f"'residential heat pumps': {e_residential_heat_pump} %") |
||
| 453 | |||
| 454 | # Comparison for resistive heater |
||
| 455 | resistive_heater_input = db.select_dataframe( |
||
| 456 | """SELECT carrier, |
||
| 457 | SUM(capacity::numeric) as Urban_central_resistive_heater_MW |
||
| 458 | FROM supply.egon_scenario_capacities |
||
| 459 | WHERE carrier= 'urban_central_resistive_heater' |
||
| 460 | AND scenario_name IN ('eGon2035') |
||
| 461 | GROUP BY (carrier); |
||
| 462 | """, |
||
| 463 | warning=False, |
||
| 464 | )["urban_central_resistive_heater_mw"].values[0] |
||
| 465 | |||
| 466 | resistive_heater_output = db.select_dataframe( |
||
| 467 | """SELECT carrier, SUM(p_nom::numeric) as central_resistive_heater_MW |
||
| 468 | FROM grid.egon_etrago_link |
||
| 469 | WHERE carrier= 'central_resistive_heater' |
||
| 470 | AND scn_name IN ('eGon2035') |
||
| 471 | GROUP BY (carrier); |
||
| 472 | """, |
||
| 473 | warning=False, |
||
| 474 | )["central_resistive_heater_mw"].values[0] |
||
| 475 | |||
| 476 | e_resistive_heater = ( |
||
| 477 | round( |
||
| 478 | (resistive_heater_output - resistive_heater_input) |
||
| 479 | / resistive_heater_input, |
||
| 480 | 2, |
||
| 481 | ) |
||
| 482 | * 100 |
||
| 483 | ) |
||
| 484 | |||
| 485 | logger.info(f"'resistive heater': {e_resistive_heater} %") |
||
| 486 | |||
| 487 | # Comparison for solar thermal collectors |
||
| 488 | |||
| 489 | input_solar_thermal = db.select_dataframe( |
||
| 490 | """SELECT carrier, SUM(capacity::numeric) as solar_thermal_collector_mw |
||
| 491 | FROM supply.egon_scenario_capacities |
||
| 492 | WHERE carrier= 'urban_central_solar_thermal_collector' |
||
| 493 | AND scenario_name IN ('eGon2035') |
||
| 494 | GROUP BY (carrier); |
||
| 495 | """, |
||
| 496 | warning=False, |
||
| 497 | )["solar_thermal_collector_mw"].values[0] |
||
| 498 | |||
| 499 | output_solar_thermal = db.select_dataframe( |
||
| 500 | """SELECT carrier, SUM(p_nom::numeric) as solar_thermal_collector_mw |
||
| 501 | FROM grid.egon_etrago_generator |
||
| 502 | WHERE carrier= 'solar_thermal_collector' |
||
| 503 | AND scn_name IN ('eGon2035') |
||
| 504 | GROUP BY (carrier); |
||
| 505 | """, |
||
| 506 | warning=False, |
||
| 507 | )["solar_thermal_collector_mw"].values[0] |
||
| 508 | |||
| 509 | e_solar_thermal = ( |
||
| 510 | round( |
||
| 511 | (output_solar_thermal - input_solar_thermal) / input_solar_thermal, |
||
| 512 | 2, |
||
| 513 | ) |
||
| 514 | * 100 |
||
| 515 | ) |
||
| 516 | logger.info(f"'solar thermal collector': {e_solar_thermal} %") |
||
| 517 | |||
| 518 | # Comparison for geothermal |
||
| 519 | |||
| 520 | input_geo_thermal = db.select_dataframe( |
||
| 521 | """SELECT carrier, |
||
| 522 | SUM(capacity::numeric) as Urban_central_geo_thermal_MW |
||
| 523 | FROM supply.egon_scenario_capacities |
||
| 524 | WHERE carrier= 'urban_central_geo_thermal' |
||
| 525 | AND scenario_name IN ('eGon2035') |
||
| 526 | GROUP BY (carrier); |
||
| 527 | """, |
||
| 528 | warning=False, |
||
| 529 | )["urban_central_geo_thermal_mw"].values[0] |
||
| 530 | |||
| 531 | output_geo_thermal = db.select_dataframe( |
||
| 532 | """SELECT carrier, SUM(p_nom::numeric) as geo_thermal_MW |
||
| 533 | FROM grid.egon_etrago_generator |
||
| 534 | WHERE carrier= 'geo_thermal' |
||
| 535 | AND scn_name IN ('eGon2035') |
||
| 536 | GROUP BY (carrier); |
||
| 537 | """, |
||
| 538 | warning=False, |
||
| 539 | )["geo_thermal_mw"].values[0] |
||
| 540 | |||
| 541 | e_geo_thermal = ( |
||
| 542 | round((output_geo_thermal - input_geo_thermal) / input_geo_thermal, 2) |
||
| 543 | * 100 |
||
| 544 | ) |
||
| 545 | logger.info(f"'geothermal': {e_geo_thermal} %") |
||
| 546 | |||
| 547 | |||
| 548 | def residential_electricity_annual_sum(rtol=1e-5): |
||
| 549 | """Sanity check for dataset electricity_demand_timeseries : |
||
| 550 | Demand_Building_Assignment |
||
| 551 | |||
| 552 | Aggregate the annual demand of all census cells at NUTS3 to compare |
||
| 553 | with initial scaling parameters from DemandRegio. |
||
| 554 | """ |
||
| 555 | |||
| 556 | df_nuts3_annual_sum = db.select_dataframe( |
||
| 557 | sql=""" |
||
| 558 | SELECT dr.nuts3, dr.scenario, dr.demand_regio_sum, profiles.profile_sum |
||
| 559 | FROM ( |
||
| 560 | SELECT scenario, SUM(demand) AS profile_sum, vg250_nuts3 |
||
| 561 | FROM demand.egon_demandregio_zensus_electricity AS egon, |
||
| 562 | boundaries.egon_map_zensus_vg250 AS boundaries |
||
| 563 | Where egon.zensus_population_id = boundaries.zensus_population_id |
||
| 564 | AND sector = 'residential' |
||
| 565 | GROUP BY vg250_nuts3, scenario |
||
| 566 | ) AS profiles |
||
| 567 | JOIN ( |
||
| 568 | SELECT nuts3, scenario, sum(demand) AS demand_regio_sum |
||
| 569 | FROM demand.egon_demandregio_hh |
||
| 570 | GROUP BY year, scenario, nuts3 |
||
| 571 | ) AS dr |
||
| 572 | ON profiles.vg250_nuts3 = dr.nuts3 and profiles.scenario = dr.scenario |
||
| 573 | """ |
||
| 574 | ) |
||
| 575 | |||
| 576 | np.testing.assert_allclose( |
||
| 577 | actual=df_nuts3_annual_sum["profile_sum"], |
||
| 578 | desired=df_nuts3_annual_sum["demand_regio_sum"], |
||
| 579 | rtol=rtol, |
||
| 580 | verbose=False, |
||
| 581 | ) |
||
| 582 | |||
| 583 | logger.info( |
||
| 584 | "Aggregated annual residential electricity demand" |
||
| 585 | " matches with DemandRegio at NUTS-3." |
||
| 586 | ) |
||
| 587 | |||
| 588 | |||
| 589 | def residential_electricity_hh_refinement(rtol=1e-5): |
||
| 590 | """Sanity check for dataset electricity_demand_timeseries : |
||
| 591 | Household Demands |
||
| 592 | |||
| 593 | Check the sum of aggregated household types after the refinement method |
||
| 594 | was applied and compare it to the original census values.""" |
||
| 595 | |||
| 596 | df_refinement = db.select_dataframe( |
||
| 597 | sql=""" |
||
| 598 | SELECT refined.nuts3, refined.characteristics_code, |
||
| 599 | refined.sum_refined::int, census.sum_census::int |
||
| 600 | FROM( |
||
| 601 | SELECT nuts3, characteristics_code, SUM(hh_10types) as sum_refined |
||
| 602 | FROM society.egon_destatis_zensus_household_per_ha_refined |
||
| 603 | GROUP BY nuts3, characteristics_code) |
||
| 604 | AS refined |
||
| 605 | JOIN( |
||
| 606 | SELECT t.nuts3, t.characteristics_code, sum(orig) as sum_census |
||
| 607 | FROM( |
||
| 608 | SELECT nuts3, cell_id, characteristics_code, |
||
| 609 | sum(DISTINCT(hh_5types))as orig |
||
| 610 | FROM society.egon_destatis_zensus_household_per_ha_refined |
||
| 611 | GROUP BY cell_id, characteristics_code, nuts3) AS t |
||
| 612 | GROUP BY t.nuts3, t.characteristics_code ) AS census |
||
| 613 | ON refined.nuts3 = census.nuts3 |
||
| 614 | AND refined.characteristics_code = census.characteristics_code |
||
| 615 | """ |
||
| 616 | ) |
||
| 617 | |||
| 618 | np.testing.assert_allclose( |
||
| 619 | actual=df_refinement["sum_refined"], |
||
| 620 | desired=df_refinement["sum_census"], |
||
| 621 | rtol=rtol, |
||
| 622 | verbose=False, |
||
| 623 | ) |
||
| 624 | |||
| 625 | logger.info("All Aggregated household types match at NUTS-3.") |
||
| 626 | |||
| 627 | |||
| 628 | def cts_electricity_demand_share(rtol=1e-5): |
||
| 629 | """Sanity check for dataset electricity_demand_timeseries : |
||
| 630 | CtsBuildings |
||
| 631 | |||
| 632 | Check that the aggregated CTS electricity demand shares equal one for |
||
| 633 | every substation, as the substation profile is linearly disaggregated |
||
| 634 | to all buildings.""" |
||
| 635 | |||
| 636 | with db.session_scope() as session: |
||
| 637 | cells_query = session.query(EgonCtsElectricityDemandBuildingShare) |
||
| 638 | |||
| 639 | df_demand_share = pd.read_sql( |
||
| 640 | cells_query.statement, cells_query.session.bind, index_col=None |
||
| 641 | ) |
||
| 642 | |||
| 643 | np.testing.assert_allclose( |
||
| 644 | actual=df_demand_share.groupby(["bus_id", "scenario"])[ |
||
| 645 | "profile_share" |
||
| 646 | ].sum(), |
||
| 647 | desired=1, |
||
| 648 | rtol=rtol, |
||
| 649 | verbose=False, |
||
| 650 | ) |
||
| 651 | |||
| 652 | logger.info("The aggregated demand shares equal to one!.") |
||
| 653 | |||
| 654 | |||
| 655 | def cts_heat_demand_share(rtol=1e-5): |
||
| 656 | """Sanity check for dataset electricity_demand_timeseries |
||
| 657 | : CtsBuildings |
||
| 658 | |||
| 659 | Check that the aggregated CTS heat demand shares equal one for every |
||
| 660 | substation, as the substation profile is linearly disaggregated |
||
| 661 | to all buildings.""" |
||
| 662 | |||
| 663 | with db.session_scope() as session: |
||
| 664 | cells_query = session.query(EgonCtsHeatDemandBuildingShare) |
||
| 665 | |||
| 666 | df_demand_share = pd.read_sql( |
||
| 667 | cells_query.statement, cells_query.session.bind, index_col=None |
||
| 668 | ) |
||
| 669 | |||
| 670 | np.testing.assert_allclose( |
||
| 671 | actual=df_demand_share.groupby(["bus_id", "scenario"])[ |
||
| 672 | "profile_share" |
||
| 673 | ].sum(), |
||
| 674 | desired=1, |
||
| 675 | rtol=rtol, |
||
| 676 | verbose=False, |
||
| 677 | ) |
||
| 678 | |||
| 679 | logger.info("The aggregated demand shares equal to one!.") |
||
| 680 | |||
| 681 | |||
| 682 | def sanitycheck_pv_rooftop_buildings(): |
||
| 683 | def egon_power_plants_pv_roof_building(): |
||
| 684 | sql = """ |
||
| 685 | SELECT * |
||
| 686 | FROM supply.egon_power_plants_pv_roof_building |
||
| 687 | """ |
||
| 688 | |||
| 689 | return db.select_dataframe(sql, index_col="index") |
||
| 690 | |||
| 691 | pv_roof_df = egon_power_plants_pv_roof_building() |
||
| 692 | |||
| 693 | valid_buildings_gdf = load_building_data() |
||
| 694 | |||
| 695 | valid_buildings_gdf = valid_buildings_gdf.assign( |
||
| 696 | bus_id=valid_buildings_gdf.bus_id.astype(int), |
||
| 697 | overlay_id=valid_buildings_gdf.overlay_id.astype(int), |
||
| 698 | max_cap=valid_buildings_gdf.building_area.multiply( |
||
| 699 | ROOF_FACTOR * PV_CAP_PER_SQ_M |
||
| 700 | ), |
||
| 701 | ) |
||
| 702 | |||
| 703 | merge_df = pv_roof_df.merge( |
||
| 704 | valid_buildings_gdf[["building_area"]], |
||
| 705 | how="left", |
||
| 706 | left_on="building_id", |
||
| 707 | right_index=True, |
||
| 708 | ) |
||
| 709 | |||
| 710 | assert ( |
||
| 711 | len(merge_df.loc[merge_df.building_area.isna()]) == 0 |
||
| 712 | ), f"{len(merge_df.loc[merge_df.building_area.isna()])} != 0" |
||
| 713 | |||
| 714 | scenarios = ["status_quo", "eGon2035"] |
||
| 715 | |||
| 716 | base_path = Path(egon.data.__path__[0]).resolve() |
||
| 717 | |||
| 718 | res_dir = base_path / "sanity_checks" |
||
| 719 | |||
| 720 | res_dir.mkdir(parents=True, exist_ok=True) |
||
| 721 | |||
| 722 | for scenario in scenarios: |
||
| 723 | fig, (ax1, ax2) = plt.subplots(1, 2, figsize=(15, 8)) |
||
| 724 | |||
| 725 | scenario_df = merge_df.loc[merge_df.scenario == scenario] |
||
| 726 | |||
| 727 | logger.info( |
||
| 728 | scenario + " Capacity:\n" + str(scenario_df.capacity.describe()) |
||
| 729 | ) |
||
| 730 | |||
| 731 | small_gens_df = scenario_df.loc[scenario_df.capacity < 100] |
||
| 732 | |||
| 733 | sns.histplot(data=small_gens_df, x="capacity", ax=ax1).set_title( |
||
| 734 | scenario |
||
| 735 | ) |
||
| 736 | |||
| 737 | sns.scatterplot( |
||
| 738 | data=small_gens_df, x="capacity", y="building_area", ax=ax2 |
||
| 739 | ).set_title(scenario) |
||
| 740 | |||
| 741 | plt.tight_layout() |
||
| 742 | |||
| 743 | plt.savefig( |
||
| 744 | res_dir / f"{scenario}_pv_rooftop_distribution.png", |
||
| 745 | bbox_inches="tight", |
||
| 746 | ) |
||
| 747 | |||
| 748 | for scenario in SCENARIOS: |
||
| 749 | if scenario == "eGon2035": |
||
| 750 | assert isclose( |
||
| 751 | scenario_data(scenario=scenario).capacity.sum(), |
||
| 752 | merge_df.loc[merge_df.scenario == scenario].capacity.sum(), |
||
| 753 | rel_tol=1e-02, |
||
| 754 | ), ( |
||
| 755 | f"{scenario_data(scenario=scenario).capacity.sum()} != " |
||
| 756 | f"{merge_df.loc[merge_df.scenario == scenario].capacity.sum()}" |
||
| 757 | ) |
||
| 758 | elif scenario == "eGon100RE": |
||
| 759 | sources = config.datasets()["solar_rooftop"]["sources"] |
||
| 760 | |||
| 761 | target = db.select_dataframe( |
||
| 762 | f""" |
||
| 763 | SELECT capacity |
||
| 764 | FROM {sources['scenario_capacities']['schema']}. |
||
| 765 | {sources['scenario_capacities']['table']} a |
||
| 766 | WHERE carrier = 'solar_rooftop' |
||
| 767 | AND scenario_name = '{scenario}' |
||
| 768 | """ |
||
| 769 | ).capacity[0] |
||
| 770 | |||
| 771 | dataset = config.settings()["egon-data"]["--dataset-boundary"] |
||
| 772 | |||
| 773 | if dataset == "Schleswig-Holstein": |
||
| 774 | # since the required data is missing for a SH run, it is |
||
| 775 | # implemented manually here |
||
| 776 | total_2035 = 84070 |
||
| 777 | sh_2035 = 2700 |
||
| 778 | |||
| 779 | share = sh_2035 / total_2035 |
||
| 780 | |||
| 781 | target *= share |
||
| 782 | |||
| 783 | assert isclose( |
||
| 784 | target, |
||
| 785 | merge_df.loc[merge_df.scenario == scenario].capacity.sum(), |
||
| 786 | ), ( |
||
| 787 | f"{target} != " |
||
| 788 | f"{merge_df.loc[merge_df.scenario == scenario].capacity.sum()}" |
||
| 789 | ) |
||
| 790 | else: |
||
| 791 | raise ValueError(f"Scenario {scenario} is not valid.") |
||
| 792 | |||
| 793 | |||
| 794 | def sanitycheck_emobility_mit(): |
||
| 795 | """Execute sanity checks for eMobility: motorized individual travel |
||
| 796 | |||
| 797 | Checks data integrity for the eGon2035, eGon2035_lowflex and eGon100RE scenarios |
||
| 798 | using assertions: |
||
| 799 | 1. Allocated EV numbers and EVs allocated to grid districts |
||
| 800 | 2. Trip data (original input data from simBEV) |
||
| 801 | 3. Model data in eTraGo PF tables (grid.egon_etrago_*) |
||
| 802 | |||
| 803 | Parameters |
||
| 804 | ---------- |
||
| 805 | None |
||
| 806 | |||
| 807 | Returns |
||
| 808 | ------- |
||
| 809 | None |
||
| 810 | """ |
||
| 811 | |||
| 812 | def check_ev_allocation(): |
||
| 813 | # Get target number for scenario |
||
| 814 | ev_count_target = scenario_variation_parameters["ev_count"] |
||
| 815 | print(f" Target count: {str(ev_count_target)}") |
||
| 816 | |||
| 817 | # Get allocated numbers |
||
| 818 | ev_counts_dict = {} |
||
| 819 | with db.session_scope() as session: |
||
| 820 | for table, level in zip( |
||
| 821 | [ |
||
| 822 | EgonEvCountMvGridDistrict, |
||
| 823 | EgonEvCountMunicipality, |
||
| 824 | EgonEvCountRegistrationDistrict, |
||
| 825 | ], |
||
| 826 | ["Grid District", "Municipality", "Registration District"], |
||
| 827 | ): |
||
| 828 | query = session.query( |
||
| 829 | func.sum( |
||
| 830 | table.bev_mini |
||
| 831 | + table.bev_medium |
||
| 832 | + table.bev_luxury |
||
| 833 | + table.phev_mini |
||
| 834 | + table.phev_medium |
||
| 835 | + table.phev_luxury |
||
| 836 | ).label("ev_count") |
||
| 837 | ).filter( |
||
| 838 | table.scenario == scenario_name, |
||
| 839 | table.scenario_variation == scenario_var_name, |
||
| 840 | ) |
||
| 841 | |||
| 842 | ev_counts = pd.read_sql( |
||
| 843 | query.statement, query.session.bind, index_col=None |
||
| 844 | ) |
||
| 845 | ev_counts_dict[level] = ev_counts.iloc[0].ev_count |
||
| 846 | print( |
||
| 847 | f" Count table: Total count for level {level} " |
||
| 848 | f"(table: {table.__table__}): " |
||
| 849 | f"{str(ev_counts_dict[level])}" |
||
| 850 | ) |
||
| 851 | |||
| 852 | # Compare with scenario target (only if not in testmode) |
||
| 853 | if TESTMODE_OFF: |
||
| 854 | for level, count in ev_counts_dict.items(): |
||
| 855 | np.testing.assert_allclose( |
||
| 856 | count, |
||
| 857 | ev_count_target, |
||
| 858 | rtol=0.0001, |
||
| 859 | err_msg=f"EV numbers in {level} seems to be flawed.", |
||
| 860 | ) |
||
| 861 | else: |
||
| 862 | print(" Testmode is on, skipping sanity check...") |
||
| 863 | |||
| 864 | # Get allocated EVs in grid districts |
||
| 865 | with db.session_scope() as session: |
||
| 866 | query = session.query( |
||
| 867 | func.count(EgonEvMvGridDistrict.egon_ev_pool_ev_id).label( |
||
| 868 | "ev_count" |
||
| 869 | ), |
||
| 870 | ).filter( |
||
| 871 | EgonEvMvGridDistrict.scenario == scenario_name, |
||
| 872 | EgonEvMvGridDistrict.scenario_variation == scenario_var_name, |
||
| 873 | ) |
||
| 874 | ev_count_alloc = ( |
||
| 875 | pd.read_sql(query.statement, query.session.bind, index_col=None) |
||
| 876 | .iloc[0] |
||
| 877 | .ev_count |
||
| 878 | ) |
||
| 879 | print( |
||
| 880 | f" EVs allocated to Grid Districts " |
||
| 881 | f"(table: {EgonEvMvGridDistrict.__table__}) total count: " |
||
| 882 | f"{str(ev_count_alloc)}" |
||
| 883 | ) |
||
| 884 | |||
| 885 | # Compare with scenario target (only if not in testmode) |
||
| 886 | if TESTMODE_OFF: |
||
| 887 | np.testing.assert_allclose( |
||
| 888 | ev_count_alloc, |
||
| 889 | ev_count_target, |
||
| 890 | rtol=0.0001, |
||
| 891 | err_msg=( |
||
| 892 | "EV numbers allocated to Grid Districts seems to be " |
||
| 893 | "flawed." |
||
| 894 | ), |
||
| 895 | ) |
||
| 896 | else: |
||
| 897 | print(" Testmode is on, skipping sanity check...") |
||
| 898 | |||
| 899 | return ev_count_alloc |
||
| 900 | |||
| 901 | def check_trip_data(): |
||
| 902 | # Check if trips start at timestep 0 and have a max. of 35040 steps |
||
| 903 | # (8760h in 15min steps) |
||
| 904 | print(" Checking timeranges...") |
||
| 905 | with db.session_scope() as session: |
||
| 906 | query = session.query( |
||
| 907 | func.count(EgonEvTrip.event_id).label("cnt") |
||
| 908 | ).filter( |
||
| 909 | or_( |
||
| 910 | and_( |
||
| 911 | EgonEvTrip.park_start > 0, |
||
| 912 | EgonEvTrip.simbev_event_id == 0, |
||
| 913 | ), |
||
| 914 | EgonEvTrip.park_end |
||
| 915 | > (60 / int(meta_run_config.stepsize)) * 8760, |
||
| 916 | ), |
||
| 917 | EgonEvTrip.scenario == scenario_name, |
||
| 918 | ) |
||
| 919 | invalid_trips = pd.read_sql( |
||
| 920 | query.statement, query.session.bind, index_col=None |
||
| 921 | ) |
||
| 922 | np.testing.assert_equal( |
||
| 923 | invalid_trips.iloc[0].cnt, |
||
| 924 | 0, |
||
| 925 | err_msg=( |
||
| 926 | f"{str(invalid_trips.iloc[0].cnt)} trips in table " |
||
| 927 | f"{EgonEvTrip.__table__} have invalid timesteps." |
||
| 928 | ), |
||
| 929 | ) |
||
| 930 | |||
| 931 | # Check if charging demand can be covered by available charging energy |
||
| 932 | # while parking |
||
| 933 | print(" Compare charging demand with available power...") |
||
| 934 | with db.session_scope() as session: |
||
| 935 | query = session.query( |
||
| 936 | func.count(EgonEvTrip.event_id).label("cnt") |
||
| 937 | ).filter( |
||
| 938 | func.round( |
||
| 939 | cast( |
||
| 940 | (EgonEvTrip.park_end - EgonEvTrip.park_start + 1) |
||
| 941 | * EgonEvTrip.charging_capacity_nominal |
||
| 942 | * (int(meta_run_config.stepsize) / 60), |
||
| 943 | Numeric, |
||
| 944 | ), |
||
| 945 | 3, |
||
| 946 | ) |
||
| 947 | < cast(EgonEvTrip.charging_demand, Numeric), |
||
| 948 | EgonEvTrip.scenario == scenario_name, |
||
| 949 | ) |
||
| 950 | invalid_trips = pd.read_sql( |
||
| 951 | query.statement, query.session.bind, index_col=None |
||
| 952 | ) |
||
| 953 | np.testing.assert_equal( |
||
| 954 | invalid_trips.iloc[0].cnt, |
||
| 955 | 0, |
||
| 956 | err_msg=( |
||
| 957 | f"In {str(invalid_trips.iloc[0].cnt)} trips (table: " |
||
| 958 | f"{EgonEvTrip.__table__}) the charging demand cannot be " |
||
| 959 | f"covered by available charging power." |
||
| 960 | ), |
||
| 961 | ) |
||
| 962 | |||
| 963 | def check_model_data(): |
||
| 964 | # Check if model components were fully created |
||
| 965 | print(" Check if all model components were created...") |
||
| 966 | # Get MVGDs which got EV allocated |
||
| 967 | with db.session_scope() as session: |
||
| 968 | query = ( |
||
| 969 | session.query( |
||
| 970 | EgonEvMvGridDistrict.bus_id, |
||
| 971 | ) |
||
| 972 | .filter( |
||
| 973 | EgonEvMvGridDistrict.scenario == scenario_name, |
||
| 974 | EgonEvMvGridDistrict.scenario_variation |
||
| 975 | == scenario_var_name, |
||
| 976 | ) |
||
| 977 | .group_by(EgonEvMvGridDistrict.bus_id) |
||
| 978 | ) |
||
| 979 | mvgds_with_ev = ( |
||
| 980 | pd.read_sql(query.statement, query.session.bind, index_col=None) |
||
| 981 | .bus_id.sort_values() |
||
| 982 | .to_list() |
||
| 983 | ) |
||
| 984 | |||
| 985 | # Load model components |
||
| 986 | with db.session_scope() as session: |
||
| 987 | query = ( |
||
| 988 | session.query( |
||
| 989 | EgonPfHvLink.bus0.label("mvgd_bus_id"), |
||
| 990 | EgonPfHvLoad.bus.label("emob_bus_id"), |
||
| 991 | EgonPfHvLoad.load_id.label("load_id"), |
||
| 992 | EgonPfHvStore.store_id.label("store_id"), |
||
| 993 | ) |
||
| 994 | .select_from(EgonPfHvLoad, EgonPfHvStore) |
||
| 995 | .join( |
||
| 996 | EgonPfHvLoadTimeseries, |
||
| 997 | EgonPfHvLoadTimeseries.load_id == EgonPfHvLoad.load_id, |
||
| 998 | ) |
||
| 999 | .join( |
||
| 1000 | EgonPfHvStoreTimeseries, |
||
| 1001 | EgonPfHvStoreTimeseries.store_id == EgonPfHvStore.store_id, |
||
| 1002 | ) |
||
| 1003 | .filter( |
||
| 1004 | EgonPfHvLoad.carrier == "land transport EV", |
||
| 1005 | EgonPfHvLoad.scn_name == scenario_name, |
||
| 1006 | EgonPfHvLoadTimeseries.scn_name == scenario_name, |
||
| 1007 | EgonPfHvStore.carrier == "battery storage", |
||
| 1008 | EgonPfHvStore.scn_name == scenario_name, |
||
| 1009 | EgonPfHvStoreTimeseries.scn_name == scenario_name, |
||
| 1010 | EgonPfHvLink.scn_name == scenario_name, |
||
| 1011 | EgonPfHvLink.bus1 == EgonPfHvLoad.bus, |
||
| 1012 | EgonPfHvLink.bus1 == EgonPfHvStore.bus, |
||
| 1013 | ) |
||
| 1014 | ) |
||
| 1015 | model_components = pd.read_sql( |
||
| 1016 | query.statement, query.session.bind, index_col=None |
||
| 1017 | ) |
||
| 1018 | |||
| 1019 | # Check number of buses with model components connected |
||
| 1020 | mvgd_buses_with_ev = model_components.loc[ |
||
| 1021 | model_components.mvgd_bus_id.isin(mvgds_with_ev) |
||
| 1022 | ] |
||
| 1023 | np.testing.assert_equal( |
||
| 1024 | len(mvgds_with_ev), |
||
| 1025 | len(mvgd_buses_with_ev), |
||
| 1026 | err_msg=( |
||
| 1027 | f"Number of Grid Districts with connected model components " |
||
| 1028 | f"({str(len(mvgd_buses_with_ev))} in tables egon_etrago_*) " |
||
| 1029 | f"differ from number of Grid Districts that got EVs " |
||
| 1030 | f"allocated ({len(mvgds_with_ev)} in table " |
||
| 1031 | f"{EgonEvMvGridDistrict.__table__})." |
||
| 1032 | ), |
||
| 1033 | ) |
||
| 1034 | |||
| 1035 | # Check if all required components exist (if no id is NaN) |
||
| 1036 | np.testing.assert_equal( |
||
| 1037 | model_components.drop_duplicates().isna().any().any(), |
||
| 1038 | False, |
||
| 1039 | err_msg=( |
||
| 1040 | f"Some components are missing (see True values): " |
||
| 1041 | f"{model_components.drop_duplicates().isna().any()}" |
||
| 1042 | ), |
||
| 1043 | ) |
||
| 1044 | |||
| 1045 | # Get all model timeseries |
||
| 1046 | print(" Loading model timeseries...") |
||
| 1047 | # Get all model timeseries |
||
| 1048 | model_ts_dict = { |
||
| 1049 | "Load": { |
||
| 1050 | "carrier": "land transport EV", |
||
| 1051 | "table": EgonPfHvLoad, |
||
| 1052 | "table_ts": EgonPfHvLoadTimeseries, |
||
| 1053 | "column_id": "load_id", |
||
| 1054 | "columns_ts": ["p_set"], |
||
| 1055 | "ts": None, |
||
| 1056 | }, |
||
| 1057 | "Link": { |
||
| 1058 | "carrier": "BEV charger", |
||
| 1059 | "table": EgonPfHvLink, |
||
| 1060 | "table_ts": EgonPfHvLinkTimeseries, |
||
| 1061 | "column_id": "link_id", |
||
| 1062 | "columns_ts": ["p_max_pu"], |
||
| 1063 | "ts": None, |
||
| 1064 | }, |
||
| 1065 | "Store": { |
||
| 1066 | "carrier": "battery storage", |
||
| 1067 | "table": EgonPfHvStore, |
||
| 1068 | "table_ts": EgonPfHvStoreTimeseries, |
||
| 1069 | "column_id": "store_id", |
||
| 1070 | "columns_ts": ["e_min_pu", "e_max_pu"], |
||
| 1071 | "ts": None, |
||
| 1072 | }, |
||
| 1073 | } |
||
| 1074 | |||
| 1075 | with db.session_scope() as session: |
||
| 1076 | for node, attrs in model_ts_dict.items(): |
||
| 1077 | print(f" Loading {node} timeseries...") |
||
| 1078 | subquery = ( |
||
| 1079 | session.query(getattr(attrs["table"], attrs["column_id"])) |
||
| 1080 | .filter(attrs["table"].carrier == attrs["carrier"]) |
||
| 1081 | .filter(attrs["table"].scn_name == scenario_name) |
||
| 1082 | .subquery() |
||
| 1083 | ) |
||
| 1084 | |||
| 1085 | cols = [ |
||
| 1086 | getattr(attrs["table_ts"], c) for c in attrs["columns_ts"] |
||
| 1087 | ] |
||
| 1088 | query = session.query( |
||
| 1089 | getattr(attrs["table_ts"], attrs["column_id"]), *cols |
||
| 1090 | ).filter( |
||
| 1091 | getattr(attrs["table_ts"], attrs["column_id"]).in_( |
||
| 1092 | subquery |
||
| 1093 | ), |
||
| 1094 | attrs["table_ts"].scn_name == scenario_name, |
||
| 1095 | ) |
||
| 1096 | attrs["ts"] = pd.read_sql( |
||
| 1097 | query.statement, |
||
| 1098 | query.session.bind, |
||
| 1099 | index_col=attrs["column_id"], |
||
| 1100 | ) |
||
| 1101 | |||
| 1102 | # Check if all timeseries have 8760 steps |
||
| 1103 | print(" Checking timeranges...") |
||
| 1104 | for node, attrs in model_ts_dict.items(): |
||
| 1105 | for col in attrs["columns_ts"]: |
||
| 1106 | ts = attrs["ts"] |
||
| 1107 | invalid_ts = ts.loc[ts[col].apply(lambda _: len(_)) != 8760][ |
||
| 1108 | col |
||
| 1109 | ].apply(len) |
||
| 1110 | np.testing.assert_equal( |
||
| 1111 | len(invalid_ts), |
||
| 1112 | 0, |
||
| 1113 | err_msg=( |
||
| 1114 | f"{str(len(invalid_ts))} rows in timeseries do not " |
||
| 1115 | f"have 8760 timesteps. Table: " |
||
| 1116 | f"{attrs['table_ts'].__table__}, Column: {col}, IDs: " |
||
| 1117 | f"{str(list(invalid_ts.index))}" |
||
| 1118 | ), |
||
| 1119 | ) |
||
| 1120 | |||
| 1121 | # Compare total energy demand in model with some approximate values |
||
| 1122 | # (per EV: 14,000 km/a, 0.17 kWh/km) |
||
| 1123 | print(" Checking energy demand in model...") |
||
| 1124 | total_energy_model = ( |
||
| 1125 | model_ts_dict["Load"]["ts"].p_set.apply(lambda _: sum(_)).sum() |
||
| 1126 | / 1e6 |
||
| 1127 | ) |
||
| 1128 | print(f" Total energy amount in model: {total_energy_model} TWh") |
||
| 1129 | total_energy_scenario_approx = ev_count_alloc * 14000 * 0.17 / 1e9 |
||
| 1130 | print( |
||
| 1131 | f" Total approximated energy amount in scenario: " |
||
| 1132 | f"{total_energy_scenario_approx} TWh" |
||
| 1133 | ) |
||
| 1134 | np.testing.assert_allclose( |
||
| 1135 | total_energy_model, |
||
| 1136 | total_energy_scenario_approx, |
||
| 1137 | rtol=0.1, |
||
| 1138 | err_msg=( |
||
| 1139 | "The total energy amount in the model deviates heavily " |
||
| 1140 | "from the approximated value for current scenario." |
||
| 1141 | ), |
||
| 1142 | ) |
||
| 1143 | |||
| 1144 | # Compare total storage capacity |
||
| 1145 | print(" Checking storage capacity...") |
||
| 1146 | # Load storage capacities from model |
||
| 1147 | with db.session_scope() as session: |
||
| 1148 | query = session.query( |
||
| 1149 | func.sum(EgonPfHvStore.e_nom).label("e_nom") |
||
| 1150 | ).filter( |
||
| 1151 | EgonPfHvStore.scn_name == scenario_name, |
||
| 1152 | EgonPfHvStore.carrier == "battery storage", |
||
| 1153 | ) |
||
| 1154 | storage_capacity_model = ( |
||
| 1155 | pd.read_sql( |
||
| 1156 | query.statement, query.session.bind, index_col=None |
||
| 1157 | ).e_nom.sum() |
||
| 1158 | / 1e3 |
||
| 1159 | ) |
||
| 1160 | print( |
||
| 1161 | f" Total storage capacity ({EgonPfHvStore.__table__}): " |
||
| 1162 | f"{round(storage_capacity_model, 1)} GWh" |
||
| 1163 | ) |
||
| 1164 | |||
| 1165 | # Load occurrences of each EV |
||
| 1166 | with db.session_scope() as session: |
||
| 1167 | query = ( |
||
| 1168 | session.query( |
||
| 1169 | EgonEvMvGridDistrict.bus_id, |
||
| 1170 | EgonEvPool.type, |
||
| 1171 | func.count(EgonEvMvGridDistrict.egon_ev_pool_ev_id).label( |
||
| 1172 | "count" |
||
| 1173 | ), |
||
| 1174 | ) |
||
| 1175 | .join( |
||
| 1176 | EgonEvPool, |
||
| 1177 | EgonEvPool.ev_id |
||
| 1178 | == EgonEvMvGridDistrict.egon_ev_pool_ev_id, |
||
| 1179 | ) |
||
| 1180 | .filter( |
||
| 1181 | EgonEvMvGridDistrict.scenario == scenario_name, |
||
| 1182 | EgonEvMvGridDistrict.scenario_variation |
||
| 1183 | == scenario_var_name, |
||
| 1184 | EgonEvPool.scenario == scenario_name, |
||
| 1185 | ) |
||
| 1186 | .group_by(EgonEvMvGridDistrict.bus_id, EgonEvPool.type) |
||
| 1187 | ) |
||
| 1188 | count_per_ev_all = pd.read_sql( |
||
| 1189 | query.statement, query.session.bind, index_col="bus_id" |
||
| 1190 | ) |
||
| 1191 | count_per_ev_all["bat_cap"] = count_per_ev_all.type.map( |
||
| 1192 | meta_tech_data.battery_capacity |
||
| 1193 | ) |
||
| 1194 | count_per_ev_all["bat_cap_total_MWh"] = ( |
||
| 1195 | count_per_ev_all["count"] * count_per_ev_all.bat_cap / 1e3 |
||
| 1196 | ) |
||
| 1197 | storage_capacity_simbev = count_per_ev_all.bat_cap_total_MWh.div( |
||
| 1198 | 1e3 |
||
| 1199 | ).sum() |
||
| 1200 | print( |
||
| 1201 | f" Total storage capacity (simBEV): " |
||
| 1202 | f"{round(storage_capacity_simbev, 1)} GWh" |
||
| 1203 | ) |
||
| 1204 | |||
| 1205 | np.testing.assert_allclose( |
||
| 1206 | storage_capacity_model, |
||
| 1207 | storage_capacity_simbev, |
||
| 1208 | rtol=0.01, |
||
| 1209 | err_msg=( |
||
| 1210 | "The total storage capacity in the model deviates heavily " |
||
| 1211 | "from the input data provided by simBEV for current scenario." |
||
| 1212 | ), |
||
| 1213 | ) |
||
| 1214 | |||
| 1215 | # Check SoC storage constraint: e_min_pu < e_max_pu for all timesteps |
||
| 1216 | print(" Validating SoC constraints...") |
||
| 1217 | stores_with_invalid_soc = [] |
||
| 1218 | for idx, row in model_ts_dict["Store"]["ts"].iterrows(): |
||
| 1219 | ts = row[["e_min_pu", "e_max_pu"]] |
||
| 1220 | x = np.array(ts.e_min_pu) > np.array(ts.e_max_pu) |
||
| 1221 | if x.any(): |
||
| 1222 | stores_with_invalid_soc.append(idx) |
||
| 1223 | |||
| 1224 | np.testing.assert_equal( |
||
| 1225 | len(stores_with_invalid_soc), |
||
| 1226 | 0, |
||
| 1227 | err_msg=( |
||
| 1228 | f"The store constraint e_min_pu < e_max_pu does not apply " |
||
| 1229 | f"for some storages in {EgonPfHvStoreTimeseries.__table__}. " |
||
| 1230 | f"Invalid store_ids: {stores_with_invalid_soc}" |
||
| 1231 | ), |
||
| 1232 | ) |
||
| 1233 | |||
| 1234 | def check_model_data_lowflex_eGon2035(): |
||
| 1235 | # TODO: Add eGon100RE_lowflex |
||
| 1236 | print("") |
||
| 1237 | print("SCENARIO: eGon2035_lowflex") |
||
| 1238 | |||
| 1239 | # Compare driving load and charging load |
||
| 1240 | print(" Loading eGon2035 model timeseries: driving load...") |
||
| 1241 | with db.session_scope() as session: |
||
| 1242 | query = ( |
||
| 1243 | session.query( |
||
| 1244 | EgonPfHvLoad.load_id, |
||
| 1245 | EgonPfHvLoadTimeseries.p_set, |
||
| 1246 | ) |
||
| 1247 | .join( |
||
| 1248 | EgonPfHvLoadTimeseries, |
||
| 1249 | EgonPfHvLoadTimeseries.load_id == EgonPfHvLoad.load_id, |
||
| 1250 | ) |
||
| 1251 | .filter( |
||
| 1252 | EgonPfHvLoad.carrier == "land transport EV", |
||
| 1253 | EgonPfHvLoad.scn_name == "eGon2035", |
||
| 1254 | EgonPfHvLoadTimeseries.scn_name == "eGon2035", |
||
| 1255 | ) |
||
| 1256 | ) |
||
| 1257 | model_driving_load = pd.read_sql( |
||
| 1258 | query.statement, query.session.bind, index_col=None |
||
| 1259 | ) |
||
| 1260 | driving_load = np.array(model_driving_load.p_set.to_list()).sum(axis=0) |
||
| 1261 | |||
| 1262 | print( |
||
| 1263 | " Loading eGon2035_lowflex model timeseries: dumb charging " |
||
| 1264 | "load..." |
||
| 1265 | ) |
||
| 1266 | with db.session_scope() as session: |
||
| 1267 | query = ( |
||
| 1268 | session.query( |
||
| 1269 | EgonPfHvLoad.load_id, |
||
| 1270 | EgonPfHvLoadTimeseries.p_set, |
||
| 1271 | ) |
||
| 1272 | .join( |
||
| 1273 | EgonPfHvLoadTimeseries, |
||
| 1274 | EgonPfHvLoadTimeseries.load_id == EgonPfHvLoad.load_id, |
||
| 1275 | ) |
||
| 1276 | .filter( |
||
| 1277 | EgonPfHvLoad.carrier == "land transport EV", |
||
| 1278 | EgonPfHvLoad.scn_name == "eGon2035_lowflex", |
||
| 1279 | EgonPfHvLoadTimeseries.scn_name == "eGon2035_lowflex", |
||
| 1280 | ) |
||
| 1281 | ) |
||
| 1282 | model_charging_load_lowflex = pd.read_sql( |
||
| 1283 | query.statement, query.session.bind, index_col=None |
||
| 1284 | ) |
||
| 1285 | charging_load = np.array( |
||
| 1286 | model_charging_load_lowflex.p_set.to_list() |
||
| 1287 | ).sum(axis=0) |
||
| 1288 | |||
| 1289 | # Ratio of driving and charging load should be 0.9 due to charging |
||
| 1290 | # efficiency |
||
| 1291 | print(" Compare cumulative loads...") |
||
| 1292 | print(f" Driving load (eGon2035): {driving_load.sum() / 1e6} TWh") |
||
| 1293 | print( |
||
| 1294 | f" Dumb charging load (eGon2035_lowflex): " |
||
| 1295 | f"{charging_load.sum() / 1e6} TWh" |
||
| 1296 | ) |
||
| 1297 | driving_load_theoretical = ( |
||
| 1298 | float(meta_run_config.eta_cp) * charging_load.sum() |
||
| 1299 | ) |
||
| 1300 | np.testing.assert_allclose( |
||
| 1301 | driving_load.sum(), |
||
| 1302 | driving_load_theoretical, |
||
| 1303 | rtol=0.01, |
||
| 1304 | err_msg=( |
||
| 1305 | f"The driving load (eGon2035) deviates by more than 1% " |
||
| 1306 | f"from the theoretical driving load calculated from charging " |
||
| 1307 | f"load (eGon2035_lowflex) with an efficiency of " |
||
| 1308 | f"{float(meta_run_config.eta_cp)}." |
||
| 1309 | ), |
||
| 1310 | ) |
||
| 1311 | |||
| 1312 | print("=====================================================") |
||
| 1313 | print("=== SANITY CHECKS FOR MOTORIZED INDIVIDUAL TRAVEL ===") |
||
| 1314 | print("=====================================================") |
||
| 1315 | |||
| 1316 | for scenario_name in ["eGon2035", "eGon100RE"]: |
||
| 1317 | scenario_var_name = DATASET_CFG["scenario"]["variation"][scenario_name] |
||
| 1318 | |||
| 1319 | print("") |
||
| 1320 | print(f"SCENARIO: {scenario_name}, VARIATION: {scenario_var_name}") |
||
| 1321 | |||
| 1322 | # Load scenario params for scenario and scenario variation |
||
| 1323 | scenario_variation_parameters = get_sector_parameters( |
||
| 1324 | "mobility", scenario=scenario_name |
||
| 1325 | )["motorized_individual_travel"][scenario_var_name] |
||
| 1326 | |||
| 1327 | # Load simBEV run config and tech data |
||
| 1328 | meta_run_config = read_simbev_metadata_file( |
||
| 1329 | scenario_name, "config" |
||
| 1330 | ).loc["basic"] |
||
| 1331 | meta_tech_data = read_simbev_metadata_file(scenario_name, "tech_data") |
||
| 1332 | |||
| 1333 | print("") |
||
| 1334 | print("Checking EV counts...") |
||
| 1335 | ev_count_alloc = check_ev_allocation() |
||
| 1336 | |||
| 1337 | print("") |
||
| 1338 | print("Checking trip data...") |
||
| 1339 | check_trip_data() |
||
| 1340 | |||
| 1341 | print("") |
||
| 1342 | print("Checking model data...") |
||
| 1343 | check_model_data() |
||
| 1344 | |||
| 1345 | print("") |
||
| 1346 | check_model_data_lowflex_eGon2035() |
||
| 1347 | |||
| 1348 | print("=====================================================") |
||
| 1349 | |||
| 1350 | |||
| 1351 | def sanitycheck_home_batteries(): |
||
| 1352 | # get constants |
||
| 1353 | constants = config.datasets()["home_batteries"]["constants"] |
||
| 1354 | scenarios = constants["scenarios"] |
||
| 1355 | cbat_pbat_ratio = get_cbat_pbat_ratio() |
||
| 1356 | |||
| 1357 | sources = config.datasets()["home_batteries"]["sources"] |
||
| 1358 | targets = config.datasets()["home_batteries"]["targets"] |
||
| 1359 | |||
| 1360 | for scenario in scenarios: |
||
| 1361 | # get home battery capacity per mv grid id |
||
| 1362 | sql = f""" |
||
| 1363 | SELECT el_capacity as p_nom, bus_id FROM |
||
| 1364 | {sources["storage"]["schema"]} |
||
| 1365 | .{sources["storage"]["table"]} |
||
| 1366 | WHERE carrier = 'home_battery' |
||
| 1367 | AND scenario = '{scenario}' |
||
| 1368 | """ |
||
| 1369 | |||
| 1370 | home_batteries_df = db.select_dataframe(sql, index_col="bus_id") |
||
| 1371 | |||
| 1372 | home_batteries_df = home_batteries_df.assign( |
||
| 1373 | capacity=home_batteries_df.p_nom * cbat_pbat_ratio |
||
| 1374 | ) |
||
| 1375 | |||
| 1376 | sql = f""" |
||
| 1377 | SELECT * FROM |
||
| 1378 | {targets["home_batteries"]["schema"]} |
||
| 1379 | .{targets["home_batteries"]["table"]} |
||
| 1380 | WHERE scenario = '{scenario}' |
||
| 1381 | """ |
||
| 1382 | |||
| 1383 | home_batteries_buildings_df = db.select_dataframe( |
||
| 1384 | sql, index_col="index" |
||
| 1385 | ) |
||
| 1386 | |||
| 1387 | df = ( |
||
| 1388 | home_batteries_buildings_df[["bus_id", "p_nom", "capacity"]] |
||
| 1389 | .groupby("bus_id") |
||
| 1390 | .sum() |
||
| 1391 | ) |
||
| 1392 | |||
| 1393 | assert (home_batteries_df.round(6) == df.round(6)).all().all() |
||
| 1394 |