| Total Complexity | 113 |
| Total Lines | 2498 |
| Duplicated Lines | 3.24 % |
| Changes | 0 |
Duplicate code is one of the most pungent code smells. A common rule of thumb is to restructure code once it is duplicated in three or more places.
Common duplication problems and their corresponding solutions are:
Complex classes like data.datasets.heat_supply.individual_heating often do a lot of different things. To break such a class down, we need to identify a cohesive component within that class. A common approach to finding such a component is to look for fields and methods that share the same prefixes or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring, as sketched below. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.
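A minimal before/after sketch of Extract Class (the class, field, and method names are hypothetical and not taken from this module):

    # Before: the profile_* prefix hints at a hidden component.
    class HeatSupplyBefore:
        def __init__(self):
            self.profile_ids = []
            self.profile_scaling = 1.0

        def profile_aggregate(self):
            return len(self.profile_ids) * self.profile_scaling


    # After: the profile concern is extracted into its own class.
    class HeatProfilePool:
        def __init__(self, ids=None, scaling=1.0):
            self.ids = ids if ids is not None else []
            self.scaling = scaling

        def aggregate(self):
            return len(self.ids) * self.scaling


    class HeatSupplyAfter:
        def __init__(self):
            self.profiles = HeatProfilePool()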
| 1 | """The central module containing all code dealing with individual heat supply. |
||
| 2 | |||
| 3 | The following main things are done in this module: |
||
| 4 | |||
| 5 | * ?? |
||
| 6 | * Desaggregation of heat pump capacities to individual buildings |
||
| 7 | * Determination of minimum required heat pump capacity for pypsa-eur-sec |
||
| 8 | |||
| 9 | """ |
||
| 10 | |||
| 11 | from pathlib import Path |
||
| 12 | import os |
||
| 13 | import random |
||
| 14 | |||
| 15 | from airflow.operators.python import PythonOperator |
||
| 16 | from psycopg2.extensions import AsIs, register_adapter |
||
| 17 | from sqlalchemy import ARRAY, REAL, Column, Integer, String |
||
| 18 | from sqlalchemy.ext.declarative import declarative_base |
||
| 19 | import geopandas as gpd |
||
| 20 | import numpy as np |
||
| 21 | import pandas as pd |
||
| 22 | import saio |
||
| 23 | |||
| 24 | from egon.data import config, db, logger |
||
| 25 | from egon.data.datasets import Dataset, wrapped_partial |
||
| 26 | from egon.data.datasets.district_heating_areas import ( |
||
| 27 | MapZensusDistrictHeatingAreas, |
||
| 28 | ) |
||
| 29 | from egon.data.datasets.electricity_demand_timeseries.cts_buildings import ( |
||
| 30 | calc_cts_building_profiles, |
||
| 31 | ) |
||
| 32 | from egon.data.datasets.electricity_demand_timeseries.mapping import ( |
||
| 33 | EgonMapZensusMvgdBuildings, |
||
| 34 | ) |
||
| 35 | from egon.data.datasets.electricity_demand_timeseries.tools import ( |
||
| 36 | write_table_to_postgres, |
||
| 37 | ) |
||
| 38 | from egon.data.datasets.emobility.motorized_individual_travel.helpers import ( |
||
| 39 | reduce_mem_usage, |
||
| 40 | ) |
||
| 41 | from egon.data.datasets.heat_demand import EgonPetaHeat |
||
| 42 | from egon.data.datasets.heat_demand_timeseries.daily import ( |
||
| 43 | EgonDailyHeatDemandPerClimateZone, |
||
| 44 | EgonMapZensusClimateZones, |
||
| 45 | ) |
||
| 46 | from egon.data.datasets.heat_demand_timeseries.idp_pool import ( |
||
| 47 | EgonHeatTimeseries, |
||
| 48 | ) |
||
| 49 | |||
| 50 | # get zensus cells with district heating |
||
| 51 | from egon.data.datasets.zensus_mv_grid_districts import MapZensusGridDistricts |
||
| 52 | |||
| 53 | from egon_validation import ArrayCardinalityValidation |
||
| 54 | |||
| 55 | engine = db.engine() |
||
| 56 | Base = declarative_base() |
||
| 57 | |||
| 58 | scenarios = config.settings()["egon-data"]["--scenarios"] |
||
| 59 | |||
| 60 | |||
class EgonEtragoTimeseriesIndividualHeating(Base):
    """
    Class definition of table demand.egon_etrago_timeseries_individual_heating.

    This table contains aggregated heat load profiles of all buildings with
    heat pumps within an MV grid as well as of all buildings with gas boilers
    within an MV grid for the different scenarios. The data is used in eTraGo.

    """

    __tablename__ = "egon_etrago_timeseries_individual_heating"
    __table_args__ = {"schema": "demand"}
    bus_id = Column(Integer, primary_key=True)
    scenario = Column(String, primary_key=True)
    carrier = Column(String, primary_key=True)
    dist_aggregated_mw = Column(ARRAY(REAL))


class EgonHpCapacityBuildings(Base):
    """
    Class definition of table demand.egon_hp_capacity_buildings.

    This table contains the heat pump capacity of all buildings with a heat
    pump.

    """

    __tablename__ = "egon_hp_capacity_buildings"
    __table_args__ = {"schema": "demand"}
    building_id = Column(Integer, primary_key=True)
    scenario = Column(String, primary_key=True)
    hp_capacity = Column(REAL)

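# Hypothetical usage sketch (not part of the pipeline): the ORM classes
# above can be queried like any other SQLAlchemy model, e.g. to fetch the
# aggregated profiles of one scenario and carrier:
#
#     with db.session_scope() as session:
#         query = session.query(
#             EgonEtragoTimeseriesIndividualHeating
#         ).filter(
#             EgonEtragoTimeseriesIndividualHeating.scenario == "eGon2035",
#             EgonEtragoTimeseriesIndividualHeating.carrier == "heat_pump",
#         )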
class HeatPumpsPypsaEur(Dataset):
    """
    Class to determine minimum heat pump capacities per building for the
    PyPSA-EUR run.

    The goal is to ensure that the heat pump capacities determined in
    PyPSA-EUR are sufficient to serve the heat demand of individual buildings
    after the desaggregation from a few nodes in PyPSA-EUR to the individual
    buildings. As the heat peak load is not previously determined, it is
    determined within this dataset as well. Further, as determining the heat
    peak load requires heat load profiles of the buildings to be set up, this
    task is also utilised to set up heat load profiles of all buildings with
    heat pumps within a grid in the eGon100RE scenario used in eTraGo.

    For more information see data documentation on :ref:`dec-heat-pumps-ref`.

    *Dependencies*
      * :py:class:`CtsDemandBuildings
        <egon.data.datasets.electricity_demand_timeseries.cts_buildings.CtsDemandBuildings>`
      * :py:class:`DistrictHeatingAreas
        <egon.data.datasets.district_heating_areas.DistrictHeatingAreas>`
      * :py:class:`HeatTimeSeries
        <egon.data.datasets.heat_demand_timeseries.HeatTimeSeries>`

    *Resulting tables*
      * `input-pypsa-eur-sec/minimum_hp_capacity_mv_grid_100RE.csv` file is
        created, containing the minimum required heat pump capacity per MV
        grid in MW as input for PyPSA-EUR (created within
        :func:`export_min_cap_to_csv`)
      * :py:class:`demand.egon_etrago_timeseries_individual_heating
        <egon.data.datasets.heat_supply.individual_heating.EgonEtragoTimeseriesIndividualHeating>`
        is created and filled
      * :py:class:`demand.egon_building_heat_peak_loads
        <egon.data.datasets.heat_supply.individual_heating.BuildingHeatPeakLoads>`
        is created and filled

    **What is the challenge?**

    The main challenge lies in the set up of heat demand profiles per
    building in :func:`aggregate_residential_and_cts_profiles()` as it takes
    a lot of time and, in grids with a high number of buildings, requires a
    lot of RAM. Both runtime and RAM usage needed to be improved several
    times. To speed up the process, tasks are set up to run in parallel. This
    currently leads to a lot of connections being opened and, at a certain
    point, to a runtime error due to too many open connections.

    **What are central assumptions during the data processing?**

    The central assumption for determining the minimum required heat pump
    capacity is that heat pumps can be dimensioned using an approach from the
    network development plan that uses the building's peak heat demand and a
    fixed COP (see data documentation on :ref:`dec-heat-pumps-ref`).

    **Drawbacks and limitations of the data**

    The heat demand profiles used here to determine the heat peak load have
    very few very high peaks that lead to large heat pump capacities. This
    should be solved somehow. Cutting off the peak is not possible, as the
    time series of each building is not saved but generated on the fly. Also,
    just using smaller heat pumps would lead to infeasibilities in eDisGo.

    """

    #:
    name: str = "HeatPumpsPypsaEurSec"
    #:
    version: str = "0.0.3"

    def __init__(self, dependencies):
        def dyn_parallel_tasks_pypsa_eur():
            """Dynamically generate tasks.

            The goal is to speed up tasks by parallelising bulks of mvgds.

            The number of parallel tasks is defined via parameter
            `parallel_tasks` in the dataset config `datasets.yml`.

            Returns
            -------
            set of airflow.PythonOperators
                The tasks. Each element is of
                :func:`egon.data.datasets.heat_supply.individual_heating.
                determine_hp_cap_peak_load_mvgd_ts_pypsa_eur`
            """
            parallel_tasks = config.datasets()["demand_timeseries_mvgd"].get(
                "parallel_tasks", 1
            )

            tasks = set()

            for i in range(parallel_tasks):
                tasks.add(
                    PythonOperator(
                        task_id=(
                            f"individual_heating."
                            f"determine-hp-capacity-pypsa-eur-"
                            f"mvgd-bulk{i}"
                        ),
                        python_callable=split_mvgds_into_bulks,
                        op_kwargs={
                            "n": i,
                            "max_n": parallel_tasks,
                            "func": determine_hp_cap_peak_load_mvgd_ts_pypsa_eur,  # noqa: E501
                        },
                    )
                )
            return tasks

        tasks_HeatPumpsPypsaEur = set()

        if "eGon100RE" in scenarios:
            tasks_HeatPumpsPypsaEur = (
                delete_pypsa_eur_sec_csv_file,
                delete_mvgd_ts_100RE,
                delete_heat_peak_loads_100RE,
                {*dyn_parallel_tasks_pypsa_eur()},
            )
        else:
            tasks_HeatPumpsPypsaEur = (
                PythonOperator(
                    task_id="HeatPumpsPypsaEur_skipped",
                    python_callable=skip_task,
                    op_kwargs={
                        "scn": "eGon100RE",
                        "task": "HeatPumpsPypsaEur",
                    },
                ),
            )

        super().__init__(
            name=self.name,
            version=self.version,
            dependencies=dependencies,
            tasks=tasks_HeatPumpsPypsaEur,
            validation={
                "data-quality": [
                    ArrayCardinalityValidation(
                        table="demand.egon_etrago_timeseries_individual_heating",
                        array_column="dist_aggregated_mw",
                        expected_length=8760,
                    ),
                ]
            },
            on_validation_failure="continue",
        )

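# The number of parallel bulk tasks above is read from datasets.yml. A
# hypothetical configuration snippet (key names as read by
# dyn_parallel_tasks_pypsa_eur; the value is illustrative):
#
#     demand_timeseries_mvgd:
#       parallel_tasks: 10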
class HeatPumpsStatusQuo(Dataset):
    def __init__(self, dependencies):
        def dyn_parallel_tasks_status_quo(scenario):
            """Dynamically generate tasks.

            The goal is to speed up tasks by parallelising bulks of mvgds.

            The number of parallel tasks is defined via parameter
            `parallel_tasks` in the dataset config `datasets.yml`.

            Returns
            -------
            set of airflow.PythonOperators
                The tasks. Each element is of
                :func:`egon.data.datasets.heat_supply.individual_heating.
                determine_hp_cap_peak_load_mvgd_ts_status_quo`
            """
            parallel_tasks = config.datasets()["demand_timeseries_mvgd"].get(
                "parallel_tasks", 1
            )

            tasks = set()

            for i in range(parallel_tasks):
                tasks.add(
                    PythonOperator(
                        task_id=(
                            "individual_heating."
                            f"determine-hp-capacity-{scenario}-"
                            f"mvgd-bulk{i}"
                        ),
                        python_callable=split_mvgds_into_bulks,
                        op_kwargs={
                            "n": i,
                            "max_n": parallel_tasks,
                            "scenario": scenario,
                            "func": determine_hp_cap_peak_load_mvgd_ts_status_quo,
                        },
                    )
                )
            return tasks

        if any(
            "status" in scenario
            for scenario in config.settings()["egon-data"]["--scenarios"]
        ):
            tasks = ()

            for scenario in config.settings()["egon-data"]["--scenarios"]:
                if "status" in scenario:
                    postfix = f"_{scenario[-4:]}"

                    tasks += (
                        wrapped_partial(
                            delete_heat_peak_loads_status_quo,
                            scenario=scenario,
                            postfix=postfix,
                        ),
                        wrapped_partial(
                            delete_hp_capacity_status_quo,
                            scenario=scenario,
                            postfix=postfix,
                        ),
                        wrapped_partial(
                            delete_mvgd_ts_status_quo,
                            scenario=scenario,
                            postfix=postfix,
                        ),
                    )

                    tasks += ({*dyn_parallel_tasks_status_quo(scenario)},)
        else:
            tasks = (
                PythonOperator(
                    task_id="HeatPumpsSQ_skipped",
                    python_callable=skip_task,
                    op_kwargs={"scn": "sq", "task": "HeatPumpsStatusQuo"},
                ),
            )

        super().__init__(
            name="HeatPumpsStatusQuo",
            version="0.0.4",
            dependencies=dependencies,
            tasks=tasks,
        )

class HeatPumps2035(Dataset):
    """
    Class for desaggregation of heat pump capacities per MV grid district to
    individual buildings for the eGon2035 scenario.

    The heat pump capacity per MV grid district is disaggregated to buildings
    with individual heating based on the buildings' heat peak demand. The
    buildings are chosen randomly until the target capacity per MV grid
    district is reached. Buildings with PV rooftop have a higher probability
    of being assigned a heat pump. As the building's heat peak load is not
    previously determined, it is determined within this dataset as well.
    Further, as determining the heat peak load requires heat load profiles of
    the buildings to be set up, this task is also utilised to set up
    aggregated heat load profiles of all buildings with heat pumps within a
    grid as well as for all buildings with a gas boiler (i.e. all buildings
    with decentral heating system minus buildings with heat pump) needed in
    eTraGo.

    For more information see data documentation on :ref:`dec-heat-pumps-ref`.

    Heat pump capacity per building in the eGon100RE scenario is set up in a
    separate dataset, :py:class:`HeatPumps2050 <HeatPumps2050>`. One reason
    is that in the eGon100RE scenario the minimum required heat pump capacity
    per building can directly be determined using the peak heat demand per
    building determined in the dataset
    :py:class:`HeatPumpsPypsaEurSec <HeatPumpsPypsaEurSec>`, whereas peak
    heat demand data does not yet exist for the eGon2035 scenario. Another
    reason is that in the eGon100RE scenario all buildings with individual
    heating have a heat pump, whereas in the eGon2035 scenario buildings are
    randomly selected until the installed heat pump capacity per MV grid is
    met. All other buildings with individual heating but no heat pump are
    assigned a gas boiler.

    *Dependencies*
      * :py:class:`CtsDemandBuildings
        <egon.data.datasets.electricity_demand_timeseries.cts_buildings.CtsDemandBuildings>`
      * :py:class:`DistrictHeatingAreas
        <egon.data.datasets.district_heating_areas.DistrictHeatingAreas>`
      * :py:class:`HeatSupply <egon.data.datasets.heat_supply.HeatSupply>`
      * :py:class:`HeatTimeSeries
        <egon.data.datasets.heat_demand_timeseries.HeatTimeSeries>`
      * :py:class:`HeatPumpsPypsaEurSec
        <egon.data.datasets.heat_supply.individual_heating.HeatPumpsPypsaEurSec>`
      * :py:func:`pv_rooftop_to_buildings
        <egon.data.datasets.power_plants.pv_rooftop_buildings.pv_rooftop_to_buildings>`

    *Resulting tables*
      * :py:class:`demand.egon_hp_capacity_buildings
        <egon.data.datasets.heat_supply.individual_heating.EgonHpCapacityBuildings>`
        is created (if it doesn't yet exist) and filled
      * :py:class:`demand.egon_etrago_timeseries_individual_heating
        <egon.data.datasets.heat_supply.individual_heating.EgonEtragoTimeseriesIndividualHeating>`
        is created (if it doesn't yet exist) and filled
      * :py:class:`demand.egon_building_heat_peak_loads
        <egon.data.datasets.heat_supply.individual_heating.BuildingHeatPeakLoads>`
        is created (if it doesn't yet exist) and filled

    **What is the challenge?**

    The main challenge lies in the set up of heat demand profiles per
    building in :func:`aggregate_residential_and_cts_profiles()` as it takes
    a lot of time and, in grids with a high number of buildings, requires a
    lot of RAM. Both runtime and RAM usage needed to be improved several
    times. To speed up the process, tasks are set up to run in parallel. This
    currently leads to a lot of connections being opened and, at a certain
    point, to a runtime error due to too many open connections.

    **What are central assumptions during the data processing?**

    The central assumption for desaggregating the heat pump capacity to
    individual buildings is that heat pumps can be dimensioned using an
    approach from the network development plan that uses the building's peak
    heat demand and a fixed COP (see data documentation on
    :ref:`dec-heat-pumps-ref`).
    Another central assumption is that buildings with PV rooftop plants are
    more likely to have a heat pump than other buildings (see
    :func:`determine_buildings_with_hp_in_mv_grid()` for details).

    **Drawbacks and limitations of the data**

    The heat demand profiles used here to determine the heat peak load have
    very few very high peaks that lead to large heat pump capacities. This
    should be solved somehow. Cutting off the peak is not possible, as the
    time series of each building is not saved but generated on the fly. Also,
    just using smaller heat pumps would lead to infeasibilities in eDisGo.

    """

    #:
    name: str = "HeatPumps2035"
    #:
    version: str = "0.0.3"

    def __init__(self, dependencies):
        def dyn_parallel_tasks_2035():
            """Dynamically generate tasks.

            The goal is to speed up tasks by parallelising bulks of mvgds.

            The number of parallel tasks is defined via parameter
            `parallel_tasks` in the dataset config `datasets.yml`.

            Returns
            -------
            set of airflow.PythonOperators
                The tasks. Each element is of
                :func:`egon.data.datasets.heat_supply.individual_heating.
                determine_hp_cap_peak_load_mvgd_ts_2035`
            """
            parallel_tasks = config.datasets()["demand_timeseries_mvgd"].get(
                "parallel_tasks", 1
            )

            tasks = set()

            for i in range(parallel_tasks):
                tasks.add(
                    PythonOperator(
                        task_id=(
                            "individual_heating."
                            f"determine-hp-capacity-2035-"
                            f"mvgd-bulk{i}"
                        ),
                        python_callable=split_mvgds_into_bulks,
                        op_kwargs={
                            "n": i,
                            "max_n": parallel_tasks,
                            "func": determine_hp_cap_peak_load_mvgd_ts_2035,
                        },
                    )
                )
            return tasks

        if "eGon2035" in scenarios:
            tasks_HeatPumps2035 = (
                delete_heat_peak_loads_2035,
                delete_hp_capacity_2035,
                delete_mvgd_ts_2035,
                {*dyn_parallel_tasks_2035()},
            )
        else:
            tasks_HeatPumps2035 = (
                PythonOperator(
                    task_id="HeatPumps2035_skipped",
                    python_callable=skip_task,
                    op_kwargs={"scn": "eGon2035", "task": "HeatPumps2035"},
                ),
            )

        super().__init__(
            name=self.name,
            version=self.version,
            dependencies=dependencies,
            tasks=tasks_HeatPumps2035,
            validation={
                "data-quality": [
                    ArrayCardinalityValidation(
                        table="demand.egon_etrago_timeseries_individual_heating",
                        array_column="dist_aggregated_mw",
                        expected_length=8760,
                    ),
                ]
            },
            on_validation_failure="continue",
        )

class HeatPumps2050(Dataset):
    """
    Class for desaggregation of heat pump capacities per MV grid district to
    individual buildings for the eGon100RE scenario.

    The optimised heat pump capacity from the PyPSA-EUR run is disaggregated
    to all buildings with individual heating (as heat pumps are the only
    option for individual heating in the eGon100RE scenario) based on the
    buildings' heat peak demand. The heat peak demand per building does, in
    contrast to the
    :py:class:`HeatPumps2035 <egon.data.datasets.pypsaeursec.HeatPumps2035>`
    dataset, not need to be determined here, as it was already determined in
    the :py:class:`PypsaEurSec <egon.data.datasets.pypsaeursec.PypsaEurSec>`
    dataset.

    For more information see data documentation on :ref:`dec-heat-pumps-ref`.

    Heat pump capacity per building for the eGon2035 scenario is set up in a
    separate dataset, :py:class:`HeatPumps2035 <HeatPumps2035>`. See there
    for further information as to why.

    *Dependencies*
      * :py:class:`PypsaEurSec <egon.data.datasets.pypsaeursec.PypsaEurSec>`
      * :py:class:`HeatPumpsPypsaEurSec
        <egon.data.datasets.heat_supply.individual_heating.HeatPumpsPypsaEurSec>`
      * :py:class:`HeatSupply <egon.data.datasets.heat_supply.HeatSupply>`

    *Resulting tables*
      * :py:class:`demand.egon_hp_capacity_buildings
        <egon.data.datasets.heat_supply.individual_heating.EgonHpCapacityBuildings>`
        is created (if it doesn't yet exist) and filled

    **What are central assumptions during the data processing?**

    The central assumption for desaggregating the heat pump capacity to
    individual buildings is that heat pumps can be dimensioned using an
    approach from the network development plan that uses the building's peak
    heat demand and a fixed COP (see data documentation on
    :ref:`dec-heat-pumps-ref`).

    **Drawbacks and limitations of the data**

    The heat demand profiles used here to determine the heat peak load have
    very few very high peaks that lead to large heat pump capacities. This
    should be solved somehow. Cutting off the peak is not possible, as the
    time series of each building is not saved but generated on the fly. Also,
    just using smaller heat pumps would lead to infeasibilities in eDisGo.

    """

    #:
    name: str = "HeatPumps2050"
    #:
    version: str = "0.0.3"

    def __init__(self, dependencies):
        tasks_HeatPumps2050 = set()

        if "eGon100RE" in scenarios:
            tasks_HeatPumps2050 = (
                delete_hp_capacity_100RE,
                determine_hp_cap_buildings_eGon100RE,
            )
        else:
            tasks_HeatPumps2050 = (
                PythonOperator(
                    task_id="HeatPumps2050_skipped",
                    python_callable=skip_task,
                    op_kwargs={"scn": "eGon100RE", "task": "HeatPumps2050"},
                ),
            )

        super().__init__(
            name=self.name,
            version=self.version,
            dependencies=dependencies,
            tasks=tasks_HeatPumps2050,
        )

class BuildingHeatPeakLoads(Base):
    """
    Class definition of table demand.egon_building_heat_peak_loads.

    Table with peak heat demand of residential and CTS heat demand combined
    for each building.

    """

    __tablename__ = "egon_building_heat_peak_loads"
    __table_args__ = {"schema": "demand"}

    building_id = Column(Integer, primary_key=True)
    scenario = Column(String, primary_key=True)
    sector = Column(String, primary_key=True)
    peak_load_in_w = Column(REAL)

def skip_task(scn="", task=""):
    logger.info(
        f"{scn} is not in the list of scenarios. {task} dataset is skipped."
    )

    return


def adapt_numpy_float64(numpy_float64):
    return AsIs(numpy_float64)


def adapt_numpy_int64(numpy_int64):
    return AsIs(numpy_int64)

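# The two adapters above let psycopg2 serialise numpy scalars when writing
# to PostgreSQL. A minimal sketch of how they would be registered
# (registration is assumed to happen elsewhere in the full module):
#
#     register_adapter(np.float64, adapt_numpy_float64)
#     register_adapter(np.int64, adapt_numpy_int64)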
def cascade_per_technology(
    heat_per_mv,
    technologies,
    scenario,
    distribution_level,
    max_size_individual_chp=0.05,
):
    """Add plants for individual heat.

    Currently only on mv grid district level.

    Parameters
    ----------
    heat_per_mv : geopandas.geodataframe.GeoDataFrame
        MV grid districts including the heat demand
    technologies : pandas.DataFrame
        List of supply technologies and their parameters
    scenario : str
        Name of the scenario
    distribution_level : str
        Level on which technology targets are distributed. If set to
        'federal_states', heat pump targets are selected per federal state,
        otherwise a single national target is used.
    max_size_individual_chp : float
        Maximum capacity of an individual chp in MW

    Returns
    -------
    heat_per_mv : geopandas.geodataframe.GeoDataFrame
        MV grid districts which need additional individual heat supply
    technologies : pandas.DataFrame
        List of remaining supply technologies and their parameters
    append_df : pandas.DataFrame
        List of plants per mv grid for the selected technology

    """
    sources = config.datasets()["heat_supply"]["sources"]

    tech = technologies[technologies.priority == technologies.priority.max()]

    # Distribute heat pumps linear to remaining demand.
    if tech.index == "heat_pump":
        if distribution_level == "federal_states":
            # Select target values per federal state
            target = db.select_dataframe(
                f"""
                SELECT DISTINCT ON (gen) gen as state, capacity
                FROM {sources['scenario_capacities']['schema']}.
                {sources['scenario_capacities']['table']} a
                JOIN {sources['federal_states']['schema']}.
                {sources['federal_states']['table']} b
                ON a.nuts = b.nuts
                WHERE scenario_name = '{scenario}'
                AND carrier = 'residential_rural_heat_pump'
                """,
                index_col="state",
            )

            heat_per_mv["share"] = heat_per_mv.groupby(
                "state",
                group_keys=False,
            ).remaining_demand.apply(lambda grp: grp / grp.sum())

            append_df = (
                heat_per_mv["share"]
                .mul(target.capacity[heat_per_mv["state"]].values)
                .reset_index()
            )
        else:
            # Select target value for Germany
            target = db.select_dataframe(
                f"""
                SELECT SUM(capacity) AS capacity
                FROM {sources['scenario_capacities']['schema']}.
                {sources['scenario_capacities']['table']} a
                WHERE scenario_name = '{scenario}'
                AND carrier = 'rural_heat_pump'
                """
            )

            if not target.capacity[0]:
                target.capacity[0] = 0

            if (
                config.settings()["egon-data"]["--dataset-boundary"]
                == "Schleswig-Holstein"
            ):
                target.capacity[0] /= 16

            heat_per_mv["share"] = (
                heat_per_mv.remaining_demand
                / heat_per_mv.remaining_demand.sum()
            )

            append_df = (
                heat_per_mv["share"].mul(target.capacity[0]).reset_index()
            )

        append_df.rename(
            {"bus_id": "mv_grid_id", "share": "capacity"}, axis=1, inplace=True
        )

    elif (tech.index == "gas_boiler") & (scenario == "eGon2035"):
        append_df = pd.DataFrame(
            data={
                "capacity": heat_per_mv.remaining_demand.div(
                    tech.estimated_flh.values[0]
                ),
                "carrier": f"residential_rural_{tech.index}",
                "mv_grid_id": heat_per_mv.index,
                "scenario": scenario,
            }
        )

    elif tech.index in ("gas_boiler", "resistive_heater", "solar_thermal"):
        # Select target value for Germany
        target = db.select_dataframe(
            f"""
            SELECT SUM(capacity) AS capacity
            FROM {sources['scenario_capacities']['schema']}.
            {sources['scenario_capacities']['table']} a
            WHERE scenario_name = '{scenario}'
            AND carrier = 'rural_{tech.index[0]}'
            """
        )

        if (
            config.settings()["egon-data"]["--dataset-boundary"]
            == "Schleswig-Holstein"
        ):
            target.capacity[0] /= 16

        heat_per_mv["share"] = (
            heat_per_mv.remaining_demand / heat_per_mv.remaining_demand.sum()
        )

        append_df = heat_per_mv["share"].mul(target.capacity[0]).reset_index()

        append_df.rename(
            {"bus_id": "mv_grid_id", "share": "capacity"}, axis=1, inplace=True
        )

    else:
        append_df = pd.DataFrame(
            data={
                "capacity": heat_per_mv.remaining_demand.div(
                    tech.estimated_flh.values[0]
                ),
                "carrier": f"residential_rural_{tech.index}",
                "mv_grid_id": heat_per_mv.index,
                "scenario": scenario,
            }
        )

    if append_df.size > 0:
        append_df["carrier"] = tech.index[0]
        heat_per_mv.loc[
            append_df.mv_grid_id, "remaining_demand"
        ] -= append_df.set_index("mv_grid_id").capacity.mul(
            tech.estimated_flh.values[0]
        )

    heat_per_mv = heat_per_mv[heat_per_mv.remaining_demand >= 0]

    technologies = technologies.drop(tech.index)

    return heat_per_mv, technologies, append_df

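# Note on the cascade mechanics implemented above: each call of
# cascade_per_technology() picks the technology with the highest remaining
# priority, assigns capacities to the MV grid districts, subtracts the heat
# demand served by it from the remaining demand and drops the technology,
# so that repeated calls walk down the priority list until either no demand
# or no technology is left.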
def cascade_heat_supply_indiv(scenario, distribution_level, plotting=True):
    """Assigns supply strategy for individual heating in four steps.

    1. All small scale CHP are connected.
    2. If the supply can not meet the heat demand, solar thermal collectors
       are attached. This is not implemented yet, since individual
       solar thermal plants are not considered in the eGon2035 scenario.
    3. If this is not suitable, the mv grid is also supplied by heat pumps.
    4. The last option are individual gas boilers.

    Parameters
    ----------
    scenario : str
        Name of scenario
    distribution_level : str
        Level on which technology targets are distributed, passed on to
        :func:`cascade_per_technology`.
    plotting : bool, optional
        Choose if individual heating supply is plotted. The default is True.

    Returns
    -------
    resulting_capacities : pandas.DataFrame
        List of plants per mv grid

    """

    sources = config.datasets()["heat_supply"]["sources"]

    # Select residential heat demand per mv grid district and federal state
    heat_per_mv = db.select_geodataframe(
        f"""
        SELECT d.bus_id as bus_id, SUM(demand) as demand,
        c.vg250_lan as state, d.geom
        FROM {sources['heat_demand']['schema']}.
        {sources['heat_demand']['table']} a
        JOIN {sources['map_zensus_grid']['schema']}.
        {sources['map_zensus_grid']['table']} b
        ON a.zensus_population_id = b.zensus_population_id
        JOIN {sources['map_vg250_grid']['schema']}.
        {sources['map_vg250_grid']['table']} c
        ON b.bus_id = c.bus_id
        JOIN {sources['mv_grids']['schema']}.
        {sources['mv_grids']['table']} d
        ON d.bus_id = c.bus_id
        WHERE scenario = '{scenario}'
        AND a.zensus_population_id NOT IN (
            SELECT zensus_population_id
            FROM {sources['map_dh']['schema']}.{sources['map_dh']['table']}
            WHERE scenario = '{scenario}')
        GROUP BY d.bus_id, vg250_lan, geom
        """,
        index_col="bus_id",
    )

    # Store geometry of mv grid
    geom_mv = heat_per_mv.geom.centroid.copy()

    # Initialize Dataframe for results
    resulting_capacities = pd.DataFrame(
        columns=["mv_grid_id", "carrier", "capacity"]
    )

    # Set technology data according to
    # http://www.wbzu.de/seminare/infopool/infopool-bhkw
    if scenario == "eGon2035":
        technologies = pd.DataFrame(
            index=["heat_pump", "gas_boiler"],
            columns=["estimated_flh", "priority"],
            data={"estimated_flh": [4000, 8000], "priority": [2, 1]},
        )
    elif scenario == "eGon100RE":
        technologies = pd.DataFrame(
            index=[
                "heat_pump",
                "resistive_heater",
                "solar_thermal",
                "gas_boiler",
                "oil_boiler",
            ],
            columns=["estimated_flh", "priority"],
            data={
                "estimated_flh": [4000, 2000, 2000, 8000, 8000],
                "priority": [5, 4, 3, 2, 1],
            },
        )
    elif "status" in scenario:
        technologies = pd.DataFrame(
            index=["heat_pump"],
            columns=["estimated_flh", "priority"],
            data={"estimated_flh": [4000], "priority": [1]},
        )
    else:
        raise ValueError(f"{scenario=} is not valid.")

    # In the beginning, the remaining demand equals demand
    heat_per_mv["remaining_demand"] = heat_per_mv["demand"]

    # Connect new technologies, if there is still heat demand left
    while (len(technologies) > 0) and (len(heat_per_mv) > 0):
        # Attach new supply technology
        heat_per_mv, technologies, append_df = cascade_per_technology(
            heat_per_mv, technologies, scenario, distribution_level
        )
        # Collect resulting capacities
        resulting_capacities = pd.concat(
            [resulting_capacities, append_df], ignore_index=True
        )

    if plotting:
        plot_heat_supply(resulting_capacities)

    return gpd.GeoDataFrame(
        resulting_capacities,
        geometry=geom_mv[resulting_capacities.mv_grid_id].values,
    )

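# Minimal usage sketch for the cascade above (scenario name and
# distribution level are illustrative; the function returns a GeoDataFrame
# of plants per MV grid):
#
#     capacities = cascade_heat_supply_indiv(
#         "eGon2035", distribution_level="federal_states", plotting=False
#     )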
def get_peta_demand(mvgd, scenario):
    """
    Retrieve annual peta heat demand for residential buildings for either
    eGon2035 or eGon100RE scenario.

    Parameters
    ----------
    mvgd : int
        MV grid ID.
    scenario : str
        Possible options are eGon2035 or eGon100RE

    Returns
    -------
    df_peta_demand : pd.DataFrame
        Annual residential heat demand per building and scenario. Columns of
        the dataframe are zensus_population_id and demand.

    """

    with db.session_scope() as session:
        query = (
            session.query(
                MapZensusGridDistricts.zensus_population_id,
                EgonPetaHeat.demand,
            )
            .filter(MapZensusGridDistricts.bus_id == mvgd)
            .filter(
                MapZensusGridDistricts.zensus_population_id
                == EgonPetaHeat.zensus_population_id
            )
            .filter(
                EgonPetaHeat.sector == "residential",
                EgonPetaHeat.scenario == scenario,
            )
        )

        df_peta_demand = pd.read_sql(
            query.statement, query.session.bind, index_col=None
        )

    return df_peta_demand

def get_residential_heat_profile_ids(mvgd):
    """
    Retrieve 365 daily heat profile IDs per residential building and
    selected mvgd.

    Parameters
    ----------
    mvgd : int
        ID of MVGD

    Returns
    -------
    df_profiles_ids : pd.DataFrame
        Residential daily heat profile IDs per building. Columns of the
        dataframe are zensus_population_id, building_id,
        selected_idp_profiles, buildings and day_of_year.

    """
    with db.session_scope() as session:
        query = (
            session.query(
                MapZensusGridDistricts.zensus_population_id,
                EgonHeatTimeseries.building_id,
                EgonHeatTimeseries.selected_idp_profiles,
            )
            .filter(MapZensusGridDistricts.bus_id == mvgd)
            .filter(
                MapZensusGridDistricts.zensus_population_id
                == EgonHeatTimeseries.zensus_population_id
            )
        )

        df_profiles_ids = pd.read_sql(
            query.statement, query.session.bind, index_col=None
        )
    # Add building count per cell
    df_profiles_ids = pd.merge(
        left=df_profiles_ids,
        right=df_profiles_ids.groupby("zensus_population_id")["building_id"]
        .count()
        .rename("buildings"),
        left_on="zensus_population_id",
        right_index=True,
    )

    # unnest array of ids per building
    df_profiles_ids = df_profiles_ids.explode("selected_idp_profiles")
    # add day of year column by order of list
    df_profiles_ids["day_of_year"] = (
        df_profiles_ids.groupby("building_id").cumcount() + 1
    )
    return df_profiles_ids

def get_daily_profiles(profile_ids):
    """
    Retrieve residential daily heat profiles for given profile IDs.

    Parameters
    ----------
    profile_ids : list(int)
        Daily heat profile IDs.

    Returns
    -------
    df_profiles : pd.DataFrame
        Residential daily heat profiles. Columns of the dataframe are idp,
        house, temperature_class and hour.

    """

    saio.register_schema("demand", db.engine())
    from saio.demand import egon_heat_idp_pool

    with db.session_scope() as session:
        query = session.query(egon_heat_idp_pool).filter(
            egon_heat_idp_pool.index.in_(profile_ids)
        )

        df_profiles = pd.read_sql(
            query.statement, query.session.bind, index_col="index"
        )

    # unnest array of profile values per id
    df_profiles = df_profiles.explode("idp")
    # Add column for hour of day
    df_profiles["hour"] = df_profiles.groupby(axis=0, level=0).cumcount() + 1

    return df_profiles

def get_daily_demand_share(mvgd):
    """
    Retrieve daily annual demand share per census cell.

    Parameters
    ----------
    mvgd : int
        MVGD id

    Returns
    -------
    df_daily_demand_share : pd.DataFrame
        Daily annual demand share per census cell. Columns of the dataframe
        are zensus_population_id, day_of_year and daily_demand_share.

    """

    with db.session_scope() as session:
        query = session.query(
            MapZensusGridDistricts.zensus_population_id,
            EgonDailyHeatDemandPerClimateZone.day_of_year,
            EgonDailyHeatDemandPerClimateZone.daily_demand_share,
        ).filter(
            EgonMapZensusClimateZones.climate_zone
            == EgonDailyHeatDemandPerClimateZone.climate_zone,
            MapZensusGridDistricts.zensus_population_id
            == EgonMapZensusClimateZones.zensus_population_id,
            MapZensusGridDistricts.bus_id == mvgd,
        )

        df_daily_demand_share = pd.read_sql(
            query.statement, query.session.bind, index_col=None
        )
    return df_daily_demand_share

def calc_residential_heat_profiles_per_mvgd(mvgd, scenario):
    """
    Gets residential heat profiles per building in MV grid for either
    eGon2035 or eGon100RE scenario.

    Parameters
    ----------
    mvgd : int
        MV grid ID.
    scenario : str
        Possible options are eGon2035 or eGon100RE.

    Returns
    -------
    pd.DataFrame
        Heat demand profiles of buildings. Columns are:
            * zensus_population_id : int
                Zensus cell ID building is in.
            * building_id : int
                ID of building.
            * day_of_year : int
                Day of the year (1 - 365).
            * hour : int
                Hour of the day (1 - 24).
            * demand_ts : float
                Building's residential heat demand in MW, for specified hour
                of the year (specified through columns `day_of_year` and
                `hour`).
    """

    columns = [
        "zensus_population_id",
        "building_id",
        "day_of_year",
        "hour",
        "demand_ts",
    ]

    df_peta_demand = get_peta_demand(mvgd, scenario)
    df_peta_demand = reduce_mem_usage(df_peta_demand)

    # TODO maybe return empty dataframe
    if df_peta_demand.empty:
        logger.info(f"No demand for MVGD: {mvgd}")
        return pd.DataFrame(columns=columns)

    df_profiles_ids = get_residential_heat_profile_ids(mvgd)

    if df_profiles_ids.empty:
        logger.info(f"No profiles for MVGD: {mvgd}")
        return pd.DataFrame(columns=columns)

    df_profiles = get_daily_profiles(
        df_profiles_ids["selected_idp_profiles"].unique()
    )

    df_daily_demand_share = get_daily_demand_share(mvgd)

    # Merge profile ids to peta demand by zensus_population_id
    df_profile_merge = pd.merge(
        left=df_peta_demand, right=df_profiles_ids, on="zensus_population_id"
    )

    df_profile_merge.demand = df_profile_merge.demand.div(
        df_profile_merge.buildings
    )
    df_profile_merge.drop("buildings", axis="columns", inplace=True)

    # Merge daily demand to daily profile ids by zensus_population_id and day
    df_profile_merge = pd.merge(
        left=df_profile_merge,
        right=df_daily_demand_share,
        on=["zensus_population_id", "day_of_year"],
    )
    df_profile_merge.demand = df_profile_merge.demand.mul(
        df_profile_merge.daily_demand_share
    )
    df_profile_merge.drop("daily_demand_share", axis="columns", inplace=True)
    df_profile_merge = reduce_mem_usage(df_profile_merge)

    # Merge daily profiles by profile id
    df_profile_merge = pd.merge(
        left=df_profile_merge,
        right=df_profiles[["idp", "hour"]],
        left_on="selected_idp_profiles",
        right_index=True,
    )
    df_profile_merge = reduce_mem_usage(df_profile_merge)

    df_profile_merge.demand = df_profile_merge.demand.mul(
        df_profile_merge.idp.astype(float)
    )
    df_profile_merge.drop("idp", axis="columns", inplace=True)

    df_profile_merge.rename(
        {"demand": "demand_ts"}, axis="columns", inplace=True
    )

    df_profile_merge = reduce_mem_usage(df_profile_merge)

    return df_profile_merge.loc[:, columns]

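# The chain of merges in calc_residential_heat_profiles_per_mvgd() above
# amounts to, per building b, day d and hour h (a summary of the code, not
# additional functionality):
#
#     demand_ts[b, d, h] = annual_cell_demand / n_buildings_in_cell
#                          * daily_demand_share[cell, d]
#                          * idp[selected_profile[b, d], h]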
def plot_heat_supply(resulting_capacities):
    from matplotlib import pyplot as plt

    mv_grids = db.select_geodataframe(
        """
        SELECT * FROM grid.egon_mv_grid_district
        """,
        index_col="bus_id",
    )

    for c in ["CHP", "heat_pump"]:
        mv_grids[c] = (
            resulting_capacities[resulting_capacities.carrier == c]
            .set_index("mv_grid_id")
            .capacity
        )

        fig, ax = plt.subplots(1, 1)
        mv_grids.boundary.plot(linewidth=0.2, ax=ax, color="black")
        mv_grids.plot(
            ax=ax,
            column=c,
            cmap="magma_r",
            legend=True,
            legend_kwds={
                "label": f"Installed {c} in MW",
                "orientation": "vertical",
            },
        )
        plt.savefig(f"plots/individual_heat_supply_{c}.png", dpi=300)

def get_zensus_cells_with_decentral_heat_demand_in_mv_grid(
    scenario, mv_grid_id
):
    """
    Returns zensus cell IDs with decentral heating systems in given MV grid.

    As cells with district heating differ between scenarios, this is also
    depending on the scenario.

    Parameters
    ----------
    scenario : str
        Name of scenario. Can be either "eGon2035" or "eGon100RE".
    mv_grid_id : int
        ID of MV grid.

    Returns
    -------
    pd.Index(int)
        Zensus cell IDs (as int) of buildings with decentral heating systems
        in given MV grid. Type is pandas Index to avoid errors later on when
        it is used in a query.

    """

    # get zensus cells in grid
    zensus_population_ids = db.select_dataframe(
        f"""
        SELECT zensus_population_id
        FROM boundaries.egon_map_zensus_grid_districts
        WHERE bus_id = {mv_grid_id}
        """,
        index_col=None,
    ).zensus_population_id.values

    # maybe use adapter
    # convert to pd.Index (otherwise type is np.int64, which will for some
    # reason throw an error when used in a query)
    zensus_population_ids = pd.Index(zensus_population_ids)

    # get zensus cells with district heating
    with db.session_scope() as session:
        query = session.query(
            MapZensusDistrictHeatingAreas.zensus_population_id,
        ).filter(
            MapZensusDistrictHeatingAreas.scenario == scenario,
            MapZensusDistrictHeatingAreas.zensus_population_id.in_(
                zensus_population_ids
            ),
        )

        cells_with_dh = pd.read_sql(
            query.statement, query.session.bind, index_col=None
        ).zensus_population_id.values

    # remove zensus cells with district heating
    zensus_population_ids = zensus_population_ids.drop(
        cells_with_dh, errors="ignore"
    )
    return pd.Index(zensus_population_ids)

def get_residential_buildings_with_decentral_heat_demand_in_mv_grid(
    scenario, mv_grid_id
):
    """
    Returns building IDs of buildings with decentral residential heat demand
    in given MV grid.

    As cells with district heating differ between scenarios, this is also
    depending on the scenario.

    Parameters
    ----------
    scenario : str
        Name of scenario. Can be either "eGon2035" or "eGon100RE".
    mv_grid_id : int
        ID of MV grid.

    Returns
    -------
    pd.Index(int)
        Building IDs (as int) of buildings with decentral heating system in
        given MV grid. Type is pandas Index to avoid errors later on when it
        is used in a query.

    """
    # get zensus cells with decentral heating
    zensus_population_ids = (
        get_zensus_cells_with_decentral_heat_demand_in_mv_grid(
            scenario, mv_grid_id
        )
    )

    # get buildings with decentral heat demand
    saio.register_schema("demand", engine)
    from saio.demand import egon_heat_timeseries_selected_profiles

    with db.session_scope() as session:
        query = session.query(
            egon_heat_timeseries_selected_profiles.building_id,
        ).filter(
            egon_heat_timeseries_selected_profiles.zensus_population_id.in_(
                zensus_population_ids
            )
        )

        buildings_with_heat_demand = pd.read_sql(
            query.statement, query.session.bind, index_col=None
        ).building_id.values

    return pd.Index(buildings_with_heat_demand)

def get_cts_buildings_with_decentral_heat_demand_in_mv_grid(
    scenario, mv_grid_id
):
    """
    Returns building IDs of buildings with decentral CTS heat demand in
    given MV grid.

    As cells with district heating differ between scenarios, this is also
    depending on the scenario.

    Parameters
    ----------
    scenario : str
        Name of scenario. Can be either "eGon2035" or "eGon100RE".
    mv_grid_id : int
        ID of MV grid.

    Returns
    -------
    pd.Index(int)
        Building IDs (as int) of buildings with decentral heating system in
        given MV grid. Type is pandas Index to avoid errors later on when it
        is used in a query.

    """

    # get zensus cells with decentral heating
    zensus_population_ids = (
        get_zensus_cells_with_decentral_heat_demand_in_mv_grid(
            scenario, mv_grid_id
        )
    )

    # get buildings with decentral heat demand
    with db.session_scope() as session:
        query = session.query(EgonMapZensusMvgdBuildings.building_id).filter(
            EgonMapZensusMvgdBuildings.sector == "cts",
            EgonMapZensusMvgdBuildings.zensus_population_id.in_(
                zensus_population_ids
            ),
        )

        buildings_with_heat_demand = pd.read_sql(
            query.statement, query.session.bind, index_col=None
        ).building_id.values

    return pd.Index(buildings_with_heat_demand)

def get_buildings_with_decentral_heat_demand_in_mv_grid(mvgd, scenario):
    """
    Returns building IDs of buildings with decentral heat demand in
    given MV grid.

    As cells with district heating differ between scenarios, this is also
    depending on the scenario. CTS and residential buildings have to be
    retrieved separately as some residential buildings only have electricity
    but no heat demand. This does not occur in CTS.

    Parameters
    ----------
    mvgd : int
        ID of MV grid.
    scenario : str
        Name of scenario. Can be either "eGon2035" or "eGon100RE".

    Returns
    -------
    pd.Index(int)
        Building IDs (as int) of buildings with decentral heating system in
        given MV grid. Type is pandas Index to avoid errors later on when it
        is used in a query.

    """
    # get residential buildings with decentral heating systems
    buildings_decentral_heating_res = (
        get_residential_buildings_with_decentral_heat_demand_in_mv_grid(
            scenario, mvgd
        )
    )

    # get CTS buildings with decentral heating systems
    buildings_decentral_heating_cts = (
        get_cts_buildings_with_decentral_heat_demand_in_mv_grid(scenario, mvgd)
    )

    # merge residential and CTS buildings
    buildings_decentral_heating = buildings_decentral_heating_res.union(
        buildings_decentral_heating_cts
    ).unique()

    return buildings_decentral_heating

def get_total_heat_pump_capacity_of_mv_grid(scenario, mv_grid_id):
    """
    Returns total heat pump capacity per grid that was previously defined
    (by NEP or pypsa-eur-sec).

    Parameters
    ----------
    scenario : str
        Name of scenario. Can be either "eGon2035" or "eGon100RE".
    mv_grid_id : int
        ID of MV grid.

    Returns
    -------
    float
        Total heat pump capacity in MW in given MV grid.

    """
    from egon.data.datasets.heat_supply import EgonIndividualHeatingSupply

    with db.session_scope() as session:
        query = (
            session.query(
                EgonIndividualHeatingSupply.mv_grid_id,
                EgonIndividualHeatingSupply.capacity,
            )
            .filter(EgonIndividualHeatingSupply.scenario == scenario)
            .filter(EgonIndividualHeatingSupply.carrier == "heat_pump")
            .filter(EgonIndividualHeatingSupply.mv_grid_id == mv_grid_id)
        )

        hp_cap_mv_grid = pd.read_sql(
            query.statement, query.session.bind, index_col="mv_grid_id"
        )
    if hp_cap_mv_grid.empty:
        return 0.0
    else:
        return hp_cap_mv_grid.capacity.values[0]

| 1424 | def get_heat_peak_demand_per_building(scenario, building_ids): |
||
| 1425 | """""" |
||
| 1426 | |||
| 1427 | with db.session_scope() as session: |
||
| 1428 | query = ( |
||
| 1429 | session.query( |
||
| 1430 | BuildingHeatPeakLoads.building_id, |
||
| 1431 | BuildingHeatPeakLoads.peak_load_in_w, |
||
| 1432 | ) |
||
| 1433 | .filter(BuildingHeatPeakLoads.scenario == scenario) |
||
| 1434 | .filter(BuildingHeatPeakLoads.building_id.in_(building_ids)) |
||
| 1435 | ) |
||
| 1436 | |||
| 1437 | df_heat_peak_demand = pd.read_sql( |
||
| 1438 | query.statement, query.session.bind, index_col=None |
||
| 1439 | ) |
||
| 1440 | |||
| 1441 | # TODO remove check |
||
| 1442 | if df_heat_peak_demand.duplicated("building_id").any(): |
||
| 1443 | raise ValueError("Duplicate building_id") |
||
| 1444 | |||
| 1445 | # convert to series and from W to MW |
||
| 1446 | df_heat_peak_demand = ( |
||
| 1447 | df_heat_peak_demand.set_index("building_id").loc[:, "peak_load_in_w"] |
||
| 1448 | * 1e-6 |
||
| 1449 | ) |
||
| 1450 | return df_heat_peak_demand |
||
| 1451 | |||
| 1452 | |||
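
Peak loads are stored in the database in W (column `peak_load_in_w`), while all capacity arithmetic downstream works in MW, hence the division by 1e6 above. A round-trip sketch with made-up values:

```python
import pandas as pd

# Hypothetical stored peak loads in W, keyed by building_id.
peak_load_w = pd.Series({100: 8_500.0, 101: 12_000.0})

peak_load_mw = peak_load_w * 1e-6                # W -> MW for capacity calculations
assert (peak_load_mw * 1e6).equals(peak_load_w)  # MW -> W mirrors export_to_db()
```
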
| 1453 | def determine_minimum_hp_capacity_per_building( |
||
| 1454 | peak_heat_demand, flexibility_factor=24 / 18, cop=1.7 |
||
| 1455 | ): |
||
| 1456 | """ |
||
| 1457 | Determines minimum required heat pump capacity. |
||
| 1458 | |||
| 1459 | Parameters |
||
| 1460 | ---------- |
||
| 1461 | peak_heat_demand : pd.Series |
||
| 1462 | Series with peak heat demand per building in MW. Index contains the |
||
| 1463 | building ID. |
||
| 1464 | flexibility_factor : float |
||
| 1465 | Factor to overdimension the heat pump to allow for some flexible |
||
| 1466 | dispatch in times of high heat demand. Per default, a factor of 24/18 |
||
| 1467 | is used, i.e. the heat pump is sized such that it can supply the daily heat demand within 18 out of 24 hours. |
||
| | cop : float |
||
| | Coefficient of performance of the heat pump. Default: 1.7. |
||
| 1468 | |||
| 1469 | Returns |
||
| 1470 | ------- |
||
| 1471 | pd.Series |
||
| 1472 | Pandas series with minimum required heat pump capacity per building in |
||
| 1473 | MW. |
||
| 1474 | |||
| 1475 | """ |
||
| 1476 | return peak_heat_demand * flexibility_factor / cop |
||
| 1477 | |||
| 1478 | |||
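
With the defaults, a building with a 10 kW (0.01 MW) peak heat demand is assigned a minimum capacity of 0.01 · (24/18) / 1.7 ≈ 0.0078 MW, i.e. about 7.8 kW. As a quick check with toy values:

```python
import pandas as pd

peak_heat_demand = pd.Series({1: 0.010, 2: 0.005})  # MW, invented values

min_hp_cap = peak_heat_demand * (24 / 18) / 1.7
print(min_hp_cap.round(4).to_dict())  # {1: 0.0078, 2: 0.0039}
```
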
| 1479 | def determine_buildings_with_hp_in_mv_grid( |
||
| 1480 | hp_cap_mv_grid, min_hp_cap_per_building |
||
| 1481 | ): |
||
| 1482 | """ |
||
| 1483 | Distributes given total heat pump capacity to buildings based on their peak |
||
| 1484 | heat demand. |
||
| 1485 | |||
| 1486 | Parameters |
||
| 1487 | ----------- |
||
| 1488 | hp_cap_mv_grid : float |
||
| 1489 | Total heat pump capacity in MW in given MV grid. |
||
| 1490 | min_hp_cap_per_building : pd.Series |
||
| 1491 | Pandas series with minimum required heat pump capacity per building |
||
| 1492 | in MW. |
||
| 1493 | |||
| 1494 | Returns |
||
| 1495 | ------- |
||
| 1496 | pd.Index(int) |
||
| 1497 | Building IDs (as int) of buildings to get heat demand time series for. |
||
| 1498 | |||
| 1499 | """ |
||
| 1500 | building_ids = min_hp_cap_per_building.index |
||
| 1501 | |||
| 1502 | # get buildings with PV to give them a higher priority when selecting |
||
| 1503 | # buildings a heat pump will be allocated to |
||
| 1504 | saio.register_schema("supply", engine) |
||
| 1505 | from saio.supply import egon_power_plants_pv_roof_building |
||
| 1506 | |||
| 1507 | with db.session_scope() as session: |
||
| 1508 | query = session.query( |
||
| 1509 | egon_power_plants_pv_roof_building.building_id |
||
| 1510 | ).filter( |
||
| 1511 | egon_power_plants_pv_roof_building.building_id.in_(building_ids), |
||
| 1512 | egon_power_plants_pv_roof_building.scenario == "eGon2035", |
||
| 1513 | ) |
||
| 1514 | |||
| 1515 | buildings_with_pv = pd.read_sql( |
||
| 1516 | query.statement, query.session.bind, index_col=None |
||
| 1517 | ).building_id.values |
||
| 1518 | # set different weights for buildings with PV and without PV |
||
| 1519 | weight_with_pv = 1.5 |
||
| 1520 | weight_without_pv = 1.0 |
||
| 1521 | weights = pd.concat( |
||
| 1522 | [ |
||
| 1523 | pd.DataFrame( |
||
| 1524 | {"weight": weight_without_pv}, |
||
| 1525 | index=building_ids.drop(buildings_with_pv, errors="ignore"), |
||
| 1526 | ), |
||
| 1527 | pd.DataFrame({"weight": weight_with_pv}, index=buildings_with_pv), |
||
| 1528 | ] |
||
| 1529 | ) |
||
| 1530 | # normalise weights (probability needs to add up to 1) |
||
| 1531 | weights.weight = weights.weight / weights.weight.sum() |
||
| 1532 | |||
| 1533 | # get random order at which buildings are chosen |
||
| 1534 | np.random.seed(db.credentials()["--random-seed"]) |
||
| 1535 | buildings_with_hp_order = np.random.choice( |
||
| 1536 | weights.index, |
||
| 1537 | size=len(weights), |
||
| 1538 | replace=False, |
||
| 1539 | p=weights.weight.values, |
||
| 1540 | ) |
||
| 1541 | |||
| 1542 | # select buildings until HP capacity in MV grid is reached (some rest |
||
| 1543 | # capacity will remain) |
||
| 1544 | hp_cumsum = min_hp_cap_per_building.loc[buildings_with_hp_order].cumsum() |
||
| 1545 | buildings_with_hp = hp_cumsum[hp_cumsum <= hp_cap_mv_grid].index |
||
| 1546 | |||
| 1547 | # choose random heat pumps until remaining heat pumps are larger than |
||
| 1548 | # remaining heat pump capacity |
||
| 1549 | remaining_hp_cap = ( |
||
| 1550 | hp_cap_mv_grid - min_hp_cap_per_building.loc[buildings_with_hp].sum() |
||
| 1551 | ) |
||
| 1552 | min_cap_buildings_wo_hp = min_hp_cap_per_building.loc[ |
||
| 1553 | building_ids.drop(buildings_with_hp) |
||
| 1554 | ] |
||
| 1555 | possible_buildings = min_cap_buildings_wo_hp[ |
||
| 1556 | min_cap_buildings_wo_hp <= remaining_hp_cap |
||
| 1557 | ].index |
||
| 1558 | while len(possible_buildings) > 0: |
||
| 1559 | random.seed(db.credentials()["--random-seed"]) |
||
| 1560 | new_hp_building = random.choice(possible_buildings) |
||
| 1561 | # add new building to building with HP |
||
| 1562 | buildings_with_hp = buildings_with_hp.union( |
||
| 1563 | pd.Index([new_hp_building]) |
||
| 1564 | ) |
||
| 1565 | # determine if there are still possible buildings |
||
| 1566 | remaining_hp_cap = ( |
||
| 1567 | hp_cap_mv_grid |
||
| 1568 | - min_hp_cap_per_building.loc[buildings_with_hp].sum() |
||
| 1569 | ) |
||
| 1570 | min_cap_buildings_wo_hp = min_hp_cap_per_building.loc[ |
||
| 1571 | building_ids.drop(buildings_with_hp) |
||
| 1572 | ] |
||
| 1573 | possible_buildings = min_cap_buildings_wo_hp[ |
||
| 1574 | min_cap_buildings_wo_hp <= remaining_hp_cap |
||
| 1575 | ].index |
||
| 1576 | |||
| 1577 | return buildings_with_hp |
||
| 1578 | |||
| 1579 | |||
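
In condensed form, the selection above is a weighted random permutation followed by a cumulative-sum cutoff; the while loop afterwards merely tops up with buildings that still fit into the leftover capacity. A self-contained sketch with invented weights and capacities:

```python
import numpy as np
import pandas as pd

np.random.seed(42)  # stand-in for db.credentials()["--random-seed"]

min_cap = pd.Series({1: 0.008, 2: 0.010, 3: 0.006, 4: 0.012})  # MW per building
weights = pd.Series({1: 1.5, 2: 1.0, 3: 1.5, 4: 1.0})  # PV buildings weighted higher
p = (weights / weights.sum()).values  # probabilities must sum to 1

order = np.random.choice(min_cap.index, size=len(min_cap), replace=False, p=p)
cumsum = min_cap.loc[order].cumsum()
buildings_with_hp = cumsum[cumsum <= 0.020].index  # 0.020 MW grid total
print(list(buildings_with_hp))
```
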
| 1580 | def desaggregate_hp_capacity(min_hp_cap_per_building, hp_cap_mv_grid): |
||
| 1581 | """ |
||
| 1582 | Desaggregates the required total heat pump capacity to buildings. |
||
| 1583 | |||
| 1584 | All buildings are previously assigned a minimum required heat pump |
||
| 1585 | capacity. If the total heat pump capacity exceeds this, larger heat pumps |
||
| 1586 | are assigned. |
||
| 1587 | |||
| 1588 | Parameters |
||
| 1589 | ------------ |
||
| 1590 | min_hp_cap_per_building : pd.Series |
||
| 1591 | Pandas series with minimum required heat pump capacity per building |
||
| 1592 | in MW. |
||
| 1593 | hp_cap_mv_grid : float |
||
| 1594 | Total heat pump capacity in MW in given MV grid. |
||
| 1595 | |||
| 1596 | Returns |
||
| 1597 | -------- |
||
| 1598 | pd.Series |
||
| 1599 | Pandas series with heat pump capacity per building in MW. |
||
| 1600 | |||
| 1601 | """ |
||
| 1602 | # distribute remaining capacity to all buildings with HP depending on |
||
| 1603 | # installed HP capacity |
||
| 1604 | |||
| 1605 | allocated_cap = min_hp_cap_per_building.sum() |
||
| 1606 | remaining_cap = hp_cap_mv_grid - allocated_cap |
||
| 1607 | |||
| 1608 | fac = remaining_cap / allocated_cap |
||
| 1609 | hp_cap_per_building = ( |
||
| 1610 | min_hp_cap_per_building * fac + min_hp_cap_per_building |
||
| 1611 | ) |
||
| 1612 | hp_cap_per_building.index.name = "building_id" |
||
| 1613 | |||
| 1614 | return hp_cap_per_building |
||
| 1615 | |||
| 1616 | |||
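
The arithmetic above scales every minimum capacity by the same factor, which is equivalent to multiplying by `hp_cap_mv_grid / allocated_cap`, so the building capacities always sum to the grid total. With invented numbers:

```python
import pandas as pd

min_hp_cap = pd.Series({1: 2.0, 2: 3.0})  # MW, minimum per building
hp_cap_mv_grid = 10.0                     # MW, total to distribute

fac = (hp_cap_mv_grid - min_hp_cap.sum()) / min_hp_cap.sum()
hp_cap = min_hp_cap * fac + min_hp_cap  # == min_hp_cap * hp_cap_mv_grid / min_hp_cap.sum()

print(hp_cap.to_dict())  # {1: 4.0, 2: 6.0}
assert hp_cap.sum() == hp_cap_mv_grid
```
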
| 1617 | def determine_min_hp_cap_buildings_pypsa_eur_sec( |
||
| 1618 | peak_heat_demand, building_ids |
||
| 1619 | ): |
||
| 1620 | """ |
||
| 1621 | Determines minimum required HP capacity in MV grid in MW as input for |
||
| 1622 | pypsa-eur-sec. |
||
| 1623 | |||
| 1624 | Parameters |
||
| 1625 | ---------- |
||
| 1626 | peak_heat_demand : pd.Series |
||
| 1627 | Series with peak heat demand per building in MW. Index contains the |
||
| 1628 | building ID. |
||
| 1629 | building_ids : pd.Index(int) |
||
| 1630 | Building IDs (as int) of buildings with decentral heating system in |
||
| 1631 | given MV grid. |
||
| 1632 | |||
| 1633 | Returns |
||
| 1634 | -------- |
||
| 1635 | float |
||
| 1636 | Minimum required HP capacity in MV grid in MW. |
||
| 1637 | |||
| 1638 | """ |
||
| 1639 | if len(building_ids) > 0: |
||
| 1640 | peak_heat_demand = peak_heat_demand.loc[building_ids] |
||
| 1641 | # determine minimum required heat pump capacity per building |
||
| 1642 | min_hp_cap_buildings = determine_minimum_hp_capacity_per_building( |
||
| 1643 | peak_heat_demand |
||
| 1644 | ) |
||
| 1645 | return min_hp_cap_buildings.sum() |
||
| 1646 | else: |
||
| 1647 | return 0.0 |
||
| 1648 | |||
| 1649 | |||
| 1650 | def determine_hp_cap_buildings_pvbased_per_mvgd( |
||
| 1651 | scenario, mv_grid_id, peak_heat_demand, building_ids |
||
| 1652 | ): |
||
| 1653 | """ |
||
| 1654 | Determines which buildings in the MV grid will have a HP (buildings with PV |
||
| 1655 | rooftop are more likely to be assigned) in the given scenario, as well |
||
| 1656 | as their respective HP capacity in MW. |
||
| 1657 | |||
| 1658 | Parameters |
||
| 1659 | ----------- |
||
| | scenario : str |
||
| | Name of scenario. |
||
| 1660 | mv_grid_id : int |
||
| 1661 | ID of MV grid. |
||
| 1662 | peak_heat_demand : pd.Series |
||
| 1663 | Series with peak heat demand per building in MW. Index contains the |
||
| 1664 | building ID. |
||
| 1665 | building_ids : pd.Index(int) |
||
| 1666 | Building IDs (as int) of buildings with decentral heating system in |
||
| 1667 | given MV grid. |
||
| 1668 | |||
| 1669 | """ |
||
| 1670 | |||
| 1671 | hp_cap_grid = get_total_heat_pump_capacity_of_mv_grid(scenario, mv_grid_id) |
||
| 1672 | |||
| 1673 | if len(building_ids) > 0 and hp_cap_grid > 0.0: |
||
| 1674 | peak_heat_demand = peak_heat_demand.loc[building_ids] |
||
| 1675 | |||
| 1676 | # determine minimum required heat pump capacity per building |
||
| 1677 | min_hp_cap_buildings = determine_minimum_hp_capacity_per_building( |
||
| 1678 | peak_heat_demand |
||
| 1679 | ) |
||
| 1680 | |||
| 1681 | # select buildings that will have a heat pump |
||
| 1682 | buildings_with_hp = determine_buildings_with_hp_in_mv_grid( |
||
| 1683 | hp_cap_grid, min_hp_cap_buildings |
||
| 1684 | ) |
||
| 1685 | |||
| 1686 | # distribute total heat pump capacity to all buildings with HP |
||
| 1687 | hp_cap_per_building = desaggregate_hp_capacity( |
||
| 1688 | min_hp_cap_buildings.loc[buildings_with_hp], hp_cap_grid |
||
| 1689 | ) |
||
| 1690 | |||
| 1691 | return hp_cap_per_building.rename("hp_capacity") |
||
| 1692 | |||
| 1693 | else: |
||
| 1694 | return pd.Series(dtype="float64").rename("hp_capacity") |
||
| 1695 | |||
| 1696 | |||
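
Taken together, the helpers above form a three-step pipeline: derive minimum capacities from peak demand, pick the subset of buildings that fits the grid total (this step still queries the DB for PV rooftop weights), then desaggregate the total over that subset. A usage sketch with toy inputs:

```python
import pandas as pd

peak = pd.Series({1: 0.010, 2: 0.006, 3: 0.008})  # MW, invented peak loads
hp_cap_grid = 0.02  # MW, as returned by get_total_heat_pump_capacity_of_mv_grid()

min_cap = determine_minimum_hp_capacity_per_building(peak)
chosen = determine_buildings_with_hp_in_mv_grid(hp_cap_grid, min_cap)
hp_cap = desaggregate_hp_capacity(min_cap.loc[chosen], hp_cap_grid)

assert abs(hp_cap.sum() - hp_cap_grid) < 1e-9  # full grid capacity distributed
```
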
| 1697 | def determine_hp_cap_buildings_eGon100RE_per_mvgd(mv_grid_id): |
||
| 1698 | """ |
||
| 1699 | Determines HP capacity per building in the eGon100RE scenario. |
||
| 1700 | |||
| 1701 | In the eGon100RE scenario, all buildings without district heating get a heat |
||
| 1702 | pump. |
||
| 1703 | |||
| 1704 | Returns |
||
| 1705 | -------- |
||
| 1706 | pd.Series |
||
| 1707 | Pandas series with heat pump capacity per building in MW. |
||
| 1708 | |||
| 1709 | """ |
||
| 1710 | |||
| 1711 | hp_cap_grid = get_total_heat_pump_capacity_of_mv_grid( |
||
| 1712 | "eGon100RE", mv_grid_id |
||
| 1713 | ) |
||
| 1714 | |||
| 1715 | if hp_cap_grid > 0.0: |
||
| 1716 | # get buildings with decentral heating systems |
||
| 1717 | building_ids = get_buildings_with_decentral_heat_demand_in_mv_grid( |
||
| 1718 | mv_grid_id, scenario="eGon100RE" |
||
| 1719 | ) |
||
| 1720 | |||
| 1721 | logger.info(f"MVGD={mv_grid_id} | Get peak loads from DB") |
||
| 1722 | df_peak_heat_demand = get_heat_peak_demand_per_building( |
||
| 1723 | "eGon100RE", building_ids |
||
| 1724 | ) |
||
| 1725 | |||
| 1726 | logger.info(f"MVGD={mv_grid_id} | Determine HP capacities.") |
||
| 1727 | # determine minimum required heat pump capacity per building |
||
| 1728 | min_hp_cap_buildings = determine_minimum_hp_capacity_per_building( |
||
| 1729 | df_peak_heat_demand, flexibility_factor=24 / 18, cop=1.7 |
||
| 1730 | ) |
||
| 1731 | |||
| 1732 | logger.info(f"MVGD={mv_grid_id} | Desaggregate HP capacities.") |
||
| 1733 | # distribute total heat pump capacity to all buildings with HP |
||
| 1734 | hp_cap_per_building = desaggregate_hp_capacity( |
||
| 1735 | min_hp_cap_buildings, hp_cap_grid |
||
| 1736 | ) |
||
| 1737 | |||
| 1738 | return hp_cap_per_building.rename("hp_capacity") |
||
| 1739 | else: |
||
| 1740 | return pd.Series(dtype="float64").rename("hp_capacity") |
||
| 1741 | |||
| 1742 | |||
| 1743 | def determine_hp_cap_buildings_eGon100RE(): |
||
| 1744 | """ |
||
| 1745 | Main function to determine HP capacity per building in eGon100RE scenario. |
||
| 1746 | |||
| 1747 | """ |
||
| 1748 | |||
| 1749 | # ========== Register np datatypes with SQLA ========== |
||
| 1750 | register_adapter(np.float64, adapt_numpy_float64) |
||
| 1751 | register_adapter(np.int64, adapt_numpy_int64) |
||
| 1752 | # ===================================================== |
||
| 1753 | |||
| 1754 | with db.session_scope() as session: |
||
| 1755 | query = ( |
||
| 1756 | session.query( |
||
| 1757 | MapZensusGridDistricts.bus_id, |
||
| 1758 | ) |
||
| 1759 | .filter( |
||
| 1760 | MapZensusGridDistricts.zensus_population_id |
||
| 1761 | == EgonPetaHeat.zensus_population_id |
||
| 1762 | ) |
||
| 1763 | .distinct(MapZensusGridDistricts.bus_id) |
||
| 1764 | ) |
||
| 1765 | mvgd_ids = pd.read_sql( |
||
| 1766 | query.statement, query.session.bind, index_col=None |
||
| 1767 | ) |
||
| 1768 | mvgd_ids = mvgd_ids.sort_values("bus_id") |
||
| 1769 | mvgd_ids = mvgd_ids["bus_id"].values |
||
| 1770 | |||
| 1771 | df_hp_cap_per_building_100RE_db = pd.DataFrame( |
||
| 1772 | columns=["building_id", "hp_capacity"] |
||
| 1773 | ) |
||
| 1774 | |||
| 1775 | for mvgd_id in mvgd_ids: |
||
| 1776 | logger.info(f"MVGD={mvgd_id} | Start") |
||
| 1777 | |||
| 1778 | hp_cap_per_building_100RE = ( |
||
| 1779 | determine_hp_cap_buildings_eGon100RE_per_mvgd(mvgd_id) |
||
| 1780 | ) |
||
| 1781 | |||
| 1782 | if not hp_cap_per_building_100RE.empty: |
||
| 1783 | df_hp_cap_per_building_100RE_db = pd.concat( |
||
| 1784 | [ |
||
| 1785 | df_hp_cap_per_building_100RE_db, |
||
| 1786 | hp_cap_per_building_100RE.reset_index(), |
||
| 1787 | ], |
||
| 1788 | axis=0, |
||
| 1789 | ) |
||
| 1790 | |||
| 1791 | logger.info(f"MVGD={min(mvgd_ids)} : {max(mvgd_ids)} | Write data to db.") |
||
| 1792 | df_hp_cap_per_building_100RE_db["scenario"] = "eGon100RE" |
||
| 1793 | |||
| 1794 | EgonHpCapacityBuildings.__table__.create(bind=engine, checkfirst=True) |
||
| 1795 | |||
| 1796 | write_table_to_postgres( |
||
| 1797 | df_hp_cap_per_building_100RE_db, |
||
| 1798 | EgonHpCapacityBuildings, |
||
| 1799 | drop=False, |
||
| 1800 | ) |
||
| 1801 | |||
| 1802 | |||
| 1803 | def aggregate_residential_and_cts_profiles(mvgd, scenario): |
||
| 1804 | """ |
||
| 1805 | Gets residential and CTS heat demand profiles per building and aggregates |
||
| 1806 | them. |
||
| 1807 | |||
| 1808 | Parameters |
||
| 1809 | ---------- |
||
| 1810 | mvgd : int |
||
| 1811 | MV grid ID. |
||
| 1812 | scenario : str |
||
| 1813 | Possible options are eGon2035 or eGon100RE. |
||
| 1814 | |||
| 1815 | Returns |
||
| 1816 | -------- |
||
| 1817 | pd.DataFrame |
||
| 1818 | Table of demand profiles per building. Column names are building IDs and |
||
| 1819 | index is hour of the year as int (0-8759). |
||
| 1820 | |||
| 1821 | """ |
||
| 1822 | # ############### get residential heat demand profiles ############### |
||
| 1823 | df_heat_ts = calc_residential_heat_profiles_per_mvgd( |
||
| 1824 | mvgd=mvgd, scenario=scenario |
||
| 1825 | ) |
||
| 1826 | |||
| 1827 | # pivot to allow aggregation with CTS profiles |
||
| 1828 | df_heat_ts = df_heat_ts.pivot( |
||
| 1829 | index=["day_of_year", "hour"], |
||
| 1830 | columns="building_id", |
||
| 1831 | values="demand_ts", |
||
| 1832 | ) |
||
| 1833 | df_heat_ts = df_heat_ts.sort_index().reset_index(drop=True) |
||
| 1834 | |||
| 1835 | # ############### get CTS heat demand profiles ############### |
||
| 1836 | heat_demand_cts_ts = calc_cts_building_profiles( |
||
| 1837 | bus_ids=[mvgd], |
||
| 1838 | scenario=scenario, |
||
| 1839 | sector="heat", |
||
| 1840 | ) |
||
| 1841 | |||
| 1842 | # ############# aggregate residential and CTS demand profiles ############# |
||
| 1843 | df_heat_ts = pd.concat([df_heat_ts, heat_demand_cts_ts], axis=1) |
||
| 1844 | |||
| 1845 | df_heat_ts = df_heat_ts.groupby(axis=1, level=0).sum() |
||
| 1846 | |||
| 1847 | return df_heat_ts |
||
| 1848 | |||
| 1849 | |||
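
The final `groupby(axis=1, level=0).sum()` adds up columns that share a building ID, so a building occurring in both the residential and the CTS profiles ends up as one summed column. Note that `axis=1` groupby is deprecated in recent pandas; the double transpose below is the forward-compatible equivalent:

```python
import pandas as pd

res = pd.DataFrame({10: [1.0, 2.0], 11: [0.5, 0.5]})  # residential profiles
cts = pd.DataFrame({11: [0.2, 0.3], 12: [1.0, 1.0]})  # CTS profiles

combined = pd.concat([res, cts], axis=1)
aggregated = combined.T.groupby(level=0).sum().T  # sum duplicate building columns
print(aggregated[11].tolist())  # [0.7, 0.8]
```
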
| 1850 | def export_to_db(df_peak_loads_db, df_heat_mvgd_ts_db, drop=False): |
||
| 1851 | """ |
||
| 1852 | Function to export the collected results of all MVGDs per bulk to DB. |
||
| 1853 | |||
| 1854 | Parameters |
||
| 1855 | ---------- |
||
| 1856 | df_peak_loads_db : pd.DataFrame |
||
| 1857 | Table of building peak loads of all MVGDs per bulk |
||
| 1858 | df_heat_mvgd_ts_db : pd.DataFrame |
||
| 1859 | Table of all aggregated MVGD profiles per bulk |
||
| 1860 | drop : boolean |
||
| 1861 | Drop and recreate table if True |
||
| 1862 | |||
| 1863 | """ |
||
| 1864 | |||
| 1865 | df_peak_loads_db = df_peak_loads_db.melt( |
||
| 1866 | id_vars="building_id", |
||
| 1867 | var_name="scenario", |
||
| 1868 | value_name="peak_load_in_w", |
||
| 1869 | ) |
||
| 1870 | df_peak_loads_db["building_id"] = df_peak_loads_db["building_id"].astype( |
||
| 1871 | int |
||
| 1872 | ) |
||
| 1873 | df_peak_loads_db["sector"] = "residential+cts" |
||
| 1874 | # From MW to W |
||
| 1875 | df_peak_loads_db["peak_load_in_w"] = ( |
||
| 1876 | df_peak_loads_db["peak_load_in_w"] * 1e6 |
||
| 1877 | ) |
||
| 1878 | write_table_to_postgres(df_peak_loads_db, BuildingHeatPeakLoads, drop=drop) |
||
| 1879 | |||
| 1880 | dtypes = { |
||
| 1881 | column.key: column.type |
||
| 1882 | for column in EgonEtragoTimeseriesIndividualHeating.__table__.columns |
||
| 1883 | } |
||
| 1884 | df_heat_mvgd_ts_db = df_heat_mvgd_ts_db.loc[:, dtypes.keys()] |
||
| 1885 | |||
| 1886 | if drop: |
||
| 1887 | logger.info( |
||
| 1888 | f"Drop and recreate table " |
||
| 1889 | f"{EgonEtragoTimeseriesIndividualHeating.__table__.name}." |
||
| 1890 | ) |
||
| 1891 | EgonEtragoTimeseriesIndividualHeating.__table__.drop( |
||
| 1892 | bind=engine, checkfirst=True |
||
| 1893 | ) |
||
| 1894 | EgonEtragoTimeseriesIndividualHeating.__table__.create( |
||
| 1895 | bind=engine, checkfirst=True |
||
| 1896 | ) |
||
| 1897 | |||
| 1898 | with db.session_scope() as session: |
||
| 1899 | df_heat_mvgd_ts_db.to_sql( |
||
| 1900 | name=EgonEtragoTimeseriesIndividualHeating.__table__.name, |
||
| 1901 | schema=EgonEtragoTimeseriesIndividualHeating.__table__.schema, |
||
| 1902 | con=session.connection(), |
||
| 1903 | if_exists="append", |
||
| 1904 | method="multi", |
||
| 1905 | index=False, |
||
| 1906 | dtype=dtypes, |
||
| 1907 | ) |
||
| 1908 | |||
| 1909 | |||
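
The melt call above turns the wide per-bulk peak-load table (one column per scenario) into the long layout of the target table, after which the values are converted from MW to W. In miniature:

```python
import pandas as pd

wide = pd.DataFrame({"building_id": [1, 2], "eGon2035": [0.010, 0.006]})  # MW

long = wide.melt(
    id_vars="building_id", var_name="scenario", value_name="peak_load_in_w"
)
long["peak_load_in_w"] *= 1e6  # MW -> W, the unit stored in the DB
print(long.to_string(index=False))
#  building_id scenario  peak_load_in_w
#            1 eGon2035         10000.0
#            2 eGon2035          6000.0
```
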
| 1910 | def export_min_cap_to_csv(df_hp_min_cap_mv_grid_pypsa_eur_sec): |
||
| 1911 | """Export minimum capacity of heat pumps for pypsa eur sec to csv""" |
||
| 1912 | |||
| 1913 | df_hp_min_cap_mv_grid_pypsa_eur_sec.index.name = "mvgd_id" |
||
| 1914 | df_hp_min_cap_mv_grid_pypsa_eur_sec = ( |
||
| 1915 | df_hp_min_cap_mv_grid_pypsa_eur_sec.to_frame( |
||
| 1916 | name="min_hp_capacity" |
||
| 1917 | ).reset_index() |
||
| 1918 | ) |
||
| 1919 | |||
| 1920 | folder = Path(".") / "input-pypsa-eur-sec" |
||
| 1921 | file = folder / "minimum_hp_capacity_mv_grid_100RE.csv" |
||
| 1922 | # Create the folder, if it does not exist already |
||
| 1923 | if not os.path.exists(folder): |
||
| 1924 | os.mkdir(folder) |
||
| 1925 | if not file.is_file(): |
||
| 1926 | logger.info(f"Create {file}") |
||
| 1927 | df_hp_min_cap_mv_grid_pypsa_eur_sec.to_csv(file, mode="w", header=True) |
||
| 1928 | else: |
||
| 1929 | df_hp_min_cap_mv_grid_pypsa_eur_sec.to_csv( |
||
| 1930 | file, mode="a", header=False |
||
| 1931 | ) |
||
| 1932 | |||
| 1933 | |||
| 1934 | def delete_pypsa_eur_sec_csv_file(): |
||
| 1935 | """Delete pypsa eur sec minimum heat pump capacity csv before new run""" |
||
| 1936 | |||
| 1937 | folder = Path(".") / "input-pypsa-eur-sec" |
||
| 1938 | file = folder / "minimum_hp_capacity_mv_grid_100RE.csv" |
||
| 1939 | if file.is_file(): |
||
| 1940 | logger.info(f"Delete {file}") |
||
| 1941 | os.remove(file) |
||
| 1942 | |||
| 1943 | |||
| 1944 | def catch_missing_buidings(buildings_decentral_heating, peak_load): |
||
| 1945 | """ |
||
| 1946 | Check for missing buildings and reduce the list of buildings with |
||
| 1947 | decentral heating if no peak loads are available. This should only |
||
| 1948 | happen in case of a spatial cutout, e.g. Schleswig-Holstein (SH). |
||
| 1949 | |||
| 1950 | Parameters |
||
| 1951 | ----------- |
||
| 1952 | buildings_decentral_heating : pd.Index(int) |
||
| 1953 | Building IDs of all buildings with decentral heating. |
||
| 1954 | |||
| 1955 | peak_load : pd.Series |
||
| 1956 | Peak loads of all buildings within the MVGD. |
||
| 1957 | |||
| 1958 | """ |
||
| 1959 | # Catch missing buildings key error |
||
| 1960 | # should only happen within cutout SH |
||
| 1961 | if not all(buildings_decentral_heating.isin(peak_load.index)): |
||
| 1962 | diff = buildings_decentral_heating.difference(peak_load.index) |
||
| 1963 | logger.warning( |
||
| 1964 | f"Dropped {len(diff)} building ids due to missing peak " |
||
| 1965 | f"loads. {len(buildings_decentral_heating)} left." |
||
| 1966 | ) |
||
| 1967 | logger.info(f"Dropped buildings: {diff.values}") |
||
| 1968 | buildings_decentral_heating = buildings_decentral_heating.drop(diff) |
||
| 1969 | |||
| 1970 | return buildings_decentral_heating |
||
| 1971 | |||
| 1972 | |||
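
The guard above relies on pandas Index set operations: `difference` yields the IDs lacking a peak load and `drop` removes exactly those. Reduced to its essentials:

```python
import pandas as pd

buildings = pd.Index([1, 2, 3, 4])
peak_load = pd.Series({1: 0.010, 2: 0.020, 4: 0.015})  # building 3 has no peak load

diff = buildings.difference(peak_load.index)  # Index([3])
buildings = buildings.drop(diff)
print(list(buildings))  # [1, 2, 4]
```
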
| 1973 | def determine_hp_cap_peak_load_mvgd_ts_2035(mvgd_ids): |
||
| 1974 | """ |
||
| 1975 | Main function to determine HP capacity per building in the eGon2035 scenario. |
||
| 1976 | Further, creates heat demand time series for all buildings with heat pumps |
||
| 1977 | in MV grid, as well as for all buildings with gas boilers, used in eTraGo. |
||
| 1978 | |||
| 1979 | Parameters |
||
| 1980 | ----------- |
||
| 1981 | mvgd_ids : list(int) |
||
| 1982 | List of MV grid IDs to determine data for. |
||
| 1983 | |||
| 1984 | """ |
||
| 1985 | |||
| 1986 | # ========== Register np datatypes with SQLA ========== |
||
| 1987 | register_adapter(np.float64, adapt_numpy_float64) |
||
| 1988 | register_adapter(np.int64, adapt_numpy_int64) |
||
| 1989 | # ===================================================== |
||
| 1990 | |||
| 1991 | df_peak_loads_db = pd.DataFrame() |
||
| 1992 | df_hp_cap_per_building_2035_db = pd.DataFrame() |
||
| 1993 | df_heat_mvgd_ts_db = pd.DataFrame() |
||
| 1994 | |||
| 1995 | for mvgd in mvgd_ids: |
||
| 1996 | logger.info(f"MVGD={mvgd} | Start") |
||
| 1997 | |||
| 1998 | # ############# aggregate residential and CTS demand profiles ##### |
||
| 1999 | |||
| 2000 | df_heat_ts = aggregate_residential_and_cts_profiles( |
||
| 2001 | mvgd, scenario="eGon2035" |
||
| 2002 | ) |
||
| 2003 | |||
| 2004 | # ##################### determine peak loads ################### |
||
| 2005 | logger.info(f"MVGD={mvgd} | Determine peak loads.") |
||
| 2006 | |||
| 2007 | peak_load_2035 = df_heat_ts.max().rename("eGon2035") |
||
| 2008 | |||
| 2009 | # ######## determine HP capacity per building ######### |
||
| 2010 | logger.info(f"MVGD={mvgd} | Determine HP capacities.") |
||
| 2011 | |||
| 2012 | buildings_decentral_heating = ( |
||
| 2013 | get_buildings_with_decentral_heat_demand_in_mv_grid( |
||
| 2014 | mvgd, scenario="eGon2035" |
||
| 2015 | ) |
||
| 2016 | ) |
||
| 2017 | |||
| 2018 | # Reduce list of decentral heating if no Peak load available |
||
| 2019 | # TODO maybe remove after successful DE run |
||
| 2020 | # Might be fixed in #990 |
||
| 2021 | buildings_decentral_heating = catch_missing_buidings( |
||
| 2022 | buildings_decentral_heating, peak_load_2035 |
||
| 2023 | ) |
||
| 2024 | |||
| 2025 | hp_cap_per_building_2035 = determine_hp_cap_buildings_pvbased_per_mvgd( |
||
| 2026 | "eGon2035", |
||
| 2027 | mvgd, |
||
| 2028 | peak_load_2035, |
||
| 2029 | buildings_decentral_heating, |
||
| 2030 | ) |
||
| 2031 | buildings_gas_2035 = pd.Index(buildings_decentral_heating).drop( |
||
| 2032 | hp_cap_per_building_2035.index |
||
| 2033 | ) |
||
| 2034 | |||
| 2035 | # ################ aggregated heat profiles ################### |
||
| 2036 | logger.info(f"MVGD={mvgd} | Aggregate heat profiles.") |
||
| 2037 | |||
| 2038 | df_mvgd_ts_2035_hp = df_heat_ts.loc[ |
||
| 2039 | :, |
||
| 2040 | hp_cap_per_building_2035.index, |
||
| 2041 | ].sum(axis=1) |
||
| 2042 | |||
| 2043 | # heat demand time series for buildings with gas boiler |
||
| 2044 | df_mvgd_ts_2035_gas = df_heat_ts.loc[:, buildings_gas_2035].sum(axis=1) |
||
| 2045 | |||
| 2046 | df_heat_mvgd_ts = pd.DataFrame( |
||
| 2047 | data={ |
||
| 2048 | "carrier": ["heat_pump", "CH4"], |
||
| 2049 | "bus_id": mvgd, |
||
| 2050 | "scenario": ["eGon2035", "eGon2035"], |
||
| 2051 | "dist_aggregated_mw": [ |
||
| 2052 | df_mvgd_ts_2035_hp.to_list(), |
||
| 2053 | df_mvgd_ts_2035_gas.to_list(), |
||
| 2054 | ], |
||
| 2055 | } |
||
| 2056 | ) |
||
| 2057 | |||
| 2058 | # ################ collect results ################## |
||
| 2059 | logger.info(f"MVGD={mvgd} | Collect results.") |
||
| 2060 | |||
| 2061 | df_peak_loads_db = pd.concat( |
||
| 2062 | [df_peak_loads_db, peak_load_2035.reset_index()], |
||
| 2063 | axis=0, |
||
| 2064 | ignore_index=True, |
||
| 2065 | ) |
||
| 2066 | |||
| 2067 | df_heat_mvgd_ts_db = pd.concat( |
||
| 2068 | [df_heat_mvgd_ts_db, df_heat_mvgd_ts], axis=0, ignore_index=True |
||
| 2069 | ) |
||
| 2070 | |||
| 2071 | df_hp_cap_per_building_2035_db = pd.concat( |
||
| 2072 | [ |
||
| 2073 | df_hp_cap_per_building_2035_db, |
||
| 2074 | hp_cap_per_building_2035.reset_index(), |
||
| 2075 | ], |
||
| 2076 | axis=0, |
||
| 2077 | ) |
||
| 2078 | |||
| 2079 | # ################ export to db ####################### |
||
| 2080 | logger.info(f"MVGD={min(mvgd_ids)} : {max(mvgd_ids)} | Write data to db.") |
||
| 2081 | |||
| 2082 | export_to_db(df_peak_loads_db, df_heat_mvgd_ts_db, drop=False) |
||
| 2083 | |||
| 2084 | df_hp_cap_per_building_2035_db["scenario"] = "eGon2035" |
||
| 2085 | |||
| 2086 | # TODO debug duplicated building_ids |
||
| 2087 | duplicates = df_hp_cap_per_building_2035_db.loc[ |
||
| 2088 | df_hp_cap_per_building_2035_db.duplicated("building_id", keep=False) |
||
| 2089 | ] |
||
| 2090 | |||
| 2091 | if not duplicates.empty: |
||
| 2092 | logger.info( |
||
| 2093 | f"Dropped duplicated buildings: " |
||
| 2094 | f"{duplicates.loc[:,['building_id', 'hp_capacity']]}" |
||
| 2095 | ) |
||
| 2096 | |||
| 2097 | df_hp_cap_per_building_2035_db.drop_duplicates("building_id", inplace=True) |
||
| 2098 | |||
| 2099 | df_hp_cap_per_building_2035_db.building_id = ( |
||
| 2100 | df_hp_cap_per_building_2035_db.building_id.astype(int) |
||
| 2101 | ) |
||
| 2102 | |||
| 2103 | write_table_to_postgres( |
||
| 2104 | df_hp_cap_per_building_2035_db, |
||
| 2105 | EgonHpCapacityBuildings, |
||
| 2106 | drop=False, |
||
| 2107 | ) |
||
| 2108 | |||
| 2109 | |||
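
Each MVGD contributes one row per carrier to `df_heat_mvgd_ts`, with the entire hourly profile packed into a single list-valued cell that maps onto the array column of the target table. A minimal sketch (three time steps instead of 8760):

```python
import pandas as pd

profile_hp = [0.10, 0.20, 0.15]   # stand-in for an 8760-step heat pump profile
profile_gas = [0.30, 0.25, 0.20]  # stand-in for the CH4 profile

df_heat_mvgd_ts = pd.DataFrame(
    {
        "carrier": ["heat_pump", "CH4"],
        "bus_id": 1234,
        "scenario": ["eGon2035", "eGon2035"],
        "dist_aggregated_mw": [profile_hp, profile_gas],
    }
)
print(df_heat_mvgd_ts.loc[0, "dist_aggregated_mw"])  # [0.1, 0.2, 0.15]
```
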
| 2110 | def determine_hp_cap_peak_load_mvgd_ts_status_quo(mvgd_ids, scenario): |
||
| 2111 | """ |
||
| 2112 | Main function to determine HP capacity per building in a status quo scenario. |
||
| 2113 | Further, creates heat demand time series for all buildings with heat pumps |
||
| 2114 | in MV grid, as well as for all buildings with gas boilers, used in eTraGo. |
||
| 2115 | |||
| 2116 | Parameters |
||
| 2117 | ----------- |
||
| 2118 | mvgd_ids : list(int) |
||
| 2119 | List of MV grid IDs to determine data for. |
||
| | scenario : str |
||
| | Name of the status quo scenario to determine data for. |
||
| 2120 | |||
| 2121 | """ |
||
| 2122 | |||
| 2123 | # ========== Register np datatypes with SQLA ========== |
||
| 2124 | register_adapter(np.float64, adapt_numpy_float64) |
||
| 2125 | register_adapter(np.int64, adapt_numpy_int64) |
||
| 2126 | # ===================================================== |
||
| 2127 | |||
| 2128 | df_peak_loads_db = pd.DataFrame() |
||
| 2129 | df_hp_cap_per_building_status_quo_db = pd.DataFrame() |
||
| 2130 | df_heat_mvgd_ts_db = pd.DataFrame() |
||
| 2131 | |||
| 2132 | for mvgd in mvgd_ids: |
||
| 2133 | logger.info(f"MVGD={mvgd} | Start") |
||
| 2134 | |||
| 2135 | # ############# aggregate residential and CTS demand profiles ##### |
||
| 2136 | |||
| 2137 | df_heat_ts = aggregate_residential_and_cts_profiles( |
||
| 2138 | mvgd, scenario=scenario |
||
| 2139 | ) |
||
| 2140 | |||
| 2141 | # ##################### determine peak loads ################### |
||
| 2142 | logger.info(f"MVGD={mvgd} | Determine peak loads.") |
||
| 2143 | |||
| 2144 | peak_load_status_quo = df_heat_ts.max().rename(scenario) |
||
| 2145 | |||
| 2146 | # ######## determine HP capacity per building ######### |
||
| 2147 | logger.info(f"MVGD={mvgd} | Determine HP capacities.") |
||
| 2148 | |||
| 2149 | buildings_decentral_heating = ( |
||
| 2150 | get_buildings_with_decentral_heat_demand_in_mv_grid( |
||
| 2151 | mvgd, scenario=scenario |
||
| 2152 | ) |
||
| 2153 | ) |
||
| 2154 | |||
| 2155 | # Reduce list of decentral heating if no Peak load available |
||
| 2156 | # TODO maybe remove after successful DE run |
||
| 2157 | # Might be fixed in #990 |
||
| 2158 | buildings_decentral_heating = catch_missing_buidings( |
||
| 2159 | buildings_decentral_heating, peak_load_status_quo |
||
| 2160 | ) |
||
| 2161 | |||
| 2162 | hp_cap_per_building_status_quo = ( |
||
| 2163 | determine_hp_cap_buildings_pvbased_per_mvgd( |
||
| 2164 | scenario, |
||
| 2165 | mvgd, |
||
| 2166 | peak_load_status_quo, |
||
| 2167 | buildings_decentral_heating, |
||
| 2168 | ) |
||
| 2169 | ) |
||
| 2170 | |||
| 2171 | # ################ aggregated heat profiles ################### |
||
| 2172 | logger.info(f"MVGD={mvgd} | Aggregate heat profiles.") |
||
| 2173 | |||
| 2174 | df_mvgd_ts_status_quo_hp = df_heat_ts.loc[ |
||
| 2175 | :, |
||
| 2176 | hp_cap_per_building_status_quo.index, |
||
| 2177 | ].sum(axis=1) |
||
| 2178 | |||
| 2179 | df_heat_mvgd_ts = pd.DataFrame( |
||
| 2180 | data={ |
||
| 2181 | "carrier": "heat_pump", |
||
| 2182 | "bus_id": mvgd, |
||
| 2183 | "scenario": scenario, |
||
| 2184 | "dist_aggregated_mw": [df_mvgd_ts_status_quo_hp.to_list()], |
||
| 2185 | } |
||
| 2186 | ) |
||
| 2187 | |||
| 2188 | # ################ collect results ################## |
||
| 2189 | logger.info(f"MVGD={mvgd} | Collect results.") |
||
| 2190 | |||
| 2191 | df_peak_loads_db = pd.concat( |
||
| 2192 | [df_peak_loads_db, peak_load_status_quo.reset_index()], |
||
| 2193 | axis=0, |
||
| 2194 | ignore_index=True, |
||
| 2195 | ) |
||
| 2196 | |||
| 2197 | df_heat_mvgd_ts_db = pd.concat( |
||
| 2198 | [df_heat_mvgd_ts_db, df_heat_mvgd_ts], axis=0, ignore_index=True |
||
| 2199 | ) |
||
| 2200 | |||
| 2201 | df_hp_cap_per_building_status_quo_db = pd.concat( |
||
| 2202 | [ |
||
| 2203 | df_hp_cap_per_building_status_quo_db, |
||
| 2204 | hp_cap_per_building_status_quo.reset_index(), |
||
| 2205 | ], |
||
| 2206 | axis=0, |
||
| 2207 | ) |
||
| 2208 | |||
| 2209 | # ################ export to db ####################### |
||
| 2210 | logger.info(f"MVGD={min(mvgd_ids)} : {max(mvgd_ids)} | Write data to db.") |
||
| 2211 | |||
| 2212 | export_to_db(df_peak_loads_db, df_heat_mvgd_ts_db, drop=False) |
||
| 2213 | |||
| 2214 | df_hp_cap_per_building_status_quo_db["scenario"] = scenario |
||
| 2215 | |||
| 2216 | # TODO debug duplicated building_ids |
||
| 2217 | duplicates = df_hp_cap_per_building_status_quo_db.loc[ |
||
| 2218 | df_hp_cap_per_building_status_quo_db.duplicated( |
||
| 2219 | "building_id", keep=False |
||
| 2220 | ) |
||
| 2221 | ] |
||
| 2222 | |||
| 2223 | if not duplicates.empty: |
||
| 2224 | logger.info( |
||
| 2225 | f"Dropped duplicated buildings: " |
||
| 2226 | f"{duplicates.loc[:,['building_id', 'hp_capacity']]}" |
||
| 2227 | ) |
||
| 2228 | |||
| 2229 | df_hp_cap_per_building_status_quo_db.drop_duplicates( |
||
| 2230 | "building_id", inplace=True |
||
| 2231 | ) |
||
| 2232 | |||
| 2233 | df_hp_cap_per_building_status_quo_db.building_id = ( |
||
| 2234 | df_hp_cap_per_building_status_quo_db.building_id.astype(int) |
||
| 2235 | ) |
||
| 2236 | |||
| 2237 | write_table_to_postgres( |
||
| 2238 | df_hp_cap_per_building_status_quo_db, |
||
| 2239 | EgonHpCapacityBuildings, |
||
| 2240 | drop=False, |
||
| 2241 | ) |
||
| 2242 | |||
| 2243 | |||
| 2244 | def determine_hp_cap_peak_load_mvgd_ts_pypsa_eur(mvgd_ids): |
||
| 2245 | """ |
||
| 2246 | Main function to determine minimum required HP capacity in MV for |
||
| 2247 | pypsa-eur-sec. Further, creates heat demand time series for all buildings |
||
| 2248 | with heat pumps in MV grid in eGon100RE scenario, used in eTraGo. |
||
| 2249 | |||
| 2250 | Parameters |
||
| 2251 | ----------- |
||
| 2252 | mvgd_ids : list(int) |
||
| 2253 | List of MV grid IDs to determine data for. |
||
| 2254 | |||
| 2255 | """ |
||
| 2256 | |||
| 2257 | # ========== Register np datatypes with SQLA ========== |
||
| 2258 | register_adapter(np.float64, adapt_numpy_float64) |
||
| 2259 | register_adapter(np.int64, adapt_numpy_int64) |
||
| 2260 | # ===================================================== |
||
| 2261 | |||
| 2262 | df_peak_loads_db = pd.DataFrame() |
||
| 2263 | df_heat_mvgd_ts_db = pd.DataFrame() |
||
| 2264 | df_hp_min_cap_mv_grid_pypsa_eur_sec = pd.Series(dtype="float64") |
||
| 2265 | |||
| 2266 | for mvgd in mvgd_ids: |
||
| 2267 | logger.info(f"MVGD={mvgd} | Start") |
||
| 2268 | |||
| 2269 | # ############# aggregate residential and CTS demand profiles ##### |
||
| 2270 | |||
| 2271 | df_heat_ts = aggregate_residential_and_cts_profiles( |
||
| 2272 | mvgd, scenario="eGon100RE" |
||
| 2273 | ) |
||
| 2274 | |||
| 2275 | # ##################### determine peak loads ################### |
||
| 2276 | logger.info(f"MVGD={mvgd} | Determine peak loads.") |
||
| 2277 | |||
| 2278 | peak_load_100RE = df_heat_ts.max().rename("eGon100RE") |
||
| 2279 | |||
| 2280 | # ######## determine minimum HP capacity pypsa-eur-sec ########### |
||
| 2281 | logger.info(f"MVGD={mvgd} | Determine minimum HP capacity.") |
||
| 2282 | |||
| 2283 | buildings_decentral_heating = ( |
||
| 2284 | get_buildings_with_decentral_heat_demand_in_mv_grid( |
||
| 2285 | mvgd, scenario="eGon100RE" |
||
| 2286 | ) |
||
| 2287 | ) |
||
| 2288 | |||
| 2289 | # Reduce list of decentral heating if no Peak load available |
||
| 2290 | # TODO maybe remove after successful DE run |
||
| 2291 | buildings_decentral_heating = catch_missing_buidings( |
||
| 2292 | buildings_decentral_heating, peak_load_100RE |
||
| 2293 | ) |
||
| 2294 | |||
| 2295 | hp_min_cap_mv_grid_pypsa_eur_sec = ( |
||
| 2296 | determine_min_hp_cap_buildings_pypsa_eur_sec( |
||
| 2297 | peak_load_100RE, |
||
| 2298 | buildings_decentral_heating, |
||
| 2299 | ) |
||
| 2300 | ) |
||
| 2301 | |||
| 2302 | # ################ aggregated heat profiles ################### |
||
| 2303 | logger.info(f"MVGD={mvgd} | Aggregate heat profiles.") |
||
| 2304 | |||
| 2305 | df_mvgd_ts_hp = df_heat_ts.loc[ |
||
| 2306 | :, |
||
| 2307 | buildings_decentral_heating, |
||
| 2308 | ].sum(axis=1) |
||
| 2309 | |||
| 2310 | df_heat_mvgd_ts = pd.DataFrame( |
||
| 2311 | data={ |
||
| 2312 | "carrier": "heat_pump", |
||
| 2313 | "bus_id": mvgd, |
||
| 2314 | "scenario": "eGon100RE", |
||
| 2315 | "dist_aggregated_mw": [df_mvgd_ts_hp.to_list()], |
||
| 2316 | } |
||
| 2317 | ) |
||
| 2318 | |||
| 2319 | # ################ collect results ################## |
||
| 2320 | logger.info(f"MVGD={mvgd} | Collect results.") |
||
| 2321 | |||
| 2322 | df_peak_loads_db = pd.concat( |
||
| 2323 | [df_peak_loads_db, peak_load_100RE.reset_index()], |
||
| 2324 | axis=0, |
||
| 2325 | ignore_index=True, |
||
| 2326 | ) |
||
| 2327 | |||
| 2328 | df_heat_mvgd_ts_db = pd.concat( |
||
| 2329 | [df_heat_mvgd_ts_db, df_heat_mvgd_ts], axis=0, ignore_index=True |
||
| 2330 | ) |
||
| 2331 | |||
| 2332 | df_hp_min_cap_mv_grid_pypsa_eur_sec.loc[mvgd] = ( |
||
| 2333 | hp_min_cap_mv_grid_pypsa_eur_sec |
||
| 2334 | ) |
||
| 2335 | |||
| 2336 | # ################ export to db and csv ###################### |
||
| 2337 | logger.info(f"MVGD={min(mvgd_ids)} : {max(mvgd_ids)} | Write data to db.") |
||
| 2338 | |||
| 2339 | export_to_db(df_peak_loads_db, df_heat_mvgd_ts_db, drop=False) |
||
| 2340 | |||
| 2341 | logger.info( |
||
| 2342 | f"MVGD={min(mvgd_ids)} : {max(mvgd_ids)} | Write " |
||
| 2343 | f"pypsa-eur-sec min " |
||
| 2344 | f"HP capacities to csv." |
||
| 2345 | ) |
||
| 2346 | export_min_cap_to_csv(df_hp_min_cap_mv_grid_pypsa_eur_sec) |
||
| 2347 | |||
| 2348 | |||
| 2349 | def split_mvgds_into_bulks(n, max_n, func, scenario=None): |
||
| 2350 | """ |
||
| 2351 | Generic function to split task into multiple parallel tasks, |
||
| 2352 | dividing the number of MVGDs into even bulks. |
||
| 2353 | |||
| 2354 | Parameters |
||
| 2355 | ----------- |
||
| 2356 | n : int |
||
| 2357 | Number of the bulk to be processed. |
||
| 2358 | max_n: int |
||
| 2359 | Maximum number of bulks |
||
| 2360 | func : function |
||
| 2361 | The function which is then called with the list of MVGD IDs as |
||
| 2362 | parameter. |
||
| | scenario : str or None |
||
| | Scenario name that is passed on to func, if given. |
||
| 2363 | """ |
||
| 2364 | |||
| 2365 | with db.session_scope() as session: |
||
| 2366 | query = ( |
||
| 2367 | session.query( |
||
| 2368 | MapZensusGridDistricts.bus_id, |
||
| 2369 | ) |
||
| 2370 | .filter( |
||
| 2371 | MapZensusGridDistricts.zensus_population_id |
||
| 2372 | == EgonPetaHeat.zensus_population_id |
||
| 2373 | ) |
||
| 2374 | .distinct(MapZensusGridDistricts.bus_id) |
||
| 2375 | ) |
||
| 2376 | mvgd_ids = pd.read_sql( |
||
| 2377 | query.statement, query.session.bind, index_col=None |
||
| 2378 | ) |
||
| 2379 | |||
| 2380 | mvgd_ids = mvgd_ids.sort_values("bus_id").reset_index(drop=True) |
||
| 2381 | |||
| 2382 | mvgd_ids = np.array_split(mvgd_ids["bus_id"].values, max_n) |
||
| 2383 | # Only take split n |
||
| 2384 | mvgd_ids = mvgd_ids[n] |
||
| 2385 | |||
| 2386 | logger.info(f"Bulk takes care of MVGD: {min(mvgd_ids)} : {max(mvgd_ids)}") |
||
| 2387 | |||
| 2388 | if scenario is not None: |
||
| 2389 | func(mvgd_ids, scenario=scenario) |
||
| 2390 | else: |
||
| 2391 | func(mvgd_ids) |
||
| 2392 | |||
| 2393 | |||
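
`np.array_split` tolerates grid counts that are not divisible by max_n: the leading chunks get one extra element, so every MVGD ends up in exactly one bulk. For example:

```python
import numpy as np

bus_ids = np.arange(10)  # ten toy MVGD IDs
bulks = np.array_split(bus_ids, 3)
print([b.tolist() for b in bulks])
# [[0, 1, 2, 3], [4, 5, 6], [7, 8, 9]]
```
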
| 2394 | def delete_hp_capacity(scenario): |
||
| 2395 | """Remove all hp capacities for the selected scenario |
||
| 2396 | |||
| 2397 | Parameters |
||
| 2398 | ----------- |
||
| 2399 | scenario : string |
||
| 2400 | Name of the scenario, e.g. eGon2035 or eGon100RE |
||
| 2401 | |||
| 2402 | """ |
||
| 2403 | |||
| 2404 | with db.session_scope() as session: |
||
| 2405 | # delete all HP capacity entries for the given scenario |
||
| 2406 | session.query(EgonHpCapacityBuildings).filter( |
||
| 2407 | EgonHpCapacityBuildings.scenario == scenario |
||
| 2408 | ).delete(synchronize_session=False) |
||
| 2409 | |||
| 2410 | |||
| 2411 | def delete_mvgd_ts(scenario): |
||
| 2412 | """Remove all hp capacities for the selected scenario |
||
| 2413 | |||
| 2414 | Parameters |
||
| 2415 | ----------- |
||
| 2416 | scenario : string |
||
| 2417 | Name of the scenario, e.g. eGon2035 or eGon100RE |
||
| 2418 | |||
| 2419 | """ |
||
| 2420 | |||
| 2421 | with db.session_scope() as session: |
||
| 2422 | # delete all aggregated time series for the given scenario |
||
| 2423 | session.query(EgonEtragoTimeseriesIndividualHeating).filter( |
||
| 2424 | EgonEtragoTimeseriesIndividualHeating.scenario == scenario |
||
| 2425 | ).delete(synchronize_session=False) |
||
| 2426 | |||
| 2427 | |||
| 2428 | def delete_hp_capacity_100RE(): |
||
| 2429 | """Remove all hp capacities for the selected eGon100RE""" |
||
| 2430 | EgonHpCapacityBuildings.__table__.create(bind=engine, checkfirst=True) |
||
| 2431 | delete_hp_capacity(scenario="eGon100RE") |
||
| 2432 | |||
| 2433 | |||
| 2434 | def delete_hp_capacity_status_quo(scenario): |
||
| 2435 | """Remove all hp capacities for the selected status quo""" |
||
| 2436 | EgonHpCapacityBuildings.__table__.create(bind=engine, checkfirst=True) |
||
| 2437 | delete_hp_capacity(scenario=scenario) |
||
| 2438 | |||
| 2439 | |||
| 2440 | def delete_hp_capacity_2035(): |
||
| 2441 | """Remove all hp capacities for the selected eGon2035""" |
||
| 2442 | EgonHpCapacityBuildings.__table__.create(bind=engine, checkfirst=True) |
||
| 2443 | delete_hp_capacity(scenario="eGon2035") |
||
| 2444 | |||
| 2445 | |||
| 2446 | def delete_mvgd_ts_status_quo(scenario): |
||
| 2447 | """Remove all mvgd ts for the selected status quo""" |
||
| 2448 | EgonEtragoTimeseriesIndividualHeating.__table__.create( |
||
| 2449 | bind=engine, checkfirst=True |
||
| 2450 | ) |
||
| 2451 | delete_mvgd_ts(scenario=scenario) |
||
| 2452 | |||
| 2453 | |||
| 2454 | def delete_mvgd_ts_2035(): |
||
| 2455 | """Remove all mvgd ts for the selected eGon2035""" |
||
| 2456 | EgonEtragoTimeseriesIndividualHeating.__table__.create( |
||
| 2457 | bind=engine, checkfirst=True |
||
| 2458 | ) |
||
| 2459 | delete_mvgd_ts(scenario="eGon2035") |
||
| 2460 | |||
| 2461 | |||
| 2462 | def delete_mvgd_ts_100RE(): |
||
| 2463 | """Remove all mvgd ts for the selected eGon100RE""" |
||
| 2464 | EgonEtragoTimeseriesIndividualHeating.__table__.create( |
||
| 2465 | bind=engine, checkfirst=True |
||
| 2466 | ) |
||
| 2467 | delete_mvgd_ts(scenario="eGon100RE") |
||
| 2468 | |||
| 2469 | |||
| 2470 | def delete_heat_peak_loads_status_quo(scenario): |
||
| 2471 | """Remove all heat peak loads for status quo.""" |
||
| 2472 | BuildingHeatPeakLoads.__table__.create(bind=engine, checkfirst=True) |
||
| 2473 | with db.session_scope() as session: |
||
| 2474 | # delete all heat peak loads for the given scenario |
||
| 2475 | session.query(BuildingHeatPeakLoads).filter( |
||
| 2476 | BuildingHeatPeakLoads.scenario == scenario |
||
| 2477 | ).delete(synchronize_session=False) |
||
| 2478 | |||
| 2479 | |||
| 2480 | def delete_heat_peak_loads_2035(): |
||
| 2481 | """Remove all heat peak loads for eGon2035.""" |
||
| 2482 | BuildingHeatPeakLoads.__table__.create(bind=engine, checkfirst=True) |
||
| 2483 | with db.session_scope() as session: |
||
| 2484 | # delete all heat peak loads for eGon2035 |
||
| 2485 | session.query(BuildingHeatPeakLoads).filter( |
||
| 2486 | BuildingHeatPeakLoads.scenario == "eGon2035" |
||
| 2487 | ).delete(synchronize_session=False) |
||
| 2488 | |||
| 2489 | |||
| 2490 | def delete_heat_peak_loads_100RE(): |
||
| 2491 | """Remove all heat peak loads for eGon100RE.""" |
||
| 2492 | BuildingHeatPeakLoads.__table__.create(bind=engine, checkfirst=True) |
||
| 2493 | with db.session_scope() as session: |
||
| 2494 | # delete all heat peak loads for eGon100RE |
||
| 2495 | session.query(BuildingHeatPeakLoads).filter( |
||
| 2496 | BuildingHeatPeakLoads.scenario == "eGon100RE" |
||
| 2497 | ).delete(synchronize_session=False) |
||
| 2498 |