| Total Complexity | 81 | 
| Total Lines | 1775 | 
| Duplicated Lines | 4.06 % | 
| Changes | 0 | ||
Duplicate code is one of the most pungent code smells. A rule that is often used is to re-structure code once it is duplicated in three or more places.
Common duplication problems and their corresponding solutions are:
Complex classes like data.datasets.heat_supply.individual_heating often do a lot of different things. To break such a class down, we need to identify a cohesive component within that class. A common approach to find such a component is to look for fields/methods that share the same prefixes, or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.
| 1 | """The central module containing all code dealing with  | 
            ||
| 2 | individual heat supply.  | 
            ||
| 3 | |||
| 4 | """  | 
            ||
| 5 | from pathlib import Path  | 
            ||
| 6 | import os  | 
            ||
| 7 | import random  | 
            ||
| 8 | import time  | 
            ||
| 9 | |||
| 10 | from airflow.operators.python_operator import PythonOperator  | 
            ||
| 11 | from psycopg2.extensions import AsIs, register_adapter  | 
            ||
| 12 | from sqlalchemy import ARRAY, REAL, Column, Integer, String  | 
            ||
| 13 | from sqlalchemy.ext.declarative import declarative_base  | 
            ||
| 14 | import geopandas as gpd  | 
            ||
| 15 | import numpy as np  | 
            ||
| 16 | import pandas as pd  | 
            ||
| 17 | import saio  | 
            ||
| 18 | |||
| 19 | from egon.data import config, db, logger  | 
            ||
| 20 | from egon.data.datasets import Dataset  | 
            ||
| 21 | from egon.data.datasets.district_heating_areas import (  | 
            ||
| 22 | MapZensusDistrictHeatingAreas,  | 
            ||
| 23 | )  | 
            ||
| 24 | from egon.data.datasets.electricity_demand_timeseries.cts_buildings import (  | 
            ||
| 25 | calc_cts_building_profiles,  | 
            ||
| 26 | )  | 
            ||
| 27 | from egon.data.datasets.electricity_demand_timeseries.mapping import (  | 
            ||
| 28 | EgonMapZensusMvgdBuildings,  | 
            ||
| 29 | )  | 
            ||
| 30 | from egon.data.datasets.electricity_demand_timeseries.tools import (  | 
            ||
| 31 | write_table_to_postgres,  | 
            ||
| 32 | )  | 
            ||
| 33 | from egon.data.datasets.heat_demand import EgonPetaHeat  | 
            ||
| 34 | from egon.data.datasets.heat_demand_timeseries.daily import (  | 
            ||
| 35 | EgonDailyHeatDemandPerClimateZone,  | 
            ||
| 36 | EgonMapZensusClimateZones,  | 
            ||
| 37 | )  | 
            ||
| 38 | from egon.data.datasets.heat_demand_timeseries.idp_pool import (  | 
            ||
| 39 | EgonHeatTimeseries,  | 
            ||
| 40 | )  | 
            ||
| 41 | |||
| 42 | # get zensus cells with district heating  | 
            ||
| 43 | from egon.data.datasets.zensus_mv_grid_districts import MapZensusGridDistricts  | 
            ||
| 44 | |||
# Module-wide database engine and declarative base shared by the ORM table
# definitions below.
engine = db.engine()
Base = declarative_base()
            ||
| 47 | |||
| 48 | |||
class EgonEtragoTimeseriesIndividualHeating(Base):
    """ORM table ``demand.egon_etrago_timeseries_individual_heating``.

    One row per (bus_id, scenario, carrier) combination, holding an
    aggregated time series for individual heating as an array of floats.
    """

    __tablename__ = "egon_etrago_timeseries_individual_heating"
    __table_args__ = {"schema": "demand"}
    bus_id = Column(Integer, primary_key=True)  # part of composite PK
    scenario = Column(String, primary_key=True)  # part of composite PK
    carrier = Column(String, primary_key=True)  # part of composite PK
    # Aggregated time series values; unit presumably MW (per column name)
    # — TODO confirm against the writers of this table.
    dist_aggregated_mw = Column(ARRAY(REAL))
            ||
| 56 | |||
| 57 | |||
class EgonHpCapacityBuildings(Base):
    """ORM table ``demand.egon_hp_capacity_buildings``.

    Heat pump capacity assigned to individual buildings, one row per
    (building_id, scenario).
    """

    __tablename__ = "egon_hp_capacity_buildings"
    __table_args__ = {"schema": "demand"}
    building_id = Column(Integer, primary_key=True)  # part of composite PK
    scenario = Column(String, primary_key=True)  # part of composite PK
    # Assigned heat pump capacity; unit presumably MW — TODO confirm.
    hp_capacity = Column(REAL)
            ||
| 64 | |||
| 65 | |||
class HeatPumpsPypsaEurSec(Dataset):
    """Dataset determining heat pump capacities for the pypsa-eur-sec run."""

    def __init__(self, dependencies):
        def dyn_parallel_tasks_pypsa_eur_sec():
            """Build one PythonOperator per MVGD bulk.

            The workload is split into bulks of mvgds so they can run
            concurrently. The number of bulks is taken from the config key
            ``parallel_tasks`` in section ``demand_timeseries_mvgd`` of
            ``datasets.yml`` (default: 1).

            Returns
            -------
            set of airflow.PythonOperators
                One task per bulk. Each element is of
                :func:`egon.data.datasets.heat_supply.individual_heating.
                determine_hp_cap_peak_load_mvgd_ts_pypsa_eur_sec`
            """
            n_bulks = config.datasets()["demand_timeseries_mvgd"].get(
                "parallel_tasks", 1
            )
            # Build the task set in one comprehension, one operator per bulk.
            return {
                PythonOperator(
                    task_id=(
                        f"determine-hp-capacity-pypsa-eur-sec-"
                        f"mvgd-bulk{bulk}"
                    ),
                    python_callable=split_mvgds_into_bulks_pypsa_eur_sec,
                    op_kwargs={
                        "n": bulk,
                        "max_n": n_bulks,
                        "func": determine_hp_cap_peak_load_mvgd_ts_pypsa_eur_sec,  # noqa: E501
                    },
                )
                for bulk in range(n_bulks)
            }

        super().__init__(
            name="HeatPumpsPypsaEurSec",
            version="0.0.0",
            dependencies=dependencies,
            tasks=(
                create_peak_load_table,
                create_egon_etrago_timeseries_individual_heating,
                {*dyn_parallel_tasks_pypsa_eur_sec()},
            ),
        )
            ||
| 114 | |||
| 115 | |||
class HeatPumps2035(Dataset):
    """Dataset determining building heat pump capacities for eGon2035.

    Creates the capacity table, removes stale peak loads and then runs the
    per-MVGD capacity/peak-load determination in parallel bulks.
    """

    def __init__(self, dependencies):
        def dyn_parallel_tasks_2035():
            """Dynamically generate tasks

            The goal is to speed up tasks by parallelising bulks of mvgds.

            The number of parallel tasks is defined via parameter
            `parallel_tasks` in the dataset config `datasets.yml`.

            Returns
            -------
            set of airflow.PythonOperators
                The tasks. Each element is of
                :func:`egon.data.datasets.heat_supply.individual_heating.
                determine_hp_cap_peak_load_mvgd_ts_2035`
            """
            parallel_tasks = config.datasets()["demand_timeseries_mvgd"].get(
                "parallel_tasks", 1
            )
            tasks = set()
            for i in range(parallel_tasks):
                tasks.add(
                    PythonOperator(
                        # Fix: the task id previously read
                        # "determine-hp-capacity-pypsa-eur-sec_..." — a
                        # copy-paste from the pypsa-eur-sec dataset that was
                        # misleading in the Airflow UI for this 2035 run.
                        task_id=(
                            f"determine-hp-capacity-2035_"
                            f"mvgd_bulk{i}"
                        ),
                        python_callable=split_mvgds_into_bulks_2035,
                        op_kwargs={
                            "n": i,
                            "max_n": parallel_tasks,
                            "func": determine_hp_cap_peak_load_mvgd_ts_2035,
                        },
                    )
                )
            return tasks

        super().__init__(
            name="HeatPumps2035",
            version="0.0.0",
            dependencies=dependencies,
            tasks=(
                create_hp_capacity_table,
                delete_peak_loads_if_existing,
                {*dyn_parallel_tasks_2035()},
            ),
        )
            ||
| 164 | |||
| 165 | |||
class HeatPumps2050(Dataset):
    """Dataset determining building heat pump capacities for eGon100RE."""

    def __init__(self, dependencies):
        super().__init__(
            name="HeatPumps2050",
            version="0.0.0",
            dependencies=dependencies,
            # Fix: "(x)" without a trailing comma is not a tuple, just the
            # callable itself; make ``tasks`` a one-element tuple, matching
            # the sibling dataset classes.
            tasks=(determine_hp_cap_buildings_eGon100RE,),
        )
            ||
| 174 | |||
| 175 | |||
class BuildingHeatPeakLoads(Base):
    """ORM table ``demand.egon_building_heat_peak_loads``.

    Heat peak load per building, scenario and sector; the column name
    indicates the value is stored in watts.
    """

    __tablename__ = "egon_building_heat_peak_loads"
    __table_args__ = {"schema": "demand"}

    building_id = Column(Integer, primary_key=True)  # part of composite PK
    scenario = Column(String, primary_key=True)  # part of composite PK
    # Demand sector — presumably residential / CTS; verify against writers.
    sector = Column(String, primary_key=True)
    peak_load_in_w = Column(REAL)  # peak heat load in W (per column name)
            ||
| 184 | |||
| 185 | |||
def adapt_numpy_float64(numpy_float64):
    """Render a ``numpy.float64`` literally in SQL via psycopg2's ``AsIs``."""
    adapted = AsIs(numpy_float64)
    return adapted
            ||
| 188 | |||
| 189 | |||
def adapt_numpy_int64(numpy_int64):
    """Render a ``numpy.int64`` literally in SQL via psycopg2's ``AsIs``."""
    adapted = AsIs(numpy_int64)
    return adapted
            ||
| 192 | |||
| 193 | |||
def timeit(func):
    """
    Decorator for measuring function's running time.

    Parameters
    ----------
    func : callable
        Function to be wrapped and timed.

    Returns
    -------
    callable
        Wrapper that forwards all arguments to ``func``, prints the elapsed
        wall-clock time and returns ``func``'s result.
    """
    # Local import keeps the module-level import block untouched.
    from functools import wraps

    @wraps(func)  # fix: preserve __name__/__doc__ of the wrapped function
    def measure_time(*args, **kw):
        start_time = time.time()
        result = func(*args, **kw)
        print(
            "Processing time of %s(): %.2f seconds."
            % (func.__qualname__, time.time() - start_time)
        )
        return result

    return measure_time
            ||
| 209 | |||
| 210 | |||
def cascade_per_technology(
    heat_per_mv,
    technologies,
    scenario,
    distribution_level,
    max_size_individual_chp=0.05,
):
    """Add plants for individual heat.

    Currently only on mv grid district level.

    Parameters
    ----------
    heat_per_mv : geopandas.geodataframe.GeoDataFrame
        MV grid districts including the heat demand (needs columns
        ``remaining_demand`` and, for federal-state distribution, ``state``)
    technologies : pandas.DataFrame
        List of supply technologies and their parameters (columns
        ``estimated_flh`` and ``priority``)
    scenario : str
        Name of the scenario
    distribution_level : str
        Level on which heat pump targets are distributed; either
        ``"federal_state"`` or country-wide otherwise
    max_size_individual_chp : float
        Maximum capacity of an individual chp in MW

    Returns
    -------
    heat_per_mv : geopandas.geodataframe.GeoDataFrame
        MV grid districts which need additional individual heat supply
    technologies : pandas.DataFrame
        Remaining supply technologies and their parameters
    append_df : pandas.DataFrame
        List of plants per mv grid for the selected technology

    """
    # Handle the technology with the highest priority first.
    tech = technologies[technologies.priority == technologies.priority.max()]

    # Fix: compare the first index label instead of the whole Index.
    # ``tech.index == "heat_pump"`` yields a boolean array whose truth value
    # is ambiguous as soon as two technologies share the top priority.
    if tech.index[0] == "heat_pump":
        # The database/config lookups are only needed in this branch, so
        # resolve them lazily here instead of on every call.
        sources = config.datasets()["heat_supply"]["sources"]

        # Distribute heat pumps linear to remaining demand.
        if distribution_level == "federal_state":
            # Select target values per federal state
            target = db.select_dataframe(
                f"""
                    SELECT DISTINCT ON (gen) gen as state, capacity
                    FROM {sources['scenario_capacities']['schema']}.
                    {sources['scenario_capacities']['table']} a
                    JOIN {sources['federal_states']['schema']}.
                    {sources['federal_states']['table']} b
                    ON a.nuts = b.nuts
                    WHERE scenario_name = '{scenario}'
                    AND carrier = 'residential_rural_heat_pump'
                    """,
                index_col="state",
            )

            # Each grid's share of its state's remaining demand.
            heat_per_mv["share"] = heat_per_mv.groupby(
                "state"
            ).remaining_demand.apply(lambda grp: grp / grp.sum())

            append_df = (
                heat_per_mv["share"]
                .mul(target.capacity[heat_per_mv["state"]].values)
                .reset_index()
            )
        else:
            # Select target value for Germany
            target = db.select_dataframe(
                f"""
                    SELECT SUM(capacity) AS capacity
                    FROM {sources['scenario_capacities']['schema']}.
                    {sources['scenario_capacities']['table']} a
                    WHERE scenario_name = '{scenario}'
                    AND carrier = 'residential_rural_heat_pump'
                    """
            )

            heat_per_mv["share"] = (
                heat_per_mv.remaining_demand
                / heat_per_mv.remaining_demand.sum()
            )

            append_df = (
                heat_per_mv["share"].mul(target.capacity[0]).reset_index()
            )

        append_df.rename(
            {"bus_id": "mv_grid_id", "share": "capacity"}, axis=1, inplace=True
        )

    elif tech.index[0] == "gas_boiler":
        # Gas boilers are sized to cover the whole remaining demand given
        # their estimated full load hours.
        append_df = pd.DataFrame(
            data={
                "capacity": heat_per_mv.remaining_demand.div(
                    tech.estimated_flh.values[0]
                ),
                "carrier": "residential_rural_gas_boiler",
                "mv_grid_id": heat_per_mv.index,
                "scenario": scenario,
            }
        )

    else:
        # Fix: previously an unhandled technology left ``append_df``
        # undefined and raised a NameError below; return an empty result
        # instead so the cascade simply moves on.
        append_df = pd.DataFrame(
            columns=["mv_grid_id", "carrier", "capacity", "scenario"]
        )

    if append_df.size > 0:
        append_df["carrier"] = tech.index[0]
        # Subtract the energy supplied by the new plants (capacity times
        # full load hours) from each grid's remaining demand.
        heat_per_mv.loc[
            append_df.mv_grid_id, "remaining_demand"
        ] -= append_df.set_index("mv_grid_id").capacity.mul(
            tech.estimated_flh.values[0]
        )

    # Drop grids whose demand was overshot (negative remainder).
    heat_per_mv = heat_per_mv[heat_per_mv.remaining_demand >= 0]

    # This technology is done; remove it from the cascade.
    technologies = technologies.drop(tech.index)

    return heat_per_mv, technologies, append_df
            ||
| 325 | |||
| 326 | |||
def cascade_heat_supply_indiv(scenario, distribution_level, plotting=True):
    """Assigns supply strategy for individual heating in four steps.

    1.) all small scale CHP are connected.
    2.) If the supply can not meet the heat demand, solar thermal collectors
        are attached. This is not implemented yet, since individual
        solar thermal plants are not considered in eGon2035 scenario.
    3.) If this is not suitable, the mv grid is also supplied by heat pumps.
    4.) The last option are individual gas boilers.

    Parameters
    ----------
    scenario : str
        Name of scenario
    distribution_level : str
        Level on which heat pump targets are distributed, passed through to
        :func:`cascade_per_technology`
    plotting : bool, optional
        Choose if individual heating supply is plotted. The default is True.

    Returns
    -------
    resulting_capacities : pandas.DataFrame
        List of plants per mv grid

    """

    sources = config.datasets()["heat_supply"]["sources"]

    # Select residential heat demand per mv grid district and federal state
    heat_per_mv = db.select_geodataframe(
        f"""
        SELECT d.bus_id as bus_id, SUM(demand) as demand,
        c.vg250_lan as state, d.geom
        FROM {sources['heat_demand']['schema']}.
        {sources['heat_demand']['table']} a
        JOIN {sources['map_zensus_grid']['schema']}.
        {sources['map_zensus_grid']['table']} b
        ON a.zensus_population_id = b.zensus_population_id
        JOIN {sources['map_vg250_grid']['schema']}.
        {sources['map_vg250_grid']['table']} c
        ON b.bus_id = c.bus_id
        JOIN {sources['mv_grids']['schema']}.
        {sources['mv_grids']['table']} d
        ON d.bus_id = c.bus_id
        WHERE scenario = '{scenario}'
        AND a.zensus_population_id NOT IN (
            SELECT zensus_population_id
            FROM {sources['map_dh']['schema']}.{sources['map_dh']['table']}
            WHERE scenario = '{scenario}')
        GROUP BY d.bus_id, vg250_lan, geom
        """,
        index_col="bus_id",
    )

    # Store geometry of mv grid (centroids) to re-attach to the result later
    geom_mv = heat_per_mv.geom.centroid.copy()

    # Initalize Dataframe for results
    resulting_capacities = pd.DataFrame(
        columns=["mv_grid_id", "carrier", "capacity"]
    )

    # Set technology data according to
    # http://www.wbzu.de/seminare/infopool/infopool-bhkw
    # TODO: Add gas boilers and solar themal (eGon100RE)
    technologies = pd.DataFrame(
        index=["heat_pump", "gas_boiler"],
        columns=["estimated_flh", "priority"],
        data={"estimated_flh": [4000, 8000], "priority": [2, 1]},
    )

    # In the beginning, the remaining demand equals demand
    heat_per_mv["remaining_demand"] = heat_per_mv["demand"]

    # Connect new technologies, if there is still heat demand left
    while (len(technologies) > 0) and (len(heat_per_mv) > 0):
        # Attach new supply technology
        heat_per_mv, technologies, append_df = cascade_per_technology(
            heat_per_mv, technologies, scenario, distribution_level
        )
        # Collect resulting capacities.
        # Fix: DataFrame.append was deprecated and removed in pandas 2.0;
        # pd.concat is the supported equivalent.
        resulting_capacities = pd.concat(
            [resulting_capacities, append_df], ignore_index=True
        )

    if plotting:
        plot_heat_supply(resulting_capacities)

    return gpd.GeoDataFrame(
        resulting_capacities,
        geometry=geom_mv[resulting_capacities.mv_grid_id].values,
    )
            ||
| 417 | |||
| 418 | |||
def get_peta_demand(mvgd, scenario):
    """
    Retrieve annual peta heat demand for residential buildings for either
    eGon2035 or eGon100RE scenario.

    Parameters
    ----------
    mvgd : int
        MV grid ID.
    scenario : str
        Possible options are eGon2035 or eGon100RE

    Returns
    -------
    df_peta_demand : pd.DataFrame
        Annual residential heat demand per building and scenario. Columns of
        the dataframe are zensus_population_id and demand.

    """
    # All filter conditions are combined into a single AND-ed filter call:
    # restrict to the requested grid, join zensus cells to their demand and
    # keep only residential demand of the requested scenario.
    with db.session_scope() as session:
        demand_query = session.query(
            MapZensusGridDistricts.zensus_population_id,
            EgonPetaHeat.demand,
        ).filter(
            MapZensusGridDistricts.bus_id == mvgd,
            MapZensusGridDistricts.zensus_population_id
            == EgonPetaHeat.zensus_population_id,
            EgonPetaHeat.sector == "residential",
            EgonPetaHeat.scenario == scenario,
        )

    df_peta_demand = pd.read_sql(
        demand_query.statement, demand_query.session.bind, index_col=None
    )

    return df_peta_demand
            ||
| 461 | |||
| 462 | |||
def get_residential_heat_profile_ids(mvgd):
    """
    Retrieve 365 daily heat profiles ids per residential building and selected
    mvgd.

    Parameters
    ----------
    mvgd : int
        ID of MVGD

    Returns
    -------
    df_profiles_ids : pd.DataFrame
        Residential daily heat profile ID's per building. Columns of the
        dataframe are zensus_population_id, building_id,
        selected_idp_profiles, buildings and day_of_year.

    """
    # Join buildings' selected profile-id arrays to the zensus cells of the
    # requested MV grid district.
    with db.session_scope() as session:
        query = (
            session.query(
                MapZensusGridDistricts.zensus_population_id,
                EgonHeatTimeseries.building_id,
                EgonHeatTimeseries.selected_idp_profiles,
            )
            .filter(MapZensusGridDistricts.bus_id == mvgd)
            .filter(
                MapZensusGridDistricts.zensus_population_id
                == EgonHeatTimeseries.zensus_population_id
            )
        )

    df_profiles_ids = pd.read_sql(
        query.statement, query.session.bind, index_col=None
    )
    # Add building count per cell
    df_profiles_ids = pd.merge(
        left=df_profiles_ids,
        right=df_profiles_ids.groupby("zensus_population_id")["building_id"]
        .count()
        .rename("buildings"),
        left_on="zensus_population_id",
        right_index=True,
    )

    # unnest array of ids per building
    df_profiles_ids = df_profiles_ids.explode("selected_idp_profiles")
    # add day of year column by order of list
    # (position in each building's array encodes the day, so cumcount per
    # building yields 1..365 — assumes the arrays are stored in day order,
    # TODO confirm against EgonHeatTimeseries writers)
    df_profiles_ids["day_of_year"] = (
        df_profiles_ids.groupby("building_id").cumcount() + 1
    )
    return df_profiles_ids
            ||
| 515 | |||
| 516 | |||
def get_daily_profiles(profile_ids):
    """
    Fetch residential daily heat profiles for the given profile IDs.

    Parameters
    ----------
    profile_ids : list(int)
        daily heat profile ID's

    Returns
    -------
    df_profiles : pd.DataFrame
        Residential daily heat profiles. Columns of the dataframe are idp,
        house, temperature_class and hour.

    """
    # The idp pool table is not declared as an ORM class in this module;
    # it is reflected at runtime via saio.
    saio.register_schema("demand", db.engine())
    from saio.demand import egon_heat_idp_pool

    with db.session_scope() as session:
        query = session.query(egon_heat_idp_pool).filter(
            egon_heat_idp_pool.index.in_(profile_ids)
        )

    df_profiles = pd.read_sql(
        query.statement, query.session.bind, index_col="index"
    )

    # unnest array of profile values per id
    df_profiles = df_profiles.explode("idp")
    # Add column for hour of day: cumcount over the (repeated) index level
    # enumerates each profile's exploded values in order, giving 1..24.
    df_profiles["hour"] = df_profiles.groupby(axis=0, level=0).cumcount() + 1

    return df_profiles
            ||
| 549 | |||
| 550 | |||
def get_daily_demand_share(mvgd):
    """
    Retrieve the daily share of annual heat demand per census cell for one
    MV grid district.

    Parameters
    ----------
    mvgd : int
        MVGD id

    Returns
    -------
    df_daily_demand_share : pd.DataFrame
        Daily annual demand share per cencus cell. Columns of the dataframe
        are zensus_population_id, day_of_year and daily_demand_share.

    """
    # Join cells of the MV grid district to their climate zone and pick the
    # daily demand shares of that zone.
    with db.session_scope() as session:
        query = session.query(
            MapZensusGridDistricts.zensus_population_id,
            EgonDailyHeatDemandPerClimateZone.day_of_year,
            EgonDailyHeatDemandPerClimateZone.daily_demand_share,
        ).filter(
            EgonMapZensusClimateZones.climate_zone
            == EgonDailyHeatDemandPerClimateZone.climate_zone,
            MapZensusGridDistricts.zensus_population_id
            == EgonMapZensusClimateZones.zensus_population_id,
            MapZensusGridDistricts.bus_id == mvgd,
        )

    df_daily_demand_share = pd.read_sql(
        query.statement, query.session.bind, index_col=None
    )
    return df_daily_demand_share
            ||
| 583 | |||
| 584 | |||
def calc_residential_heat_profiles_per_mvgd(mvgd, scenario):
    """
    Gets residential heat profiles per building in MV grid for either eGon2035
    or eGon100RE scenario.

    Parameters
    ----------
    mvgd : int
        MV grid ID.
    scenario : str
        Possible options are eGon2035 or eGon100RE.

    Returns
    --------
    pd.DataFrame
        Heat demand profiles of buildings. Columns are:
            * zensus_population_id : int
                Zensus cell ID building is in.
            * building_id : int
                ID of building.
            * day_of_year : int
                Day of the year (1 - 365).
            * hour : int
                Hour of the day (1 - 24).
            * demand_ts : float
                Building's residential heat demand in MW, for specified hour
                of the year (specified through columns `day_of_year` and
                `hour`).

    """
    result_columns = [
        "zensus_population_id",
        "building_id",
        "day_of_year",
        "hour",
        "demand_ts",
    ]

    # annual demand and building count per zensus cell
    df_peta_demand = get_peta_demand(mvgd, scenario)

    # TODO maybe return empty dataframe
    if df_peta_demand.empty:
        logger.info(f"No demand for MVGD: {mvgd}")
        return pd.DataFrame(columns=result_columns)

    # selected daily profile IDs per cell and day of year
    df_profiles_ids = get_residential_heat_profile_ids(mvgd)

    if df_profiles_ids.empty:
        logger.info(f"No profiles for MVGD: {mvgd}")
        return pd.DataFrame(columns=result_columns)

    df_profiles = get_daily_profiles(
        df_profiles_ids["selected_idp_profiles"].unique()
    )

    df_daily_demand_share = get_daily_demand_share(mvgd)

    # Combine demand, profile IDs, daily demand shares and hourly profile
    # values. All joins are inner joins (pd.merge default), as before.
    df_merged = (
        df_peta_demand.merge(df_profiles_ids, on="zensus_population_id")
        .merge(
            df_daily_demand_share,
            on=["zensus_population_id", "day_of_year"],
        )
        .merge(
            df_profiles[["idp", "hour"]],
            left_on="selected_idp_profiles",
            right_index=True,
        )
    )

    # Scale the normalised hourly profile value (idp) by the day's share of
    # the cell's annual demand, split evenly across the cell's buildings.
    df_merged["demand_ts"] = (
        df_merged["idp"]
        .mul(df_merged["daily_demand_share"])
        .mul(df_merged["demand"])
        .div(df_merged["buildings"])
    )

    return df_merged.loc[:, result_columns]
            ||
| 671 | |||
| 672 | |||
def plot_heat_supply(resulting_capacities):
    """
    Plots maps of installed individual heat supply capacities per MV grid.

    For each carrier ("CHP" and "heat_pump"), the MV grid districts are
    colored by installed capacity in MW and the figure is saved to
    ``plots/individual_heat_supply_<carrier>.png``.

    Parameters
    ----------
    resulting_capacities : pd.DataFrame
        DataFrame with columns `mv_grid_id`, `carrier` and `capacity`
        (capacity in MW per MV grid and carrier).

    """
    from matplotlib import pyplot as plt

    mv_grids = db.select_geodataframe(
        """
        SELECT * FROM grid.egon_mv_grid_district
        """,
        index_col="bus_id",
    )

    # make sure the output directory exists; savefig would otherwise fail
    # on a fresh checkout
    os.makedirs("plots", exist_ok=True)

    for c in ["CHP", "heat_pump"]:
        mv_grids[c] = (
            resulting_capacities[resulting_capacities.carrier == c]
            .set_index("mv_grid_id")
            .capacity
        )

        fig, ax = plt.subplots(1, 1)
        mv_grids.boundary.plot(linewidth=0.2, ax=ax, color="black")
        mv_grids.plot(
            ax=ax,
            column=c,
            cmap="magma_r",
            legend=True,
            legend_kwds={
                "label": f"Installed {c} in MW",
                "orientation": "vertical",
            },
        )
        plt.savefig(f"plots/individual_heat_supply_{c}.png", dpi=300)
        # close the figure to release memory when looping over carriers
        plt.close(fig)
            ||
| 704 | |||
| 705 | |||
def get_zensus_cells_with_decentral_heat_demand_in_mv_grid(
    scenario, mv_grid_id
):
    """
    Returns zensus cell IDs with decentral heating systems in given MV grid.

    As cells with district heating differ between scenarios, this is also
    depending on the scenario.

    Parameters
    -----------
    scenario : str
        Name of scenario. Can be either "eGon2035" or "eGon100RE".
    mv_grid_id : int
        ID of MV grid.

    Returns
    --------
    pd.Index(int)
        Zensus cell IDs (as int) of buildings with decentral heating systems
        in given MV grid. Type is pandas Index to avoid errors later on when
        it is used in a query.

    """
    # All zensus cells that lie within the MV grid district. Wrapped in a
    # pd.Index because np.int64 values will for some reason throw an error
    # when used in a query.
    cells_in_grid = pd.Index(
        db.select_dataframe(
            f"""
            SELECT zensus_population_id
            FROM boundaries.egon_map_zensus_grid_districts
            WHERE bus_id = {mv_grid_id}
            """,
            index_col=None,
        ).zensus_population_id.values
    )

    # cells supplied by district heating in the given scenario
    with db.session_scope() as session:
        query = session.query(
            MapZensusDistrictHeatingAreas.zensus_population_id,
        ).filter(
            MapZensusDistrictHeatingAreas.scenario == scenario,
            MapZensusDistrictHeatingAreas.zensus_population_id.in_(
                cells_in_grid
            ),
        )

    cells_with_dh = pd.read_sql(
        query.statement, query.session.bind, index_col=None
    ).zensus_population_id.values

    # decentral heating cells = all cells minus district heating cells
    return pd.Index(cells_in_grid.drop(cells_with_dh, errors="ignore"))
            ||
| 766 | |||
| 767 | |||
def get_residential_buildings_with_decentral_heat_demand_in_mv_grid(
    scenario, mv_grid_id
):
    """
    Returns building IDs of buildings with decentral residential heat demand
    in given MV grid.

    As cells with district heating differ between scenarios, this is also
    depending on the scenario.

    Parameters
    -----------
    scenario : str
        Name of scenario. Can be either "eGon2035" or "eGon100RE".
    mv_grid_id : int
        ID of MV grid.

    Returns
    --------
    pd.Index(int)
        Building IDs (as int) of buildings with decentral heating system in
        given MV grid. Type is pandas Index to avoid errors later on when it
        is used in a query.

    """
    # zensus cells in the grid that are not supplied by district heating
    cell_ids = get_zensus_cells_with_decentral_heat_demand_in_mv_grid(
        scenario, mv_grid_id
    )

    # residential buildings with heat demand located in those cells
    saio.register_schema("demand", engine)
    from saio.demand import egon_heat_timeseries_selected_profiles

    with db.session_scope() as session:
        query = session.query(
            egon_heat_timeseries_selected_profiles.building_id,
        ).filter(
            egon_heat_timeseries_selected_profiles.zensus_population_id.in_(
                cell_ids
            )
        )

    return pd.Index(
        pd.read_sql(
            query.statement, query.session.bind, index_col=None
        ).building_id.values
    )
            ||
| 818 | |||
| 819 | |||
def get_cts_buildings_with_decentral_heat_demand_in_mv_grid(
    scenario, mv_grid_id
):
    """
    Returns building IDs of buildings with decentral CTS heat demand in
    given MV grid.

    As cells with district heating differ between scenarios, this is also
    depending on the scenario.

    Parameters
    -----------
    scenario : str
        Name of scenario. Can be either "eGon2035" or "eGon100RE".
    mv_grid_id : int
        ID of MV grid.

    Returns
    --------
    pd.Index(int)
        Building IDs (as int) of buildings with decentral heating system in
        given MV grid. Type is pandas Index to avoid errors later on when it
        is used in a query.

    """
    # zensus cells in the grid that are not supplied by district heating
    cell_ids = get_zensus_cells_with_decentral_heat_demand_in_mv_grid(
        scenario, mv_grid_id
    )

    # CTS buildings located in those cells
    with db.session_scope() as session:
        query = session.query(EgonMapZensusMvgdBuildings.building_id).filter(
            EgonMapZensusMvgdBuildings.sector == "cts",
            EgonMapZensusMvgdBuildings.zensus_population_id.in_(cell_ids),
        )

    return pd.Index(
        pd.read_sql(
            query.statement, query.session.bind, index_col=None
        ).building_id.values
    )
            ||
| 867 | |||
| 868 | |||
def get_buildings_with_decentral_heat_demand_in_mv_grid(mvgd, scenario):
    """
    Returns building IDs of buildings with decentral heat demand in
    given MV grid.

    Combines residential and CTS buildings. As cells with district heating
    differ between scenarios, this is also depending on the scenario.

    Parameters
    -----------
    mvgd : int
        ID of MV grid.
    scenario : str
        Name of scenario. Can be either "eGon2035" or "eGon100RE".

    Returns
    --------
    pd.Index(int)
        Building IDs (as int) of residential and CTS buildings with decentral
        heating system in given MV grid. Type is pandas Index to avoid errors
        later on when it is used in a query.

    """
    # get residential buildings with decentral heating systems
    buildings_decentral_heating_res = (
        get_residential_buildings_with_decentral_heat_demand_in_mv_grid(
            scenario, mvgd
        )
    )

    # get CTS buildings with decentral heating systems
    buildings_decentral_heating_cts = (
        get_cts_buildings_with_decentral_heat_demand_in_mv_grid(scenario, mvgd)
    )

    # merge residential and CTS buildings; a building can appear in both
    # sets (mixed use), hence the unique()
    buildings_decentral_heating = buildings_decentral_heating_res.append(
        buildings_decentral_heating_cts
    ).unique()

    return buildings_decentral_heating
            ||
| 910 | |||
| 911 | |||
def get_total_heat_pump_capacity_of_mv_grid(scenario, mv_grid_id):
    """
    Returns total heat pump capacity per grid that was previously defined
    (by NEP or pypsa-eur-sec).

    Parameters
    -----------
    scenario : str
        Name of scenario. Can be either "eGon2035" or "eGon100RE".
    mv_grid_id : int
        ID of MV grid.

    Returns
    --------
    float
        Total heat pump capacity in MW in given MV grid.

    """
    from egon.data.datasets.heat_supply import EgonIndividualHeatingSupply

    with db.session_scope() as session:
        # single filter() with several criteria is equivalent to chained
        # filter() calls (criteria are AND-ed)
        query = session.query(
            EgonIndividualHeatingSupply.mv_grid_id,
            EgonIndividualHeatingSupply.capacity,
        ).filter(
            EgonIndividualHeatingSupply.scenario == scenario,
            EgonIndividualHeatingSupply.carrier == "heat_pump",
            EgonIndividualHeatingSupply.mv_grid_id == mv_grid_id,
        )

    hp_cap = pd.read_sql(
        query.statement, query.session.bind, index_col="mv_grid_id"
    )

    # no entry for this grid means no heat pump capacity was allocated
    return 0.0 if hp_cap.empty else hp_cap.capacity.values[0]
            ||
| 950 | |||
| 951 | |||
def get_heat_peak_demand_per_building(scenario, building_ids):
    """
    Returns heat peak demand per building in MW.

    Parameters
    ----------
    scenario : str
        Name of scenario. Can be either "eGon2035" or "eGon100RE".
    building_ids : pd.Index(int)
        Building IDs (as int) to get peak heat demand for.

    Returns
    -------
    pd.Series
        Peak heat demand in MW per building. Index contains the building ID.

    Raises
    ------
    ValueError
        If the database returns more than one peak load per building.

    """
    with db.session_scope() as session:
        query = (
            session.query(
                BuildingHeatPeakLoads.building_id,
                BuildingHeatPeakLoads.peak_load_in_w,
            )
            .filter(BuildingHeatPeakLoads.scenario == scenario)
            .filter(BuildingHeatPeakLoads.building_id.in_(building_ids))
        )

    df_heat_peak_demand = pd.read_sql(
        query.statement, query.session.bind, index_col=None
    )

    # TODO remove check
    if df_heat_peak_demand.duplicated("building_id").any():
        raise ValueError("Duplicate building_id")

    # convert to series and from W to MW
    # NOTE(review): the column stores watts (see name `peak_load_in_w`) and
    # downstream functions expect MW, so the conversion must divide by 1e6;
    # the previous code multiplied by 1e6, contradicting its own comment.
    df_heat_peak_demand = (
        df_heat_peak_demand.set_index("building_id").loc[:, "peak_load_in_w"]
        / 1e6
    )
    return df_heat_peak_demand
            ||
| 979 | |||
| 980 | |||
def determine_minimum_hp_capacity_per_building(
    peak_heat_demand, flexibility_factor=24 / 18, cop=1.7
):
    """
    Determines minimum required heat pump capacity.

    Parameters
    ----------
    peak_heat_demand : pd.Series
        Series with peak heat demand per building in MW. Index contains the
        building ID.
    flexibility_factor : float
        Factor to overdimension the heat pump to allow for some flexible
        dispatch in times of high heat demand. Per default, a factor of 24/18
        is used.
    cop : float
        Coefficient of performance assumed at peak load. Defaults to 1.7.

    Returns
    -------
    pd.Series
        Pandas series with minimum required heat pump capacity per building in
        MW.

    """
    # oversize the thermal peak, then translate thermal to electrical
    # capacity via the COP
    oversized_peak = peak_heat_demand * flexibility_factor
    return oversized_peak / cop
            ||
| 1005 | |||
| 1006 | |||
def determine_buildings_with_hp_in_mv_grid(
    hp_cap_mv_grid, min_hp_cap_per_building
):
    """
    Distributes given total heat pump capacity to buildings based on their peak
    heat demand.

    Buildings with PV rooftop plants get a higher selection weight. Buildings
    are drawn in a weighted random order and selected until the cumulative
    minimum capacity exceeds the grid's total capacity; afterwards, remaining
    capacity is filled with randomly chosen buildings that still fit.

    Parameters
    -----------
    hp_cap_mv_grid : float
        Total heat pump capacity in MW in given MV grid.
    min_hp_cap_per_building : pd.Series
        Pandas series with minimum required heat pump capacity per building
        in MW.

    Returns
    -------
    pd.Index(int)
        Building IDs (as int) of buildings to get heat demand time series for.

    """
    building_ids = min_hp_cap_per_building.index

    # get buildings with PV to give them a higher priority when selecting
    # buildings a heat pump will be allocated to
    saio.register_schema("supply", engine)
    from saio.supply import egon_power_plants_pv_roof_building

    with db.session_scope() as session:
        query = session.query(
            egon_power_plants_pv_roof_building.building_id
        ).filter(
            egon_power_plants_pv_roof_building.building_id.in_(building_ids)
        )

    buildings_with_pv = pd.read_sql(
        query.statement, query.session.bind, index_col=None
    ).building_id.values
    # set different weights for buildings with PV and without PV
    weight_with_pv = 1.5
    weight_without_pv = 1.0
    weights = pd.concat(
        [
            pd.DataFrame(
                {"weight": weight_without_pv},
                index=building_ids.drop(buildings_with_pv, errors="ignore"),
            ),
            pd.DataFrame({"weight": weight_with_pv}, index=buildings_with_pv),
        ]
    )
    # normalise weights (probability needs to add up to 1)
    weights.weight = weights.weight / weights.weight.sum()

    # get random order at which buildings are chosen
    # seeded for reproducibility across runs
    np.random.seed(db.credentials()["--random-seed"])
    buildings_with_hp_order = np.random.choice(
        weights.index,
        size=len(weights),
        replace=False,
        p=weights.weight.values,
    )

    # select buildings until HP capacity in MV grid is reached (some rest
    # capacity will remain)
    hp_cumsum = min_hp_cap_per_building.loc[buildings_with_hp_order].cumsum()
    buildings_with_hp = hp_cumsum[hp_cumsum <= hp_cap_mv_grid].index

    # choose random heat pumps until remaining heat pumps are larger than
    # remaining heat pump capacity
    remaining_hp_cap = (
        hp_cap_mv_grid - min_hp_cap_per_building.loc[buildings_with_hp].sum()
    )
    min_cap_buildings_wo_hp = min_hp_cap_per_building.loc[
        building_ids.drop(buildings_with_hp)
    ]
    possible_buildings = min_cap_buildings_wo_hp[
        min_cap_buildings_wo_hp <= remaining_hp_cap
    ].index
    # NOTE(review): the loop terminates because each iteration removes the
    # chosen building from the candidate pool; re-seeding inside the loop
    # with the same seed looks intentional for reproducibility — confirm.
    while len(possible_buildings) > 0:
        random.seed(db.credentials()["--random-seed"])
        new_hp_building = random.choice(possible_buildings)
        # add new building to building with HP
        buildings_with_hp = buildings_with_hp.append(
            pd.Index([new_hp_building])
        )
        # determine if there are still possible buildings
        remaining_hp_cap = (
            hp_cap_mv_grid
            - min_hp_cap_per_building.loc[buildings_with_hp].sum()
        )
        min_cap_buildings_wo_hp = min_hp_cap_per_building.loc[
            building_ids.drop(buildings_with_hp)
        ]
        possible_buildings = min_cap_buildings_wo_hp[
            min_cap_buildings_wo_hp <= remaining_hp_cap
        ].index

    return buildings_with_hp
            ||
| 1105 | |||
| 1106 | |||
def desaggregate_hp_capacity(min_hp_cap_per_building, hp_cap_mv_grid):
    """
    Desaggregates the required total heat pump capacity to buildings.

    All buildings are previously assigned a minimum required heat pump
    capacity. If the total heat pump capacity exceeds this, larger heat pumps
    are assigned.

    Parameters
    ------------
    min_hp_cap_per_building : pd.Series
        Pandas series with minimum required heat pump capacity per building
        in MW.
    hp_cap_mv_grid : float
        Total heat pump capacity in MW in given MV grid.

    Returns
    --------
    pd.Series
        Pandas series with heat pump capacity per building in MW.

    """
    # distribute the capacity not yet covered by the minimum requirements
    # proportionally to each building's minimum capacity
    total_min_cap = min_hp_cap_per_building.sum()
    scaling = (hp_cap_mv_grid - total_min_cap) / total_min_cap

    hp_cap_per_building = (
        min_hp_cap_per_building * scaling + min_hp_cap_per_building
    )
    hp_cap_per_building.index.name = "building_id"

    return hp_cap_per_building
            ||
| 1142 | |||
| 1143 | |||
def determine_min_hp_cap_buildings_pypsa_eur_sec(
    peak_heat_demand, building_ids
):
    """
    Determines minimum required HP capacity in MV grid in MW as input for
    pypsa-eur-sec.

    Parameters
    ----------
    peak_heat_demand : pd.Series
        Series with peak heat demand per building in MW. Index contains the
        building ID.
    building_ids : pd.Index(int)
        Building IDs (as int) of buildings with decentral heating system in
        given MV grid.

    Returns
    --------
    float
        Minimum required HP capacity in MV grid in MW.

    """
    # no buildings with decentral heating -> no HP capacity required
    if len(building_ids) == 0:
        return 0.0

    # sum of the per-building minimum capacities
    min_caps = determine_minimum_hp_capacity_per_building(
        peak_heat_demand.loc[building_ids]
    )
    return min_caps.sum()
            ||
| 1175 | |||
| 1176 | |||
def determine_hp_cap_buildings_eGon2035_per_mvgd(
    mv_grid_id, peak_heat_demand, building_ids
):
    """
    Determines which buildings in the MV grid will have a HP (buildings with PV
    rooftop are more likely to be assigned) in the eGon2035 scenario, as well
    as their respective HP capacity in MW.

    Parameters
    -----------
    mv_grid_id : int
        ID of MV grid.
    peak_heat_demand : pd.Series
        Series with peak heat demand per building in MW. Index contains the
        building ID.
    building_ids : pd.Index(int)
        Building IDs (as int) of buildings with decentral heating system in
        given MV grid.

    Returns
    --------
    pd.Series
        Heat pump capacity in MW per building (series named "hp_capacity");
        empty if the grid has no buildings or no allocated HP capacity.

    """
    hp_cap_grid = get_total_heat_pump_capacity_of_mv_grid(
        "eGon2035", mv_grid_id
    )

    # nothing to distribute without buildings or without grid capacity
    if not (len(building_ids) > 0 and hp_cap_grid > 0.0):
        return pd.Series(dtype="float64").rename("hp_capacity")

    # determine minimum required heat pump capacity per building
    min_hp_cap_buildings = determine_minimum_hp_capacity_per_building(
        peak_heat_demand.loc[building_ids]
    )

    # select buildings that will have a heat pump
    buildings_with_hp = determine_buildings_with_hp_in_mv_grid(
        hp_cap_grid, min_hp_cap_buildings
    )

    # distribute total heat pump capacity to all buildings with HP
    hp_cap_per_building = desaggregate_hp_capacity(
        min_hp_cap_buildings.loc[buildings_with_hp], hp_cap_grid
    )

    return hp_cap_per_building.rename("hp_capacity")
            ||
| 1224 | |||
| 1225 | |||
def determine_hp_cap_buildings_eGon100RE_per_mvgd(mv_grid_id):
    """
    Determines HP capacity per building in eGon100RE scenario.

    In eGon100RE scenario all buildings without district heating get a heat
    pump.

    Parameters
    -----------
    mv_grid_id : int
        ID of MV grid.

    Returns
    --------
    pd.Series
        Pandas series with heat pump capacity per building in MW.

    """
    hp_cap_grid = get_total_heat_pump_capacity_of_mv_grid(
        "eGon100RE", mv_grid_id
    )

    # without any allocated grid capacity there is nothing to distribute
    if not hp_cap_grid > 0.0:
        return pd.Series(dtype="float64").rename("hp_capacity")

    # get buildings with decentral heating systems
    building_ids = get_buildings_with_decentral_heat_demand_in_mv_grid(
        mv_grid_id, scenario="eGon100RE"
    )

    logger.info(f"MVGD={mv_grid_id} | Get peak loads from DB")
    df_peak_heat_demand = get_heat_peak_demand_per_building(
        "eGon100RE", building_ids
    )

    logger.info(f"MVGD={mv_grid_id} | Determine HP capacities.")
    # determine minimum required heat pump capacity per building
    min_hp_cap_buildings = determine_minimum_hp_capacity_per_building(
        df_peak_heat_demand, flexibility_factor=24 / 18, cop=1.7
    )

    logger.info(f"MVGD={mv_grid_id} | Desaggregate HP capacities.")
    # distribute total heat pump capacity to all buildings with HP
    hp_cap_per_building = desaggregate_hp_capacity(
        min_hp_cap_buildings, hp_cap_grid
    )

    return hp_cap_per_building.rename("hp_capacity")
            ||
| 1271 | |||
| 1272 | |||
def determine_hp_cap_buildings_eGon100RE():
    """
    Main function to determine HP capacity per building in eGon100RE scenario.

    Retrieves all MV grid district IDs with heat demand, determines the heat
    pump capacity per building for each grid via
    :func:`determine_hp_cap_buildings_eGon100RE_per_mvgd` and writes the
    results (scenario "eGon100RE") to the ``EgonHpCapacityBuildings`` table.

    """

    # ========== Register np datatypes with SQLA ==========
    register_adapter(np.float64, adapt_numpy_float64)
    register_adapter(np.int64, adapt_numpy_int64)
    # =====================================================

    # All bus IDs of MV grid districts that contain zensus cells with heat
    # demand
    with db.session_scope() as session:
        query = (
            session.query(
                MapZensusGridDistricts.bus_id,
            )
            .filter(
                MapZensusGridDistricts.zensus_population_id
                == EgonPetaHeat.zensus_population_id
            )
            .distinct(MapZensusGridDistricts.bus_id)
        )
    mvgd_ids = pd.read_sql(query.statement, query.session.bind, index_col=None)
    mvgd_ids = mvgd_ids.sort_values("bus_id")
    mvgd_ids = mvgd_ids["bus_id"].values

    # Collect per-MVGD results in a list and concatenate once at the end;
    # repeated pd.concat in the loop is quadratic and concatenating onto an
    # empty frame is deprecated in newer pandas versions.
    hp_cap_frames = []

    for mvgd_id in mvgd_ids:

        logger.info(f"MVGD={mvgd_id} | Start")

        hp_cap_per_building_100RE = (
            determine_hp_cap_buildings_eGon100RE_per_mvgd(mvgd_id)
        )

        if not hp_cap_per_building_100RE.empty:
            hp_cap_frames.append(hp_cap_per_building_100RE.reset_index())

    df_hp_cap_per_building_100RE_db = (
        pd.concat(hp_cap_frames, axis=0)
        if hp_cap_frames
        else pd.DataFrame(columns=["building_id", "hp_capacity"])
    )

    logger.info(f"MVGD={min(mvgd_ids)} - {max(mvgd_ids)} | Write data to db.")
    df_hp_cap_per_building_100RE_db["scenario"] = "eGon100RE"

    write_table_to_postgres(
        df_hp_cap_per_building_100RE_db,
        EgonHpCapacityBuildings,
        engine=engine,
        drop=False,
    )
            ||
| 1329 | |||
| 1330 | |||
def aggregate_residential_and_cts_profiles(mvgd, scenario):
    """
    Gets residential and CTS heat demand profiles per building and aggregates
    them.

    Parameters
    ----------
    mvgd : int
        MV grid ID.
    scenario : str
        Possible options are eGon2035 or eGon100RE.

    Returns
    --------
    pd.DataFrame
        Table of demand profile per building. Column names are building IDs
        and index is hour of the year as int (0-8759).

    """
    # Residential profiles come in long format; reshape so that each building
    # is one column and each row one hour of the year.
    residential_profiles = calc_residential_heat_profiles_per_mvgd(
        mvgd=mvgd, scenario=scenario
    ).pivot(
        index=["day_of_year", "hour"],
        columns="building_id",
        values="demand_ts",
    )
    residential_profiles = residential_profiles.sort_index().reset_index(
        drop=True
    )

    # CTS profiles are already wide (building IDs as columns)
    cts_profiles = calc_cts_building_profiles(
        bus_ids=[mvgd],
        scenario=scenario,
        sector="heat",
    )

    # Place both sectors side by side and sum columns with the same building
    # ID (buildings can have both residential and CTS demand)
    combined = pd.concat([residential_profiles, cts_profiles], axis=1)
    return combined.groupby(axis=1, level=0).sum()
            ||
| 1376 | |||
| 1377 | |||
def export_to_db(df_peak_loads_db, df_heat_mvgd_ts_db):
    """
    Exports building peak heat loads and aggregated MV grid heat demand time
    series to the database.

    Peak loads are melted to long format, converted from MW to W and written
    to the ``BuildingHeatPeakLoads`` table. The aggregated time series are
    restricted to the columns of ``EgonEtragoTimeseriesIndividualHeating``
    and appended to that table.

    Parameters
    ----------
    df_peak_loads_db : pd.DataFrame
        Peak heat demand per building in MW; contains a `building_id` column
        plus one column per scenario.
    df_heat_mvgd_ts_db : pd.DataFrame
        Aggregated heat demand time series per MV grid, carrier and scenario.

    """
    df_peak_loads_db = df_peak_loads_db.melt(
        id_vars="building_id",
        var_name="scenario",
        value_name="peak_load_in_w",
    )
    df_peak_loads_db["sector"] = "residential+cts"
    # From MW to W
    df_peak_loads_db["peak_load_in_w"] = (
        df_peak_loads_db["peak_load_in_w"] * 1e6
    )
    write_table_to_postgres(
        df_peak_loads_db, BuildingHeatPeakLoads, engine=engine
    )

    # Restrict the time series frame to the target table's columns and pass
    # the SQLA column types explicitly to to_sql
    columns = {
        column.key: column.type
        for column in EgonEtragoTimeseriesIndividualHeating.__table__.columns
    }
    df_heat_mvgd_ts_db = df_heat_mvgd_ts_db.loc[:, columns.keys()]

    df_heat_mvgd_ts_db.to_sql(
        name=EgonEtragoTimeseriesIndividualHeating.__table__.name,
        schema=EgonEtragoTimeseriesIndividualHeating.__table__.schema,
        con=engine,
        if_exists="append",
        method="multi",
        index=False,
        dtype=columns,
    )
            ||
| 1410 | |||
| 1411 | |||
def export_min_cap_to_csv(df_hp_min_cap_mv_grid_pypsa_eur_sec):
    """
    Exports minimum required HP capacity per MV grid to csv for pypsa-eur-sec.

    Data is written to
    ``input-pypsa-eur-sec/minimum_hp_capacity_mv_grid_2035.csv``. The header
    row is only written when the file is newly created; otherwise rows are
    appended.

    Parameters
    ----------
    df_hp_min_cap_mv_grid_pypsa_eur_sec : pd.Series
        Minimum required heat pump capacity per MV grid, indexed by MV grid
        ID.

    """
    df_hp_min_cap_mv_grid_pypsa_eur_sec.index.name = "mvgd_id"
    df_hp_min_cap_mv_grid_pypsa_eur_sec = (
        df_hp_min_cap_mv_grid_pypsa_eur_sec.to_frame(
            name="min_hp_capacity"
        ).reset_index()
    )

    folder = Path(".") / "input-pypsa-eur-sec"
    file = folder / "minimum_hp_capacity_mv_grid_2035.csv"
    # Create the folder if it does not exist already (pathlib instead of
    # mixing in os.path/os.mkdir)
    folder.mkdir(parents=True, exist_ok=True)
    # Append to the file; write the header only when the file is newly
    # created. header is evaluated before to_csv opens (and thereby creates)
    # the file.
    # TODO outsource into separate task incl delete file if clearing
    df_hp_min_cap_mv_grid_pypsa_eur_sec.to_csv(
        file, mode="a", header=not file.is_file()
    )
            ||
| 1434 | |||
| 1435 | |||
def catch_missing_buidings(buildings_decentral_heating, peak_load):
    """
    Check for missing buildings and reduce the set of buildings with
    decentral heating if no peak loads are available. This should only happen
    in case of cutout SH (Schleswig-Holstein).

    Parameters
    -----------
    buildings_decentral_heating : pd.Index(int)
        Index of buildings with decentral heating.
    peak_load : pd.Series
        Peak loads of all buildings within the mvgd.

    Returns
    -------
    pd.Index(int)
        Index of buildings with decentral heating, reduced by buildings
        without an available peak load.

    """
    # Catch missing buildings key error
    # should only happen within cutout SH
    if (
        not all(buildings_decentral_heating.isin(peak_load.index))
        and config.settings()["egon-data"]["--dataset-boundary"]
        == "Schleswig-Holstein"
    ):
        diff = buildings_decentral_heating.difference(peak_load.index)
        # pd.Index.drop does not support inplace=True (the original call
        # would raise TypeError); reassign the reduced index instead. Drop
        # before logging so the "left" count reflects the remaining
        # buildings rather than the pre-drop count.
        buildings_decentral_heating = buildings_decentral_heating.drop(diff)
        logger.warning(
            f"Dropped {len(diff)} building ids due to missing peak "
            f"loads. {len(buildings_decentral_heating)} left."
        )
        logger.info(f"Dropped buildings: {diff}")

    return buildings_decentral_heating
            ||
| 1467 | |||
| 1468 | |||
def determine_hp_cap_peak_load_mvgd_ts_2035(mvgd_ids):
    """
    Main function to determine HP capacity per building in eGon2035 scenario.
    Further, creates heat demand time series for all buildings with heat pumps
    in MV grid, as well as for all buildings with gas boilers, used in eTraGo.

    Per MV grid the following steps are carried out:

    * residential and CTS heat demand profiles per building are aggregated
    * the building peak load is taken as the maximum of the hourly profile
    * HP capacities are allocated to buildings with decentral heating;
      remaining decentral buildings are assigned a gas boiler (CH4 carrier)
    * aggregated heat demand time series are set up per carrier

    Results are written to the database (peak loads, aggregated time series,
    HP capacities per building).

    Parameters
    -----------
    mvgd_ids : list(int)
        List of MV grid IDs to determine data for.

    """

    # ========== Register np datatypes with SQLA ==========
    register_adapter(np.float64, adapt_numpy_float64)
    register_adapter(np.int64, adapt_numpy_int64)
    # =====================================================

    # Accumulators for results of all MV grids; written to db once at the end
    df_peak_loads_db = pd.DataFrame()
    df_hp_cap_per_building_2035_db = pd.DataFrame()
    df_heat_mvgd_ts_db = pd.DataFrame()

    for mvgd in mvgd_ids:

        logger.info(f"MVGD={mvgd} | Start")

        # ############# aggregate residential and CTS demand profiles #####

        df_heat_ts = aggregate_residential_and_cts_profiles(
            mvgd, scenario="eGon2035"
        )

        # ##################### determine peak loads ###################
        logger.info(f"MVGD={mvgd} | Determine peak loads.")

        # Peak load per building = maximum of its hourly demand profile (MW)
        peak_load_2035 = df_heat_ts.max().rename("eGon2035")

        # ######## determine HP capacity per building #########
        logger.info(f"MVGD={mvgd} | Determine HP capacities.")

        buildings_decentral_heating = (
            get_buildings_with_decentral_heat_demand_in_mv_grid(
                mvgd, scenario="eGon2035"
            )
        )

        # Reduce list of decentral heating if no Peak load available
        # TODO maybe remove after succesfull DE run
        buildings_decentral_heating = catch_missing_buidings(
            buildings_decentral_heating, peak_load_2035
        )

        hp_cap_per_building_2035 = (
            determine_hp_cap_buildings_eGon2035_per_mvgd(
                mvgd,
                peak_load_2035,
                buildings_decentral_heating,
            )
        )
        # Decentral buildings without a heat pump get a gas boiler
        buildings_gas_2035 = pd.Index(buildings_decentral_heating).drop(
            hp_cap_per_building_2035.index
        )

        # ################ aggregated heat profiles ###################
        logger.info(f"MVGD={mvgd} | Aggregate heat profiles.")

        # Heat demand time series summed over all buildings with a heat pump
        df_mvgd_ts_2035_hp = df_heat_ts.loc[
            :,
            hp_cap_per_building_2035.index,
        ].sum(axis=1)

        # heat demand time series for buildings with gas boiler
        df_mvgd_ts_2035_gas = df_heat_ts.loc[:, buildings_gas_2035].sum(axis=1)

        # One row per carrier with the whole year's profile stored as a list
        df_heat_mvgd_ts = pd.DataFrame(
            data={
                "carrier": ["heat_pump", "CH4"],
                "bus_id": mvgd,
                "scenario": ["eGon2035", "eGon2035"],
                "dist_aggregated_mw": [
                    df_mvgd_ts_2035_hp.to_list(),
                    df_mvgd_ts_2035_gas.to_list(),
                ],
            }
        )

        # ################ collect results ##################
        logger.info(f"MVGD={mvgd} | Collect results.")

        df_peak_loads_db = pd.concat(
            [df_peak_loads_db, peak_load_2035.reset_index()],
            axis=0,
            ignore_index=True,
        )

        df_heat_mvgd_ts_db = pd.concat(
            [df_heat_mvgd_ts_db, df_heat_mvgd_ts], axis=0, ignore_index=True
        )

        df_hp_cap_per_building_2035_db = pd.concat(
            [
                df_hp_cap_per_building_2035_db,
                hp_cap_per_building_2035.reset_index(),
            ],
            axis=0,
        )

    # ################ export to db #######################
    logger.info(f"MVGD={min(mvgd_ids)} - {max(mvgd_ids)} | Write data to db.")
    export_to_db(df_peak_loads_db, df_heat_mvgd_ts_db)

    df_hp_cap_per_building_2035_db["scenario"] = "eGon2035"
    write_table_to_postgres(
        df_hp_cap_per_building_2035_db,
        EgonHpCapacityBuildings,
        engine=engine,
        drop=False,
    )
            ||
| 1587 | |||
| 1588 | |||
def determine_hp_cap_peak_load_mvgd_ts_pypsa_eur_sec(mvgd_ids):
    """
    Main function to determine minimum required HP capacity in MV for
    pypsa-eur-sec. Further, creates heat demand time series for all buildings
    with heat pumps in MV grid in eGon100RE scenario, used in eTraGo.

    Per MV grid the following steps are carried out:

    * residential and CTS heat demand profiles per building are aggregated
    * the building peak load is taken as the maximum of the hourly profile
    * the minimum required HP capacity for the whole grid is determined from
      the peak loads of all buildings with decentral heating
    * the aggregated heat pump heat demand time series is set up

    Peak loads and time series are written to the database; the minimum HP
    capacities per MV grid are exported to csv for pypsa-eur-sec.

    Parameters
    -----------
    mvgd_ids : list(int)
        List of MV grid IDs to determine data for.

    """

    # ========== Register np datatypes with SQLA ==========
    register_adapter(np.float64, adapt_numpy_float64)
    register_adapter(np.int64, adapt_numpy_int64)
    # =====================================================

    # Accumulators for results of all MV grids; exported once at the end
    df_peak_loads_db = pd.DataFrame()
    df_heat_mvgd_ts_db = pd.DataFrame()
    df_hp_min_cap_mv_grid_pypsa_eur_sec = pd.Series(dtype="float64")

    for mvgd in mvgd_ids:

        logger.info(f"MVGD={mvgd} | Start")

        # ############# aggregate residential and CTS demand profiles #####

        df_heat_ts = aggregate_residential_and_cts_profiles(
            mvgd, scenario="eGon100RE"
        )

        # ##################### determine peak loads ###################
        logger.info(f"MVGD={mvgd} | Determine peak loads.")

        # Peak load per building = maximum of its hourly demand profile (MW)
        peak_load_100RE = df_heat_ts.max().rename("eGon100RE")

        # ######## determine minimum HP capacity pypsa-eur-sec ###########
        logger.info(f"MVGD={mvgd} | Determine minimum HP capacity.")

        buildings_decentral_heating = (
            get_buildings_with_decentral_heat_demand_in_mv_grid(
                mvgd, scenario="eGon100RE"
            )
        )

        # Reduce list of decentral heating if no Peak load available
        # TODO maybe remove after succesfull DE run
        buildings_decentral_heating = catch_missing_buidings(
            buildings_decentral_heating, peak_load_100RE
        )

        hp_min_cap_mv_grid_pypsa_eur_sec = (
            determine_min_hp_cap_buildings_pypsa_eur_sec(
                peak_load_100RE,
                buildings_decentral_heating,
            )
        )

        # ################ aggregated heat profiles ###################
        logger.info(f"MVGD={mvgd} | Aggregate heat profiles.")

        # In eGon100RE all decentral buildings are supplied by heat pumps
        df_mvgd_ts_hp = df_heat_ts.loc[
            :,
            buildings_decentral_heating,
        ].sum(axis=1)

        # One row with the whole year's profile stored as a list
        df_heat_mvgd_ts = pd.DataFrame(
            data={
                "carrier": "heat_pump",
                "bus_id": mvgd,
                "scenario": "eGon100RE",
                "dist_aggregated_mw": [df_mvgd_ts_hp.to_list()],
            }
        )

        # ################ collect results ##################
        logger.info(f"MVGD={mvgd} | Collect results.")

        df_peak_loads_db = pd.concat(
            [df_peak_loads_db, peak_load_100RE.reset_index()],
            axis=0,
            ignore_index=True,
        )

        df_heat_mvgd_ts_db = pd.concat(
            [df_heat_mvgd_ts_db, df_heat_mvgd_ts], axis=0, ignore_index=True
        )

        df_hp_min_cap_mv_grid_pypsa_eur_sec.loc[
            mvgd
        ] = hp_min_cap_mv_grid_pypsa_eur_sec

    # ################ export to db and csv ######################
    logger.info(f"MVGD={min(mvgd_ids)} - {max(mvgd_ids)} | Write data to db.")
    export_to_db(df_peak_loads_db, df_heat_mvgd_ts_db)

    logger.info(
        f"MVGD={min(mvgd_ids)} - {max(mvgd_ids)} | Write "
        f"pypsa-eur-sec min "
        f"HP capacities to csv."
    )
    export_min_cap_to_csv(df_hp_min_cap_mv_grid_pypsa_eur_sec)
            ||
| 1692 | |||
| 1693 | |||
def split_mvgds_into_bulks_2035(n, max_n, func):
    """
    Splits all MV grid districts into `max_n` bulks and processes bulk `n`.

    All MV grid district IDs with heat demand are retrieved from the
    database, sorted by bus ID and split into `max_n` roughly equally sized
    arrays. `func` is then called with the `n`-th array of MV grid IDs.

    Parameters
    ----------
    n : int
        Index of the bulk to process (0-based).
    max_n : int
        Total number of bulks.
    func : callable
        Function to call with the array of MV grid IDs of bulk `n`.

    """

    # All bus IDs of MV grid districts that contain zensus cells with heat
    # demand
    with db.session_scope() as session:
        query = (
            session.query(
                MapZensusGridDistricts.bus_id,
            )
            .filter(
                MapZensusGridDistricts.zensus_population_id
                == EgonPetaHeat.zensus_population_id
            )
            .distinct(MapZensusGridDistricts.bus_id)
        )
    mvgd_ids = pd.read_sql(query.statement, query.session.bind, index_col=None)

    mvgd_ids = mvgd_ids.sort_values("bus_id").reset_index(drop=True)

    mvgd_ids = np.array_split(mvgd_ids["bus_id"].values, max_n)
    # Only take split n
    mvgd_ids = mvgd_ids[n]

    logger.info(f"Bulk takes care of MVGD: {min(mvgd_ids)} - {max(mvgd_ids)}")
    func(mvgd_ids)
            ||
| 1718 | |||
| 1719 | |||
def split_mvgds_into_bulks_pypsa_eur_sec(n, max_n, func):
    """
    Splits all MV grid districts into `max_n` bulks and processes bulk `n`.

    This task entry point for the pypsa-eur-sec pipeline was a byte-identical
    duplicate of :func:`split_mvgds_into_bulks_2035`; it now delegates to
    that function to remove the code duplication while keeping a separate
    callable for the Airflow task graph.

    Parameters
    ----------
    n : int
        Index of the bulk to process (0-based).
    max_n : int
        Total number of bulks.
    func : callable
        Function to call with the array of MV grid IDs of bulk `n`.

    """
    split_mvgds_into_bulks_2035(n, max_n, func)
            ||
| 1744 | |||
| 1745 | |||
def create_peak_load_table():
    """Drops and recreates the building heat peak load table."""
    table = BuildingHeatPeakLoads.__table__
    table.drop(bind=engine, checkfirst=True)
    table.create(bind=engine, checkfirst=True)
            ||
| 1750 | |||
| 1751 | |||
def create_hp_capacity_table():
    """Drops and recreates the heat pump capacity per building table."""
    table = EgonHpCapacityBuildings.__table__
    table.drop(bind=engine, checkfirst=True)
    table.create(bind=engine, checkfirst=True)
            ||
| 1756 | |||
| 1757 | |||
def create_egon_etrago_timeseries_individual_heating():
    """Drops and recreates the individual heating time series table."""
    table = EgonEtragoTimeseriesIndividualHeating.__table__
    table.drop(bind=engine, checkfirst=True)
    table.create(bind=engine, checkfirst=True)
            ||
| 1766 | |||
| 1767 | |||
def delete_peak_loads_if_existing():
    """Remove all entries"""
    # Delete all eGon2035 peak load rows so the task can be rerun cleanly
    with db.session_scope() as session:
        # Buses
        query = session.query(BuildingHeatPeakLoads).filter(
            BuildingHeatPeakLoads.scenario == "eGon2035"
        )
        query.delete(synchronize_session=False)
| 1775 |