Passed
Pull Request — dev (#905)
by
unknown
01:36
created

HeatPumpsPypsaEurSec.__init__()   B

Complexity

Conditions 3

Size

Total Lines 70
Code Lines 37

Duplication

Lines 70
Ratio 100 %

Importance

Changes 0
Metric Value
eloc 37
dl 70
loc 70
rs 8.9919
c 0
b 0
f 0
cc 3
nop 2

How to fix   Long Method   

Long Method

Small methods make your code easier to understand, in particular if combined with a good name. Besides, if your method is small, finding a good name is usually much easier.

For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.

Commonly applied refactorings include:

1
"""The central module containing all code dealing with
2
individual heat supply.
3
4
"""
5
from pathlib import Path
6
import os
7
import random
8
import time
9
10
from airflow.operators.python_operator import PythonOperator
11
from psycopg2.extensions import AsIs, register_adapter
12
from sqlalchemy import ARRAY, REAL, Column, Integer, String
13
from sqlalchemy.ext.declarative import declarative_base
14
import geopandas as gpd
15
import numpy as np
16
import pandas as pd
17
import saio
18
19
from egon.data import config, db, logger
20
from egon.data.datasets import Dataset
21
from egon.data.datasets.district_heating_areas import (
22
    MapZensusDistrictHeatingAreas,
23
)
24
from egon.data.datasets.electricity_demand_timeseries.cts_buildings import (
25
    calc_cts_building_profiles,
26
)
27
from egon.data.datasets.electricity_demand_timeseries.mapping import (
28
    EgonMapZensusMvgdBuildings,
29
)
30
from egon.data.datasets.electricity_demand_timeseries.tools import (
31
    write_table_to_postgres,
32
)
33
from egon.data.datasets.heat_demand import EgonPetaHeat
34
from egon.data.datasets.heat_demand_timeseries.daily import (
35
    EgonDailyHeatDemandPerClimateZone,
36
    EgonMapZensusClimateZones,
37
)
38
from egon.data.datasets.heat_demand_timeseries.idp_pool import (
39
    EgonHeatTimeseries,
40
)
41
42
# get zensus cells with district heating
43
from egon.data.datasets.zensus_mv_grid_districts import MapZensusGridDistricts
44
45
engine = db.engine()
46
Base = declarative_base()
47
48
49
class EgonEtragoTimeseriesIndividualHeating(Base):
    """ORM class for aggregated individual-heating time series per bus.

    Maps table ``demand.egon_etrago_timeseries_individual_heating``.
    One row per (bus, scenario, carrier) combination.
    """

    __tablename__ = "egon_etrago_timeseries_individual_heating"
    __table_args__ = {"schema": "demand"}
    # Composite primary key: bus, scenario and carrier.
    bus_id = Column(Integer, primary_key=True)
    scenario = Column(String, primary_key=True)
    carrier = Column(String, primary_key=True)
    # Aggregated time series; unit presumably MW per the column name —
    # TODO(review): confirm against the writer of this table.
    dist_aggregated_mw = Column(ARRAY(REAL))
56
57
58
class EgonHpCapacityBuildings(Base):
    """ORM class for heat pump capacity per building.

    Maps table ``demand.egon_hp_capacity_buildings``.
    One row per (building, scenario) combination.
    """

    __tablename__ = "egon_hp_capacity_buildings"
    __table_args__ = {"schema": "demand"}
    building_id = Column(Integer, primary_key=True)
    scenario = Column(String, primary_key=True)
    # Installed heat pump capacity of the building (unit not shown here;
    # presumably MW — verify against the writing task).
    hp_capacity = Column(REAL)
64
65
66 View Code Duplication
class HeatPumpsPypsaEurSec(Dataset):
    """Dataset wrapper determining HP capacity and peak loads for pypsa-eur-sec.

    Sets up the peak load and time series tables and then spawns one airflow
    task per bulk of MV grid districts (MVGDs), each running
    :func:`determine_hp_cap_peak_load_mvgd_ts_pypsa_eur_sec`.
    """

    def __init__(self, dependencies):
        def dyn_parallel_tasks():
            """Dynamically generate tasks.

            The goal is to speed up tasks by parallelising bulks of mvgds.

            The number of parallel tasks is defined via parameter
            `parallel_tasks` in the dataset config `datasets.yml`.

            Returns
            -------
            set of airflow.operators.python_operator.PythonOperator
                The tasks. Each element calls
                :func:`determine_hp_cap_peak_load_mvgd_ts_pypsa_eur_sec`
                for one bulk of MVGD IDs.
            """
            parallel_tasks = config.datasets()["demand_timeseries_mvgd"].get(
                "parallel_tasks", 1
            )
            # Register numpy scalar types with psycopg2 so np.float64 /
            # np.int64 values can be written to the database.
            register_adapter(np.float64, adapt_numpy_float64)
            register_adapter(np.int64, adapt_numpy_int64)

            # All bus IDs of MV grid districts that have heat demand.
            with db.session_scope() as session:
                query = (
                    session.query(
                        MapZensusGridDistricts.bus_id,
                    )
                    .filter(
                        MapZensusGridDistricts.zensus_population_id
                        == EgonPetaHeat.zensus_population_id
                    )
                    .distinct(MapZensusGridDistricts.bus_id)
                )
            mvgd_ids = pd.read_sql(
                query.statement, query.session.bind, index_col=None
            )

            mvgd_ids = mvgd_ids.sort_values("bus_id").reset_index(drop=True)

            # Split the sorted IDs into `parallel_tasks` roughly equal bulks.
            mvgd_ids = np.array_split(
                mvgd_ids["bus_id"].values, parallel_tasks
            )

            tasks = set()
            # Fix: loop index from enumerate() was never used.
            for bulk in mvgd_ids:
                tasks.add(
                    PythonOperator(
                        task_id=(
                            f"determine-hp-capacity-pypsa-eur-sec_"
                            f"mvgd_{min(bulk)}-{max(bulk)}"
                        ),
                        python_callable=determine_hp_cap_peak_load_mvgd_ts_pypsa_eur_sec,
                        op_kwargs={
                            "mvgd_ids": bulk,
                        },
                    )
                )
            return tasks

        super().__init__(
            name="HeatPumpsPypsaEurSec",
            version="0.0.0",
            dependencies=dependencies,
            tasks=(
                create_peak_load_table,
                create_egon_etrago_timeseries_individual_heating,
                {*dyn_parallel_tasks()},
            ),
        )
139
140
141 View Code Duplication
class HeatPumps2035(Dataset):
    """Dataset wrapper determining HP capacity and peak loads for eGon2035.

    Sets up the capacity table, clears stale peak loads, then spawns one
    airflow task per bulk of MV grid districts (MVGDs), each running
    :func:`determine_hp_cap_peak_load_mvgd_ts_2035`.
    """

    def __init__(self, dependencies):
        def dyn_parallel_tasks():
            """Dynamically generate tasks.

            The goal is to speed up tasks by parallelising bulks of mvgds.

            The number of parallel tasks is defined via parameter
            `parallel_tasks` in the dataset config `datasets.yml`.

            Returns
            -------
            set of airflow.operators.python_operator.PythonOperator
                The tasks. Each element calls
                :func:`determine_hp_cap_peak_load_mvgd_ts_2035`
                for one bulk of MVGD IDs.
            """
            parallel_tasks = config.datasets()["demand_timeseries_mvgd"].get(
                "parallel_tasks", 1
            )
            # Register numpy scalar types with psycopg2 so np.float64 /
            # np.int64 values can be written to the database.
            register_adapter(np.float64, adapt_numpy_float64)
            register_adapter(np.int64, adapt_numpy_int64)

            # All bus IDs of MV grid districts that have heat demand.
            with db.session_scope() as session:
                query = (
                    session.query(
                        MapZensusGridDistricts.bus_id,
                    )
                    .filter(
                        MapZensusGridDistricts.zensus_population_id
                        == EgonPetaHeat.zensus_population_id
                    )
                    .distinct(MapZensusGridDistricts.bus_id)
                )
            mvgd_ids = pd.read_sql(
                query.statement, query.session.bind, index_col=None
            )

            mvgd_ids = mvgd_ids.sort_values("bus_id").reset_index(drop=True)

            # Split the sorted IDs into `parallel_tasks` roughly equal bulks.
            mvgd_ids = np.array_split(
                mvgd_ids["bus_id"].values, parallel_tasks
            )

            tasks = set()
            # Fix: loop index from enumerate() was never used.
            for bulk in mvgd_ids:
                tasks.add(
                    PythonOperator(
                        task_id=(
                            f"determine-hp-capacity-eGon2035_"
                            f"mvgd_{min(bulk)}-{max(bulk)}"
                        ),
                        python_callable=determine_hp_cap_peak_load_mvgd_ts_2035,
                        op_kwargs={
                            "mvgd_ids": bulk,
                        },
                    )
                )
            return tasks

        super().__init__(
            name="HeatPumps2035",
            version="0.0.0",
            dependencies=dependencies,
            tasks=(
                create_hp_capacity_table,
                delete_peak_loads_if_existing,
                {*dyn_parallel_tasks()},
            ),
        )
214
215
216
class HeatPumps2050(Dataset):
    """Dataset wrapper determining building heat pump capacities for eGon100RE."""

    def __init__(self, dependencies):
        super().__init__(
            name="HeatPumps2050",
            version="0.0.0",
            dependencies=dependencies,
            # Trailing comma added: `(fn)` is just `fn`, not a one-element
            # tuple. Sibling datasets in this module all pass tuples here.
            tasks=(determine_hp_cap_buildings_eGon100RE,),
        )
224
225
226
class BuildingHeatPeakLoads(Base):
    """ORM class for heat peak load per building.

    Maps table ``demand.egon_building_heat_peak_loads``.
    One row per (building, scenario, sector) combination.
    """

    __tablename__ = "egon_building_heat_peak_loads"
    __table_args__ = {"schema": "demand"}

    building_id = Column(Integer, primary_key=True)
    scenario = Column(String, primary_key=True)
    sector = Column(String, primary_key=True)
    # Peak heat load of the building in W (per the column name).
    peak_load_in_w = Column(REAL)
234
235
236
def adapt_numpy_float64(numpy_float64):
    """psycopg2 adapter: render a ``numpy.float64`` literally (as-is) in SQL."""
    adapted = AsIs(numpy_float64)
    return adapted
238
239
240
def adapt_numpy_int64(numpy_int64):
    """psycopg2 adapter: render a ``numpy.int64`` literally (as-is) in SQL."""
    adapted = AsIs(numpy_int64)
    return adapted
242
243
244
def timeit(func):
    """
    Decorator for measuring a function's running time.

    Prints the elapsed wall-clock time after every call and returns the
    wrapped function's result unchanged.
    """
    # Local import so the module-level import block stays untouched.
    from functools import wraps

    @wraps(func)  # fix: preserve __name__/__doc__ of the decorated function
    def measure_time(*args, **kw):
        start_time = time.time()
        result = func(*args, **kw)
        print(
            "Processing time of %s(): %.2f seconds."
            % (func.__qualname__, time.time() - start_time)
        )
        return result

    return measure_time
259
260
261
def cascade_per_technology(
    heat_per_mv,
    technologies,
    scenario,
    distribution_level,
    max_size_individual_chp=0.05,
):
    """Add plants for individual heat.

    Currently only on mv grid district level.

    Parameters
    ----------
    heat_per_mv : geopandas.geodataframe.GeoDataFrame
        MV grid districts including the heat demand
    technologies : pandas.DataFrame
        List of supply technologies and their parameters
    scenario : str
        Name of the scenario
    distribution_level : str
        If ``'federal_state'``, heat pump capacity targets are distributed
        per federal state; otherwise one national target value is used.
    max_size_individual_chp : float
        Maximum capacity of an individual chp in MW

    Returns
    -------
    heat_per_mv : geopandas.geodataframe.GeoDataFrame
        MV grid districts which need additional individual heat supply
    technologies : pandas.DataFrame
        Remaining supply technologies and their parameters
    append_df : pandas.DataFrame
        List of plants per mv grid for the selected technology

    """
    sources = config.datasets()["heat_supply"]["sources"]

    # Technology with the highest priority is assigned first.
    tech = technologies[technologies.priority == technologies.priority.max()]

    # Fix: define `append_df` on every execution path. Previously a
    # technology other than 'heat_pump'/'gas_boiler' raised a NameError
    # at the `append_df.size` check below.
    append_df = pd.DataFrame(columns=["mv_grid_id", "carrier", "capacity"])

    # Distribute heat pumps linear to remaining demand.
    if tech.index == "heat_pump":

        if distribution_level == "federal_state":
            # Select target values per federal state
            target = db.select_dataframe(
                f"""
                    SELECT DISTINCT ON (gen) gen as state, capacity
                    FROM {sources['scenario_capacities']['schema']}.
                    {sources['scenario_capacities']['table']} a
                    JOIN {sources['federal_states']['schema']}.
                    {sources['federal_states']['table']} b
                    ON a.nuts = b.nuts
                    WHERE scenario_name = '{scenario}'
                    AND carrier = 'residential_rural_heat_pump'
                    """,
                index_col="state",
            )

            # Each grid's share of its state's remaining demand.
            heat_per_mv["share"] = heat_per_mv.groupby(
                "state"
            ).remaining_demand.apply(lambda grp: grp / grp.sum())

            append_df = (
                heat_per_mv["share"]
                .mul(target.capacity[heat_per_mv["state"]].values)
                .reset_index()
            )
        else:
            # Select target value for Germany
            target = db.select_dataframe(
                f"""
                    SELECT SUM(capacity) AS capacity
                    FROM {sources['scenario_capacities']['schema']}.
                    {sources['scenario_capacities']['table']} a
                    WHERE scenario_name = '{scenario}'
                    AND carrier = 'residential_rural_heat_pump'
                    """
            )

            heat_per_mv["share"] = (
                heat_per_mv.remaining_demand
                / heat_per_mv.remaining_demand.sum()
            )

            append_df = (
                heat_per_mv["share"].mul(target.capacity[0]).reset_index()
            )

        append_df.rename(
            {"bus_id": "mv_grid_id", "share": "capacity"}, axis=1, inplace=True
        )

    elif tech.index == "gas_boiler":

        # Size boilers to cover the remaining demand at estimated full
        # load hours.
        append_df = pd.DataFrame(
            data={
                "capacity": heat_per_mv.remaining_demand.div(
                    tech.estimated_flh.values[0]
                ),
                "carrier": "residential_rural_gas_boiler",
                "mv_grid_id": heat_per_mv.index,
                "scenario": scenario,
            }
        )

    if append_df.size > 0:
        # NOTE(review): this overwrites the carrier set in the gas_boiler
        # branch above with the plain technology name — presumably intended;
        # behavior kept unchanged.
        append_df["carrier"] = tech.index[0]
        # Subtract the energy covered by the new plants from the remaining
        # demand of each grid.
        heat_per_mv.loc[
            append_df.mv_grid_id, "remaining_demand"
        ] -= append_df.set_index("mv_grid_id").capacity.mul(
            tech.estimated_flh.values[0]
        )

    heat_per_mv = heat_per_mv[heat_per_mv.remaining_demand >= 0]

    technologies = technologies.drop(tech.index)

    return heat_per_mv, technologies, append_df
375
376
377
def cascade_heat_supply_indiv(scenario, distribution_level, plotting=True):
    """Assigns supply strategy for individual heating in four steps.

    1.) all small scale CHP are connected.
    2.) If the supply can not  meet the heat demand, solar thermal collectors
        are attached. This is not implemented yet, since individual
        solar thermal plants are not considered in eGon2035 scenario.
    3.) If this is not suitable, the mv grid is also supplied by heat pumps.
    4.) The last option are individual gas boilers.

    Parameters
    ----------
    scenario : str
        Name of scenario
    distribution_level : str
        Level target heat pump capacities are distributed on, passed through
        to :func:`cascade_per_technology`.
    plotting : bool, optional
        Choose if individual heating supply is plotted. The default is True.

    Returns
    -------
    resulting_capacities : geopandas.GeoDataFrame
        List of plants per mv grid

    """

    sources = config.datasets()["heat_supply"]["sources"]

    # Select residential heat demand per mv grid district and federal state
    heat_per_mv = db.select_geodataframe(
        f"""
        SELECT d.bus_id as bus_id, SUM(demand) as demand,
        c.vg250_lan as state, d.geom
        FROM {sources['heat_demand']['schema']}.
        {sources['heat_demand']['table']} a
        JOIN {sources['map_zensus_grid']['schema']}.
        {sources['map_zensus_grid']['table']} b
        ON a.zensus_population_id = b.zensus_population_id
        JOIN {sources['map_vg250_grid']['schema']}.
        {sources['map_vg250_grid']['table']} c
        ON b.bus_id = c.bus_id
        JOIN {sources['mv_grids']['schema']}.
        {sources['mv_grids']['table']} d
        ON d.bus_id = c.bus_id
        WHERE scenario = '{scenario}'
        AND a.zensus_population_id NOT IN (
            SELECT zensus_population_id
            FROM {sources['map_dh']['schema']}.{sources['map_dh']['table']}
            WHERE scenario = '{scenario}')
        GROUP BY d.bus_id, vg250_lan, geom
        """,
        index_col="bus_id",
    )

    # Store geometry of mv grid
    geom_mv = heat_per_mv.geom.centroid.copy()

    # Initialize Dataframe for results
    resulting_capacities = pd.DataFrame(
        columns=["mv_grid_id", "carrier", "capacity"]
    )

    # Set technology data according to
    # http://www.wbzu.de/seminare/infopool/infopool-bhkw
    # TODO: Add gas boilers and solar themal (eGon100RE)
    technologies = pd.DataFrame(
        index=["heat_pump", "gas_boiler"],
        columns=["estimated_flh", "priority"],
        data={"estimated_flh": [4000, 8000], "priority": [2, 1]},
    )

    # In the beginning, the remaining demand equals demand
    heat_per_mv["remaining_demand"] = heat_per_mv["demand"]

    # Connect new technologies, if there is still heat demand left
    while (len(technologies) > 0) and (len(heat_per_mv) > 0):
        # Attach new supply technology
        heat_per_mv, technologies, append_df = cascade_per_technology(
            heat_per_mv, technologies, scenario, distribution_level
        )
        # Collect resulting capacities.
        # Fix: DataFrame.append is deprecated and removed in pandas >= 2.0;
        # pd.concat is the drop-in replacement.
        resulting_capacities = pd.concat(
            [resulting_capacities, append_df], ignore_index=True
        )

    if plotting:
        plot_heat_supply(resulting_capacities)

    return gpd.GeoDataFrame(
        resulting_capacities,
        geometry=geom_mv[resulting_capacities.mv_grid_id].values,
    )
467
468
469
def get_peta_demand(mvgd, scenario):
    """
    Retrieve annual peta heat demand for residential buildings for either
    eGon2035 or eGon100RE scenario.

    Parameters
    ----------
    mvgd : int
        MV grid ID.
    scenario : str
        Possible options are eGon2035 or eGon100RE

    Returns
    -------
    pd.DataFrame
        Annual residential heat demand per building and scenario. Columns of
        the dataframe are zensus_population_id and demand.

    """
    # All filter criteria are combined with AND; a single .filter() call
    # with several criteria is equivalent to chained calls.
    with db.session_scope() as session:
        demand_query = session.query(
            MapZensusGridDistricts.zensus_population_id,
            EgonPetaHeat.demand,
        ).filter(
            MapZensusGridDistricts.bus_id == mvgd,
            MapZensusGridDistricts.zensus_population_id
            == EgonPetaHeat.zensus_population_id,
            EgonPetaHeat.sector == "residential",
            EgonPetaHeat.scenario == scenario,
        )

    return pd.read_sql(
        demand_query.statement, demand_query.session.bind, index_col=None
    )
511
512
513
def get_residential_heat_profile_ids(mvgd):
    """
    Retrieve 365 daily heat profiles ids per residential building and selected
    mvgd.

    Parameters
    ----------
    mvgd : int
        ID of MVGD

    Returns
    -------
    df_profiles_ids : pd.DataFrame
        Residential daily heat profile ID's per building. Columns of the
        dataframe are zensus_population_id, building_id,
        selected_idp_profiles, buildings and day_of_year.

    """
    with db.session_scope() as session:
        query = (
            session.query(
                MapZensusGridDistricts.zensus_population_id,
                EgonHeatTimeseries.building_id,
                EgonHeatTimeseries.selected_idp_profiles,
            )
            .filter(MapZensusGridDistricts.bus_id == mvgd)
            .filter(
                MapZensusGridDistricts.zensus_population_id
                == EgonHeatTimeseries.zensus_population_id
            )
        )

    df_profiles_ids = pd.read_sql(
        query.statement, query.session.bind, index_col=None
    )
    # Add building count per cell — used downstream to split a cell's
    # demand evenly among its buildings.
    df_profiles_ids = pd.merge(
        left=df_profiles_ids,
        right=df_profiles_ids.groupby("zensus_population_id")["building_id"]
        .count()
        .rename("buildings"),
        left_on="zensus_population_id",
        right_index=True,
    )

    # unnest array of ids per building
    df_profiles_ids = df_profiles_ids.explode("selected_idp_profiles")
    # add day of year column by order of list: explode preserves row order,
    # so the position within each building's list is its day of the year
    df_profiles_ids["day_of_year"] = (
        df_profiles_ids.groupby("building_id").cumcount() + 1
    )
    return df_profiles_ids
565
566
567
def get_daily_profiles(profile_ids):
    """
    Fetch normalized daily heat demand profiles for the given profile IDs.

    Parameters
    ----------
    profile_ids : list(int)
        daily heat profile ID's

    Returns
    -------
    df_profiles : pd.DataFrame
        Residential daily heat profiles. Columns of the dataframe are idp,
        house, temperature_class and hour.

    """
    # The idp pool table has no ORM class in this module, so it is
    # reflected on the fly via saio.
    saio.register_schema("demand", db.engine())
    from saio.demand import egon_heat_idp_pool

    with db.session_scope() as session:
        query = session.query(egon_heat_idp_pool).filter(
            egon_heat_idp_pool.index.in_(profile_ids)
        )

    df_profiles = pd.read_sql(
        query.statement, query.session.bind, index_col="index"
    )

    # unnest array of profile values per id
    df_profiles = df_profiles.explode("idp")
    # Add column for hour of day: explode keeps one index value per profile,
    # so a cumulative count per index level yields 1..24.
    df_profiles["hour"] = df_profiles.groupby(axis=0, level=0).cumcount() + 1

    return df_profiles
599
600
601
def get_daily_demand_share(mvgd):
    """
    Retrieve the daily share of annual heat demand per census cell.

    Parameters
    ----------
    mvgd : int
        MVGD id

    Returns
    -------
    pd.DataFrame
        Daily annual demand share per cencus cell. Columns of the dataframe
        are zensus_population_id, day_of_year and daily_demand_share.

    """
    with db.session_scope() as session:
        share_query = (
            session.query(
                MapZensusGridDistricts.zensus_population_id,
                EgonDailyHeatDemandPerClimateZone.day_of_year,
                EgonDailyHeatDemandPerClimateZone.daily_demand_share,
            )
            # Join census cells to their climate zone ...
            .filter(
                EgonMapZensusClimateZones.climate_zone
                == EgonDailyHeatDemandPerClimateZone.climate_zone
            )
            .filter(
                MapZensusGridDistricts.zensus_population_id
                == EgonMapZensusClimateZones.zensus_population_id
            )
            # ... and restrict to the requested grid district.
            .filter(MapZensusGridDistricts.bus_id == mvgd)
        )

    return pd.read_sql(
        share_query.statement, share_query.session.bind, index_col=None
    )
633
634
635
def calc_residential_heat_profiles_per_mvgd(mvgd, scenario):
    """
    Gets residential heat profiles per building in MV grid for either eGon2035
    or eGon100RE scenario.

    Parameters
    ----------
    mvgd : int
        MV grid ID.
    scenario : str
        Possible options are eGon2035 or eGon100RE.

    Returns
    --------
    pd.DataFrame
        Heat demand profiles of buildings. Columns are:
            * zensus_population_id : int
                Zensus cell ID building is in.
            * building_id : int
                ID of building.
            * day_of_year : int
                Day of the year (1 - 365).
            * hour : int
                Hour of the day (1 - 24).
            * demand_ts : float
                Building's residential heat demand in MW, for specified hour
                of the year (specified through columns `day_of_year` and
                `hour`).
    """
    columns = [
        "zensus_population_id",
        "building_id",
        "day_of_year",
        "hour",
        "demand_ts",
    ]

    df_peta_demand = get_peta_demand(mvgd, scenario)

    # Without annual demand there is nothing to disaggregate.
    if df_peta_demand.empty:
        logger.info(f"No demand for MVGD: {mvgd}")
        return pd.DataFrame(columns=columns)

    df_profiles_ids = get_residential_heat_profile_ids(mvgd)

    if df_profiles_ids.empty:
        logger.info(f"No profiles for MVGD: {mvgd}")
        return pd.DataFrame(columns=columns)

    df_profiles = get_daily_profiles(
        df_profiles_ids["selected_idp_profiles"].unique()
    )

    df_daily_demand_share = get_daily_demand_share(mvgd)

    # Attach annual demand to each building's profile ids (per cell) ...
    df = pd.merge(
        left=df_peta_demand, right=df_profiles_ids, on="zensus_population_id"
    )

    # ... then the daily demand share (per cell and day) ...
    df = pd.merge(
        left=df,
        right=df_daily_demand_share,
        on=["zensus_population_id", "day_of_year"],
    )

    # ... and finally the hourly profile values (per profile id).
    df = pd.merge(
        left=df,
        right=df_profiles[["idp", "hour"]],
        left_on="selected_idp_profiles",
        right_index=True,
    )

    # Scale profiles: hourly value * daily share * annual cell demand,
    # split evenly over the buildings of the cell.
    df["demand_ts"] = (
        df["idp"]
        .mul(df["daily_demand_share"])
        .mul(df["demand"])
        .div(df["buildings"])
    )

    return df.loc[:, columns]
721
722
723 View Code Duplication
def plot_heat_supply(resulting_capacities):
    """Plot installed individual heat supply per MV grid district.

    Draws one choropleth map per carrier ('CHP' and 'heat_pump') and saves
    each to ``plots/individual_heat_supply_<carrier>.png``.

    Parameters
    ----------
    resulting_capacities : pandas.DataFrame
        Plants per MV grid with columns mv_grid_id, carrier and capacity.
    """
    from matplotlib import pyplot as plt

    mv_grids = db.select_geodataframe(
        """
        SELECT * FROM grid.egon_mv_grid_district
        """,
        index_col="bus_id",
    )

    for carrier in ["CHP", "heat_pump"]:
        # Capacity per grid for this carrier, aligned via the grid index.
        capacity_per_grid = (
            resulting_capacities[resulting_capacities.carrier == carrier]
            .set_index("mv_grid_id")
            .capacity
        )
        mv_grids[carrier] = capacity_per_grid

        fig, ax = plt.subplots(1, 1)
        mv_grids.boundary.plot(linewidth=0.2, ax=ax, color="black")
        mv_grids.plot(
            ax=ax,
            column=carrier,
            cmap="magma_r",
            legend=True,
            legend_kwds={
                "label": f"Installed {carrier} in MW",
                "orientation": "vertical",
            },
        )
        plt.savefig(f"plots/individual_heat_supply_{carrier}.png", dpi=300)
754
755
756
def get_zensus_cells_with_decentral_heat_demand_in_mv_grid(
    scenario, mv_grid_id
):
    """
    Returns zensus cell IDs with decentral heating systems in given MV grid.

    As cells with district heating differ between scenarios, this is also
    depending on the scenario.

    Parameters
    -----------
    scenario : str
        Name of scenario. Can be either "eGon2035" or "eGon100RE".
    mv_grid_id : int
        ID of MV grid.

    Returns
    --------
    pd.Index(int)
        Zensus cell IDs (as int) of buildings with decentral heating systems in
        given MV grid. Type is pandas Index to avoid errors later on when it is
        used in a query.

    """
    # All zensus cells belonging to the grid district.
    cells_in_grid = db.select_dataframe(
        f"""
        SELECT zensus_population_id
        FROM boundaries.egon_map_zensus_grid_districts
        WHERE bus_id = {mv_grid_id}
        """,
        index_col=None,
    ).zensus_population_id.values

    # Wrap in pd.Index — raw values would be np.int64, which for some
    # reason throws an error when used in a query.
    cells_in_grid = pd.Index(cells_in_grid)

    # Cells attached to a district heating network in this scenario.
    with db.session_scope() as session:
        dh_query = session.query(
            MapZensusDistrictHeatingAreas.zensus_population_id,
        ).filter(
            MapZensusDistrictHeatingAreas.scenario == scenario,
            MapZensusDistrictHeatingAreas.zensus_population_id.in_(
                cells_in_grid
            ),
        )

    district_heating_cells = pd.read_sql(
        dh_query.statement, dh_query.session.bind, index_col=None
    ).zensus_population_id.values

    # Decentral heating cells = all cells minus district heating cells.
    decentral_cells = cells_in_grid.drop(
        district_heating_cells, errors="ignore"
    )
    return pd.Index(decentral_cells)
816
817
818
def get_residential_buildings_with_decentral_heat_demand_in_mv_grid(
    scenario, mv_grid_id
):
    """
    Returns building IDs of buildings with decentral residential heat demand in
    given MV grid.

    As cells with district heating differ between scenarios, this is also
    depending on the scenario.

    Parameters
    -----------
    scenario : str
        Name of scenario. Can be either "eGon2035" or "eGon100RE".
    mv_grid_id : int
        ID of MV grid.

    Returns
    --------
    pd.Index(int)
        Building IDs (as int) of buildings with decentral heating system in
        given MV grid. Type is pandas Index to avoid errors later on when it is
        used in a query.

    """
    # Zensus cells with decentral heating in this grid and scenario.
    decentral_cells = (
        get_zensus_cells_with_decentral_heat_demand_in_mv_grid(
            scenario, mv_grid_id
        )
    )

    # The profile selection table has no ORM class here; reflect via saio.
    saio.register_schema("demand", engine)
    from saio.demand import egon_heat_timeseries_selected_profiles

    with db.session_scope() as session:
        buildings_query = session.query(
            egon_heat_timeseries_selected_profiles.building_id,
        ).filter(
            egon_heat_timeseries_selected_profiles.zensus_population_id.in_(
                decentral_cells
            )
        )

    building_ids = pd.read_sql(
        buildings_query.statement, buildings_query.session.bind,
        index_col=None,
    ).building_id.values

    return pd.Index(building_ids)
868
869
870
def get_cts_buildings_with_decentral_heat_demand_in_mv_grid(
    scenario, mv_grid_id
):
    """
    Returns building IDs of buildings with decentral CTS heat demand in
    given MV grid.

    As cells with district heating differ between scenarios, the result also
    depends on the scenario.

    Parameters
    -----------
    scenario : str
        Name of scenario. Can be either "eGon2035" or "eGon100RE".
    mv_grid_id : int
        ID of MV grid.

    Returns
    --------
    pd.Index(int)
        Building IDs (as int) of buildings with decentral heating system in
        given MV grid. Type is pandas Index to avoid errors later on when it
        is used in a query.

    """
    # zensus cells in this grid that are not supplied by district heating
    cell_ids = get_zensus_cells_with_decentral_heat_demand_in_mv_grid(
        scenario, mv_grid_id
    )

    # look up CTS buildings located in those cells
    with db.session_scope() as session:
        query = session.query(EgonMapZensusMvgdBuildings.building_id).filter(
            EgonMapZensusMvgdBuildings.sector == "cts",
            EgonMapZensusMvgdBuildings.zensus_population_id.in_(cell_ids),
        )

    df_buildings = pd.read_sql(
        query.statement, query.session.bind, index_col=None
    )

    return pd.Index(df_buildings.building_id.values)
def get_buildings_with_decentral_heat_demand_in_mv_grid(mvgd, scenario):
    """
    Returns building IDs of buildings with decentral heat demand in
    given MV grid.

    As cells with district heating differ between scenarios, the result also
    depends on the scenario.

    Parameters
    -----------
    mvgd : int
        ID of MV grid.
    scenario : str
        Name of scenario. Can be either "eGon2035" or "eGon100RE".

    Returns
    --------
    pd.Index(int)
        Building IDs (as int) of buildings with decentral heating system in
        given MV grid. Type is pandas Index to avoid errors later on when it
        is used in a query.

    """
    # residential buildings with decentral heating systems
    res_ids = (
        get_residential_buildings_with_decentral_heat_demand_in_mv_grid(
            scenario, mvgd
        )
    )

    # CTS buildings with decentral heating systems
    cts_ids = get_cts_buildings_with_decentral_heat_demand_in_mv_grid(
        scenario, mvgd
    )

    # combine both sectors; a building may appear in both, hence unique()
    return res_ids.append(cts_ids).unique()
def get_total_heat_pump_capacity_of_mv_grid(scenario, mv_grid_id):
    """
    Returns total heat pump capacity per grid that was previously defined
    (by NEP or pypsa-eur-sec).

    Parameters
    -----------
    scenario : str
        Name of scenario. Can be either "eGon2035" or "eGon100RE".
    mv_grid_id : int
        ID of MV grid.

    Returns
    --------
    float
        Total heat pump capacity in MW in given MV grid.

    """
    from egon.data.datasets.heat_supply import EgonIndividualHeatingSupply

    with db.session_scope() as session:
        query = session.query(
            EgonIndividualHeatingSupply.mv_grid_id,
            EgonIndividualHeatingSupply.capacity,
        ).filter(
            EgonIndividualHeatingSupply.scenario == scenario,
            EgonIndividualHeatingSupply.carrier == "heat_pump",
            EgonIndividualHeatingSupply.mv_grid_id == mv_grid_id,
        )

    df_cap = pd.read_sql(
        query.statement, query.session.bind, index_col="mv_grid_id"
    )

    # no heat pump capacity was allocated to this grid
    if df_cap.empty:
        return 0.0
    return df_cap.capacity.values[0]
def get_heat_peak_demand_per_building(scenario, building_ids):
    """
    Returns heat peak demand per building in MW.

    Parameters
    ----------
    scenario : str
        Name of scenario the peak demand is read for.
    building_ids : pd.Index(int)
        Building IDs (as int) to get peak demand for.

    Returns
    -------
    pd.Series
        Peak heat demand in MW per building; index is the building ID.

    """
    with db.session_scope() as session:
        query = (
            session.query(
                BuildingHeatPeakLoads.building_id,
                BuildingHeatPeakLoads.peak_load_in_w,
            ).filter(BuildingHeatPeakLoads.scenario == scenario)
            # .filter(BuildingHeatPeakLoads.sector == "both")
            .filter(BuildingHeatPeakLoads.building_id.in_(building_ids))
        )

    df_heat_peak_demand = pd.read_sql(
        query.statement, query.session.bind, index_col=None
    )

    # TODO remove check
    if df_heat_peak_demand.duplicated("building_id").any():
        raise ValueError("Duplicate building_id")

    # convert to series and from W to MW; the table stores watts (see
    # export_to_db, which multiplies MW values by 1e6 before writing), so
    # converting back to MW requires division, not multiplication
    df_heat_peak_demand = (
        df_heat_peak_demand.set_index("building_id").loc[:, "peak_load_in_w"]
        / 1e6
    )
    return df_heat_peak_demand
def determine_minimum_hp_capacity_per_building(
    peak_heat_demand, flexibility_factor=24 / 18, cop=1.7
):
    """
    Determines minimum required heat pump capacity.

    Parameters
    ----------
    peak_heat_demand : pd.Series
        Series with peak heat demand per building in MW. Index contains the
        building ID.
    flexibility_factor : float
        Factor to overdimension the heat pump to allow for some flexible
        dispatch in times of high heat demand. Per default, a factor of 24/18
        is used (presumably so the pump can deliver a day's demand within 18
        of 24 hours — TODO confirm against project documentation).
    cop : float
        Coefficient of performance assumed at peak load. Per default 1.7 is
        used.

    Returns
    -------
    pd.Series
        Pandas series with minimum required heat pump capacity per building in
        MW.

    """
    # electrical capacity = thermal peak load, overdimensioned by the
    # flexibility factor and converted with the assumed COP
    return peak_heat_demand * flexibility_factor / cop
def determine_buildings_with_hp_in_mv_grid(
    hp_cap_mv_grid, min_hp_cap_per_building
):
    """
    Distributes given total heat pump capacity to buildings based on their peak
    heat demand.

    Buildings with PV rooftop plants are given a higher selection weight.
    Buildings are drawn in a weighted random order and accepted until the
    grid's total heat pump capacity is exhausted.

    Parameters
    -----------
    hp_cap_mv_grid : float
        Total heat pump capacity in MW in given MV grid.
    min_hp_cap_per_building : pd.Series
        Pandas series with minimum required heat pump capacity per building
         in MW.

    Returns
    -------
    pd.Index(int)
        Building IDs (as int) of buildings to get heat demand time series for.

    """
    building_ids = min_hp_cap_per_building.index

    # get buildings with PV to give them a higher priority when selecting
    # buildings a heat pump will be allocated to
    saio.register_schema("supply", engine)
    from saio.supply import egon_power_plants_pv_roof_building

    with db.session_scope() as session:
        query = session.query(
            egon_power_plants_pv_roof_building.building_id
        ).filter(
            egon_power_plants_pv_roof_building.building_id.in_(building_ids)
        )

    buildings_with_pv = pd.read_sql(
        query.statement, query.session.bind, index_col=None
    ).building_id.values
    # set different weights for buildings with PV and without PV
    weight_with_pv = 1.5
    weight_without_pv = 1.0
    weights = pd.concat(
        [
            pd.DataFrame(
                {"weight": weight_without_pv},
                index=building_ids.drop(buildings_with_pv, errors="ignore"),
            ),
            pd.DataFrame({"weight": weight_with_pv}, index=buildings_with_pv),
        ]
    )
    # normalise weights (probability needs to add up to 1)
    weights.weight = weights.weight / weights.weight.sum()

    # get random order at which buildings are chosen
    # seeded with the project-wide random seed for reproducibility
    np.random.seed(db.credentials()["--random-seed"])
    buildings_with_hp_order = np.random.choice(
        weights.index,
        size=len(weights),
        replace=False,
        p=weights.weight.values,
    )

    # select buildings until HP capacity in MV grid is reached (some rest
    # capacity will remain)
    hp_cumsum = min_hp_cap_per_building.loc[buildings_with_hp_order].cumsum()
    buildings_with_hp = hp_cumsum[hp_cumsum <= hp_cap_mv_grid].index

    # choose random heat pumps until remaining heat pumps are larger than
    # remaining heat pump capacity
    remaining_hp_cap = (
        hp_cap_mv_grid - min_hp_cap_per_building.loc[buildings_with_hp].sum()
    )
    min_cap_buildings_wo_hp = min_hp_cap_per_building.loc[
        building_ids.drop(buildings_with_hp)
    ]
    possible_buildings = min_cap_buildings_wo_hp[
        min_cap_buildings_wo_hp <= remaining_hp_cap
    ].index
    while len(possible_buildings) > 0:
        # NOTE(review): re-seeding on every iteration makes each draw
        # deterministic given the shrinking candidate set — confirm this is
        # intended rather than a single seed before the loop
        random.seed(db.credentials()["--random-seed"])
        new_hp_building = random.choice(possible_buildings)
        # add new building to building with HP
        buildings_with_hp = buildings_with_hp.append(
            pd.Index([new_hp_building])
        )
        # determine if there are still possible buildings
        remaining_hp_cap = (
            hp_cap_mv_grid
            - min_hp_cap_per_building.loc[buildings_with_hp].sum()
        )
        min_cap_buildings_wo_hp = min_hp_cap_per_building.loc[
            building_ids.drop(buildings_with_hp)
        ]
        possible_buildings = min_cap_buildings_wo_hp[
            min_cap_buildings_wo_hp <= remaining_hp_cap
        ].index

    return buildings_with_hp
def desaggregate_hp_capacity(min_hp_cap_per_building, hp_cap_mv_grid):
    """
    Desaggregates the required total heat pump capacity to buildings.

    All buildings are previously assigned a minimum required heat pump
    capacity. If the total heat pump capacity exceeds this, larger heat pumps
    are assigned.

    Parameters
    ------------
    min_hp_cap_per_building : pd.Series
        Pandas series with minimum required heat pump capacity per building
         in MW.
    hp_cap_mv_grid : float
        Total heat pump capacity in MW in given MV grid.

    Returns
    --------
    pd.Series
        Pandas series with heat pump capacity per building in MW.

    """
    # the surplus (total capacity minus sum of minimum capacities) is
    # distributed proportionally to each building's minimum capacity
    total_min_cap = min_hp_cap_per_building.sum()
    surplus = hp_cap_mv_grid - total_min_cap
    fac = surplus / total_min_cap

    hp_cap_per_building = (
        min_hp_cap_per_building * fac + min_hp_cap_per_building
    )
    hp_cap_per_building.index.name = "building_id"
    return hp_cap_per_building
def determine_min_hp_cap_buildings_pypsa_eur_sec(
    peak_heat_demand, building_ids
):
    """
    Determines minimum required HP capacity in MV grid in MW as input for
    pypsa-eur-sec.

    Parameters
    ----------
    peak_heat_demand : pd.Series
        Series with peak heat demand per building in MW. Index contains the
        building ID.
    building_ids : pd.Index(int)
        Building IDs (as int) of buildings with decentral heating system in
        given MV grid.

    Returns
    --------
    float
        Minimum required HP capacity in MV grid in MW.

    """
    # without any relevant buildings there is no capacity requirement
    if len(building_ids) == 0:
        return 0.0

    # sum up the minimum required capacities of all relevant buildings
    min_caps = determine_minimum_hp_capacity_per_building(
        peak_heat_demand.loc[building_ids]
    )
    return min_caps.sum()
def determine_hp_cap_buildings_eGon2035_per_mvgd(
    mv_grid_id, peak_heat_demand, building_ids
):
    """
    Determines which buildings in the MV grid will have a HP (buildings with
    PV rooftop are more likely to be assigned) in the eGon2035 scenario, as
    well as their respective HP capacity in MW.

    Parameters
    -----------
    mv_grid_id : int
        ID of MV grid.
    peak_heat_demand : pd.Series
        Series with peak heat demand per building in MW. Index contains the
        building ID.
    building_ids : pd.Index(int)
        Building IDs (as int) of buildings with decentral heating system in
        given MV grid.

    """

    hp_cap_grid = get_total_heat_pump_capacity_of_mv_grid(
        "eGon2035", mv_grid_id
    )

    # nothing to distribute without buildings or allocated HP capacity
    if len(building_ids) == 0 or hp_cap_grid <= 0.0:
        return pd.Series().rename("hp_capacity")

    # determine minimum required heat pump capacity per building
    min_hp_cap_buildings = determine_minimum_hp_capacity_per_building(
        peak_heat_demand.loc[building_ids]
    )

    # select buildings that will have a heat pump
    buildings_with_hp = determine_buildings_with_hp_in_mv_grid(
        hp_cap_grid, min_hp_cap_buildings
    )

    # distribute total heat pump capacity to all buildings with HP
    hp_cap_per_building = desaggregate_hp_capacity(
        min_hp_cap_buildings.loc[buildings_with_hp], hp_cap_grid
    )

    return hp_cap_per_building.rename("hp_capacity")
def determine_hp_cap_buildings_eGon100RE_per_mvgd(mv_grid_id):
    """
    Determines HP capacity per building in eGon100RE scenario.

    In eGon100RE scenario all buildings without district heating get a heat
    pump.

    Returns
    --------
    pd.Series
        Pandas series with heat pump capacity per building in MW.

    """

    hp_cap_grid = get_total_heat_pump_capacity_of_mv_grid(
        "eGon100RE", mv_grid_id
    )

    # no HP capacity allocated to this grid
    if hp_cap_grid <= 0.0:
        return pd.Series().rename("hp_capacity")

    # get buildings with decentral heating systems
    building_ids = get_buildings_with_decentral_heat_demand_in_mv_grid(
        mv_grid_id, scenario="eGon100RE"
    )

    # TODO get peak demand from db
    df_peak_heat_demand = get_heat_peak_demand_per_building(
        "eGon100RE", building_ids
    )

    # determine minimum required heat pump capacity per building
    min_hp_cap_buildings = determine_minimum_hp_capacity_per_building(
        df_peak_heat_demand, flexibility_factor=24 / 18, cop=1.7
    )

    # distribute total heat pump capacity to all buildings with HP
    hp_cap_per_building = desaggregate_hp_capacity(
        min_hp_cap_buildings, hp_cap_grid
    )

    return hp_cap_per_building.rename("hp_capacity")
def determine_hp_cap_buildings_eGon100RE():
    """
    Main function to determine HP capacity per building in eGon100RE scenario.

    Iterates over all MV grid districts, determines the HP capacity per
    building in each, and writes the results to the database.

    """

    with db.session_scope() as session:
        query = (
            session.query(
                MapZensusGridDistricts.bus_id,
            )
            .filter(
                MapZensusGridDistricts.zensus_population_id
                == EgonPetaHeat.zensus_population_id
            )
            .distinct(MapZensusGridDistricts.bus_id)
        )
    mvgd_ids = pd.read_sql(query.statement, query.session.bind, index_col=None)
    mvgd_ids = mvgd_ids.sort_values("bus_id").reset_index(drop=True)

    df_hp_cap_per_building_100RE_db = pd.DataFrame()

    # iterate over the grid IDs themselves; iterating over the DataFrame
    # directly would only yield its column labels, not the bus IDs
    for mvgd_id in mvgd_ids.bus_id.values:

        hp_cap_per_building_100RE = (
            determine_hp_cap_buildings_eGon100RE_per_mvgd(mvgd_id)
        )

        if not hp_cap_per_building_100RE.empty:
            df_hp_cap_per_building_100RE_db = pd.concat(
                [
                    df_hp_cap_per_building_100RE_db,
                    hp_cap_per_building_100RE.reset_index(),
                ],
                axis=0,
            )

    df_hp_cap_per_building_100RE_db["scenario"] = "eGon100RE"
    write_table_to_postgres(
        df_hp_cap_per_building_100RE_db,
        EgonHpCapacityBuildings,
        engine=engine,
        drop=False,
    )
def aggregate_residential_and_cts_profiles(mvgd, scenario):
    """
    Gets residential and CTS heat demand profiles per building and aggregates
    them.

    Parameters
    ----------
    mvgd : int
        MV grid ID.
    scenario : str
        Possible options are eGon2035 or eGon100RE.

    Returns
    --------
    pd.DataFrame
        Table of demand profile per building. Column names are building IDs
        and index is hour of the year as int (0-8759).

    """
    # residential profiles come in long format; pivot so each building is one
    # column, then order chronologically with an integer hour-of-year index
    df_residential = calc_residential_heat_profiles_per_mvgd(
        mvgd=mvgd, scenario=scenario
    ).pivot(
        index=["day_of_year", "hour"],
        columns="building_id",
        values="demand_ts",
    )
    df_residential = df_residential.sort_index().reset_index(drop=True)

    # CTS heat demand profiles per building for this grid
    df_cts = calc_cts_building_profiles(
        bus_ids=[mvgd],
        scenario=scenario,
        sector="heat",
    )

    # merge both sectors; buildings occurring in both get their profiles
    # summed column-wise
    df_combined = pd.concat([df_residential, df_cts], axis=1)
    return df_combined.groupby(axis=1, level=0).sum()
def export_to_db(df_peak_loads_db, df_heat_mvgd_ts_db):
    """
    Writes building heat peak loads and aggregated MV grid heat demand time
    series to the database.

    Parameters
    -----------
    df_peak_loads_db : pd.DataFrame
        Peak load per building (in MW, see conversion below) in wide format
        with one column per scenario.
    df_heat_mvgd_ts_db : pd.DataFrame
        Aggregated heat demand time series per MV grid, carrier and scenario.

    """
    df_peak_loads = df_peak_loads_db.melt(
        id_vars="building_id",
        var_name="scenario",
        value_name="peak_load_in_w",
    )
    df_peak_loads["sector"] = "residential+cts"
    # From MW to W
    df_peak_loads["peak_load_in_w"] = df_peak_loads["peak_load_in_w"] * 1e6
    write_table_to_postgres(
        df_peak_loads, BuildingHeatPeakLoads, engine=engine
    )

    # restrict to the table's columns and pass their SQLA types explicitly
    dtype_map = {
        col.key: col.type
        for col in EgonEtragoTimeseriesIndividualHeating.__table__.columns
    }
    df_ts = df_heat_mvgd_ts_db.loc[:, dtype_map.keys()]

    df_ts.to_sql(
        name=EgonEtragoTimeseriesIndividualHeating.__table__.name,
        schema=EgonEtragoTimeseriesIndividualHeating.__table__.schema,
        con=engine,
        if_exists="append",
        method="multi",
        index=False,
        dtype=dtype_map,
    )
def export_min_cap_to_csv(df_hp_min_cap_mv_grid_pypsa_eur_sec):
    """
    Appends minimum required HP capacity per MV grid to a csv file used as
    input for pypsa-eur-sec.

    Parameters
    -----------
    df_hp_min_cap_mv_grid_pypsa_eur_sec : pd.Series
        Minimum required heat pump capacity in MW per MV grid; index is the
        MV grid ID.

    """
    df_hp_min_cap_mv_grid_pypsa_eur_sec.index.name = "mvgd_id"
    df_hp_min_cap_mv_grid_pypsa_eur_sec = (
        df_hp_min_cap_mv_grid_pypsa_eur_sec.to_frame(
            name="min_hp_capacity"
        ).reset_index()
    )

    folder = Path(".") / "input-pypsa-eur-sec"
    file = folder / "minimum_hp_capacity_mv_grid_2035.csv"
    # Create the folder if it does not exist already; exist_ok avoids the
    # race between a separate existence check and the creation
    folder.mkdir(parents=True, exist_ok=True)
    # TODO check append
    if not file.is_file():
        # first write includes the header row
        df_hp_min_cap_mv_grid_pypsa_eur_sec.to_csv(file)
        # TODO outsource into separate task incl delete file if clearing
    else:
        # subsequent writes append without repeating the header
        df_hp_min_cap_mv_grid_pypsa_eur_sec.to_csv(
            file, mode="a", header=False
        )
def determine_hp_cap_peak_load_mvgd_ts_2035(mvgd_ids):
    """
    Main function to determine HP capacity per building in eGon2035 scenario.
    Further, creates heat demand time series for all buildings with heat pumps
    in MV grid, as well as for all buildings with gas boilers, used in eTraGo.

    Parameters
    -----------
    mvgd_ids : list(int)
        List of MV grid IDs to determine data for.

    """

    # ========== Register np datatypes with SQLA ==========
    register_adapter(np.float64, adapt_numpy_float64)
    register_adapter(np.int64, adapt_numpy_int64)
    # =====================================================

    # accumulators collecting results across all grids before one bulk export
    df_peak_loads_db = pd.DataFrame()
    df_hp_cap_per_building_2035_db = pd.DataFrame()
    df_heat_mvgd_ts_db = pd.DataFrame()

    for mvgd in mvgd_ids:

        logger.debug(f"MVGD={mvgd} | Start")

        # ############# aggregate residential and CTS demand profiles #####

        df_heat_ts = aggregate_residential_and_cts_profiles(
            mvgd, scenario="eGon2035"
        )

        # ##################### determine peak loads ###################
        logger.debug(f"MVGD={mvgd} | Determine peak loads.")

        # per-building annual maximum; series name becomes the scenario column
        peak_load = df_heat_ts.max().rename("eGon2035")

        # ######## determine HP capacity per building #########
        logger.debug(f"MVGD={mvgd} | Determine HP capacities.")

        buildings_decentral_heating = (
            get_buildings_with_decentral_heat_demand_in_mv_grid(
                mvgd, scenario="eGon2035"
            )
        )
        hp_cap_per_building_2035 = (
            determine_hp_cap_buildings_eGon2035_per_mvgd(
                mvgd,
                peak_load,
                buildings_decentral_heating,
            )
        )
        # buildings without a heat pump are supplied by gas boilers (CH4)
        buildings_gas_2035 = pd.Index(buildings_decentral_heating).drop(
            hp_cap_per_building_2035.index
        )

        # ################ aggregated heat profiles ###################
        logger.debug(f"MVGD={mvgd} | Aggregate heat profiles.")

        # grid-level time series summed over all buildings with a heat pump
        df_mvgd_ts_2035_hp = df_heat_ts.loc[
            :,
            hp_cap_per_building_2035.index,
        ].sum(axis=1)

        # heat demand time series for buildings with gas boiler
        df_mvgd_ts_2035_gas = df_heat_ts.loc[:, buildings_gas_2035].sum(axis=1)

        # one row per carrier; full-year series stored as a list per row
        df_heat_mvgd_ts = pd.DataFrame(
            data={
                "carrier": ["heat_pump", "CH4"],
                "bus_id": mvgd,
                "scenario": ["eGon2035", "eGon2035"],
                "dist_aggregated_mw": [
                    df_mvgd_ts_2035_hp.to_list(),
                    df_mvgd_ts_2035_gas.to_list(),
                ],
            }
        )

        # ################ collect results ##################
        logger.debug(f"MVGD={mvgd} | Collect results.")

        df_peak_loads_db = pd.concat(
            [df_peak_loads_db, peak_load.reset_index()],
            axis=0,
            ignore_index=True,
        )

        df_heat_mvgd_ts_db = pd.concat(
            [df_heat_mvgd_ts_db, df_heat_mvgd_ts], axis=0, ignore_index=True
        )

        df_hp_cap_per_building_2035_db = pd.concat(
            [
                df_hp_cap_per_building_2035_db,
                hp_cap_per_building_2035.reset_index(),
            ],
            axis=0,
        )

    # ################ export to db #######################
    logger.debug(" Write data to db.")
    export_to_db(df_peak_loads_db, df_heat_mvgd_ts_db)

    df_hp_cap_per_building_2035_db["scenario"] = "eGon2035"
    write_table_to_postgres(
        df_hp_cap_per_building_2035_db,
        EgonHpCapacityBuildings,
        engine=engine,
        drop=False,
    )
def determine_hp_cap_peak_load_mvgd_ts_pypsa_eur_sec(mvgd_ids):
    """
    Main function to determine minimum required HP capacity in MV for
    pypsa-eur-sec. Further, creates heat demand time series for all buildings
    with heat pumps in MV grid in eGon100RE scenario, used in eTraGo.

    Parameters
    -----------
    mvgd_ids : list(int)
        List of MV grid IDs to determine data for.

    """

    # ========== Register np datatypes with SQLA ==========
    register_adapter(np.float64, adapt_numpy_float64)
    register_adapter(np.int64, adapt_numpy_int64)
    # =====================================================

    # accumulators collecting results across all grids before one bulk export
    df_peak_loads_db = pd.DataFrame()
    df_heat_mvgd_ts_db = pd.DataFrame()
    df_hp_min_cap_mv_grid_pypsa_eur_sec = pd.Series()

    for mvgd in mvgd_ids:

        logger.debug(f"MVGD={mvgd} | Start")

        # ############# aggregate residential and CTS demand profiles #####

        df_heat_ts = aggregate_residential_and_cts_profiles(
            mvgd, scenario="eGon100RE"
        )

        # ##################### determine peak loads ###################
        logger.debug(f"MVGD={mvgd} | Determine peak loads.")

        # per-building annual maximum; series name becomes the scenario column
        peak_load_100RE = df_heat_ts.max().rename("eGon100RE")

        # ######## determine minimum HP capacity pypsa-eur-sec ###########
        buildings_decentral_heating = (
            get_buildings_with_decentral_heat_demand_in_mv_grid(
                mvgd, scenario="eGon100RE"
            )
        )
        hp_min_cap_mv_grid_pypsa_eur_sec = (
            determine_min_hp_cap_buildings_pypsa_eur_sec(
                peak_load_100RE,
                buildings_decentral_heating,
            )
        )

        # ################ aggregated heat profiles ###################
        logger.debug(f"MVGD={mvgd} | Aggregate heat profiles.")

        # grid-level time series summed over all buildings with decentral
        # heating (in eGon100RE these are all supplied by heat pumps)
        df_mvgd_ts_hp = df_heat_ts.loc[
            :,
            buildings_decentral_heating,
        ].sum(axis=1)

        # one row for this grid; full-year series stored as a list
        df_heat_mvgd_ts = pd.DataFrame(
            data={
                "carrier": "heat_pump",
                "bus_id": mvgd,
                "scenario": "eGon100RE",
                "dist_aggregated_mw": [df_mvgd_ts_hp.to_list()],
            }
        )

        # ################ collect results ##################
        logger.debug(f"MVGD={mvgd} | Collect results.")

        df_peak_loads_db = pd.concat(
            [df_peak_loads_db, peak_load_100RE.reset_index()],
            axis=0,
            ignore_index=True,
        )

        df_heat_mvgd_ts_db = pd.concat(
            [df_heat_mvgd_ts_db, df_heat_mvgd_ts], axis=0, ignore_index=True
        )

        df_hp_min_cap_mv_grid_pypsa_eur_sec.loc[
            mvgd
        ] = hp_min_cap_mv_grid_pypsa_eur_sec

    # ################ export to db and csv ######################
    logger.debug(" Write data to db.")
    export_to_db(df_peak_loads_db, df_heat_mvgd_ts_db)

    logger.debug("Write pypsa-eur-sec min HP capacities to csv.")
    export_min_cap_to_csv(df_hp_min_cap_mv_grid_pypsa_eur_sec)
def create_peak_load_table():
    """Drop and re-create the table holding heat peak loads per building."""
    BuildingHeatPeakLoads.__table__.drop(bind=engine, checkfirst=True)
    BuildingHeatPeakLoads.__table__.create(bind=engine, checkfirst=True)
def create_hp_capacity_table():
    """Drop and re-create the table holding HP capacity per building."""
    EgonHpCapacityBuildings.__table__.drop(bind=engine, checkfirst=True)
    EgonHpCapacityBuildings.__table__.create(bind=engine, checkfirst=True)
def create_egon_etrago_timeseries_individual_heating():
    """Drop and re-create the table holding aggregated heat time series."""
    EgonEtragoTimeseriesIndividualHeating.__table__.drop(
        bind=engine, checkfirst=True
    )
    EgonEtragoTimeseriesIndividualHeating.__table__.create(
        bind=engine, checkfirst=True
    )
def delete_peak_loads_if_existing():
    """Remove all heat peak load entries of the eGon2035 scenario."""
    with db.session_scope() as session:
        # delete existing eGon2035 peak loads so they can be rewritten
        session.query(BuildingHeatPeakLoads).filter(
            BuildingHeatPeakLoads.scenario == "eGon2035"
        ).delete(synchronize_session=False)
if __name__ == "__main__":

    # Manual test run of the workflow on a single example MV grid (ID 1556).

    determine_hp_cap_buildings_eGon100RE()

    # recreate result tables, then run the pypsa-eur-sec branch
    create_peak_load_table()
    create_egon_etrago_timeseries_individual_heating()
    determine_hp_cap_peak_load_mvgd_ts_pypsa_eur_sec([1556])

    # recreate HP capacity table, clear old peak loads, then run the
    # eGon2035 branch
    create_hp_capacity_table()
    delete_peak_loads_if_existing()
    determine_hp_cap_peak_load_mvgd_ts_2035([1556])