Passed
Pull Request — dev (#568) · created by unknown · 01:43

data.datasets.scenario_capacities.add_metadata() — B

Complexity
  Conditions: 1

Size
  Total Lines: 94
  Code Lines: 61

Duplication
  Lines: 0
  Ratio: 0 %

Importance
  Changes: 0

Metric    Value
eloc      61
dl        0
loc       94
rs        8.2763
c         0
b         0
f         0
cc        1
nop       0

How to fix: Long Method

Small methods make your code easier to understand, especially when combined with a good name. And when a method is small, finding a good name is usually much easier.

For example, if you find yourself adding comments inside a method's body, that is usually a sign that the commented part should be extracted into a new method; the comment is a good starting point for naming it.

Commonly applied refactorings include Extract Method, Replace Temp with Query, and Decompose Conditional; Extract Method is sketched below.
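
A minimal, generic sketch of Extract Method (hypothetical code, not taken from this repository): the comments that mark sections of a long function become the names of the extracted helpers.

# Hypothetical example: a long function whose commented sections
# are extracted into small, well-named helpers.

def summarize(values):
    """Return mean and spread of a list of numbers (before refactoring)."""
    # compute the mean
    mean = sum(values) / len(values)
    # compute the spread
    spread = max(values) - min(values)
    return mean, spread


# After Extract Method: each comment has become a function name.
def compute_mean(values):
    return sum(values) / len(values)


def compute_spread(values):
    return max(values) - min(values)


def summarize_refactored(values):
    return compute_mean(values), compute_spread(values)


print(summarize([1.0, 2.0, 4.0]))             # (2.333..., 3.0)
print(summarize_refactored([1.0, 2.0, 4.0]))  # same result

The full source of the module containing the flagged method follows.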

"""The central module containing all code dealing with importing data from
Netzentwicklungsplan 2035, Version 2021, Szenario C
"""

from pathlib import Path
import datetime
import json
import time

from sqlalchemy import Column, Float, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import numpy as np
import pandas as pd
import yaml

from egon.data import db
from egon.data.config import settings
from egon.data.datasets import Dataset
from egon.data.metadata import (
    context,
    generate_resource_fields_from_sqla_model,
    license_ccby,
    meta_metadata,
    sources,
)
import egon.data.config

Base = declarative_base()


class EgonScenarioCapacities(Base):
    __tablename__ = "egon_scenario_capacities"
    __table_args__ = {"schema": "supply"}
    index = Column(Integer, primary_key=True)
    component = Column(String(25))
    carrier = Column(String(50))
    capacity = Column(Float)
    nuts = Column(String(12))
    scenario_name = Column(String(50))


class NEP2021ConvPowerPlants(Base):
    __tablename__ = "egon_nep_2021_conventional_powerplants"
    __table_args__ = {"schema": "supply"}
    index = Column(String(50), primary_key=True)
    bnetza_id = Column(String(50))
    name = Column(String(100))
    name_unit = Column(String(50))
    carrier_nep = Column(String(50))
    carrier = Column(String(12))
    chp = Column(String(12))
    postcode = Column(String(12))
    city = Column(String(50))
    federal_state = Column(String(12))
    commissioned = Column(String(12))
    status = Column(String(50))
    capacity = Column(Float)
    a2035_chp = Column(String(12))
    a2035_capacity = Column(Float)
    b2035_chp = Column(String(12))
    b2035_capacity = Column(Float)
    c2035_chp = Column(String(12))
    c2035_capacity = Column(Float)
    b2040_chp = Column(String(12))
    b2040_capacity = Column(Float)


class ScenarioCapacities(Dataset):
    """
    Create and fill table with installed generation capacities in Germany

    This dataset creates and fills a table with the installed generation capacities in
    Germany in a lower spatial resolution (either per federal state or on national level).
    This data comes from external sources (e.g. the German grid development plan for scenario eGon2035).
    The table is used in downstream datasets to define target values for the installed capacities.


    *Dependencies*
      * :py:func:`Setup <egon.data.datasets.database.setup>`
      * :py:class:`PypsaEurSec <egon.data.datasets.pypsaeursec.PypsaEurSec>`
      * :py:class:`Vg250 <egon.data.datasets.vg250.Vg250>`
      * :py:class:`DataBundle <egon.data.datasets.data_bundle.DataBundle>`
      * :py:class:`ZensusPopulation <egon.data.datasets.zensus.ZensusPopulation>`


    *Resulting tables*
      * :py:class:`supply.egon_scenario_capacities <egon.data.datasets.scenario_capacities.EgonScenarioCapacities>` is created and filled
      * :py:class:`supply.egon_nep_2021_conventional_powerplants <egon.data.datasets.scenario_capacities.NEP2021ConvPowerPlants>` is created and filled

    """

    #:
    name: str = "ScenarioCapacities"
    #:
    version: str = "0.0.13"

    def __init__(self, dependencies):
        super().__init__(
            name=self.name,
            version=self.version,
            dependencies=dependencies,
            tasks=(create_table, insert_data_nep, eGon100_capacities, add_metadata),
        )



def create_table():
    """Create input tables for scenario setup

    Returns
    -------
    None.

    """

    engine = db.engine()
    db.execute_sql("CREATE SCHEMA IF NOT EXISTS supply;")
    EgonScenarioCapacities.__table__.drop(bind=engine, checkfirst=True)
    NEP2021ConvPowerPlants.__table__.drop(bind=engine, checkfirst=True)
    EgonScenarioCapacities.__table__.create(bind=engine, checkfirst=True)
    NEP2021ConvPowerPlants.__table__.create(bind=engine, checkfirst=True)


def nuts_mapping():
    # Mapping of German federal state abbreviations to NUTS1 codes
    nuts_mapping = {
        "BW": "DE1",
        "NW": "DEA",
        "HE": "DE7",
        "BB": "DE4",
        "HB": "DE5",
        "RP": "DEB",
        "ST": "DEE",
        "SH": "DEF",
        "MV": "DE8",
        "TH": "DEG",
        "NI": "DE9",
        "SN": "DED",
        "HH": "DE6",
        "SL": "DEC",
        "BE": "DE3",
        "BY": "DE2",
    }

    return nuts_mapping


def insert_capacities_per_federal_state_nep():
    """Inserts installed capacities per federal state according to
    NEP 2035 (version 2021), scenario 2035 C

    Returns
    -------
    None.

    """

    sources = egon.data.config.datasets()["scenario_input"]["sources"]
    targets = egon.data.config.datasets()["scenario_input"]["targets"]

    # Connect to local database
    engine = db.engine()

    # Delete rows if already exist
    db.execute_sql(
        f"""
        DELETE FROM
        {targets['scenario_capacities']['schema']}.
        {targets['scenario_capacities']['table']}
        WHERE scenario_name = 'eGon2035'
        AND nuts != 'DE'
        """
    )

    # read-in installed capacities per federal state of germany
    target_file = (
        Path(".")
        / "data_bundle_egon_data"
        / "nep2035_version2021"
        / sources["eGon2035"]["capacities"]
    )

    df = pd.read_excel(
        target_file,
        sheet_name="1.Entwurf_NEP2035_V2021",
        index_col="Unnamed: 0",
    )

    df_draft = pd.read_excel(
        target_file,
        sheet_name="Entwurf_des_Szenariorahmens",
        index_col="Unnamed: 0",
    )

    # Import data on wind offshore capacities
    df_windoff = pd.read_excel(
        target_file,
        sheet_name="WInd_Offshore_NEP",
    ).dropna(subset=["Bundesland", "Netzverknuepfungspunkt"])

    # Remove trailing whitespace from column Bundesland
    df_windoff["Bundesland"] = df_windoff["Bundesland"].str.strip()

    # Group and sum capacities per federal state
    df_windoff_fs = (
        df_windoff[["Bundesland", "C 2035"]].groupby(["Bundesland"]).sum()
    )

    # List federal states with an assigned wind offshore capacity
    index_list = list(df_windoff_fs.index.values)

    # Overwrite capacities in df with more accurate values from
    # df_windoff_fs

    for state in index_list:
        df.at["Wind offshore", state] = (
            df_windoff_fs.at[state, "C 2035"] / 1000
        )

    # sort NEP-carriers:
    rename_carrier = {
        "Wind onshore": "wind_onshore",
        "Wind offshore": "wind_offshore",
        "sonstige Konventionelle": "others",
        "Speicherwasser": "reservoir",
        "Laufwasser": "run_of_river",
        "Biomasse": "biomass",
        "Erdgas": "gas",
        "Kuppelgas": "gas",
        "PV (Aufdach)": "solar_rooftop",
        "PV (Freiflaeche)": "solar",
        "Pumpspeicher": "pumped_hydro",
        "sonstige EE": "others",
        "Oel": "oil",
        "Haushaltswaermepumpen": "residential_rural_heat_pump",
        "KWK < 10 MW": "small_chp",
    }
    # 'Elektromobilitaet gesamt': 'transport',
    # 'Elektromobilitaet privat': 'transport'}

    # nuts1 to federal state in Germany
    map_nuts = pd.read_sql(
        f"""
        SELECT DISTINCT ON (nuts) gen, nuts
        FROM {sources['boundaries']['schema']}.{sources['boundaries']['table']}
        """,
        engine,
        index_col="gen",
    )

    insert_data = pd.DataFrame()

    scaled_carriers = [
        "Haushaltswaermepumpen",
        "PV (Aufdach)",
        "PV (Freiflaeche)",
    ]

    for bl in map_nuts.index:
        data = pd.DataFrame(df[bl])

        # if distribution to federal states is not provided,
        # use data from draft of scenario report
        for c in scaled_carriers:
            data.loc[c, bl] = (
                df_draft.loc[c, bl]
                / df_draft.loc[c, "Summe"]
                * df.loc[c, "Summe"]
            )

        # split hydro into run of river and reservoir
        # according to draft of scenario report
        if data.loc["Lauf- und Speicherwasser", bl] > 0:
            for c in ["Speicherwasser", "Laufwasser"]:
                data.loc[c, bl] = (
                    data.loc["Lauf- und Speicherwasser", bl]
                    * df_draft.loc[c, bl]
                    / df_draft.loc[["Speicherwasser", "Laufwasser"], bl].sum()
                )

        data["carrier"] = data.index.map(rename_carrier)
        data = data.groupby(data.carrier)[bl].sum().reset_index()
        data["component"] = "generator"
        data["nuts"] = map_nuts.nuts[bl]
        data["scenario_name"] = "eGon2035"

        # According to NEP, each heatpump has 5kW_el installed capacity
        # source: Entwurf des Szenariorahmens NEP 2035, version 2021, page 47
        data.loc[data.carrier == "residential_rural_heat_pump", bl] *= 5e-6
        data.loc[
            data.carrier == "residential_rural_heat_pump", "component"
        ] = "link"

        data = data.rename(columns={bl: "capacity"})

        # convert GW to MW
        data.capacity *= 1e3

        insert_data = insert_data.append(data)

    # Get aggregated capacities from nep's power plant list for certain carriers

    carriers = ["oil", "other_non_renewable", "pumped_hydro"]

    capacities_list = aggr_nep_capacities(carriers)

    # Filter by carrier
    updated = insert_data[insert_data["carrier"].isin(carriers)]

    # Merge to replace capacities for carriers "oil", "other_non_renewable" and
    # "pumped_hydro"
    updated = (
        updated.merge(capacities_list, on=["carrier", "nuts"], how="left")
        .fillna(0)
        .drop(["capacity"], axis=1)
        .rename(columns={"c2035_capacity": "capacity"})
    )

    # Remove updated entries from df
    original = insert_data[~insert_data["carrier"].isin(carriers)]

    # Join dfs
    insert_data = pd.concat([original, updated])

    # Insert data to db
    insert_data.to_sql(
        targets["scenario_capacities"]["table"],
        engine,
        schema=targets["scenario_capacities"]["schema"],
        if_exists="append",
        index=insert_data.index,
    )

    # Add district heating data according to energy and full load hours
    district_heating_input()


def population_share():
    """Calculate share of population in testmode

    Returns
    -------
    float
        Share of population in testmode

    """

    sources = egon.data.config.datasets()["scenario_input"]["sources"]

    return (
        pd.read_sql(
            f"""
            SELECT SUM(population)
            FROM {sources['zensus_population']['schema']}.
            {sources['zensus_population']['table']}
            WHERE population>0
            """,
            con=db.engine(),
        )["sum"][0]
        / 80324282
    )


def aggr_nep_capacities(carriers):
    """Aggregates capacities from NEP power plants list by carrier and federal
    state

    Returns
    -------
    pandas.DataFrame
        DataFrame with capacities per federal state and carrier

    """
    # Get list of power plants from nep
    nep_capacities = insert_nep_list_powerplants(export=False)[
        ["federal_state", "carrier", "c2035_capacity"]
    ]

    # Sum up capacities per federal state and carrier
    capacities_list = (
        nep_capacities.groupby(["federal_state", "carrier"])["c2035_capacity"]
        .sum()
        .to_frame()
        .reset_index()
    )

    # Neglect entries with carriers not in argument
    capacities_list = capacities_list[capacities_list.carrier.isin(carriers)]

    # Include NUTS code
    capacities_list["nuts"] = capacities_list.federal_state.map(nuts_mapping())

    # Drop entries for foreign plants with nan values and federal_state column
    capacities_list = capacities_list.dropna(subset=["nuts"]).drop(
        columns=["federal_state"]
    )

    return capacities_list


def map_carrier():
    """Map carriers from NEP and Marktstammdatenregister to carriers from eGon

    Returns
    -------
    pandas.Series
        List of mapped carriers

    """
    return pd.Series(
        data={
            "Abfall": "others",
            "Erdgas": "gas",
            "Sonstige\nEnergieträger": "others",
            "Steinkohle": "coal",
            "Kuppelgase": "gas",
            "Mineralöl-\nprodukte": "oil",
            "Braunkohle": "lignite",
            "Waerme": "others",
            "Mineraloelprodukte": "oil",
            "NichtBiogenerAbfall": "others",
            "AndereGase": "gas",
            "Sonstige_Energietraeger": "others",
            "Kernenergie": "nuclear",
            "Pumpspeicher": "pumped_hydro",
            "Mineralöl-\nProdukte": "oil",
        }
    )


def insert_nep_list_powerplants(export=True):
    """Insert list of conventional powerplants attached to the approval
    of the scenario report by BNetzA

    Parameters
    ----------
    export : bool
        Choose if the NEP list should be exported to the database.
        The default is True.
        If export=False a DataFrame is returned instead.

    Returns
    -------
    kw_liste_nep : pandas.DataFrame
        List of conventional power plants from the NEP if export=False
    """

    sources = egon.data.config.datasets()["scenario_input"]["sources"]
    targets = egon.data.config.datasets()["scenario_input"]["targets"]

    # Connect to local database
    engine = db.engine()

    # Read-in data from csv-file
    target_file = (
        Path(".")
        / "data_bundle_egon_data"
        / "nep2035_version2021"
        / sources["eGon2035"]["list_conv_pp"]
    )

    kw_liste_nep = pd.read_csv(target_file, delimiter=";", decimal=",")

    # Adjust column names
    kw_liste_nep = kw_liste_nep.rename(
        columns={
            "BNetzA-ID": "bnetza_id",
            "Kraftwerksname": "name",
            "Blockname": "name_unit",
            "Energieträger": "carrier_nep",
            "KWK\nJa/Nein": "chp",
            "PLZ": "postcode",
            "Ort": "city",
            "Bundesland/\nLand": "federal_state",
            "Inbetrieb-\nnahmejahr": "commissioned",
            "Status": "status",
            "el. Leistung\n06.02.2020": "capacity",
            "A 2035:\nKWK-Ersatz": "a2035_chp",
            "A 2035:\nLeistung": "a2035_capacity",
            "B 2035\nKWK-Ersatz": "b2035_chp",
            "B 2035:\nLeistung": "b2035_capacity",
            "C 2035:\nKWK-Ersatz": "c2035_chp",
            "C 2035:\nLeistung": "c2035_capacity",
            "B 2040:\nKWK-Ersatz": "b2040_chp",
            "B 2040:\nLeistung": "b2040_capacity",
        }
    )

    # Cut data to federal state if in testmode
    boundary = settings()["egon-data"]["--dataset-boundary"]
    if boundary != "Everything":
        map_states = {
            "Baden-Württemberg": "BW",
            "Nordrhein-Westfalen": "NW",
            "Hessen": "HE",
            "Brandenburg": "BB",
            "Bremen": "HB",
            "Rheinland-Pfalz": "RP",
            "Sachsen-Anhalt": "ST",
            "Schleswig-Holstein": "SH",
            "Mecklenburg-Vorpommern": "MV",
            "Thüringen": "TH",
            "Niedersachsen": "NI",
            "Sachsen": "SN",
            "Hamburg": "HH",
            "Saarland": "SL",
            "Berlin": "BE",
            "Bayern": "BY",
        }

        kw_liste_nep = kw_liste_nep[
            kw_liste_nep.federal_state.isin([map_states[boundary], np.nan])
        ]

        for col in [
            "capacity",
            "a2035_capacity",
            "b2035_capacity",
            "c2035_capacity",
            "b2040_capacity",
        ]:
            kw_liste_nep.loc[
                kw_liste_nep[kw_liste_nep.federal_state.isnull()].index, col
            ] *= population_share()

    kw_liste_nep["carrier"] = map_carrier()[kw_liste_nep.carrier_nep].values

    if export is True:
        # Insert data to db
        kw_liste_nep.to_sql(
            targets["nep_conventional_powerplants"]["table"],
            engine,
            schema=targets["nep_conventional_powerplants"]["schema"],
            if_exists="replace",
        )
    else:
        return kw_liste_nep


def district_heating_input():
    """Imports data for district heating networks in Germany

    Returns
    -------
    None.

    """

    sources = egon.data.config.datasets()["scenario_input"]["sources"]

    # import data to dataframe
    file = (
        Path(".")
        / "data_bundle_egon_data"
        / "nep2035_version2021"
        / sources["eGon2035"]["capacities"]
    )
    df = pd.read_excel(
        file, sheet_name="Kurzstudie_KWK", dtype={"Wert": float}
    )
    df.set_index(["Energietraeger", "Name"], inplace=True)

    # Scale values to population share in testmode
    if settings()["egon-data"]["--dataset-boundary"] != "Everything":
        df.loc[
            pd.IndexSlice[:, "Fernwaermeerzeugung"], "Wert"
        ] *= population_share()

    # Connect to database
    engine = db.engine()
    session = sessionmaker(bind=engine)()

    # insert heatpumps and resistive heater as link
    for c in ["Grosswaermepumpe", "Elektrodenheizkessel"]:
        entry = EgonScenarioCapacities(
            component="link",
            scenario_name="eGon2035",
            nuts="DE",
            carrier="urban_central_"
            + ("heat_pump" if c == "Grosswaermepumpe" else "resistive_heater"),
            capacity=df.loc[(c, "Fernwaermeerzeugung"), "Wert"]
            * 1e6
            / df.loc[(c, "Volllaststunden"), "Wert"]
            / df.loc[(c, "Wirkungsgrad"), "Wert"],
        )

        session.add(entry)

    # insert solar- and geothermal as generator
    for c in ["Geothermie", "Solarthermie"]:
        entry = EgonScenarioCapacities(
            component="generator",
            scenario_name="eGon2035",
            nuts="DE",
            carrier="urban_central_"
            + (
                "solar_thermal_collector"
                if c == "Solarthermie"
                else "geo_thermal"
            ),
            capacity=df.loc[(c, "Fernwaermeerzeugung"), "Wert"]
            * 1e6
            / df.loc[(c, "Volllaststunden"), "Wert"],
        )

        session.add(entry)

    session.commit()


def insert_data_nep():
    """Overall function for importing scenario input data for eGon2035 scenario

    Returns
    -------
    None.

    """

    insert_nep_list_powerplants(export=True)

    insert_capacities_per_federal_state_nep()


def eGon100_capacities():
    """Inserts installed capacities for the eGon100 scenario

    Returns
    -------
    None.

    """

    sources = egon.data.config.datasets()["scenario_input"]["sources"]
    targets = egon.data.config.datasets()["scenario_input"]["targets"]

    # read-in installed capacities
    execute_pypsa_eur_sec = False
    cwd = Path(".")

    if execute_pypsa_eur_sec:
        filepath = cwd / "run-pypsa-eur-sec"
        pypsa_eur_sec_repos = filepath / "pypsa-eur-sec"
        # Read YAML file
        pes_egonconfig = pypsa_eur_sec_repos / "config_egon.yaml"
        with open(pes_egonconfig, "r") as stream:
            data_config = yaml.safe_load(stream)

        target_file = (
            pypsa_eur_sec_repos
            / "results"
            / data_config["run"]
            / "csvs"
            / sources["eGon100RE"]["capacities"]
        )

    else:
        target_file = (
            cwd
            / "data_bundle_egon_data"
            / "pypsa_eur_sec"
            / "2022-07-26-egondata-integration"
            / "csvs"
            / sources["eGon100RE"]["capacities"]
        )

    df = pd.read_csv(target_file, skiprows=5)
    df.columns = ["component", "country", "carrier", "p_nom"]

    df.set_index("carrier", inplace=True)

    df = df[df.country.str[:2] == "DE"]

    # Drop country column
    df.drop("country", axis=1, inplace=True)

    # Drop components which will be optimized in eGo
    unused_carrier = [
        "BEV_charger",
        "DAC",
        "H2 Electrolysis",
        "electricity distribution grid",
        "home battery charger",
        "home battery discharger",
        "H2",
        "Li_ion",
        "home battery",
        "residential rural water tanks charger",
        "residential rural water tanks discharger",
        "services rural water tanks charger",
        "services rural water tanks discharger",
        "residential rural water tanks",
        "services rural water tanks",
        "urban central water tanks",
        "urban central water tanks charger",
        "urban central water tanks discharger",
        "H2 Fuel Cell",
    ]

    df = df[~df.index.isin(unused_carrier)]

    df.index = df.index.str.replace(" ", "_")

    # Aggregate offshore wind
    df = df.append(
        pd.DataFrame(
            index=["wind_offshore"],
            data={
                "p_nom": (df.p_nom["offwind-ac"] + df.p_nom["offwind-dc"]),
                "component": df.component["offwind-ac"],
            },
        )
    )
    df = df.drop(["offwind-ac", "offwind-dc"])

    # Aggregate technologies with and without carbon_capture (CC)
    for carrier in ["SMR", "urban_central_gas_CHP"]:
        df.p_nom[carrier] += df.p_nom[f"{carrier}_CC"]
        df = df.drop([f"{carrier}_CC"])

    # Aggregate residential and services rural heat supply
    for merge_carrier in [
        "rural_resistive_heater",
        "rural_ground_heat_pump",
        "rural_gas_boiler",
        "rural_solar_thermal",
    ]:
        if f"residential_{merge_carrier}" in df.index:
            df = df.append(
                pd.DataFrame(
                    index=[merge_carrier],
                    data={
                        "p_nom": (
                            df.p_nom[f"residential_{merge_carrier}"]
                            + df.p_nom[f"services_{merge_carrier}"]
                        ),
                        "component": df.component[
                            f"residential_{merge_carrier}"
                        ],
                    },
                )
            )
            df = df.drop(
                [f"residential_{merge_carrier}", f"services_{merge_carrier}"]
            )

    # Rename carriers
    df.rename(
        {
            "onwind": "wind_onshore",
            "ror": "run_of_river",
            "PHS": "pumped_hydro",
            "OCGT": "gas",
            "rural_ground_heat_pump": "residential_rural_heat_pump",
            "urban_central_air_heat_pump": "urban_central_heat_pump",
            "urban_central_solar_thermal": (
                "urban_central_solar_thermal_collector"
            ),
        },
        inplace=True,
    )

    # Reset index
    df = df.reset_index()

    # Rename columns
    df.rename(
        {"p_nom": "capacity", "index": "carrier"}, axis="columns", inplace=True
    )

    df["scenario_name"] = "eGon100RE"
    df["nuts"] = "DE"

    db.execute_sql(
        f"""
        DELETE FROM
        {targets['scenario_capacities']['schema']}.{targets['scenario_capacities']['table']}
        WHERE scenario_name='eGon100RE'
        """
    )

    df.to_sql(
        targets["scenario_capacities"]["table"],
        schema=targets["scenario_capacities"]["schema"],
        con=db.engine(),
        if_exists="append",
        index=False,
    )



def add_metadata():
    """Add metadata to supply.egon_scenario_capacities

    Returns
    -------
    None.

    """

    # Import column names and datatypes
    fields = pd.DataFrame(
        generate_resource_fields_from_sqla_model(EgonScenarioCapacities)
    ).set_index("name")

    # Set descriptions and units
    fields.loc["index", "description"] = "Index"
    fields.loc[
        "component", "description"
    ] = "Name of representative PyPSA component"
    fields.loc["carrier", "description"] = "Name of carrier"
    fields.loc["capacity", "description"] = "Installed capacity"
    fields.loc["capacity", "unit"] = "MW"
    fields.loc[
        "nuts", "description"
    ] = "NUTS region, either federal state or Germany"
    fields.loc[
        "scenario_name", "description"
    ] = "Name of corresponding eGon scenario"

    # Reformat pandas.DataFrame to dict
    fields = fields.reset_index().to_dict(orient="records")

    meta = {
        "name": "supply.egon_scenario_capacities",
        "title": "eGon scenario capacities",
        "id": "WILL_BE_SET_AT_PUBLICATION",
        "description": "Installed capacities of scenarios used in the eGon project",
        "language": ["de-DE"],
        "publicationDate": datetime.date.today().isoformat(),
        "context": context(),
        "spatial": {
            "location": None,
            "extent": "Germany",
            "resolution": None,
        },
        "sources": [
            sources()["nep2021"],
            sources()["vg250"],
            sources()["zensus"],
            sources()["egon-data"],
        ],
        "licenses": [
            license_ccby(
                "© Übertragungsnetzbetreiber; "
                "© Bundesamt für Kartographie und Geodäsie 2020 (Daten verändert); "
                "© Statistische Ämter des Bundes und der Länder 2014; "
                "© Jonathan Amme, Clara Büttner, Ilka Cußmann, Julian Endres, Carlos Epia, Stephan Günther, Ulf Müller, Amélia Nadal, Guido Pleßmann, Francesco Witte",
            )
        ],
        "contributors": [
            {
                "title": "Clara Büttner",
                "email": "http://github.com/ClaraBuettner",
                "date": time.strftime("%Y-%m-%d"),
                "object": None,
                "comment": "Imported data",
            },
        ],
        "resources": [
            {
                "profile": "tabular-data-resource",
                "name": "supply.egon_scenario_capacities",
                "path": None,
                "format": "PostgreSQL",
                "encoding": "UTF-8",
                "schema": {
                    "fields": fields,
                    "primaryKey": ["index"],
                    "foreignKeys": [],
                },
                "dialect": {"delimiter": None, "decimalSeparator": "."},
            }
        ],
        "metaMetadata": meta_metadata(),
    }

    # Create json dump
    meta_json = "'" + json.dumps(meta) + "'"

    # Add metadata as a comment to the table
    db.submit_comment(
        meta_json,
        EgonScenarioCapacities.__table__.schema,
        EgonScenarioCapacities.__table__.name,
    )
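
Applied to the flagged method, one possible decomposition is sketched below. This is an untested illustration that assumes the module's imports; the helper names describe_fields and submit_table_comment are made up for this sketch and are not part of the module. add_metadata() would then only assemble the meta dictionary and call the two helpers, and each extracted block carries a descriptive name instead of a comment.

# Illustrative decomposition only; helper names are hypothetical.

def describe_fields():
    """Return the resource field list with descriptions and units set."""
    fields = pd.DataFrame(
        generate_resource_fields_from_sqla_model(EgonScenarioCapacities)
    ).set_index("name")
    descriptions = {
        "index": "Index",
        "component": "Name of representative PyPSA component",
        "carrier": "Name of carrier",
        "capacity": "Installed capacity",
        "nuts": "NUTS region, either federal state or Germany",
        "scenario_name": "Name of corresponding eGon scenario",
    }
    for column, description in descriptions.items():
        fields.loc[column, "description"] = description
    fields.loc["capacity", "unit"] = "MW"
    return fields.reset_index().to_dict(orient="records")


def submit_table_comment(meta):
    """Serialize the metadata dictionary and attach it as a table comment."""
    meta_json = "'" + json.dumps(meta) + "'"
    db.submit_comment(
        meta_json,
        EgonScenarioCapacities.__table__.schema,
        EgonScenarioCapacities.__table__.name,
    )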