Passed
Pull Request — dev (#1122)
created by unknown · 04:34

data.datasets.scenario_capacities.add_metadata()   B

Complexity

Conditions 1

Size

Total Lines 97
Code Lines 62

Duplication

Lines 0
Ratio 0 %

Importance

Changes 0
Metric Value
eloc 62
dl 0
loc 97
rs 8.2436
c 0
b 0
f 0
cc 1
nop 0

How to fix: Long Method

Long Method

Small methods make your code easier to understand, especially when combined with a good name. And when a method is small, finding a good name for it is usually much easier.

For example, if you find yourself adding comments inside a method's body, that is usually a sign that the commented part should be extracted into a new method, with the comment serving as a starting point for the new method's name.

Commonly applied refactorings include Extract Method, as sketched below.
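The following is a minimal, self-contained Extract Method sketch in Python. The build_report(), drop_invalid_rows() and summarise() functions are purely illustrative and are not part of the module under review; they only demonstrate how the commented sections of a long function become small, well-named helpers.

# Before: one long function whose sections are only separated by comments.
def build_report(rows):
    # filter out invalid rows
    valid = [r for r in rows if r.get("value") is not None]
    # compute summary statistics
    total = sum(r["value"] for r in valid)
    average = total / len(valid) if valid else 0.0
    # format the output
    return f"{len(valid)} rows, total={total}, average={average:.2f}"


# After: each commented section is extracted into a helper named after its comment.
def drop_invalid_rows(rows):
    return [r for r in rows if r.get("value") is not None]


def summarise(valid):
    total = sum(r["value"] for r in valid)
    average = total / len(valid) if valid else 0.0
    return total, average


def build_report(rows):
    valid = drop_invalid_rows(rows)
    total, average = summarise(valid)
    return f"{len(valid)} rows, total={total}, average={average:.2f}"

Applied to the flagged add_metadata() below, the column-description setup and the construction of the meta dictionary are natural candidates for such helpers.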

"""The central module containing all code dealing with importing data from
Netzentwicklungsplan 2035, Version 2021, Szenario C
"""

from pathlib import Path
import datetime
import json
import time

from sqlalchemy import Column, Float, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import numpy as np
import pandas as pd
import yaml

from egon.data import db
from egon.data.config import settings
from egon.data.datasets import Dataset
from egon.data.metadata import (
    context,
    generate_resource_fields_from_sqla_model,
    license_ccby,
    meta_metadata,
    sources,
)
import egon.data.config

Base = declarative_base()


class EgonScenarioCapacities(Base):
    __tablename__ = "egon_scenario_capacities"
    __table_args__ = {"schema": "supply"}
    index = Column(Integer, primary_key=True)
    component = Column(String(25))
    carrier = Column(String(50))
    capacity = Column(Float)
    nuts = Column(String(12))
    scenario_name = Column(String(50))


class NEP2021ConvPowerPlants(Base):
    __tablename__ = "egon_nep_2021_conventional_powerplants"
    __table_args__ = {"schema": "supply"}
    index = Column(String(50), primary_key=True)
    bnetza_id = Column(String(50))
    name = Column(String(100))
    name_unit = Column(String(50))
    carrier_nep = Column(String(50))
    carrier = Column(String(12))
    chp = Column(String(12))
    postcode = Column(String(12))
    city = Column(String(50))
    federal_state = Column(String(12))
    commissioned = Column(String(12))
    status = Column(String(50))
    capacity = Column(Float)
    a2035_chp = Column(String(12))
    a2035_capacity = Column(Float)
    b2035_chp = Column(String(12))
    b2035_capacity = Column(Float)
    c2035_chp = Column(String(12))
    c2035_capacity = Column(Float)
    b2040_chp = Column(String(12))
    b2040_capacity = Column(Float)


class ScenarioCapacities(Dataset):
    def __init__(self, dependencies):
        super().__init__(
            name="ScenarioCapacities",
            version="0.0.13",
            dependencies=dependencies,
            tasks=(
                create_table,
                insert_data_nep,
                eGon100_capacities,
                add_metadata,
            ),
        )


def create_table():
    """Create input tables for scenario setup

    Returns
    -------
    None.

    """

    engine = db.engine()
    db.execute_sql("CREATE SCHEMA IF NOT EXISTS supply;")
    EgonScenarioCapacities.__table__.drop(bind=engine, checkfirst=True)
    NEP2021ConvPowerPlants.__table__.drop(bind=engine, checkfirst=True)
    EgonScenarioCapacities.__table__.create(bind=engine, checkfirst=True)
    NEP2021ConvPowerPlants.__table__.create(bind=engine, checkfirst=True)


def nuts_mapping():
    """Return the mapping from federal state abbreviations to NUTS1 codes"""

    nuts_mapping = {
        "BW": "DE1",
        "NW": "DEA",
        "HE": "DE7",
        "BB": "DE4",
        "HB": "DE5",
        "RP": "DEB",
        "ST": "DEE",
        "SH": "DEF",
        "MV": "DE8",
        "TH": "DEG",
        "NI": "DE9",
        "SN": "DED",
        "HH": "DE6",
        "SL": "DEC",
        "BE": "DE3",
        "BY": "DE2",
    }

    return nuts_mapping


def insert_capacities_per_federal_state_nep():
    """Inserts installed capacities per federal state according to
    NEP 2035 (version 2021), scenario 2035 C

    Returns
    -------
    None.

    """

    sources = egon.data.config.datasets()["scenario_input"]["sources"]
    targets = egon.data.config.datasets()["scenario_input"]["targets"]

    # Connect to local database
    engine = db.engine()

    # Delete rows if already exist
    db.execute_sql(
        f"""
        DELETE FROM
        {targets['scenario_capacities']['schema']}.
        {targets['scenario_capacities']['table']}
        WHERE scenario_name = 'eGon2035'
        AND nuts != 'DE'
        """
    )

    # read-in installed capacities per federal state of Germany
    target_file = (
        Path(".")
        / "data_bundle_egon_data"
        / "nep2035_version2021"
        / sources["eGon2035"]["capacities"]
    )

    df = pd.read_excel(
        target_file,
        sheet_name="1.Entwurf_NEP2035_V2021",
        index_col="Unnamed: 0",
    )

    df_draft = pd.read_excel(
        target_file,
        sheet_name="Entwurf_des_Szenariorahmens",
        index_col="Unnamed: 0",
    )

    # Import data on wind offshore capacities
    df_windoff = pd.read_excel(
        target_file,
        sheet_name="WInd_Offshore_NEP",
    ).dropna(subset=["Bundesland", "Netzverknuepfungspunkt"])

    # Remove trailing whitespace from column Bundesland
    df_windoff["Bundesland"] = df_windoff["Bundesland"].str.strip()

    # Group and sum capacities per federal state
    df_windoff_fs = (
        df_windoff[["Bundesland", "C 2035"]].groupby(["Bundesland"]).sum()
    )

    # List federal state with an assigned wind offshore capacity
    index_list = list(df_windoff_fs.index.values)

    # Overwrite capacities in df_windoff with more accurate values from
    # df_windoff_fs

    for state in index_list:

        df.at["Wind offshore", state] = (
            df_windoff_fs.at[state, "C 2035"] / 1000
        )

    # sort NEP-carriers:
    rename_carrier = {
        "Wind onshore": "wind_onshore",
        "Wind offshore": "wind_offshore",
        "sonstige Konventionelle": "others",
        "Speicherwasser": "reservoir",
        "Laufwasser": "run_of_river",
        "Biomasse": "biomass",
        "Erdgas": "gas",
        "Kuppelgas": "gas",
        "PV (Aufdach)": "solar_rooftop",
        "PV (Freiflaeche)": "solar",
        "Pumpspeicher": "pumped_hydro",
        "sonstige EE": "others",
        "Oel": "oil",
        "Haushaltswaermepumpen": "residential_rural_heat_pump",
        "KWK < 10 MW": "small_chp",
    }
    # 'Elektromobilitaet gesamt': 'transport',
    # 'Elektromobilitaet privat': 'transport'}

    # nuts1 to federal state in Germany
    map_nuts = pd.read_sql(
        f"""
        SELECT DISTINCT ON (nuts) gen, nuts
        FROM {sources['boundaries']['schema']}.{sources['boundaries']['table']}
        """,
        engine,
        index_col="gen",
    )

    insert_data = pd.DataFrame()

    scaled_carriers = [
        "Haushaltswaermepumpen",
        "PV (Aufdach)",
        "PV (Freiflaeche)",
    ]

    for bl in map_nuts.index:

        data = pd.DataFrame(df[bl])

        # if distribution to federal states is not provided,
        # use data from draft of scenario report
        for c in scaled_carriers:
            data.loc[c, bl] = (
                df_draft.loc[c, bl]
                / df_draft.loc[c, "Summe"]
                * df.loc[c, "Summe"]
            )

        # split hydro into run of river and reservoir
        # according to draft of scenario report
        if data.loc["Lauf- und Speicherwasser", bl] > 0:
            for c in ["Speicherwasser", "Laufwasser"]:
                data.loc[c, bl] = (
                    data.loc["Lauf- und Speicherwasser", bl]
                    * df_draft.loc[c, bl]
                    / df_draft.loc[["Speicherwasser", "Laufwasser"], bl].sum()
                )

        data["carrier"] = data.index.map(rename_carrier)
        data = data.groupby(data.carrier)[bl].sum().reset_index()
        data["component"] = "generator"
        data["nuts"] = map_nuts.nuts[bl]
        data["scenario_name"] = "eGon2035"

        # According to NEP, each heatpump has 5kW_el installed capacity
        # source: Entwurf des Szenariorahmens NEP 2035, version 2021, page 47
        data.loc[data.carrier == "residential_rural_heat_pump", bl] *= 5e-6
        data.loc[
            data.carrier == "residential_rural_heat_pump", "component"
        ] = "link"

        data = data.rename(columns={bl: "capacity"})

        # convert GW to MW
        data.capacity *= 1e3

        insert_data = insert_data.append(data)

    # Get aggregated capacities from nep's power plant list for certain carriers

    carriers = ["oil", "other_non_renewable", "pumped_hydro"]

    capacities_list = aggr_nep_capacities(carriers)

    # Filter by carrier
    updated = insert_data[insert_data["carrier"].isin(carriers)]

    # Merge to replace capacities for carriers "oil", "other_non_renewable" and
    # "pumped_hydro"
    updated = (
        updated.merge(capacities_list, on=["carrier", "nuts"], how="left")
        .fillna(0)
        .drop(["capacity"], axis=1)
        .rename(columns={"c2035_capacity": "capacity"})
    )

    # Remove updated entries from df
    original = insert_data[~insert_data["carrier"].isin(carriers)]

    # Join dfs
    insert_data = pd.concat([original, updated])

    # Insert data to db
    insert_data.to_sql(
        targets["scenario_capacities"]["table"],
        engine,
        schema=targets["scenario_capacities"]["schema"],
        if_exists="append",
        index=insert_data.index,
    )

    # Add district heating data according to energy and full load hours
    district_heating_input()


def population_share():
    """Calculate share of population in testmode

    Returns
    -------
    float
        Share of population in testmode

    """

    sources = egon.data.config.datasets()["scenario_input"]["sources"]

    return (
        pd.read_sql(
            f"""
            SELECT SUM(population)
            FROM {sources['zensus_population']['schema']}.
            {sources['zensus_population']['table']}
            WHERE population>0
            """,
            con=db.engine(),
        )["sum"][0]
        / 80324282
    )


def aggr_nep_capacities(carriers):
    """Aggregates capacities from NEP power plants list by carrier and federal
    state

    Returns
    -------
    pandas.DataFrame
        Dataframe with capacities per federal state and carrier

    """
    # Get list of power plants from nep
    nep_capacities = insert_nep_list_powerplants(export=False)[
        ["federal_state", "carrier", "c2035_capacity"]
    ]

    # Sum up capacities per federal state and carrier
    capacities_list = (
        nep_capacities.groupby(["federal_state", "carrier"])["c2035_capacity"]
        .sum()
        .to_frame()
        .reset_index()
    )

    # Neglect entries with carriers not in argument
    capacities_list = capacities_list[capacities_list.carrier.isin(carriers)]

    # Include NUTS code
    capacities_list["nuts"] = capacities_list.federal_state.map(nuts_mapping())

    # Drop entries for foreign plants with nan values and federal_state column
    capacities_list = capacities_list.dropna(subset=["nuts"]).drop(
        columns=["federal_state"]
    )

    return capacities_list


def map_carrier():
    """Map carriers from NEP and Marktstammdatenregister to carriers from eGon

    Returns
    -------
    pandas.Series
        List of mapped carriers

    """
    return pd.Series(
        data={
            "Abfall": "others",
            "Erdgas": "gas",
            "Sonstige\nEnergieträger": "others",
            "Steinkohle": "coal",
            "Kuppelgase": "gas",
            "Mineralöl-\nprodukte": "oil",
            "Braunkohle": "lignite",
            "Waerme": "others",
            "Mineraloelprodukte": "oil",
            "NichtBiogenerAbfall": "others",
            "AndereGase": "gas",
            "Sonstige_Energietraeger": "others",
            "Kernenergie": "nuclear",
            "Pumpspeicher": "pumped_hydro",
            "Mineralöl-\nProdukte": "oil",
        }
    )


def insert_nep_list_powerplants(export=True):
    """Insert list of conventional powerplants attached to the approval
    of the scenario report by BNetzA

    Parameters
    ----------
    export : bool
        Choose if the NEP list should be exported to the
        database. The default is True.
        If export=False, a DataFrame is returned instead.

    Returns
    -------
    kw_liste_nep : pandas.DataFrame
        List of conventional power plants from the NEP if export=False
    """

    sources = egon.data.config.datasets()["scenario_input"]["sources"]
    targets = egon.data.config.datasets()["scenario_input"]["targets"]

    # Connect to local database
    engine = db.engine()

    # Read-in data from csv-file
    target_file = (
        Path(".")
        / "data_bundle_egon_data"
        / "nep2035_version2021"
        / sources["eGon2035"]["list_conv_pp"]
    )

    kw_liste_nep = pd.read_csv(target_file, delimiter=";", decimal=",")

    # Adjust column names
    kw_liste_nep = kw_liste_nep.rename(
        columns={
            "BNetzA-ID": "bnetza_id",
            "Kraftwerksname": "name",
            "Blockname": "name_unit",
            "Energieträger": "carrier_nep",
            "KWK\nJa/Nein": "chp",
            "PLZ": "postcode",
            "Ort": "city",
            "Bundesland/\nLand": "federal_state",
            "Inbetrieb-\nnahmejahr": "commissioned",
            "Status": "status",
            "el. Leistung\n06.02.2020": "capacity",
            "A 2035:\nKWK-Ersatz": "a2035_chp",
            "A 2035:\nLeistung": "a2035_capacity",
            "B 2035\nKWK-Ersatz": "b2035_chp",
            "B 2035:\nLeistung": "b2035_capacity",
            "C 2035:\nKWK-Ersatz": "c2035_chp",
            "C 2035:\nLeistung": "c2035_capacity",
            "B 2040:\nKWK-Ersatz": "b2040_chp",
            "B 2040:\nLeistung": "b2040_capacity",
        }
    )

    # Cut data to federal state if in testmode
    boundary = settings()["egon-data"]["--dataset-boundary"]
    if boundary != "Everything":
        map_states = {
            "Baden-Württemberg": "BW",
            "Nordrhein-Westfalen": "NW",
            "Hessen": "HE",
            "Brandenburg": "BB",
            "Bremen": "HB",
            "Rheinland-Pfalz": "RP",
            "Sachsen-Anhalt": "ST",
            "Schleswig-Holstein": "SH",
            "Mecklenburg-Vorpommern": "MV",
            "Thüringen": "TH",
            "Niedersachsen": "NI",
            "Sachsen": "SN",
            "Hamburg": "HH",
            "Saarland": "SL",
            "Berlin": "BE",
            "Bayern": "BY",
        }

        kw_liste_nep = kw_liste_nep[
            kw_liste_nep.federal_state.isin([map_states[boundary], np.nan])
        ]

        for col in [
            "capacity",
            "a2035_capacity",
            "b2035_capacity",
            "c2035_capacity",
            "b2040_capacity",
        ]:
            kw_liste_nep.loc[
                kw_liste_nep[kw_liste_nep.federal_state.isnull()].index, col
            ] *= population_share()

    kw_liste_nep["carrier"] = map_carrier()[kw_liste_nep.carrier_nep].values

    if export is True:
        # Insert data to db
        kw_liste_nep.to_sql(
            targets["nep_conventional_powerplants"]["table"],
            engine,
            schema=targets["nep_conventional_powerplants"]["schema"],
            if_exists="replace",
        )
    else:
        return kw_liste_nep


def district_heating_input():
    """Imports data for district heating networks in Germany

    Returns
    -------
    None.

    """

    sources = egon.data.config.datasets()["scenario_input"]["sources"]

    # import data to dataframe
    file = (
        Path(".")
        / "data_bundle_egon_data"
        / "nep2035_version2021"
        / sources["eGon2035"]["capacities"]
    )
    df = pd.read_excel(
        file, sheet_name="Kurzstudie_KWK", dtype={"Wert": float}
    )
    df.set_index(["Energietraeger", "Name"], inplace=True)

    # Scale values to population share in testmode
    if settings()["egon-data"]["--dataset-boundary"] != "Everything":
        df.loc[
            pd.IndexSlice[:, "Fernwaermeerzeugung"], "Wert"
        ] *= population_share()

    # Connect to database
    engine = db.engine()
    session = sessionmaker(bind=engine)()

    # insert heatpumps and resistive heater as link
    for c in ["Grosswaermepumpe", "Elektrodenheizkessel"]:
        entry = EgonScenarioCapacities(
            component="link",
            scenario_name="eGon2035",
            nuts="DE",
            carrier="urban_central_"
            + ("heat_pump" if c == "Grosswaermepumpe" else "resistive_heater"),
            capacity=df.loc[(c, "Fernwaermeerzeugung"), "Wert"]
            * 1e6
            / df.loc[(c, "Volllaststunden"), "Wert"]
            / df.loc[(c, "Wirkungsgrad"), "Wert"],
        )

        session.add(entry)

    # insert solar- and geothermal as generator
    for c in ["Geothermie", "Solarthermie"]:
        entry = EgonScenarioCapacities(
            component="generator",
            scenario_name="eGon2035",
            nuts="DE",
            carrier="urban_central_"
            + (
                "solar_thermal_collector"
                if c == "Solarthermie"
                else "geo_thermal"
            ),
            capacity=df.loc[(c, "Fernwaermeerzeugung"), "Wert"]
            * 1e6
            / df.loc[(c, "Volllaststunden"), "Wert"],
        )

        session.add(entry)

    session.commit()


def insert_data_nep():
    """Overall function for importing scenario input data for eGon2035 scenario

    Returns
    -------
    None.

    """

    insert_nep_list_powerplants(export=True)

    insert_capacities_per_federal_state_nep()


def eGon100_capacities():
    """Inserts installed capacities for the eGon100 scenario

    Returns
    -------
    None.

    """

    sources = egon.data.config.datasets()["scenario_input"]["sources"]
    targets = egon.data.config.datasets()["scenario_input"]["targets"]

    # read-in installed capacities
    execute_pypsa_eur_sec = False
    cwd = Path(".")

    if execute_pypsa_eur_sec:
        filepath = cwd / "run-pypsa-eur-sec"
        pypsa_eur_sec_repos = filepath / "pypsa-eur-sec"
        # Read YAML file
        pes_egonconfig = pypsa_eur_sec_repos / "config_egon.yaml"
        with open(pes_egonconfig, "r") as stream:
            data_config = yaml.safe_load(stream)

        target_file = (
            pypsa_eur_sec_repos
            / "results"
            / data_config["run"]
            / "csvs"
            / sources["eGon100RE"]["capacities"]
        )

    else:
        target_file = (
            cwd
            / "data_bundle_egon_data"
            / "pypsa_eur_sec"
            / "2022-07-26-egondata-integration"
            / "csvs"
            / sources["eGon100RE"]["capacities"]
        )

    df = pd.read_csv(target_file, skiprows=5)
    df.columns = ["component", "country", "carrier", "p_nom"]

    df.set_index("carrier", inplace=True)

    df = df[df.country.str[:2] == "DE"]

    # Drop country column
    df.drop("country", axis=1, inplace=True)

    # Drop components which will be optimized in eGo
    unused_carrier = [
        "BEV_charger",
        "DAC",
        "H2 Electrolysis",
        "electricity distribution grid",
        "home battery charger",
        "home battery discharger",
        "H2",
        "Li_ion",
        "home battery",
        "residential rural water tanks charger",
        "residential rural water tanks discharger",
        "services rural water tanks charger",
        "services rural water tanks discharger",
        "residential rural water tanks",
        "services rural water tanks",
        "urban central water tanks",
        "urban central water tanks charger",
        "urban central water tanks discharger",
        "H2 Fuel Cell",
    ]

    df = df[~df.index.isin(unused_carrier)]

    df.index = df.index.str.replace(" ", "_")

    # Aggregate offshore wind
    df = df.append(
        pd.DataFrame(
            index=["wind_offshore"],
            data={
                "p_nom": (df.p_nom["offwind-ac"] + df.p_nom["offwind-dc"]),
                "component": df.component["offwind-ac"],
            },
        )
    )
    df = df.drop(["offwind-ac", "offwind-dc"])

    # Aggregate technologies with and without carbon_capture (CC)
    for carrier in ["SMR", "urban_central_gas_CHP"]:
        df.p_nom[carrier] += df.p_nom[f"{carrier}_CC"]
        df = df.drop([f"{carrier}_CC"])

    # Aggregate residential and services rural heat supply
    for merge_carrier in [
        "rural_resistive_heater",
        "rural_ground_heat_pump",
        "rural_gas_boiler",
        "rural_solar_thermal",
    ]:
        if f"residential_{merge_carrier}" in df.index:
            df = df.append(
                pd.DataFrame(
                    index=[merge_carrier],
                    data={
                        "p_nom": (
                            df.p_nom[f"residential_{merge_carrier}"]
                            + df.p_nom[f"services_{merge_carrier}"]
                        ),
                        "component": df.component[
                            f"residential_{merge_carrier}"
                        ],
                    },
                )
            )
            df = df.drop(
                [f"residential_{merge_carrier}", f"services_{merge_carrier}"]
            )

    # Rename carriers
    df.rename(
        {
            "onwind": "wind_onshore",
            "ror": "run_of_river",
            "PHS": "pumped_hydro",
            "OCGT": "gas",
            "rural_ground_heat_pump": "residential_rural_heat_pump",
            "urban_central_air_heat_pump": "urban_central_heat_pump",
            "urban_central_solar_thermal": (
                "urban_central_solar_thermal_collector"
            ),
        },
        inplace=True,
    )

    # Reset index
    df = df.reset_index()

    # Rename columns
    df.rename(
        {"p_nom": "capacity", "index": "carrier"}, axis="columns", inplace=True
    )

    df["scenario_name"] = "eGon100RE"
    df["nuts"] = "DE"

    db.execute_sql(
        f"""
        DELETE FROM
        {targets['scenario_capacities']['schema']}.{targets['scenario_capacities']['table']}
        WHERE scenario_name='eGon100RE'
        """
    )

    df.to_sql(
        targets["scenario_capacities"]["table"],
        schema=targets["scenario_capacities"]["schema"],
        con=db.engine(),
        if_exists="append",
        index=False,
    )


def add_metadata():
    """Add metadata to supply.egon_scenario_capacities

    Returns
    -------
    None.

    """

    # Import column names and datatypes
    fields = pd.DataFrame(
        generate_resource_fields_from_sqla_model(EgonScenarioCapacities)
    ).set_index("name")

    # Set descriptions and units
    fields.loc["index", "description"] = "Index"
    fields.loc[
        "component", "description"
    ] = "Name of representative PyPSA component"
    fields.loc["carrier", "description"] = "Name of carrier"
    fields.loc["capacity", "description"] = "Installed capacity"
    fields.loc["capacity", "unit"] = "MW"
    fields.loc[
        "nuts", "description"
    ] = "NUTS region, either federal state or Germany"
    fields.loc[
        "scenario_name", "description"
    ] = "Name of corresponding eGon scenario"

    # Reformat pandas.DataFrame to dict
    fields = fields.reset_index().to_dict(orient="records")

    meta = {
        "name": "supply.egon_scenario_capacities",
        "title": "eGon scenario capacities",
        "id": "WILL_BE_SET_AT_PUBLICATION",
        "description": (
            "Installed capacities of scenarios used in the eGon project"
        ),
        "language": ["de-DE"],
        "publicationDate": datetime.date.today().isoformat(),
        "context": context(),
        "spatial": {
            "location": None,
            "extent": "Germany",
            "resolution": None,
        },
        "sources": [
            sources()["nep2021"],
            sources()["vg250"],
            sources()["zensus"],
            sources()["egon-data"],
        ],
        "licenses": [
            license_ccby(
                "© Übertragungsnetzbetreiber; © Bundesamt für Kartographie und"
                " Geodäsie 2020 (Daten verändert); © Statistische Ämter des "
                "Bundes und der Länder 2014; © Jonathan Amme, Clara Büttner, "
                "Ilka Cußmann, Julian Endres, Carlos Epia, Stephan Günther, "
                "Ulf Müller, Amélia Nadal, Guido Pleßmann, Francesco Witte",
            )
        ],
        "contributors": [
            {
                "title": "Clara Büttner",
                "email": "http://github.com/ClaraBuettner",
                "date": time.strftime("%Y-%m-%d"),
                "object": None,
                "comment": "Imported data",
            },
        ],
        "resources": [
            {
                "profile": "tabular-data-resource",
                "name": "supply.egon_scenario_capacities",
                "path": None,
                "format": "PostgreSQL",
                "encoding": "UTF-8",
                "schema": {
                    "fields": fields,
                    "primaryKey": ["index"],
                    "foreignKeys": [],
                },
                "dialect": {"delimiter": None, "decimalSeparator": "."},
            }
        ],
        "metaMetadata": meta_metadata(),
    }

    # Create json dump
    meta_json = "'" + json.dumps(meta) + "'"

    # Add metadata as a comment to the table
    db.submit_comment(
        meta_json,
        EgonScenarioCapacities.__table__.schema,
        EgonScenarioCapacities.__table__.name,
    )