Passed · Pull Request — dev (#568) · by unknown · created 01:53

population_share()   A

Complexity: Conditions 1
Size: Total Lines 23, Code Lines 8
Duplication: Lines 0, Ratio 0 %
Importance: Changes 0

Metric  Value
eloc    8
dl      0
loc     23
rs      10
c       0
b       0
f       0
cc      1
nop     0
"""The central module containing all code dealing with importing data from
Netzentwicklungsplan 2035, Version 2021, Szenario C
"""

from pathlib import Path
import datetime
import json
import time

from sqlalchemy import Column, Float, Integer, String
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import numpy as np
import pandas as pd
import yaml

from egon.data import db
from egon.data.config import settings
from egon.data.datasets import Dataset
from egon.data.metadata import (
    context,
    generate_resource_fields_from_sqla_model,
    license_ccby,
    meta_metadata,
    sources,
)
import egon.data.config

Base = declarative_base()


class EgonScenarioCapacities(Base):
    __tablename__ = "egon_scenario_capacities"
    __table_args__ = {"schema": "supply"}
    index = Column(Integer, primary_key=True)
    component = Column(String(25))
    carrier = Column(String(50))
    capacity = Column(Float)
    nuts = Column(String(12))
    scenario_name = Column(String(50))


class NEP2021ConvPowerPlants(Base):
    __tablename__ = "egon_nep_2021_conventional_powerplants"
    __table_args__ = {"schema": "supply"}
    index = Column(String(50), primary_key=True)
    bnetza_id = Column(String(50))
    name = Column(String(100))
    name_unit = Column(String(50))
    carrier_nep = Column(String(50))
    carrier = Column(String(12))
    chp = Column(String(12))
    postcode = Column(String(12))
    city = Column(String(50))
    federal_state = Column(String(12))
    commissioned = Column(String(12))
    status = Column(String(50))
    capacity = Column(Float)
    a2035_chp = Column(String(12))
    a2035_capacity = Column(Float)
    b2035_chp = Column(String(12))
    b2035_capacity = Column(Float)
    c2035_chp = Column(String(12))
    c2035_capacity = Column(Float)
    b2040_chp = Column(String(12))
    b2040_capacity = Column(Float)


class ScenarioCapacities(Dataset):
    def __init__(self, dependencies):
        super().__init__(
            name="ScenarioCapacities",
            version="0.0.12",
            dependencies=dependencies,
            tasks=(create_table, insert_data_nep, eGon100_capacities, add_metadata),
        )


def create_table():
    """Create input tables for the scenario setup

    Returns
    -------
    None.

    """

    engine = db.engine()
    db.execute_sql("CREATE SCHEMA IF NOT EXISTS supply;")
    EgonScenarioCapacities.__table__.drop(bind=engine, checkfirst=True)
    NEP2021ConvPowerPlants.__table__.drop(bind=engine, checkfirst=True)
    EgonScenarioCapacities.__table__.create(bind=engine, checkfirst=True)
    NEP2021ConvPowerPlants.__table__.create(bind=engine, checkfirst=True)


def nuts_mapping():

    nuts_mapping = {
        "BW": "DE1",
        "NW": "DEA",
        "HE": "DE7",
        "BB": "DE4",
        "HB": "DE5",
        "RP": "DEB",
        "ST": "DEE",
        "SH": "DEF",
        "MV": "DE8",
        "TH": "DEG",
        "NI": "DE9",
        "SN": "DED",
        "HH": "DE6",
        "SL": "DEC",
        "BE": "DE3",
        "BY": "DE2",
    }

    return nuts_mapping


def insert_capacities_per_federal_state_nep():
    """Inserts installed capacities per federal state according to
    NEP 2035 (version 2021), scenario 2035 C

    Returns
    -------
    None.

    """

    sources = egon.data.config.datasets()["scenario_input"]["sources"]
    targets = egon.data.config.datasets()["scenario_input"]["targets"]

    # Connect to local database
    engine = db.engine()

    # Delete rows if they already exist
    db.execute_sql(
        f"""
        DELETE FROM
        {targets['scenario_capacities']['schema']}.
        {targets['scenario_capacities']['table']}
        WHERE scenario_name = 'eGon2035'
        AND nuts != 'DE'
        """
    )

    # Read in installed capacities per federal state of Germany
    target_file = (
        Path(".")
        / "data_bundle_egon_data"
        / "nep2035_version2021"
        / sources["eGon2035"]["capacities"]
    )

    df = pd.read_excel(
        target_file,
        sheet_name="1.Entwurf_NEP2035_V2021",
        index_col="Unnamed: 0",
    )

    df_draft = pd.read_excel(
        target_file,
        sheet_name="Entwurf_des_Szenariorahmens",
        index_col="Unnamed: 0",
    )

    # Import data on wind offshore capacities
    df_windoff = pd.read_excel(
        target_file,
        sheet_name="WInd_Offshore_NEP",
    ).dropna(subset=["Bundesland", "Netzverknuepfungspunkt"])

    # Remove trailing whitespace from column Bundesland
    df_windoff["Bundesland"] = df_windoff["Bundesland"].str.strip()

    # Group and sum capacities per federal state
    df_windoff_fs = (
        df_windoff[["Bundesland", "C 2035"]].groupby(["Bundesland"]).sum()
    )

    # List federal states with an assigned wind offshore capacity
    index_list = list(df_windoff_fs.index.values)

    # Overwrite capacities in df with more accurate values from
    # df_windoff_fs
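    # (df holds capacities in GW, while the offshore sheet presumably lists
    # values in MW, hence the division by 1000 below)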

    for state in index_list:

        df.at["Wind offshore", state] = (
            df_windoff_fs.at[state, "C 2035"] / 1000
        )

    # Sort NEP carriers:
    rename_carrier = {
        "Wind onshore": "wind_onshore",
        "Wind offshore": "wind_offshore",
        "sonstige Konventionelle": "others",
        "Speicherwasser": "reservoir",
        "Laufwasser": "run_of_river",
        "Biomasse": "biomass",
        "Erdgas": "gas",
        "Kuppelgas": "gas",
        "PV (Aufdach)": "solar_rooftop",
        "PV (Freiflaeche)": "solar",
        "Pumpspeicher": "pumped_hydro",
        "sonstige EE": "others",
        "Oel": "oil",
        "Haushaltswaermepumpen": "residential_rural_heat_pump",
        "KWK < 10 MW": "small_chp",
    }
    # 'Elektromobilitaet gesamt': 'transport',
    # 'Elektromobilitaet privat': 'transport'}

    # Map nuts1 codes to federal states in Germany
    map_nuts = pd.read_sql(
        f"""
        SELECT DISTINCT ON (nuts) gen, nuts
        FROM {sources['boundaries']['schema']}.{sources['boundaries']['table']}
        """,
        engine,
        index_col="gen",
    )

    insert_data = pd.DataFrame()

    scaled_carriers = [
        "Haushaltswaermepumpen",
        "PV (Aufdach)",
        "PV (Freiflaeche)",
    ]

    for bl in map_nuts.index:

        data = pd.DataFrame(df[bl])

        # If the distribution to federal states is not provided,
        # use data from the draft of the scenario report
        for c in scaled_carriers:
            data.loc[c, bl] = (
                df_draft.loc[c, bl]
                / df_draft.loc[c, "Summe"]
                * df.loc[c, "Summe"]
            )

        # Split hydro into run of river and reservoir
        # according to the draft of the scenario report
        if data.loc["Lauf- und Speicherwasser", bl] > 0:
            for c in ["Speicherwasser", "Laufwasser"]:
                data.loc[c, bl] = (
                    data.loc["Lauf- und Speicherwasser", bl]
                    * df_draft.loc[c, bl]
                    / df_draft.loc[["Speicherwasser", "Laufwasser"], bl].sum()
                )

        data["carrier"] = data.index.map(rename_carrier)
        data = data.groupby(data.carrier)[bl].sum().reset_index()
        data["component"] = "generator"
        data["nuts"] = map_nuts.nuts[bl]
        data["scenario_name"] = "eGon2035"

        # According to NEP, each heat pump has 5 kW_el installed capacity
        # source: Entwurf des Szenariorahmens NEP 2035, version 2021, page 47
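        # The NEP column presumably gives the number of heat pumps, so
        # multiplying by 5e-6 yields the installed capacity in GW
        # (5 kW = 5e-6 GW), matching the unit of the other carriers.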
        data.loc[data.carrier == "residential_rural_heat_pump", bl] *= 5e-6
        data.loc[
            data.carrier == "residential_rural_heat_pump", "component"
        ] = "link"

        data = data.rename(columns={bl: "capacity"})

        # convert GW to MW
        data.capacity *= 1e3

        insert_data = insert_data.append(data)

    # Get aggregated capacities from the NEP power plant list for certain carriers

    carriers = ["oil", "other_non_renewable", "pumped_hydro"]

    capacities_list = aggr_nep_capacities(carriers)

    # Filter by carrier
    updated = insert_data[insert_data["carrier"].isin(carriers)]

    # Merge to replace capacities for carriers "oil", "other_non_renewable" and
    # "pumped_hydro"
    updated = (
        updated.merge(capacities_list, on=["carrier", "nuts"], how="left")
        .fillna(0)
        .drop(["capacity"], axis=1)
        .rename(columns={"c2035_capacity": "capacity"})
    )

    # Remove updated entries from insert_data
    original = insert_data[~insert_data["carrier"].isin(carriers)]

    # Join dfs
    insert_data = pd.concat([original, updated])

    # Insert data to db
    insert_data.to_sql(
        targets["scenario_capacities"]["table"],
        engine,
        schema=targets["scenario_capacities"]["schema"],
        if_exists="append",
        index=insert_data.index,
    )

    # Add district heating data according to energy and full load hours
    district_heating_input()


def population_share():
    """Calculate the share of population in testmode

    Returns
    -------
    float
        Share of population in testmode

    """

    sources = egon.data.config.datasets()["scenario_input"]["sources"]

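    # Relate the population inside the (possibly clipped) boundary to
    # 80324282, which is presumably the Germany-wide population total of the
    # same Zensus dataset.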
    return (
        pd.read_sql(
            f"""
            SELECT SUM(population)
            FROM {sources['zensus_population']['schema']}.
            {sources['zensus_population']['table']}
            WHERE population>0
            """,
            con=db.engine(),
        )["sum"][0]
        / 80324282
    )


def aggr_nep_capacities(carriers):
    """Aggregates capacities from the NEP power plant list by carrier and
    federal state

    Returns
    -------
    pandas.DataFrame
        DataFrame with capacities per federal state and carrier

    """
    # Get list of power plants from the NEP
    nep_capacities = insert_nep_list_powerplants(export=False)[
        ["federal_state", "carrier", "c2035_capacity"]
    ]

    # Sum up capacities per federal state and carrier
    capacities_list = (
        nep_capacities.groupby(["federal_state", "carrier"])["c2035_capacity"]
        .sum()
        .to_frame()
        .reset_index()
    )

    # Neglect entries with carriers not in the argument list
    capacities_list = capacities_list[capacities_list.carrier.isin(carriers)]

    # Include NUTS code
    capacities_list["nuts"] = capacities_list.federal_state.map(nuts_mapping())

    # Drop entries for foreign plants (nan values) and the federal_state column
    capacities_list = capacities_list.dropna(subset=["nuts"]).drop(
        columns=["federal_state"]
    )

    return capacities_list


def map_carrier():
    """Map carriers from NEP and Marktstammdatenregister to carriers from eGon

    Returns
    -------
    pandas.Series
        List of mapped carriers

    """
    return pd.Series(
        data={
            "Abfall": "others",
            "Erdgas": "gas",
            "Sonstige\nEnergieträger": "others",
            "Steinkohle": "coal",
            "Kuppelgase": "gas",
            "Mineralöl-\nprodukte": "oil",
            "Braunkohle": "lignite",
            "Waerme": "others",
            "Mineraloelprodukte": "oil",
            "NichtBiogenerAbfall": "others",
            "AndereGase": "gas",
            "Sonstige_Energietraeger": "others",
            "Kernenergie": "nuclear",
            "Pumpspeicher": "pumped_hydro",
            "Mineralöl-\nProdukte": "oil",
        }
    )


def insert_nep_list_powerplants(export=True):
    """Insert the list of conventional power plants attached to the approval
    of the scenario report by BNetzA

    Parameters
    ----------
    export : bool
        Choose if the NEP list should be exported to the database.
        The default is True.
        If export=False, a DataFrame is returned instead.

    Returns
    -------
    kw_liste_nep : pandas.DataFrame
        List of conventional power plants from the NEP if export=False
    """

    sources = egon.data.config.datasets()["scenario_input"]["sources"]
    targets = egon.data.config.datasets()["scenario_input"]["targets"]

    # Connect to local database
    engine = db.engine()

    # Read in data from the csv file
    target_file = (
        Path(".")
        / "data_bundle_egon_data"
        / "nep2035_version2021"
        / sources["eGon2035"]["list_conv_pp"]
    )

    kw_liste_nep = pd.read_csv(target_file, delimiter=";", decimal=",")

    # Adjust column names
    kw_liste_nep = kw_liste_nep.rename(
        columns={
            "BNetzA-ID": "bnetza_id",
            "Kraftwerksname": "name",
            "Blockname": "name_unit",
            "Energieträger": "carrier_nep",
            "KWK\nJa/Nein": "chp",
            "PLZ": "postcode",
            "Ort": "city",
            "Bundesland/\nLand": "federal_state",
            "Inbetrieb-\nnahmejahr": "commissioned",
            "Status": "status",
            "el. Leistung\n06.02.2020": "capacity",
            "A 2035:\nKWK-Ersatz": "a2035_chp",
            "A 2035:\nLeistung": "a2035_capacity",
            "B 2035\nKWK-Ersatz": "b2035_chp",
            "B 2035:\nLeistung": "b2035_capacity",
            "C 2035:\nKWK-Ersatz": "c2035_chp",
            "C 2035:\nLeistung": "c2035_capacity",
            "B 2040:\nKWK-Ersatz": "b2040_chp",
            "B 2040:\nLeistung": "b2040_capacity",
        }
    )

    # Cut data to federal state if in testmode
    boundary = settings()["egon-data"]["--dataset-boundary"]
    if boundary != "Everything":
        map_states = {
            "Baden-Württemberg": "BW",
            "Nordrhein-Westfalen": "NW",
            "Hessen": "HE",
            "Brandenburg": "BB",
            "Bremen": "HB",
            "Rheinland-Pfalz": "RP",
            "Sachsen-Anhalt": "ST",
            "Schleswig-Holstein": "SH",
            "Mecklenburg-Vorpommern": "MV",
            "Thüringen": "TH",
            "Niedersachsen": "NI",
            "Sachsen": "SN",
            "Hamburg": "HH",
            "Saarland": "SL",
            "Berlin": "BE",
            "Bayern": "BY",
        }

        kw_liste_nep = kw_liste_nep[
            kw_liste_nep.federal_state.isin([map_states[boundary], np.nan])
        ]

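        # Plants without a federal state entry cannot be assigned to the test
        # region, so their capacities are scaled by the region's population
        # share.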
        for col in [
            "capacity",
            "a2035_capacity",
            "b2035_capacity",
            "c2035_capacity",
            "b2040_capacity",
        ]:
            kw_liste_nep.loc[
                kw_liste_nep[kw_liste_nep.federal_state.isnull()].index, col
            ] *= population_share()

    kw_liste_nep["carrier"] = map_carrier()[kw_liste_nep.carrier_nep].values

    if export is True:
        # Insert data to db
        kw_liste_nep.to_sql(
            targets["nep_conventional_powerplants"]["table"],
            engine,
            schema=targets["nep_conventional_powerplants"]["schema"],
            if_exists="replace",
        )
    else:
        return kw_liste_nep


def district_heating_input():
    """Imports data for district heating networks in Germany

    Returns
    -------
    None.

    """

    sources = egon.data.config.datasets()["scenario_input"]["sources"]

    # Import data to dataframe
    file = (
        Path(".")
        / "data_bundle_egon_data"
        / "nep2035_version2021"
        / sources["eGon2035"]["capacities"]
    )
    df = pd.read_excel(
        file, sheet_name="Kurzstudie_KWK", dtype={"Wert": float}
    )
    df.set_index(["Energietraeger", "Name"], inplace=True)

    # Scale values to population share in testmode
    if settings()["egon-data"]["--dataset-boundary"] != "Everything":
        df.loc[
            pd.IndexSlice[:, "Fernwaermeerzeugung"], "Wert"
        ] *= population_share()

    # Connect to database
    engine = db.engine()
    session = sessionmaker(bind=engine)()

    # Insert heat pumps and resistive heaters as links
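    # Capacity in MW_el: annual district heat generation
    # ("Fernwaermeerzeugung", presumably given in TWh, hence the factor 1e6 to
    # MWh) divided by full-load hours ("Volllaststunden") and the conversion
    # efficiency or COP ("Wirkungsgrad").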
    for c in ["Grosswaermepumpe", "Elektrodenheizkessel"]:
        entry = EgonScenarioCapacities(
            component="link",
            scenario_name="eGon2035",
            nuts="DE",
            carrier="urban_central_"
            + ("heat_pump" if c == "Grosswaermepumpe" else "resistive_heater"),
            capacity=df.loc[(c, "Fernwaermeerzeugung"), "Wert"]
            * 1e6
            / df.loc[(c, "Volllaststunden"), "Wert"]
            / df.loc[(c, "Wirkungsgrad"), "Wert"],
        )

        session.add(entry)

    # Insert solar and geothermal as generators
    for c in ["Geothermie", "Solarthermie"]:
        entry = EgonScenarioCapacities(
            component="generator",
            scenario_name="eGon2035",
            nuts="DE",
            carrier="urban_central_"
            + (
                "solar_thermal_collector"
                if c == "Solarthermie"
                else "geo_thermal"
            ),
            capacity=df.loc[(c, "Fernwaermeerzeugung"), "Wert"]
            * 1e6
            / df.loc[(c, "Volllaststunden"), "Wert"],
        )

        session.add(entry)

    session.commit()


def insert_data_nep():
    """Overall function for importing scenario input data for the eGon2035
    scenario

    Returns
    -------
    None.

    """

    insert_nep_list_powerplants(export=True)

    insert_capacities_per_federal_state_nep()


def eGon100_capacities():
    """Inserts installed capacities for the eGon100RE scenario

    Returns
    -------
    None.

    """

    sources = egon.data.config.datasets()["scenario_input"]["sources"]
    targets = egon.data.config.datasets()["scenario_input"]["targets"]

    # Read in installed capacities
    execute_pypsa_eur_sec = False
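    # Hard-coded to False: capacities are taken from the pre-computed
    # pypsa-eur-sec results in the data bundle instead of a fresh run.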
    cwd = Path(".")

    if execute_pypsa_eur_sec:
        filepath = cwd / "run-pypsa-eur-sec"
        pypsa_eur_sec_repos = filepath / "pypsa-eur-sec"
        # Read YAML file
        pes_egonconfig = pypsa_eur_sec_repos / "config_egon.yaml"
        with open(pes_egonconfig, "r") as stream:
            data_config = yaml.safe_load(stream)

        target_file = (
            pypsa_eur_sec_repos
            / "results"
            / data_config["run"]
            / "csvs"
            / sources["eGon100RE"]["capacities"]
        )

    else:
        target_file = (
            cwd
            / "data_bundle_egon_data"
            / "pypsa_eur_sec"
            / "2022-07-26-egondata-integration"
            / "csvs"
            / sources["eGon100RE"]["capacities"]
        )

    df = pd.read_csv(target_file, skiprows=5)
    df.columns = ["component", "country", "carrier", "p_nom"]

    df.set_index("carrier", inplace=True)

    df = df[df.country.str[:2] == "DE"]

    # Drop country column
    df.drop("country", axis=1, inplace=True)

    # Drop components which will be optimized in eGo
    unused_carrier = [
        "BEV charger",
        "DAC",
        "H2 Electrolysis",
        "electricity distribution grid",
        "home battery charger",
        "home battery discharger",
        "H2",
        "Li ion",
        "home battery",
        "residential rural water tanks charger",
        "residential rural water tanks discharger",
        "services rural water tanks charger",
        "services rural water tanks discharger",
        "residential rural water tanks",
        "services rural water tanks",
        "urban central water tanks",
        "urban central water tanks charger",
        "urban central water tanks discharger",
        "H2 Fuel Cell",
    ]

    df = df[~df.index.isin(unused_carrier)]

    df.index = df.index.str.replace(" ", "_")

    # Aggregate offshore wind
    df = df.append(
        pd.DataFrame(
            index=["wind_offshore"],
            data={
                "p_nom": (df.p_nom["offwind-ac"] + df.p_nom["offwind-dc"]),
                "component": df.component["offwind-ac"],
            },
        )
    )
    df = df.drop(["offwind-ac", "offwind-dc"])

    # Aggregate technologies with and without carbon capture (CC)
    for carrier in ["SMR", "urban_central_gas_CHP"]:
        df.p_nom[carrier] += df.p_nom[f"{carrier}_CC"]
        df = df.drop([f"{carrier}_CC"])

    # Aggregate residential and services rural heat supply
    for merge_carrier in [
        "rural_resistive_heater",
        "rural_ground_heat_pump",
        "rural_gas_boiler",
        "rural_solar_thermal",
    ]:
        if f"residential_{merge_carrier}" in df.index:
            df = df.append(
                pd.DataFrame(
                    index=[merge_carrier],
                    data={
                        "p_nom": (
                            df.p_nom[f"residential_{merge_carrier}"]
                            + df.p_nom[f"services_{merge_carrier}"]
                        ),
                        "component": df.component[
                            f"residential_{merge_carrier}"
                        ],
                    },
                )
            )
            df = df.drop(
                [f"residential_{merge_carrier}", f"services_{merge_carrier}"]
            )

    # Rename carriers
    df.rename(
        {
            "onwind": "wind_onshore",
            "ror": "run_of_river",
            "PHS": "pumped_hydro",
            "OCGT": "gas",
            "rural_ground_heat_pump": "residential_rural_heat_pump",
            "urban_central_air_heat_pump": "urban_central_heat_pump",
            "urban_central_solar_thermal": (
                "urban_central_solar_thermal_collector"
            ),
        },
        inplace=True,
    )

    # Reset index
    df = df.reset_index()

    # Rename columns
    df.rename(
        {"p_nom": "capacity", "index": "carrier"}, axis="columns", inplace=True
    )

    df["scenario_name"] = "eGon100RE"
    df["nuts"] = "DE"

    db.execute_sql(
        f"""
        DELETE FROM
        {targets['scenario_capacities']['schema']}.{targets['scenario_capacities']['table']}
        WHERE scenario_name='eGon100RE'
        """
    )

    df.to_sql(
        targets["scenario_capacities"]["table"],
        schema=targets["scenario_capacities"]["schema"],
        con=db.engine(),
        if_exists="append",
        index=False,
    )


def add_metadata():
    """Add metadata to supply.egon_scenario_capacities

    Returns
    -------
    None.

    """

    # Import column names and datatypes
    fields = pd.DataFrame(
        generate_resource_fields_from_sqla_model(EgonScenarioCapacities)
    ).set_index("name")

    # Set descriptions and units
    fields.loc["index", "description"] = "Index"
    fields.loc[
        "component", "description"
    ] = "Name of representative PyPSA component"
    fields.loc["carrier", "description"] = "Name of carrier"
    fields.loc["capacity", "description"] = "Installed capacity"
    fields.loc["capacity", "unit"] = "MW"
    fields.loc[
        "nuts", "description"
    ] = "NUTS region, either federal state or Germany"
    fields.loc[
        "scenario_name", "description"
    ] = "Name of corresponding eGon scenario"

    # Reformat pandas.DataFrame to dict
    fields = fields.reset_index().to_dict(orient="records")

    meta = {
        "name": "supply.egon_scenario_capacities",
        "title": "eGon scenario capacities",
        "id": "WILL_BE_SET_AT_PUBLICATION",
        "description": "Installed capacities of scenarios used in the eGon project",
        "language": ["de-DE"],
        "publicationDate": datetime.date.today().isoformat(),
        "context": context(),
        "spatial": {
            "location": None,
            "extent": "Germany",
            "resolution": None,
        },
        "sources": [
            sources()["nep2021"],
            sources()["vg250"],
            sources()["zensus"],
            sources()["egon-data"],
        ],
        "licenses": [
            license_ccby(
                "© Übertragungsnetzbetreiber; "
                "© Bundesamt für Kartographie und Geodäsie 2020 (Daten verändert); "
                "© Statistische Ämter des Bundes und der Länder 2014",
                "© Jonathan Amme, Clara Büttner, Ilka Cußmann, Julian Endres, Carlos Epia, Stephan Günther, Ulf Müller, Amélia Nadal, Guido Pleßmann, Francesco Witte",
            )
        ],
        "contributors": [
            {
                "title": "Clara Büttner",
                "email": "http://github.com/ClaraBuettner",
                "date": time.strftime("%Y-%m-%d"),
                "object": None,
                "comment": "Imported data",
            },
        ],
        "resources": [
            {
                "profile": "tabular-data-resource",
                "name": "supply.egon_scenario_capacities",
                "path": None,
                "format": "PostgreSQL",
                "encoding": "UTF-8",
                "schema": {
                    "fields": fields,
                    "primaryKey": ["index"],
                    "foreignKeys": [],
                },
                "dialect": {"delimiter": None, "decimalSeparator": "."},
            }
        ],
        "metaMetadata": meta_metadata(),
    }

    # Create json dump
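    # The dump is wrapped in single quotes, presumably so that db.submit_comment
    # can embed it directly as an SQL string literal.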
    meta_json = "'" + json.dumps(meta) + "'"

    # Add metadata as a comment to the table
    db.submit_comment(
        meta_json,
        EgonScenarioCapacities.__table__.schema,
        EgonScenarioCapacities.__table__.name,
    )