Passed
Pull Request — dev (#823)
by
unknown
01:45
created

insert_capacities_per_federal_state_nep()   C

Complexity

Conditions 6

Size

Total Lines 183
Code Lines 95

Duplication

Lines 0
Ratio 0 %

Importance

Changes 0
Metric Value
eloc 95
dl 0
loc 183
rs 6.2303
c 0
b 0
f 0
cc 6
nop 0

How to fix   Long Method   

Long Method

Small methods make your code easier to understand, in particular if combined with a good name. Besides, if your method is small, finding a good name is usually much easier.

For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.

Commonly applied refactorings include:

1
"""The central module containing all code dealing with importing data from
2
Netzentwicklungsplan 2035, Version 2031, Szenario C
3
"""
4
5
from pathlib import Path
6
7
from sqlalchemy import Column, Float, Integer, String
8
from sqlalchemy.ext.declarative import declarative_base
9
from sqlalchemy.orm import sessionmaker
10
import numpy as np
11
import pandas as pd
12
import yaml
13
14
from egon.data import db
15
from egon.data.config import settings
16
from egon.data.datasets import Dataset
17
import egon.data.config
18
19
### will be later imported from another file ###
20
Base = declarative_base()
21
22
23
class EgonScenarioCapacities(Base):
    """ORM model for table ``supply.egon_scenario_capacities``.

    Stores one installed-capacity value per component, carrier, region
    and scenario.
    """

    __tablename__ = "egon_scenario_capacities"
    __table_args__ = {"schema": "supply"}
    index = Column(Integer, primary_key=True)
    component = Column(String(25))  # e.g. 'generator' or 'link'
    carrier = Column(String(50))  # energy carrier, e.g. 'wind_onshore'
    capacity = Column(Float)  # installed capacity (values inserted below are in MW)
    nuts = Column(String(12))  # NUTS code of the region, e.g. 'DE1' or 'DE'
    scenario_name = Column(String(50))  # e.g. 'eGon2035' or 'eGon100RE'
32
33
34
class NEP2021ConvPowerPlants(Base):
    """ORM model for table ``supply.egon_nep_2021_conventional_powerplants``.

    Holds the list of conventional power plants from the NEP 2021
    scenario report, including capacities for the different NEP
    scenarios (A/B/C 2035, B 2040).
    """

    __tablename__ = "egon_nep_2021_conventional_powerplants"
    __table_args__ = {"schema": "supply"}
    index = Column(String(50), primary_key=True)
    bnetza_id = Column(String(50))  # BNetzA power plant identifier
    name = Column(String(100))  # power plant name
    name_unit = Column(String(50))  # name of the unit/block
    carrier_nep = Column(String(50))  # carrier name as used in the NEP list
    carrier = Column(String(12))  # carrier mapped to eGon names (see map_carrier)
    chp = Column(String(12))  # CHP flag ('Ja'/'Nein' in the source data)
    postcode = Column(String(12))
    city = Column(String(50))
    federal_state = Column(String(12))  # NaN for plants outside Germany
    commissioned = Column(String(12))  # year of commissioning
    status = Column(String(50))
    capacity = Column(Float)  # electrical capacity as of 06.02.2020
    a2035_chp = Column(String(12))  # scenario A 2035: CHP replacement flag
    a2035_capacity = Column(Float)  # scenario A 2035: capacity
    b2035_chp = Column(String(12))  # scenario B 2035: CHP replacement flag
    b2035_capacity = Column(Float)  # scenario B 2035: capacity
    c2035_chp = Column(String(12))  # scenario C 2035: CHP replacement flag
    c2035_capacity = Column(Float)  # scenario C 2035: capacity
    b2040_chp = Column(String(12))  # scenario B 2040: CHP replacement flag
    b2040_capacity = Column(Float)  # scenario B 2040: capacity
58
59
60
class ScenarioCapacities(Dataset):
    """Dataset wiring up the scenario-capacity import tasks.

    Runs table creation, the NEP 2035 (eGon2035) import and the
    pypsa-eur-sec (eGon100RE) import in sequence.
    """

    def __init__(self, dependencies):
        super().__init__(
            name="ScenarioCapacities",
            version="0.0.11",
            dependencies=dependencies,
            tasks=(create_table, insert_data_nep, eGon100_capacities),
        )
68
69
70
def create_table():
    """Create input tables for scenario setup.

    Ensures the ``supply`` schema exists, then drops and re-creates the
    scenario-capacities and NEP-power-plant tables so each run starts
    from empty tables.

    Returns
    -------
    None.

    """

    engine = db.engine()
    db.execute_sql("CREATE SCHEMA IF NOT EXISTS supply;")

    tables = (
        EgonScenarioCapacities.__table__,
        NEP2021ConvPowerPlants.__table__,
    )

    # Drop both tables first, then create them again from scratch
    for table in tables:
        table.drop(bind=engine, checkfirst=True)
    for table in tables:
        table.create(bind=engine, checkfirst=True)
85
86
87
def nuts_mapping():
    """Map German federal state abbreviations to NUTS-1 codes.

    Returns
    -------
    dict
        Mapping from two-letter federal state code to NUTS-1 code.

    """
    return {
        "BW": "DE1",
        "BY": "DE2",
        "BE": "DE3",
        "BB": "DE4",
        "HB": "DE5",
        "HH": "DE6",
        "HE": "DE7",
        "MV": "DE8",
        "NI": "DE9",
        "NW": "DEA",
        "RP": "DEB",
        "SL": "DEC",
        "SN": "DED",
        "ST": "DEE",
        "SH": "DEF",
        "TH": "DEG",
    }
109
110
111
def insert_capacities_per_federal_state_nep():
    """Inserts installed capacities per federal state according to
    NEP 2035 (version 2021), scenario 2035 C.

    Reads the capacity tables from the NEP data bundle, distributes
    national values to federal states where the NEP does not provide a
    distribution, replaces selected carriers by values aggregated from
    the NEP power plant list and writes the result to the scenario
    capacities table. Finally, district heating data is added.

    Returns
    -------
    None.

    """

    sources = egon.data.config.datasets()["scenario_input"]["sources"]
    targets = egon.data.config.datasets()["scenario_input"]["targets"]

    # Connect to local database
    engine = db.engine()

    # Delete rows if already exist
    db.execute_sql(
        f"""
        DELETE FROM
        {targets['scenario_capacities']['schema']}.{targets['scenario_capacities']['table']}
        WHERE scenario_name = 'eGon2035'
        AND nuts != 'DE'
        """
    )

    # read-in installed capacities per federal state of germany
    target_file = (
        Path(".")
        / "data_bundle_egon_data"
        / "nep2035_version2021"
        / sources["eGon2035"]["capacities"]
    )

    df = pd.read_excel(
        target_file,
        sheet_name="1.Entwurf_NEP2035_V2021",
        index_col="Unnamed: 0",
    )

    df_draft = pd.read_excel(
        target_file,
        sheet_name="Entwurf_des_Szenariorahmens",
        index_col="Unnamed: 0",
    )

    # Overwrite wind offshore capacities with more accurate values from
    # the grid-connection-point sheet
    _overwrite_wind_offshore_capacities(df, target_file)

    # sort NEP-carriers: map NEP carrier names to eGon carrier names
    rename_carrier = {
        "Wind onshore": "wind_onshore",
        "Wind offshore": "wind_offshore",
        "sonstige Konventionelle": "other_non_renewable",
        "Speicherwasser": "reservoir",
        "Laufwasser": "run_of_river",
        "Biomasse": "biomass",
        "Erdgas": "gas",
        "Kuppelgas": "gas",
        "PV (Aufdach)": "solar_rooftop",
        "PV (Freiflaeche)": "solar",
        "Pumpspeicher": "pumped_hydro",
        "sonstige EE": "other_renewable",
        "Oel": "oil",
        "Haushaltswaermepumpen": "residential_rural_heat_pump",
        "KWK < 10 MW": "small_chp",
    }
    #'Elektromobilitaet gesamt': 'transport',
    # 'Elektromobilitaet privat': 'transport'}

    # nuts1 to federal state in Germany
    map_nuts = pd.read_sql(
        f"""
        SELECT DISTINCT ON (nuts) gen, nuts
        FROM {sources['boundaries']['schema']}.{sources['boundaries']['table']}
        """,
        engine,
        index_col="gen",
    )

    # Capacities per federal state. DataFrame.append was removed in
    # pandas 2.0, therefore pd.concat is used.
    insert_data = pd.concat(
        [
            _federal_state_data(
                df, df_draft, bl, map_nuts.nuts[bl], rename_carrier
            )
            for bl in map_nuts.index
        ]
    )

    # Get aggregated capacities from nep's power plant list for certain carrier
    carriers = ["oil", "other_non_renewable", "pumped_hydro"]

    capacities_list = aggr_nep_capacities(carriers)

    # Filter by carrier
    updated = insert_data[insert_data["carrier"].isin(carriers)]

    # Merge to replace capacities for carriers "oil",
    # "other_non_renewable" and "pumped_hydro"
    updated = (
        updated.merge(capacities_list, on=["carrier", "nuts"], how="left")
        .fillna(0)
        .drop(["capacity"], axis=1)
        .rename(columns={"c2035_capacity": "capacity"})
    )

    # Remove updated entries from df
    original = insert_data[~insert_data["carrier"].isin(carriers)]

    # Join dfs
    insert_data = pd.concat([original, updated])

    # Insert data to db
    insert_data.to_sql(
        targets["scenario_capacities"]["table"],
        engine,
        schema=targets["scenario_capacities"]["schema"],
        if_exists="append",
        # NOTE(review): `index` is documented as a bool; passing the
        # index object is truthy and therefore writes the index column —
        # kept as in the original, confirm this is intended.
        index=insert_data.index,
    )

    # Add district heating data according to energy and full load hours
    district_heating_input()


def _overwrite_wind_offshore_capacities(df, target_file):
    """Overwrite 'Wind offshore' values in ``df`` in place.

    Uses capacities per federal state aggregated from the NEP wind
    offshore sheet; values are given in MW and converted to GW to match
    the units of ``df``.
    """
    # Import data on wind offshore capacities
    df_windoff = pd.read_excel(
        target_file,
        sheet_name="WInd_Offshore_NEP",
    ).dropna(subset=["Bundesland", "Netzverknuepfungspunkt"])

    # Remove trailing whitespace from column Bundesland
    df_windoff["Bundesland"] = df_windoff["Bundesland"].str.strip()

    # Group and sum capacities per federal state
    df_windoff_fs = (
        df_windoff[["Bundesland", "C 2035"]].groupby(["Bundesland"]).sum()
    )

    # Overwrite capacities for federal states with an assigned wind
    # offshore capacity (MW -> GW)
    for state in df_windoff_fs.index:
        df.at["Wind offshore", state] = (
            df_windoff_fs.at[state, "C 2035"] / 1000
        )


def _federal_state_data(df, df_draft, bl, nuts, rename_carrier):
    """Return eGon2035 capacities for a single federal state ``bl``.

    Parameters
    ----------
    df : pandas.DataFrame
        Capacities from the approved NEP scenario.
    df_draft : pandas.DataFrame
        Capacities from the draft of the scenario report, used to
        distribute national values to federal states.
    bl : str
        Name of the federal state (column in ``df``).
    nuts : str
        NUTS-1 code of the federal state.
    rename_carrier : dict
        Mapping from NEP carrier names to eGon carrier names.

    Returns
    -------
    pandas.DataFrame
        Capacities (in MW) with columns carrier, capacity, component,
        nuts and scenario_name.
    """
    data = pd.DataFrame(df[bl])

    # if distribution to federal states is not provided,
    # use data from draft of scenario report
    scaled_carriers = [
        "Haushaltswaermepumpen",
        "PV (Aufdach)",
        "PV (Freiflaeche)",
    ]
    for c in scaled_carriers:
        data.loc[c, bl] = (
            df_draft.loc[c, bl]
            / df_draft.loc[c, "Summe"]
            * df.loc[c, "Summe"]
        )

    # split hydro into run of river and reservoir
    # according to draft of scenario report
    if data.loc["Lauf- und Speicherwasser", bl] > 0:
        for c in ["Speicherwasser", "Laufwasser"]:
            data.loc[c, bl] = (
                data.loc["Lauf- und Speicherwasser", bl]
                * df_draft.loc[c, bl]
                / df_draft.loc[["Speicherwasser", "Laufwasser"], bl].sum()
            )

    data["carrier"] = data.index.map(rename_carrier)
    data = data.groupby(data.carrier)[bl].sum().reset_index()
    data["component"] = "generator"
    data["nuts"] = nuts
    data["scenario_name"] = "eGon2035"

    # According to NEP, each heatpump has 3kW_el installed capacity
    data.loc[data.carrier == "residential_rural_heat_pump", bl] *= 3e-6
    data.loc[
        data.carrier == "residential_rural_heat_pump", "component"
    ] = "link"

    data = data.rename(columns={bl: "capacity"})

    # convert GW to MW
    data.capacity *= 1e3

    return data
294
295
296
def population_share():
    """Calculate share of population in testmode.

    Divides the population inside the dataset boundary (census cells
    with population > 0) by Germany's total population.

    Returns
    -------
    float
        Share of population in testmode

    """

    sources = egon.data.config.datasets()["scenario_input"]["sources"]

    # Total population of Germany used as the reference value
    german_population = 80324282

    query = f"""
            SELECT SUM(population)
            FROM {sources['zensus_population']['schema']}.{sources['zensus_population']['table']}
            WHERE population>0
            """

    population_in_boundary = pd.read_sql(query, con=db.engine())["sum"][0]

    return population_in_boundary / german_population
319
320
321
def aggr_nep_capacities(carriers):
    """Aggregates capacities from NEP power plants list by carrier and
    federal state.

    Parameters
    ----------
    carriers : list
        Carriers to keep in the result.

    Returns
    -------
    pandas.Dataframe
        Dataframe with capacities per federal state and carrier

    """
    # Get list of power plants from nep (scenario C 2035 capacities)
    plants = insert_nep_list_powerplants(export=False)[
        ["federal_state", "carrier", "c2035_capacity"]
    ]

    # Sum up capacities per federal state and carrier
    aggregated = (
        plants.groupby(["federal_state", "carrier"])["c2035_capacity"]
        .sum()
        .to_frame()
        .reset_index()
    )

    # Neglect entries with carriers not in argument
    aggregated = aggregated[aggregated.carrier.isin(carriers)]

    # Include NUTS code for each federal state
    aggregated["nuts"] = aggregated.federal_state.map(nuts_mapping())

    # Drop entries for foreign plants (no NUTS code) and the
    # federal_state column, which is no longer needed
    return aggregated.dropna(subset=["nuts"]).drop(columns=["federal_state"])
356
357
358
def map_carrier():
    """Map carriers from NEP and Marktstammdatenregister to carriers from eGon.

    Returns
    -------
    pandas.Series
        List of mapped carriers

    """
    # Source carrier names (NEP / MaStR spellings) -> eGon carrier names
    carrier_mapping = {
        "Abfall": "other_non_renewable",
        "Erdgas": "gas",
        "Sonstige\nEnergieträger": "other_non_renewable",
        "Steinkohle": "coal",
        "Kuppelgase": "gas",
        "Mineralöl-\nprodukte": "oil",
        "Braunkohle": "lignite",
        "Waerme": "other_non_renewable",
        "Mineraloelprodukte": "oil",
        "NichtBiogenerAbfall": "other_non_renewable",
        "AndereGase": "gas",
        "Sonstige_Energietraeger": "other_non_renewable",
        "Kernenergie": "nuclear",
        "Pumpspeicher": "pumped_hydro",
        "Mineralöl-\nProdukte": "oil",
    }

    return pd.Series(data=carrier_mapping)
386
387
388
def insert_nep_list_powerplants(export=True):
    """Insert list of conventional powerplants attached to the approval
    of the scenario report by BNetzA

    Parameters
    ----------
    export : bool
        Choose if nep list should be exported to the data
        base. The default is True.
        If export=False a data frame will be returned

    Returns
    -------
    kw_liste_nep : pandas.DataFrame
        List of conventional power plants from nep if export=False
    """

    sources = egon.data.config.datasets()["scenario_input"]["sources"]
    targets = egon.data.config.datasets()["scenario_input"]["targets"]

    # Connect to local database
    engine = db.engine()

    # Read-in data from csv-file
    target_file = (
        Path(".")
        / "data_bundle_egon_data"
        / "nep2035_version2021"
        / sources["eGon2035"]["list_conv_pp"]
    )

    # German-style csv: semicolon-separated, decimal comma
    kw_liste_nep = pd.read_csv(target_file, delimiter=";", decimal=",")

    # Adjust column names (German source headers -> table column names)
    kw_liste_nep = kw_liste_nep.rename(
        columns={
            "BNetzA-ID": "bnetza_id",
            "Kraftwerksname": "name",
            "Blockname": "name_unit",
            "Energieträger": "carrier_nep",
            "KWK\nJa/Nein": "chp",
            "PLZ": "postcode",
            "Ort": "city",
            "Bundesland/\nLand": "federal_state",
            "Inbetrieb-\nnahmejahr": "commissioned",
            "Status": "status",
            "el. Leistung\n06.02.2020": "capacity",
            "A 2035:\nKWK-Ersatz": "a2035_chp",
            "A 2035:\nLeistung": "a2035_capacity",
            "B 2035\nKWK-Ersatz": "b2035_chp",
            "B 2035:\nLeistung": "b2035_capacity",
            "C 2035:\nKWK-Ersatz": "c2035_chp",
            "C 2035:\nLeistung": "c2035_capacity",
            "B 2040:\nKWK-Ersatz": "b2040_chp",
            "B 2040:\nLeistung": "b2040_capacity",
        }
    )

    # Cut data to federal state if in testmode
    boundary = settings()["egon-data"]["--dataset-boundary"]
    if boundary != "Everything":
        # Full federal state name (boundary setting) -> abbreviation
        map_states = {
            "Baden-Württemberg": "BW",
            "Nordrhein-Westfalen": "NW",
            "Hessen": "HE",
            "Brandenburg": "BB",
            "Bremen": "HB",
            "Rheinland-Pfalz": "RP",
            "Sachsen-Anhalt": "ST",
            "Schleswig-Holstein": "SH",
            "Mecklenburg-Vorpommern": "MV",
            "Thüringen": "TH",
            "Niedersachsen": "NI",
            "Sachsen": "SN",
            "Hamburg": "HH",
            "Saarland": "SL",
            "Berlin": "BE",
            "Bayern": "BY",
        }

        # Keep plants in the boundary state; rows with NaN federal_state
        # (presumably plants outside Germany) are kept as well
        kw_liste_nep = kw_liste_nep[
            kw_liste_nep.federal_state.isin([map_states[boundary], np.nan])
        ]

        # Scale capacities of the kept NaN-federal_state plants by the
        # population share of the test boundary
        for col in [
            "capacity",
            "a2035_capacity",
            "b2035_capacity",
            "c2035_capacity",
            "b2040_capacity",
        ]:
            kw_liste_nep.loc[
                kw_liste_nep[kw_liste_nep.federal_state.isnull()].index, col
            ] *= population_share()

    # Translate NEP carrier names to eGon carrier names
    kw_liste_nep["carrier"] = map_carrier()[kw_liste_nep.carrier_nep].values

    if export is True:
        # Insert data to db
        kw_liste_nep.to_sql(
            targets["nep_conventional_powerplants"]["table"],
            engine,
            schema=targets["nep_conventional_powerplants"]["schema"],
            if_exists="replace",
        )
    else:
        return kw_liste_nep
495
496
497
def district_heating_input():
    """Imports data for district heating networks in Germany

    Reads district heating generation, full load hours and (where
    applicable) efficiencies from the NEP data bundle and inserts
    nation-wide capacities for large heat pumps, resistive heaters,
    geothermal and solar thermal units.

    Returns
    -------
    None.

    """

    sources = egon.data.config.datasets()["scenario_input"]["sources"]

    # import data to dataframe
    file = (
        Path(".")
        / "data_bundle_egon_data"
        / "nep2035_version2021"
        / sources["eGon2035"]["capacities"]
    )
    df = pd.read_excel(
        file, sheet_name="Kurzstudie_KWK", dtype={"Wert": float}
    )
    df.set_index(["Energietraeger", "Name"], inplace=True)

    # Scale values to population share in testmode
    if settings()["egon-data"]["--dataset-boundary"] != "Everything":
        df.loc[
            pd.IndexSlice[:, "Fernwaermeerzeugung"], "Wert"
        ] *= population_share()

    # Connect to database
    session = sessionmaker(bind=db.engine())()

    # (source technology, component, eGon carrier suffix,
    #  divide by efficiency?) — heat pumps and resistive heaters are
    # links and include an efficiency, the generators do not
    technologies = [
        ("Grosswaermepumpe", "link", "heat_pump", True),
        ("Elektrodenheizkessel", "link", "resistive_heater", True),
        ("Geothermie", "generator", "geo_thermal", False),
        ("Solarthermie", "generator", "solar_thermal_collector", False),
    ]

    for name, component, suffix, with_efficiency in technologies:
        # capacity = annual generation / full load hours (/ efficiency)
        capacity = (
            df.loc[(name, "Fernwaermeerzeugung"), "Wert"]
            * 1e6
            / df.loc[(name, "Volllaststunden"), "Wert"]
        )
        if with_efficiency:
            capacity /= df.loc[(name, "Wirkungsgrad"), "Wert"]

        session.add(
            EgonScenarioCapacities(
                component=component,
                scenario_name="eGon2035",
                nuts="DE",
                carrier="urban_central_" + suffix,
                capacity=capacity,
            )
        )

    session.commit()
566
567
568
def insert_data_nep():
    """Overall function for importing scenario input data for eGon2035 scenario

    Imports the NEP conventional power plant list into the database and
    then inserts installed capacities per federal state.

    Returns
    -------
    None.

    """

    insert_nep_list_powerplants(export=True)

    insert_capacities_per_federal_state_nep()
580
581
582
def eGon100_capacities():
    """Inserts installed capacities for the eGon100RE scenario

    Reads nodal capacities from pypsa-eur-sec results, keeps German
    buses only, aggregates/renames carriers to eGon conventions and
    writes the result to the scenario capacities table.

    Returns
    -------
    None.

    """

    sources = egon.data.config.datasets()["scenario_input"]["sources"]
    targets = egon.data.config.datasets()["scenario_input"]["targets"]

    # read-in installed capacities
    df = pd.read_csv(_pypsa_eur_sec_capacities_file(sources), skiprows=5)
    df.columns = ["component", "country", "carrier", "p_nom"]

    df.set_index("carrier", inplace=True)

    # Keep German buses only
    df = df[df.country.str[:2] == "DE"]

    # Drop country column
    df.drop("country", axis=1, inplace=True)

    # Drop components which will be optimized in eGo
    unused_carrier = [
        "BEV charger",
        "DAC",
        "H2 Electrolysis",
        "electricity distribution grid",
        "home battery charger",
        "home battery discharger",
        "H2",
        "Li ion",
        "home battery",
        "residential rural water tanks charger",
        "residential rural water tanks discharger",
        "services rural water tanks charger",
        "services rural water tanks discharger",
        "residential rural water tanks",
        "services rural water tanks",
        "urban central water tanks",
        "urban central water tanks charger",
        "urban central water tanks discharger",
        "H2 Fuel Cell",
    ]

    df = df[~df.index.isin(unused_carrier)]

    df.index = df.index.str.replace(" ", "_")

    # Aggregate offshore wind. DataFrame.append was removed in
    # pandas 2.0, therefore pd.concat is used.
    df = pd.concat(
        [
            df,
            pd.DataFrame(
                index=["wind_offshore"],
                data={
                    "p_nom": (df.p_nom["offwind-ac"] + df.p_nom["offwind-dc"]),
                    "component": df.component["offwind-ac"],
                },
            ),
        ]
    )
    df = df.drop(["offwind-ac", "offwind-dc"])

    # Aggregate technologies with and without carbon_capture (CC).
    # Use .loc instead of chained indexing (df.p_nom[carrier] += ...)
    # so the assignment reliably modifies df.
    for carrier in ["SMR", "urban_central_gas_CHP"]:
        df.loc[carrier, "p_nom"] += df.loc[f"{carrier}_CC", "p_nom"]
        df = df.drop([f"{carrier}_CC"])

    # Aggregate residential and services rural heat supply
    for merge_carrier in [
        "rural_resistive_heater",
        "rural_ground_heat_pump",
        "rural_gas_boiler",
        "rural_solar_thermal",
    ]:
        if f"residential_{merge_carrier}" in df.index:
            df = pd.concat(
                [
                    df,
                    pd.DataFrame(
                        index=[merge_carrier],
                        data={
                            "p_nom": (
                                df.p_nom[f"residential_{merge_carrier}"]
                                + df.p_nom[f"services_{merge_carrier}"]
                            ),
                            "component": df.component[
                                f"residential_{merge_carrier}"
                            ],
                        },
                    ),
                ]
            )
            df = df.drop(
                [f"residential_{merge_carrier}", f"services_{merge_carrier}"]
            )

    # Rename carriers to eGon conventions
    df.rename(
        {
            "onwind": "wind_onshore",
            "ror": "run_of_river",
            "PHS": "pumped_hydro",
            "OCGT": "gas",
            "rural_ground_heat_pump": "residential_rural_heat_pump",
            "urban_central_air_heat_pump": "urban_central_heat_pump",
            "urban_central_solar_thermal": "urban_central_solar_thermal_collector",
        },
        inplace=True,
    )

    # Reset index
    df = df.reset_index()

    # Rename columns
    df.rename(
        {"p_nom": "capacity", "index": "carrier"}, axis="columns", inplace=True
    )

    df["scenario_name"] = "eGon100RE"
    df["nuts"] = "DE"

    # Delete rows if already exist
    db.execute_sql(
        f"""
        DELETE FROM
        {targets['scenario_capacities']['schema']}.{targets['scenario_capacities']['table']}
        WHERE scenario_name='eGon100RE'
        """
    )

    # Insert data to db
    df.to_sql(
        targets["scenario_capacities"]["table"],
        schema=targets["scenario_capacities"]["schema"],
        con=db.engine(),
        if_exists="append",
        index=False,
    )


def _pypsa_eur_sec_capacities_file(sources):
    """Return the path to the pypsa-eur-sec nodal capacities csv file.

    When ``execute_pypsa_eur_sec`` is enabled, the run name is read from
    the local pypsa-eur-sec config and the file from that run's results
    is used; otherwise the file shipped in the data bundle is returned.

    Parameters
    ----------
    sources : dict
        The ``scenario_input`` sources section of the dataset config.

    Returns
    -------
    pathlib.Path
        Path to the capacities csv file.
    """
    # Toggle between a local pypsa-eur-sec run and the shipped bundle
    execute_pypsa_eur_sec = False
    cwd = Path(".")

    if execute_pypsa_eur_sec:
        pypsa_eur_sec_repos = cwd / "run-pypsa-eur-sec" / "pypsa-eur-sec"
        # Read YAML file to identify the result folder of the run
        pes_egonconfig = pypsa_eur_sec_repos / "config_egon.yaml"
        with open(pes_egonconfig, "r") as stream:
            data_config = yaml.safe_load(stream)

        return (
            pypsa_eur_sec_repos
            / "results"
            / data_config["run"]
            / "csvs"
            / sources["eGon100RE"]["capacities"]
        )

    return (
        cwd
        / "data_bundle_egon_data"
        / "pypsa_eur_sec"
        / "2022-07-26-egondata-integration"
        / "csvs"
        / sources["eGon100RE"]["capacities"]
    )
742