data.datasets.pypsaeur.read_network()   A
last analyzed

Complexity

Conditions 3

Size

Total Lines 31
Code Lines 21

Duplication

Lines 31
Ratio 100 %

Importance

Changes 0
Metric Value
eloc 21
dl 31
loc 31
rs 9.376
c 0
b 0
f 0
cc 3
nop 1
1
"""The central module containing all code dealing with importing data from
2
the pysa-eur-sec scenario parameter creation
3
"""
4
5
from pathlib import Path
6
from urllib.request import urlretrieve
7
import json
8
import shutil
9
10
from shapely.geometry import LineString
11
import geopandas as gpd
12
import importlib_resources as resources
13
import numpy as np
14
import pandas as pd
15
import pypsa
16
import requests
17
import yaml
18
19
from egon.data import __path__, config, db, logger
20
from egon.data.datasets import Dataset
21
from egon.data.datasets.scenario_parameters import get_sector_parameters
22
from egon.data.datasets.scenario_parameters.parameters import (
23
    annualize_capital_costs,
24
)
25
import egon.data.config
26
import egon.data.subprocess as subproc
27
28
29
class PreparePypsaEur(Dataset):
    """Dataset that downloads pypsa-eur and prepares the network.

    Runs :func:`download` followed by :func:`prepare_network` as its tasks.
    """

    def __init__(self, dependencies):
        super().__init__(
            name="PreparePypsaEur",
            version="0.0.42",
            dependencies=dependencies,
            tasks=(
                download,
                prepare_network,
            ),
        )
40
41
42
class RunPypsaEur(Dataset):
    """Dataset that solves the prepared pypsa-eur network and imports results.

    NOTE(review): the registered dataset name is "SolvePypsaEur" while the
    class is called ``RunPypsaEur`` — presumably intentional; verify against
    pipeline references before renaming either.
    """

    def __init__(self, dependencies):
        super().__init__(
            name="SolvePypsaEur",
            version="0.0.42",
            dependencies=dependencies,
            tasks=(
                prepare_network_2,
                execute,
                solve_network,
                clean_database,
                electrical_neighbours_egon100,
                h2_neighbours_egon2035,
                # Dropped until we decided how we deal with the H2 grid
                # overwrite_H2_pipeline_share,
            ),
        )
59
60
def countries_list():
    """Return the two-letter codes of all countries considered.

    Returns
    -------
    list of str
        Country codes, with Germany ("DE") first.
    """
    return "DE AT CH CZ PL SE NO DK GB NL BE FR LU".split()
76
77
78
def h2_neighbours_egon2035():
    """Insert foreign H2 buses for eGon2035 into grid.egon_etrago_bus.

    Loads the solved pypsa-eur network for the 2035 planning horizon,
    selects the H2 buses in the considered neighbouring countries and
    writes them (including point geometries) into the
    grid.egon_etrago_bus table.  Skipped when 'eGon2035' is not among
    the configured scenarios.

    Returns
    -------
    None.

    """
    if "eGon2035" in config.settings()["egon-data"]["--scenarios"]:
        # Delete buses from previous executions
        db.execute_sql(
            """
            DELETE FROM grid.egon_etrago_bus WHERE carrier = 'H2'
            AND scn_name = 'eGon2035'
            AND country <> 'DE'
            """
        )

        # Load calculated network for eGon2035
        n = read_network(planning_horizon=2035)

        # Filter only H2 buses in selected foreign countries
        h2_bus = n.buses[(n.buses.country != "DE") & (n.buses.carrier == "H2")]
        wanted_countries = countries_list()
        # NOTE(review): buses whose index contains "FR6" are excluded here —
        # presumably a special French bus; confirm against the network data.
        h2_bus = h2_bus[
            (h2_bus.country.isin(wanted_countries))
            & (~h2_bus.index.str.contains("FR6"))
        ]

        # Add geometry column (WGS84 points built from the x/y columns)
        h2_bus = (
            gpd.GeoDataFrame(
                h2_bus, geometry=gpd.points_from_xy(h2_bus.x, h2_bus.y)
            )
            .rename_geometry("geom")
            .set_crs(4326)
        )

        # Adjust dataframe to the database table format
        h2_bus["scn_name"] = "eGon2035"

        bus_id = db.next_etrago_id("bus")  # will be change in PR1287
        ### Delete when PR1287 is merged ###
        bus_id = range(bus_id, bus_id + len(h2_bus.index))
        ####################################
        h2_bus["bus_id"] = bus_id

        # Drop pypsa-eur columns that have no counterpart in the DB table
        h2_bus.drop(
            columns=[
                "unit",
                "control",
                "generator",
                "location",
                "substation_off",
                "substation_lv",
                "sub_network",
            ],
            inplace=True,
        )

        # Connect to local database and write results
        engine = db.engine()

        h2_bus.to_postgis(
            "egon_etrago_bus",
            engine,
            schema="grid",
            if_exists="append",
            index=False,
        )
    else:
        print("eGon2035 is not in the list of scenarios")
152
153
154
def _ensure_dir(path):
    """Create directory *path* (including parents) if it does not exist."""
    path.mkdir(parents=True, exist_ok=True)


def _download_via_requests(url, target):
    """Download *url* to file *target*, failing loudly on HTTP errors."""
    r = requests.get(url)
    # Fail instead of silently writing an HTML error page into the file.
    r.raise_for_status()
    with open(target, "wb") as outfile:
        outfile.write(r.content)


def _clone_and_configure_repo(pypsa_eur_repos, filepath):
    """Clone pypsa-eur at a pinned commit and copy egon-data configs into it."""
    subproc.run(
        [
            "git",
            "clone",
            "--branch",
            "master",
            "https://github.com/PyPSA/pypsa-eur.git",
            pypsa_eur_repos,
        ]
    )

    # Pin the repository to a known-good commit
    subproc.run(
        [
            "git",
            "checkout",
            "2119f4cee05c256509f48d4e9fe0d8fd9e9e3632",
        ],
        cwd=pypsa_eur_repos,
    )

    # NOTE: historic (commented-out) code that patched envs/environment.yaml
    # (gurobipy pin, python<3.12, geopandas<1) was removed here; see the git
    # history if the conda environment needs adjusting again.

    # Copy config file for egon-data to pypsa-eur directory
    shutil.copy(
        Path(
            __path__[0], "datasets", "pypsaeur", "config_prepare.yaml"
        ),
        pypsa_eur_repos / "config" / "config.yaml",
    )

    # Copy custom_extra_functionality.py file for egon-data to pypsa-eur directory
    shutil.copy(
        Path(
            __path__[0],
            "datasets",
            "pypsaeur",
            "custom_extra_functionality.py",
        ),
        pypsa_eur_repos / "data",
    )

    # Write the egon-data specific Snakefile next to the repository
    with open(filepath / "Snakefile", "w") as snakefile:
        snakefile.write(
            resources.read_text(
                "egon.data.datasets.pypsaeur", "Snakefile"
            )
        )


def download():
    """Download pypsa-eur and input data it cannot fetch itself.

    Clones the pypsa-eur repository at a pinned commit, copies egon-data
    specific configuration into it, copies the era5 weather cutout, and
    manually downloads input files (natura, ship density, biomass
    potentials, Global Energy Monitor trackers) whose retrieval does not
    work in the regular snakemake workflow.  Does nothing when the
    ``--run-pypsa-eur`` setting is disabled.

    Returns
    -------
    None
    """
    cwd = Path(".")
    filepath = cwd / "run-pypsa-eur"
    filepath.mkdir(parents=True, exist_ok=True)

    pypsa_eur_repos = filepath / "pypsa-eur"
    if config.settings()["egon-data"]["--run-pypsa-eur"]:
        if not pypsa_eur_repos.exists():
            _clone_and_configure_repo(pypsa_eur_repos, filepath)

        # Copy era5 weather data to folder for pypsaeur
        era5_pypsaeur_path = filepath / "pypsa-eur" / "cutouts"

        if not era5_pypsaeur_path.exists():
            era5_pypsaeur_path.mkdir(parents=True, exist_ok=True)
            copy_from = config.datasets()["era5_weather_data"]["targets"][
                "weather_data"
            ]["path"]
            filename = "europe-2011-era5.nc"
            shutil.copy(
                copy_from + "/" + filename, era5_pypsaeur_path / filename
            )

        # Workaround to download natura, shipdensity and globalenergymonitor
        # data, which is not working in the regular snakemake workflow.
        # The same files are downloaded from the same directory as in pypsa-eur
        # version 0.10 here. It is stored in the folders from pypsa-eur.
        _ensure_dir(filepath / "pypsa-eur" / "resources")
        urlretrieve(
            "https://zenodo.org/record/4706686/files/natura.tiff",
            filepath / "pypsa-eur" / "resources" / "natura.tiff",
        )

        _ensure_dir(filepath / "pypsa-eur" / "data")
        urlretrieve(
            "https://zenodo.org/record/13757228/files/shipdensity_global.zip",
            filepath / "pypsa-eur" / "data" / "shipdensity_global.zip",
        )

        # ENSPRESO biomass potentials are placed where the snakemake
        # workflow expects them (a mirrored zenodo directory layout).
        zenodo_files = (
            filepath
            / "pypsa-eur"
            / "zenodo.org"
            / "records"
            / "13757228"
            / "files"
        )
        _ensure_dir(zenodo_files)
        urlretrieve(
            "https://zenodo.org/records/10356004/files/ENSPRESO_BIOMASS.xlsx",
            zenodo_files / "ENSPRESO_BIOMASS.xlsx",
        )

        # Global Energy Monitor trackers (gas and steel)
        gem_dir = filepath / "pypsa-eur" / "data" / "gem"
        _ensure_dir(gem_dir)

        _download_via_requests(
            "https://tubcloud.tu-berlin.de/s/LMBJQCsN6Ez5cN2/download/"
            "Europe-Gas-Tracker-2024-05.xlsx",
            gem_dir / "Europe-Gas-Tracker-2024-05.xlsx",
        )

        _download_via_requests(
            "https://tubcloud.tu-berlin.de/s/Aqebo3rrQZWKGsG/download/"
            "Global-Steel-Plant-Tracker-April-2024-Standard-Copy-V1.xlsx",
            gem_dir
            / "Global-Steel-Plant-Tracker-April-2024-Standard-Copy-V1.xlsx",
        )

    else:
        print("Pypsa-eur is not executed due to the settings of egon-data")
342
343
344
def prepare_network():
    """Run the pypsa-eur "prepare" snakemake rule and back up the results.

    After the rule has run (and :func:`execute` manipulated the networks),
    the pre-solve network files are copied into a separate folder so the
    state after manipulation but before solving is preserved.  Does
    nothing when the ``--run-pypsa-eur`` setting is disabled.

    Returns
    -------
    None
    """
    cwd = Path(".")
    filepath = cwd / "run-pypsa-eur"

    if config.settings()["egon-data"]["--run-pypsa-eur"]:
        subproc.run(
            [
                "snakemake",
                "-j1",
                "--directory",
                filepath,
                "--snakefile",
                filepath / "Snakefile",
                "--use-conda",
                "--conda-frontend=conda",
                "--cores",
                "8",
                "prepare",
            ]
        )
        execute()

        path = filepath / "pypsa-eur" / "results" / "prenetworks"

        # Backup folder for the manipulated-but-unsolved networks
        path_2 = path / "prenetwork_post-manipulate_pre-solve"
        path_2.mkdir(parents=True, exist_ok=True)

        with open(
            __path__[0] + "/datasets/pypsaeur/config_prepare.yaml", "r"
        ) as stream:
            data_config = yaml.safe_load(stream)

        # One network file per planning horizon; all other scenario
        # dimensions use their first configured value.
        scenario = data_config["scenario"]
        for horizon in scenario["planning_horizons"]:
            nc_file = (
                f"base_s_{scenario['clusters'][0]}"
                f"_l{scenario['ll'][0]}"
                f"_{scenario['opts'][0]}"
                f"_{scenario['sector_opts'][0]}"
                f"_{horizon}.nc"
            )

            shutil.copy(Path(path, nc_file), path_2)

    else:
        print("Pypsa-eur is not executed due to the settings of egon-data")
389
390
391
def prepare_network_2():
    """Copy the solve configuration into pypsa-eur and rerun "prepare"."""
    base_dir = Path(".") / "run-pypsa-eur"

    if not config.settings()["egon-data"]["--run-pypsa-eur"]:
        print("Pypsa-eur is not executed due to the settings of egon-data")
        return

    # Swap in the solve-stage configuration before invoking snakemake
    shutil.copy(
        Path(__path__[0], "datasets", "pypsaeur", "config_solve.yaml"),
        base_dir / "pypsa-eur" / "config" / "config.yaml",
    )

    snakemake_call = [
        "snakemake",
        "-j1",
        "--directory",
        base_dir,
        "--snakefile",
        base_dir / "Snakefile",
        "--use-conda",
        "--conda-frontend=conda",
        "--cores",
        "8",
        "prepare",
    ]
    subproc.run(snakemake_call)
418
419
420
def solve_network():
    """Run the pypsa-eur "solve" rule, postprocess biomass, then "summary"."""
    base_dir = Path(".") / "run-pypsa-eur"

    if not config.settings()["egon-data"]["--run-pypsa-eur"]:
        print("Pypsa-eur is not executed due to the settings of egon-data")
        return

    solve_call = [
        "snakemake",
        "-j1",
        "--cores",
        "8",
        "--directory",
        base_dir,
        "--snakefile",
        base_dir / "Snakefile",
        "--use-conda",
        "--conda-frontend=conda",
        "solve",
    ]
    subproc.run(solve_call)

    # Adjust the 2045 biomass results (helper defined elsewhere in this
    # module) before the summary is built.
    postprocessing_biomass_2045()

    summary_call = [
        "snakemake",
        "-j1",
        "--directory",
        base_dir,
        "--snakefile",
        base_dir / "Snakefile",
        "--use-conda",
        "--conda-frontend=conda",
        "summary",
    ]
    subproc.run(summary_call)
458
459
460 View Code Duplication
def read_network(planning_horizon=2045):
    """Load the solved pypsa-eur network for a given planning horizon.

    When pypsa-eur was run locally (``--run-pypsa-eur``), the file name
    is derived from the solve configuration; otherwise a pre-computed
    network from the egon-data data bundle is used.

    Parameters
    ----------
    planning_horizon : int, optional
        Planning-horizon year of the network file, by default 2045.

    Returns
    -------
    pypsa.Network
        The network read from disk.
    """
    run_locally = config.settings()["egon-data"]["--run-pypsa-eur"]

    if run_locally:
        with open(
            __path__[0] + "/datasets/pypsaeur/config_solve.yaml", "r"
        ) as stream:
            data_config = yaml.safe_load(stream)

        scenario = data_config["scenario"]
        nc_name = (
            f"base_s_{scenario['clusters'][0]}"
            f"_l{scenario['ll'][0]}"
            f"_{scenario['opts'][0]}"
            f"_{scenario['sector_opts'][0]}"
            f"_{planning_horizon}.nc"
        )
        target_file = (
            Path(".")
            / "run-pypsa-eur"
            / "pypsa-eur"
            / "results"
            / data_config["run"]["name"]
            / "postnetworks"
            / nc_name
        )
    else:
        target_file = (
            Path(".")
            / "data_bundle_egon_data"
            / "pypsa_eur"
            / "postnetworks"
            / f"base_s_39_lc1.25__cb40ex0-T-H-I-B-solar+p3-dist1_{planning_horizon}.nc"
        )

    return pypsa.Network(target_file)
491
492
493
def clean_database():
    """Remove all components abroad for eGon100RE of the database

    Remove all components abroad and their associated time series of
    the database for the scenario 'eGon100RE'.

    Parameters
    ----------
    None

    Returns
    -------
    None

    """
    scn_name = "eGon100RE"

    # Components attached to a single bus via their "bus" column
    comp_one_port = ["load", "generator", "store", "storage"]

    # delete existing components and associated timeseries
    for comp in comp_one_port:
        db.execute_sql(
            f"""
            DELETE FROM grid.egon_etrago_{comp}_timeseries
            WHERE {comp}_id IN (
                SELECT {comp}_id FROM grid.egon_etrago_{comp}
                WHERE bus IN (
                    SELECT bus_id FROM grid.egon_etrago_bus
                    WHERE country != 'DE'
                    AND scn_name = '{scn_name}')
                AND scn_name = '{scn_name}'
            );

            DELETE FROM grid.egon_etrago_{comp}
            WHERE bus IN (
                SELECT bus_id FROM grid.egon_etrago_bus
                WHERE country != 'DE'
                AND scn_name = '{scn_name}')
            AND scn_name = '{scn_name}';"""
        )

    # Branch components connect two buses (bus0/bus1); only delete them
    # when BOTH ends sit on foreign buses that were not created by
    # osmtgmod (those keep their ids).
    for comp in ["line", "link"]:
        id_column = f"{comp}_id"
        db.execute_sql(
            f"""
            DELETE FROM grid.egon_etrago_{comp}_timeseries
            WHERE scn_name = '{scn_name}'
            AND {id_column} IN (
                SELECT {id_column} FROM grid.egon_etrago_{comp}
            WHERE "bus0" IN (
            SELECT bus_id FROM grid.egon_etrago_bus
                WHERE country != 'DE'
                AND scn_name = '{scn_name}'
                AND bus_id NOT IN (SELECT bus_i FROM osmtgmod_results.bus_data))
            AND "bus1" IN (
            SELECT bus_id FROM grid.egon_etrago_bus
                WHERE country != 'DE'
                AND scn_name = '{scn_name}'
                AND bus_id NOT IN (SELECT bus_i FROM osmtgmod_results.bus_data))
            );


            DELETE FROM grid.egon_etrago_{comp}
            WHERE scn_name = '{scn_name}'
            AND "bus0" IN (
            SELECT bus_id FROM grid.egon_etrago_bus
                WHERE country != 'DE'
                AND scn_name = '{scn_name}'
                AND bus_id NOT IN (SELECT bus_i FROM osmtgmod_results.bus_data))
            AND "bus1" IN (
            SELECT bus_id FROM grid.egon_etrago_bus
                WHERE country != 'DE'
                AND scn_name = '{scn_name}'
                AND bus_id NOT IN (SELECT bus_i FROM osmtgmod_results.bus_data))
            ;"""
        )

    # Finally drop the foreign non-AC buses themselves
    db.execute_sql(
        f"""
        DELETE FROM grid.egon_etrago_bus
        WHERE scn_name = '{scn_name}'
        AND country <> 'DE'
        AND carrier <> 'AC'
        """
    )
582
583
584
def electrical_neighbours_egon100():
    """Run the neighbor reduction when eGon100RE is among the scenarios."""
    scenarios = egon.data.config.settings()["egon-data"]["--scenarios"]

    if "eGon100RE" not in scenarios:
        print(
            "eGon100RE is not in the list of created scenarios, this task is skipped."
        )
        return

    neighbor_reduction()
592
593
594
def combine_decentral_and_rural_heat(network_solved, network_prepared):
    """Merge foreign "urban decentral" heat components into "rural" ones.

    For every non-German bus, link and store whose carrier contains
    "urban decentral", the capacities are added onto the matching
    "rural" component if one exists and the urban decentral component is
    removed; otherwise the component is re-labelled to "rural".  Load
    time series of the prepared network are merged analogously and the
    urban decentral loads are dropped.

    Parameters
    ----------
    network_solved : pypsa.Network
        Solved network whose buses, links and stores are modified in place.
    network_prepared : pypsa.Network
        Prepared network whose loads and load time series are modified.

    Returns
    -------
    tuple
        The modified (network_prepared, network_solved).
    """

    for comp in network_solved.iterate_components():

        if comp.name in ["Bus", "Link", "Store"]:
            urban_decentral = comp.df[
                comp.df.carrier.str.contains("urban decentral")
            ]
            # NOTE(review): ``rural`` is a filtered slice of comp.df; the
            # ``rural.loc[...] += ...`` updates below may act on a copy and
            # not propagate into the network — verify intended behavior.
            rural = comp.df[comp.df.carrier.str.contains("rural")]
            for i, row in urban_decentral.iterrows():
                # Only foreign components are merged; German ones keep
                # their urban decentral resolution.
                if not "DE" in i:
                    if comp.name in ["Bus"]:
                        network_solved.remove("Bus", i)
                    if comp.name in ["Link", "Generator"]:
                        if (
                            i.replace("urban decentral", "rural")
                            in rural.index
                        ):
                            # Rural twin exists: add capacities onto it
                            # and drop the urban decentral component.
                            rural.loc[
                                i.replace("urban decentral", "rural"),
                                "p_nom_opt",
                            ] += urban_decentral.loc[i, "p_nom_opt"]
                            rural.loc[
                                i.replace("urban decentral", "rural"), "p_nom"
                            ] += urban_decentral.loc[i, "p_nom"]
                            network_solved.remove(comp.name, i)
                        else:
                            # No rural twin: re-label buses and carrier of
                            # the component itself.
                            print(i)
                            comp.df.loc[i, "bus0"] = comp.df.loc[
                                i, "bus0"
                            ].replace("urban decentral", "rural")
                            comp.df.loc[i, "bus1"] = comp.df.loc[
                                i, "bus1"
                            ].replace("urban decentral", "rural")
                            comp.df.loc[i, "carrier"] = comp.df.loc[
                                i, "carrier"
                            ].replace("urban decentral", "rural")
                    if comp.name in ["Store"]:
                        if (
                            i.replace("urban decentral", "rural")
                            in rural.index
                        ):
                            # Rural twin exists: merge energy capacities.
                            rural.loc[
                                i.replace("urban decentral", "rural"),
                                "e_nom_opt",
                            ] += urban_decentral.loc[i, "e_nom_opt"]
                            rural.loc[
                                i.replace("urban decentral", "rural"), "e_nom"
                            ] += urban_decentral.loc[i, "e_nom"]
                            network_solved.remove(comp.name, i)

                        else:
                            # No rural twin: re-label the store in place.
                            print(i)
                            network_solved.stores.loc[i, "bus"] = (
                                network_solved.stores.loc[i, "bus"].replace(
                                    "urban decentral", "rural"
                                )
                            )
                            network_solved.stores.loc[i, "carrier"] = (
                                "rural water tanks"
                            )

    # Merge urban decentral load time series into the rural loads of the
    # prepared network, then drop the urban decentral loads entirely.
    urban_decentral_loads = network_prepared.loads[
        network_prepared.loads.carrier.str.contains("urban decentral")
    ]

    for i, row in urban_decentral_loads.iterrows():
        if i in network_prepared.loads_t.p_set.columns:
            network_prepared.loads_t.p_set[
                i.replace("urban decentral", "rural")
            ] += network_prepared.loads_t.p_set[i]
    network_prepared.mremove("Load", urban_decentral_loads.index)

    return network_prepared, network_solved
668
669
670
def neighbor_reduction():
671
    network_solved = read_network(planning_horizon=2045)
672
    network_prepared = prepared_network(planning_horizon="2045")
673
674
    # network.links.drop("pipe_retrofit", axis="columns", inplace=True)
675
676
    wanted_countries = countries_list()
677
678
    foreign_buses = network_solved.buses[
679
        (~network_solved.buses.index.str.contains("|".join(wanted_countries)))
680
        | (network_solved.buses.index.str.contains("FR6"))
681
    ]
682
    network_solved.buses = network_solved.buses.drop(
683
        network_solved.buses.loc[foreign_buses.index].index
684
    )
685
686
    # Add H2 demand of Fischer-Tropsch process and methanolisation
687
    # to industrial H2 demands
688
    industrial_hydrogen = network_prepared.loads.loc[
689
        network_prepared.loads.carrier == "H2 for industry"
690
    ]
691
    fischer_tropsch = (
692
        network_solved.links_t.p0[
693
            network_solved.links.loc[
694
                network_solved.links.carrier == "Fischer-Tropsch"
695
            ].index
696
        ]
697
        .mul(network_solved.snapshot_weightings.generators, axis=0)
698
        .sum()
699
    )
700
    methanolisation = (
701
        network_solved.links_t.p0[
702
            network_solved.links.loc[
703
                network_solved.links.carrier == "methanolisation"
704
            ].index
705
        ]
706
        .mul(network_solved.snapshot_weightings.generators, axis=0)
707
        .sum()
708
    )
709
    for i, row in industrial_hydrogen.iterrows():
710
        network_prepared.loads.loc[i, "p_set"] += (
711
            fischer_tropsch[
712
                fischer_tropsch.index.str.startswith(row.bus[:5])
713
            ].sum()
714
            / 8760
715
        )
716
        network_prepared.loads.loc[i, "p_set"] += (
717
            methanolisation[
718
                methanolisation.index.str.startswith(row.bus[:5])
719
            ].sum()
720
            / 8760
721
        )
722
    # drop foreign lines and links from the 2nd row
723
724
    network_solved.lines = network_solved.lines.drop(
725
        network_solved.lines[
726
            (
727
                network_solved.lines["bus0"].isin(network_solved.buses.index)
728
                == False
729
            )
730
            & (
731
                network_solved.lines["bus1"].isin(network_solved.buses.index)
732
                == False
733
            )
734
        ].index
735
    )
736
737
    # select all lines which have at bus1 the bus which is kept
738
    lines_cb_1 = network_solved.lines[
739
        (
740
            network_solved.lines["bus0"].isin(network_solved.buses.index)
741
            == False
742
        )
743
    ]
744
745
    # create a load at bus1 with the line's hourly loading
746
    for i, k in zip(lines_cb_1.bus1.values, lines_cb_1.index):
747
748
        # Copy loading of lines into hourly resolution
749
        pset = pd.Series(
750
            index=network_prepared.snapshots,
751
            data=network_solved.lines_t.p1[k].resample("H").ffill(),
752
        )
753
        pset["2011-12-31 22:00:00"] = pset["2011-12-31 21:00:00"]
754
        pset["2011-12-31 23:00:00"] = pset["2011-12-31 21:00:00"]
755
756
        # Loads are all imported from the prepared network in the end
757
        network_prepared.add(
758
            "Load",
759
            "slack_fix " + i + " " + k,
760
            bus=i,
761
            p_set=pset,
762
            carrier=lines_cb_1.loc[k, "carrier"],
763
        )
764
765
    # select all lines which have at bus0 the bus which is kept
766
    lines_cb_0 = network_solved.lines[
767
        (
768
            network_solved.lines["bus1"].isin(network_solved.buses.index)
769
            == False
770
        )
771
    ]
772
773
    # create a load at bus0 with the line's hourly loading
774
    for i, k in zip(lines_cb_0.bus0.values, lines_cb_0.index):
775
        # Copy loading of lines into hourly resolution
776
        pset = pd.Series(
777
            index=network_prepared.snapshots,
778
            data=network_solved.lines_t.p0[k].resample("H").ffill(),
779
        )
780
        pset["2011-12-31 22:00:00"] = pset["2011-12-31 21:00:00"]
781
        pset["2011-12-31 23:00:00"] = pset["2011-12-31 21:00:00"]
782
783
        network_prepared.add(
784
            "Load",
785
            "slack_fix " + i + " " + k,
786
            bus=i,
787
            p_set=pset,
788
            carrier=lines_cb_0.loc[k, "carrier"],
789
        )
790
791
    # do the same for links
792
    network_solved.mremove(
793
        "Link",
794
        network_solved.links[
795
            (~network_solved.links.bus0.isin(network_solved.buses.index))
796
            | (~network_solved.links.bus1.isin(network_solved.buses.index))
797
        ].index,
798
    )
799
800
    # select all links which have at bus1 the bus which is kept
801
    links_cb_1 = network_solved.links[
802
        (
803
            network_solved.links["bus0"].isin(network_solved.buses.index)
804
            == False
805
        )
806
    ]
807
808
    # create a load at bus1 with the link's hourly loading
809
    for i, k in zip(links_cb_1.bus1.values, links_cb_1.index):
810
        pset = pd.Series(
811
            index=network_prepared.snapshots,
812
            data=network_solved.links_t.p1[k].resample("H").ffill(),
813
        )
814
        pset["2011-12-31 22:00:00"] = pset["2011-12-31 21:00:00"]
815
        pset["2011-12-31 23:00:00"] = pset["2011-12-31 21:00:00"]
816
817
        network_prepared.add(
818
            "Load",
819
            "slack_fix_links " + i + " " + k,
820
            bus=i,
821
            p_set=pset,
822
            carrier=links_cb_1.loc[k, "carrier"],
823
        )
824
825
    # select all links which have at bus0 the bus which is kept
826
    links_cb_0 = network_solved.links[
827
        (
828
            network_solved.links["bus1"].isin(network_solved.buses.index)
829
            == False
830
        )
831
    ]
832
833
    # create a load at bus0 with the link's hourly loading
834
    for i, k in zip(links_cb_0.bus0.values, links_cb_0.index):
835
        pset = pd.Series(
836
            index=network_prepared.snapshots,
837
            data=network_solved.links_t.p0[k].resample("H").ffill(),
838
        )
839
        pset["2011-12-31 22:00:00"] = pset["2011-12-31 21:00:00"]
840
        pset["2011-12-31 23:00:00"] = pset["2011-12-31 21:00:00"]
841
842
        network_prepared.add(
843
            "Load",
844
            "slack_fix_links " + i + " " + k,
845
            bus=i,
846
            p_set=pset,
847
            carrier=links_cb_0.carrier[k],
848
        )
849
850
    # drop remaining foreign components
851
    for comp in network_solved.iterate_components():
852
        if "bus0" in comp.df.columns:
853
            network_solved.mremove(
854
                comp.name,
855
                comp.df[~comp.df.bus0.isin(network_solved.buses.index)].index,
856
            )
857
            network_solved.mremove(
858
                comp.name,
859
                comp.df[~comp.df.bus1.isin(network_solved.buses.index)].index,
860
            )
861
        elif "bus" in comp.df.columns:
862
            network_solved.mremove(
863
                comp.name,
864
                comp.df[~comp.df.bus.isin(network_solved.buses.index)].index,
865
            )
866
867
    # Combine urban decentral and rural heat
868
    network_prepared, network_solved = combine_decentral_and_rural_heat(
869
        network_solved, network_prepared
870
    )
871
872
    # writing components of neighboring countries to etrago tables
873
874
    # Set country tag for all buses
875
    network_solved.buses.country = network_solved.buses.index.str[:2]
876
    neighbors = network_solved.buses[network_solved.buses.country != "DE"]
877
878
    neighbors["new_index"] = (
879
        db.next_etrago_id("bus") + neighbors.reset_index().index
880
    )
881
882
    # Use index of AC buses created by electrical_neigbors
883
    foreign_ac_buses = db.select_dataframe(
884
        """
885
        SELECT * FROM grid.egon_etrago_bus
886
        WHERE carrier = 'AC' AND v_nom = 380
887
        AND country!= 'DE' AND scn_name ='eGon100RE'
888
        AND bus_id NOT IN (SELECT bus_i FROM osmtgmod_results.bus_data)
889
        """
890
    )
891
    buses_with_defined_id = neighbors[
892
        (neighbors.carrier == "AC")
893
        & (neighbors.country.isin(foreign_ac_buses.country.values))
894
    ].index
895
    neighbors.loc[buses_with_defined_id, "new_index"] = (
896
        foreign_ac_buses.set_index("x")
897
        .loc[neighbors.loc[buses_with_defined_id, "x"]]
898
        .bus_id.values
899
    )
900
901
    # lines, the foreign crossborder lines
902
    # (without crossborder lines to Germany!)
903
904
    neighbor_lines = network_solved.lines[
905
        network_solved.lines.bus0.isin(neighbors.index)
906
        & network_solved.lines.bus1.isin(neighbors.index)
907
    ]
908
    if not network_solved.lines_t["s_max_pu"].empty:
909
        neighbor_lines_t = network_prepared.lines_t["s_max_pu"][
910
            neighbor_lines.index
911
        ]
912
913
    neighbor_lines.reset_index(inplace=True)
914
    neighbor_lines.bus0 = (
915
        neighbors.loc[neighbor_lines.bus0, "new_index"].reset_index().new_index
916
    )
917
    neighbor_lines.bus1 = (
918
        neighbors.loc[neighbor_lines.bus1, "new_index"].reset_index().new_index
919
    )
920
    neighbor_lines.index += db.next_etrago_id("line")
921
922
    if not network_solved.lines_t["s_max_pu"].empty:
923
        for i in neighbor_lines_t.columns:
0 ignored issues
show
introduced by
The variable neighbor_lines_t does not seem to be defined in case BooleanNotNode on line 908 is False. Are you sure this can never be the case?
Loading history...
924
            new_index = neighbor_lines[neighbor_lines["name"] == i].index
925
            neighbor_lines_t.rename(columns={i: new_index[0]}, inplace=True)
926
927
    # links
928
    neighbor_links = network_solved.links[
929
        network_solved.links.bus0.isin(neighbors.index)
930
        & network_solved.links.bus1.isin(neighbors.index)
931
    ]
932
933
    neighbor_links.reset_index(inplace=True)
934
    neighbor_links.bus0 = (
935
        neighbors.loc[neighbor_links.bus0, "new_index"].reset_index().new_index
936
    )
937
    neighbor_links.bus1 = (
938
        neighbors.loc[neighbor_links.bus1, "new_index"].reset_index().new_index
939
    )
940
    neighbor_links.index += db.next_etrago_id("link")
941
942
    # generators
943
    neighbor_gens = network_solved.generators[
944
        network_solved.generators.bus.isin(neighbors.index)
945
    ]
946
    neighbor_gens_t = network_prepared.generators_t["p_max_pu"][
947
        neighbor_gens[
948
            neighbor_gens.index.isin(
949
                network_prepared.generators_t["p_max_pu"].columns
950
            )
951
        ].index
952
    ]
953
954
    gen_time = [
955
        "solar",
956
        "onwind",
957
        "solar rooftop",
958
        "offwind-ac",
959
        "offwind-dc",
960
        "solar-hsat",
961
        "urban central solar thermal",
962
        "rural solar thermal",
963
        "offwind-float",
964
    ]
965
966
    missing_gent = neighbor_gens[
967
        neighbor_gens["carrier"].isin(gen_time)
968
        & ~neighbor_gens.index.isin(neighbor_gens_t.columns)
969
    ].index
970
971
    gen_timeseries = network_prepared.generators_t["p_max_pu"].copy()
972
    for mgt in missing_gent:  # mgt: missing generator timeseries
973
        try:
974
            neighbor_gens_t[mgt] = gen_timeseries.loc[:, mgt[0:-5]]
975
        except:
976
            print(f"There are not timeseries for {mgt}")
977
978
    neighbor_gens.reset_index(inplace=True)
979
    neighbor_gens.bus = (
980
        neighbors.loc[neighbor_gens.bus, "new_index"].reset_index().new_index
981
    )
982
    neighbor_gens.index += db.next_etrago_id("generator")
983
984
    for i in neighbor_gens_t.columns:
985
        new_index = neighbor_gens[neighbor_gens["Generator"] == i].index
986
        neighbor_gens_t.rename(columns={i: new_index[0]}, inplace=True)
987
988
    # loads
989
    # imported from prenetwork in 1h-resolution
990
    neighbor_loads = network_prepared.loads[
991
        network_prepared.loads.bus.isin(neighbors.index)
992
    ]
993
    neighbor_loads_t_index = neighbor_loads.index[
994
        neighbor_loads.index.isin(network_prepared.loads_t.p_set.columns)
995
    ]
996
    neighbor_loads_t = network_prepared.loads_t["p_set"][
997
        neighbor_loads_t_index
998
    ]
999
1000
    neighbor_loads.reset_index(inplace=True)
1001
    neighbor_loads.bus = (
1002
        neighbors.loc[neighbor_loads.bus, "new_index"].reset_index().new_index
1003
    )
1004
    neighbor_loads.index += db.next_etrago_id("load")
1005
1006
    for i in neighbor_loads_t.columns:
1007
        new_index = neighbor_loads[neighbor_loads["Load"] == i].index
1008
        neighbor_loads_t.rename(columns={i: new_index[0]}, inplace=True)
1009
1010
    # stores
1011
    neighbor_stores = network_solved.stores[
1012
        network_solved.stores.bus.isin(neighbors.index)
1013
    ]
1014
    neighbor_stores_t_index = neighbor_stores.index[
1015
        neighbor_stores.index.isin(network_solved.stores_t.e_min_pu.columns)
1016
    ]
1017
    neighbor_stores_t = network_prepared.stores_t["e_min_pu"][
1018
        neighbor_stores_t_index
1019
    ]
1020
1021
    neighbor_stores.reset_index(inplace=True)
1022
    neighbor_stores.bus = (
1023
        neighbors.loc[neighbor_stores.bus, "new_index"].reset_index().new_index
1024
    )
1025
    neighbor_stores.index += db.next_etrago_id("store")
1026
1027
    for i in neighbor_stores_t.columns:
1028
        new_index = neighbor_stores[neighbor_stores["Store"] == i].index
1029
        neighbor_stores_t.rename(columns={i: new_index[0]}, inplace=True)
1030
1031
    # storage_units
1032
    neighbor_storage = network_solved.storage_units[
1033
        network_solved.storage_units.bus.isin(neighbors.index)
1034
    ]
1035
    neighbor_storage_t_index = neighbor_storage.index[
1036
        neighbor_storage.index.isin(
1037
            network_solved.storage_units_t.inflow.columns
1038
        )
1039
    ]
1040
    neighbor_storage_t = network_prepared.storage_units_t["inflow"][
1041
        neighbor_storage_t_index
1042
    ]
1043
1044
    neighbor_storage.reset_index(inplace=True)
1045
    neighbor_storage.bus = (
1046
        neighbors.loc[neighbor_storage.bus, "new_index"]
1047
        .reset_index()
1048
        .new_index
1049
    )
1050
    neighbor_storage.index += db.next_etrago_id("storage")
1051
1052
    for i in neighbor_storage_t.columns:
1053
        new_index = neighbor_storage[
1054
            neighbor_storage["StorageUnit"] == i
1055
        ].index
1056
        neighbor_storage_t.rename(columns={i: new_index[0]}, inplace=True)
1057
1058
    # Connect to local database
1059
    engine = db.engine()
1060
1061
    neighbors["scn_name"] = "eGon100RE"
1062
    neighbors.index = neighbors["new_index"]
1063
1064
    # Correct geometry for non AC buses
1065
    carriers = set(neighbors.carrier.to_list())
1066
    carriers = [e for e in carriers if e not in ("AC")]
1067
    non_AC_neighbors = pd.DataFrame()
1068
    for c in carriers:
1069
        c_neighbors = neighbors[neighbors.carrier == c].set_index(
1070
            "location", drop=False
1071
        )
1072
        for i in ["x", "y"]:
1073
            c_neighbors = c_neighbors.drop(i, axis=1)
1074
        coordinates = neighbors[neighbors.carrier == "AC"][
1075
            ["location", "x", "y"]
1076
        ].set_index("location")
1077
        c_neighbors = pd.concat([coordinates, c_neighbors], axis=1).set_index(
1078
            "new_index", drop=False
1079
        )
1080
        non_AC_neighbors = pd.concat([non_AC_neighbors, c_neighbors])
1081
1082
    neighbors = pd.concat(
1083
        [neighbors[neighbors.carrier == "AC"], non_AC_neighbors]
1084
    )
1085
1086
    for i in [
1087
        "new_index",
1088
        "control",
1089
        "generator",
1090
        "location",
1091
        "sub_network",
1092
        "unit",
1093
        "substation_lv",
1094
        "substation_off",
1095
    ]:
1096
        neighbors = neighbors.drop(i, axis=1)
1097
1098
    # Add geometry column
1099
    neighbors = (
1100
        gpd.GeoDataFrame(
1101
            neighbors, geometry=gpd.points_from_xy(neighbors.x, neighbors.y)
1102
        )
1103
        .rename_geometry("geom")
1104
        .set_crs(4326)
1105
    )
1106
1107
    # Unify carrier names
1108
    neighbors.carrier = neighbors.carrier.str.replace(" ", "_")
1109
    neighbors.carrier.replace(
1110
        {
1111
            "gas": "CH4",
1112
            "gas_for_industry": "CH4_for_industry",
1113
            "urban_central_heat": "central_heat",
1114
            "EV_battery": "Li_ion",
1115
            "urban_central_water_tanks": "central_heat_store",
1116
            "rural_water_tanks": "rural_heat_store",
1117
        },
1118
        inplace=True,
1119
    )
1120
1121
    neighbors[~neighbors.carrier.isin(["AC"])].to_postgis(
1122
        "egon_etrago_bus",
1123
        engine,
1124
        schema="grid",
1125
        if_exists="append",
1126
        index=True,
1127
        index_label="bus_id",
1128
    )
1129
1130
    # prepare and write neighboring crossborder lines to etrago tables
1131
    # NOTE(review): binding the enclosing-scope DataFrame as a default
    # argument evaluates it once at definition time; confirm no caller
    # relies on late binding here.
    def lines_to_etrago(neighbor_lines=neighbor_lines, scn="eGon100RE"):
        """Prepare neighboring crossborder AC lines and write them to eTraGo.

        Adds scenario metadata, derives ``cables`` and ``s_nom``, drops
        PyPSA-internal columns, attaches a ``topo`` line geometry built
        from the bus coordinates, sets the lifetime and inserts the
        result into ``grid.egon_etrago_line``. No return value.

        Parameters
        ----------
        neighbor_lines : pandas.DataFrame
            Crossborder lines between neighboring (non-DE) buses.
        scn : str
            Scenario name written to the ``scn_name`` column.
        """
        neighbor_lines["scn_name"] = scn
        # Three cables per parallel circuit.
        neighbor_lines["cables"] = 3 * neighbor_lines["num_parallel"].astype(
            int
        )
        neighbor_lines["s_nom"] = neighbor_lines["s_nom_min"]

        # Drop PyPSA-internal columns with no counterpart in the
        # grid.egon_etrago_line table.
        for i in [
            "Line",
            "x_pu_eff",
            "r_pu_eff",
            "sub_network",
            "x_pu",
            "r_pu",
            "g_pu",
            "b_pu",
            "s_nom_opt",
            "i_nom",
            "dc",
        ]:
            neighbor_lines = neighbor_lines.drop(i, axis=1)

        # Define geometry and add to lines dataframe as 'topo':
        # a straight LineString from bus0 to bus1 coordinates.
        gdf = gpd.GeoDataFrame(index=neighbor_lines.index)
        gdf["geom_bus0"] = neighbors.geom[neighbor_lines.bus0].values
        gdf["geom_bus1"] = neighbors.geom[neighbor_lines.bus1].values
        gdf["geometry"] = gdf.apply(
            lambda x: LineString([x["geom_bus0"], x["geom_bus1"]]), axis=1
        )

        neighbor_lines = (
            gpd.GeoDataFrame(neighbor_lines, geometry=gdf["geometry"])
            .rename_geometry("topo")
            .set_crs(4326)
        )

        # One shared lifetime for all crossborder AC lines, taken from
        # the scenario parameters.
        neighbor_lines["lifetime"] = get_sector_parameters("electricity", scn)[
            "lifetime"
        ]["ac_ehv_overhead_line"]

        neighbor_lines.to_postgis(
            "egon_etrago_line",
            engine,
            schema="grid",
            if_exists="append",
            index=True,
            index_label="line_id",
        )
1179
1180
    lines_to_etrago(neighbor_lines=neighbor_lines, scn="eGon100RE")
1181
1182
    def links_to_etrago(neighbor_links, scn="eGon100RE", extendable=True):
        """Prepare and write neighboring crossborder links to the eTraGo table.

        This function prepares the neighboring crossborder links
        generated by the PyPSA-eur-sec (p-e-s) run by:
          * Deleting the unneeded columns
          * If extendable is false only (non default case):
              * Replacing p_nom = 0 with the p_nom_opt values (arising
                from the p-e-s optimisation)
              * Setting p_nom_extendable to false
          * Adding geometry to the links: 'geom' and 'topo' columns
          * Renaming the carriers to be consistent with eGon-data

        The function then inserts the links into the eTraGo table and
        has no return.

        Parameters
        ----------
        neighbor_links : pandas.DataFrame
            Dataframe containing the neighboring crossborder links
        scn : str
            Name of the scenario
        extendable : bool
            Boolean expressing if the links should be extendable or not

        Returns
        -------
        None

        """
        neighbor_links["scn_name"] = scn

        # PyPSA-internal / multi-port columns with no counterpart in
        # the grid.egon_etrago_link table.
        dropped_carriers = [
            "Link",
            "geometry",
            "tags",
            "under_construction",
            "underground",
            "underwater_fraction",
            "bus2",
            "bus3",
            "bus4",
            "efficiency2",
            "efficiency3",
            "efficiency4",
            "lifetime",
            "pipe_retrofit",
            "committable",
            "start_up_cost",
            "shut_down_cost",
            "min_up_time",
            "min_down_time",
            "up_time_before",
            "down_time_before",
            "ramp_limit_up",
            "ramp_limit_down",
            "ramp_limit_start_up",
            "ramp_limit_shut_down",
            "length_original",
            "reversed",
            "location",
            "project_status",
            "dc",
            "voltage",
        ]

        if extendable:
            # Keep p_nom/p_nom_extendable as-is; the optimised value is
            # discarded.
            dropped_carriers.append("p_nom_opt")
            neighbor_links = neighbor_links.drop(
                columns=dropped_carriers,
                errors="ignore",
            )

        else:
            # Fix capacities to the optimisation result: p_nom_opt
            # becomes p_nom and extendability is switched off.
            dropped_carriers.append("p_nom")
            dropped_carriers.append("p_nom_extendable")
            neighbor_links = neighbor_links.drop(
                columns=dropped_carriers,
                errors="ignore",
            )
            neighbor_links = neighbor_links.rename(
                columns={"p_nom_opt": "p_nom"}
            )
            neighbor_links["p_nom_extendable"] = False

        if neighbor_links.empty:
            print("No links selected")
            return

        # Define geometry and add to lines dataframe as 'topo':
        # a straight LineString from bus0 to bus1 coordinates.
        gdf = gpd.GeoDataFrame(
            index=neighbor_links.index,
            data={
                "geom_bus0": neighbors.loc[neighbor_links.bus0, "geom"].values,
                "geom_bus1": neighbors.loc[neighbor_links.bus1, "geom"].values,
            },
        )

        gdf["geometry"] = gdf.apply(
            lambda x: LineString([x["geom_bus0"], x["geom_bus1"]]), axis=1
        )

        neighbor_links = (
            gpd.GeoDataFrame(neighbor_links, geometry=gdf["geometry"])
            .rename_geometry("topo")
            .set_crs(4326)
        )

        # Unify carrier names: PyPSA-Eur names -> eGon-data names.
        neighbor_links.carrier = neighbor_links.carrier.str.replace(" ", "_")

        neighbor_links.carrier.replace(
            {
                "H2_Electrolysis": "power_to_H2",
                "H2_Fuel_Cell": "H2_to_power",
                "H2_pipeline_retrofitted": "H2_retrofit",
                "SMR": "CH4_to_H2",
                "Sabatier": "H2_to_CH4",
                "gas_for_industry": "CH4_for_industry",
                "gas_pipeline": "CH4",
                "urban_central_gas_boiler": "central_gas_boiler",
                "urban_central_resistive_heater": "central_resistive_heater",
                "urban_central_water_tanks_charger": "central_heat_store_charger",
                "urban_central_water_tanks_discharger": "central_heat_store_discharger",
                "rural_water_tanks_charger": "rural_heat_store_charger",
                "rural_water_tanks_discharger": "rural_heat_store_discharger",
                "urban_central_gas_CHP": "central_gas_CHP",
                "urban_central_air_heat_pump": "central_heat_pump",
                "rural_ground_heat_pump": "rural_heat_pump",
            },
            inplace=True,
        )

        # Map the (already renamed) H2 carriers to the lifetime keys of
        # the gas sector parameters.
        H2_links = {
            "H2_to_CH4": "H2_to_CH4",
            "H2_to_power": "H2_to_power",
            "power_to_H2": "power_to_H2_system",
            "CH4_to_H2": "CH4_to_H2",
        }

        for c in H2_links.keys():

            neighbor_links.loc[
                (neighbor_links.carrier == c),
                "lifetime",
            ] = get_sector_parameters("gas", "eGon100RE")["lifetime"][
                H2_links[c]
            ]

        neighbor_links.to_postgis(
            "egon_etrago_link",
            engine,
            schema="grid",
            if_exists="append",
            index=True,
            index_label="link_id",
        )
1340
1341
    extendable_links_carriers = [
1342
        "battery charger",
1343
        "battery discharger",
1344
        "home battery charger",
1345
        "home battery discharger",
1346
        "rural water tanks charger",
1347
        "rural water tanks discharger",
1348
        "urban central water tanks charger",
1349
        "urban central water tanks discharger",
1350
        "urban decentral water tanks charger",
1351
        "urban decentral water tanks discharger",
1352
        "H2 Electrolysis",
1353
        "H2 Fuel Cell",
1354
        "SMR",
1355
        "Sabatier",
1356
    ]
1357
1358
    # delete unwanted carriers for eTraGo
1359
    excluded_carriers = [
1360
        "gas for industry CC",
1361
        "SMR CC",
1362
        "DAC",
1363
    ]
1364
    neighbor_links = neighbor_links[
1365
        ~neighbor_links.carrier.isin(excluded_carriers)
1366
    ]
1367
1368
    # Combine CHP_CC and CHP
1369
    chp_cc = neighbor_links[
1370
        neighbor_links.carrier == "urban central gas CHP CC"
1371
    ]
1372
    for index, row in chp_cc.iterrows():
1373
        neighbor_links.loc[
1374
            neighbor_links.Link == row.Link.replace("CHP CC", "CHP"),
1375
            "p_nom_opt",
1376
        ] += row.p_nom_opt
1377
        neighbor_links.loc[
1378
            neighbor_links.Link == row.Link.replace("CHP CC", "CHP"), "p_nom"
1379
        ] += row.p_nom
1380
        neighbor_links.drop(index, inplace=True)
1381
1382
    # Combine heat pumps
1383
    # Like in Germany, there are air heat pumps in central heat grids
1384
    # and ground heat pumps in rural areas
1385
    rural_air = neighbor_links[neighbor_links.carrier == "rural air heat pump"]
1386
    for index, row in rural_air.iterrows():
1387
        neighbor_links.loc[
1388
            neighbor_links.Link == row.Link.replace("air", "ground"),
1389
            "p_nom_opt",
1390
        ] += row.p_nom_opt
1391
        neighbor_links.loc[
1392
            neighbor_links.Link == row.Link.replace("air", "ground"), "p_nom"
1393
        ] += row.p_nom
1394
        neighbor_links.drop(index, inplace=True)
1395
    links_to_etrago(
1396
        neighbor_links[neighbor_links.carrier.isin(extendable_links_carriers)],
1397
        "eGon100RE",
1398
    )
1399
    links_to_etrago(
1400
        neighbor_links[
1401
            ~neighbor_links.carrier.isin(extendable_links_carriers)
1402
        ],
1403
        "eGon100RE",
1404
        extendable=False,
1405
    )
1406
    # Include links time-series
1407
    # For heat_pumps
1408
    hp = neighbor_links[neighbor_links["carrier"].str.contains("heat pump")]
1409
1410
    neighbor_eff_t = network_prepared.links_t["efficiency"][
1411
        hp[hp.Link.isin(network_prepared.links_t["efficiency"].columns)].index
1412
    ]
1413
1414
    missing_hp = hp[~hp["Link"].isin(neighbor_eff_t.columns)].Link
1415
1416
    eff_timeseries = network_prepared.links_t["efficiency"].copy()
1417
    for met in missing_hp:  # met: missing efficiency timeseries
1418
        try:
1419
            neighbor_eff_t[met] = eff_timeseries.loc[:, met[0:-5]]
1420
        except:
1421
            print(f"There are not timeseries for heat_pump {met}")
1422
1423
    for i in neighbor_eff_t.columns:
1424
        new_index = neighbor_links[neighbor_links["Link"] == i].index
1425
        neighbor_eff_t.rename(columns={i: new_index[0]}, inplace=True)
1426
1427
    # Include links time-series
1428
    # For ev_chargers
1429
    ev = neighbor_links[neighbor_links["carrier"].str.contains("BEV charger")]
1430
1431
    ev_p_max_pu = network_prepared.links_t["p_max_pu"][
1432
        ev[ev.Link.isin(network_prepared.links_t["p_max_pu"].columns)].index
1433
    ]
1434
1435
    missing_ev = ev[~ev["Link"].isin(ev_p_max_pu.columns)].Link
1436
1437
    ev_p_max_pu_timeseries = network_prepared.links_t["p_max_pu"].copy()
1438
    for mct in missing_ev:  # evt: missing charger timeseries
1439
        try:
1440
            ev_p_max_pu[mct] = ev_p_max_pu_timeseries.loc[:, mct[0:-5]]
1441
        except:
1442
            print(f"There are not timeseries for EV charger {mct}")
1443
1444
    for i in ev_p_max_pu.columns:
1445
        new_index = neighbor_links[neighbor_links["Link"] == i].index
1446
        ev_p_max_pu.rename(columns={i: new_index[0]}, inplace=True)
1447
1448
    # prepare neighboring generators for etrago tables
1449
    neighbor_gens["scn_name"] = "eGon100RE"
1450
    neighbor_gens["p_nom"] = neighbor_gens["p_nom_opt"]
1451
    neighbor_gens["p_nom_extendable"] = False
1452
1453
    # Unify carrier names
1454
    neighbor_gens.carrier = neighbor_gens.carrier.str.replace(" ", "_")
1455
1456
    neighbor_gens.carrier.replace(
1457
        {
1458
            "onwind": "wind_onshore",
1459
            "ror": "run_of_river",
1460
            "offwind-ac": "wind_offshore",
1461
            "offwind-dc": "wind_offshore",
1462
            "offwind-float": "wind_offshore",
1463
            "urban_central_solar_thermal": "urban_central_solar_thermal_collector",
1464
            "residential_rural_solar_thermal": "residential_rural_solar_thermal_collector",
1465
            "services_rural_solar_thermal": "services_rural_solar_thermal_collector",
1466
            "solar-hsat": "solar",
1467
        },
1468
        inplace=True,
1469
    )
1470
1471
    for i in [
1472
        "Generator",
1473
        "weight",
1474
        "lifetime",
1475
        "p_set",
1476
        "q_set",
1477
        "p_nom_opt",
1478
        "e_sum_min",
1479
        "e_sum_max",
1480
    ]:
1481
        neighbor_gens = neighbor_gens.drop(i, axis=1)
1482
1483
    neighbor_gens.to_sql(
1484
        "egon_etrago_generator",
1485
        engine,
1486
        schema="grid",
1487
        if_exists="append",
1488
        index=True,
1489
        index_label="generator_id",
1490
    )
1491
1492
    # prepare neighboring loads for etrago tables
1493
    neighbor_loads["scn_name"] = "eGon100RE"
1494
1495
    # Unify carrier names
1496
    neighbor_loads.carrier = neighbor_loads.carrier.str.replace(" ", "_")
1497
1498
    neighbor_loads.carrier.replace(
1499
        {
1500
            "electricity": "AC",
1501
            "DC": "AC",
1502
            "industry_electricity": "AC",
1503
            "H2_pipeline_retrofitted": "H2_system_boundary",
1504
            "gas_pipeline": "CH4_system_boundary",
1505
            "gas_for_industry": "CH4_for_industry",
1506
            "urban_central_heat": "central_heat",
1507
        },
1508
        inplace=True,
1509
    )
1510
1511
    neighbor_loads = neighbor_loads.drop(
1512
        columns=["Load"],
1513
        errors="ignore",
1514
    )
1515
1516
    neighbor_loads.to_sql(
1517
        "egon_etrago_load",
1518
        engine,
1519
        schema="grid",
1520
        if_exists="append",
1521
        index=True,
1522
        index_label="load_id",
1523
    )
1524
1525
    # prepare neighboring stores for etrago tables
1526
    neighbor_stores["scn_name"] = "eGon100RE"
1527
1528
    # Unify carrier names
1529
    neighbor_stores.carrier = neighbor_stores.carrier.str.replace(" ", "_")
1530
1531
    neighbor_stores.carrier.replace(
1532
        {
1533
            "Li_ion": "battery",
1534
            "gas": "CH4",
1535
            "urban_central_water_tanks": "central_heat_store",
1536
            "rural_water_tanks": "rural_heat_store",
1537
            "EV_battery": "battery_storage",
1538
        },
1539
        inplace=True,
1540
    )
1541
    neighbor_stores.loc[
1542
        (
1543
            (neighbor_stores.e_nom_max <= 1e9)
1544
            & (neighbor_stores.carrier == "H2_Store")
1545
        ),
1546
        "carrier",
1547
    ] = "H2_underground"
1548
    neighbor_stores.loc[
1549
        (
1550
            (neighbor_stores.e_nom_max > 1e9)
1551
            & (neighbor_stores.carrier == "H2_Store")
1552
        ),
1553
        "carrier",
1554
    ] = "H2_overground"
1555
1556
    for i in [
1557
        "Store",
1558
        "p_set",
1559
        "q_set",
1560
        "e_nom_opt",
1561
        "lifetime",
1562
        "e_initial_per_period",
1563
        "e_cyclic_per_period",
1564
        "location",
1565
    ]:
1566
        neighbor_stores = neighbor_stores.drop(i, axis=1, errors="ignore")
1567
1568
    for c in ["H2_underground", "H2_overground"]:
1569
        neighbor_stores.loc[
1570
            (neighbor_stores.carrier == c),
1571
            "lifetime",
1572
        ] = get_sector_parameters("gas", "eGon100RE")["lifetime"][c]
1573
1574
    neighbor_stores.to_sql(
1575
        "egon_etrago_store",
1576
        engine,
1577
        schema="grid",
1578
        if_exists="append",
1579
        index=True,
1580
        index_label="store_id",
1581
    )
1582
1583
    # prepare neighboring storage_units for etrago tables
1584
    neighbor_storage["scn_name"] = "eGon100RE"
1585
1586
    # Unify carrier names
1587
    neighbor_storage.carrier = neighbor_storage.carrier.str.replace(" ", "_")
1588
1589
    neighbor_storage.carrier.replace(
1590
        {"PHS": "pumped_hydro", "hydro": "reservoir"}, inplace=True
1591
    )
1592
1593
    for i in [
1594
        "StorageUnit",
1595
        "p_nom_opt",
1596
        "state_of_charge_initial_per_period",
1597
        "cyclic_state_of_charge_per_period",
1598
    ]:
1599
        neighbor_storage = neighbor_storage.drop(i, axis=1, errors="ignore")
1600
1601
    neighbor_storage.to_sql(
1602
        "egon_etrago_storage",
1603
        engine,
1604
        schema="grid",
1605
        if_exists="append",
1606
        index=True,
1607
        index_label="storage_id",
1608
    )
1609
1610
    # writing neighboring loads_t p_sets to etrago tables
1611
1612
    neighbor_loads_t_etrago = pd.DataFrame(
1613
        columns=["scn_name", "temp_id", "p_set"],
1614
        index=neighbor_loads_t.columns,
1615
    )
1616
    neighbor_loads_t_etrago["scn_name"] = "eGon100RE"
1617
    neighbor_loads_t_etrago["temp_id"] = 1
1618
    for i in neighbor_loads_t.columns:
1619
        neighbor_loads_t_etrago["p_set"][i] = neighbor_loads_t[
1620
            i
1621
        ].values.tolist()
1622
1623
    neighbor_loads_t_etrago.to_sql(
1624
        "egon_etrago_load_timeseries",
1625
        engine,
1626
        schema="grid",
1627
        if_exists="append",
1628
        index=True,
1629
        index_label="load_id",
1630
    )
1631
1632
    # writing neighboring link_t efficiency and p_max_pu to etrago tables
1633
    neighbor_link_t_etrago = pd.DataFrame(
1634
        columns=["scn_name", "temp_id", "p_max_pu", "efficiency"],
1635
        index=neighbor_eff_t.columns.to_list() + ev_p_max_pu.columns.to_list(),
1636
    )
1637
    neighbor_link_t_etrago["scn_name"] = "eGon100RE"
1638
    neighbor_link_t_etrago["temp_id"] = 1
1639
    for i in neighbor_eff_t.columns:
1640
        neighbor_link_t_etrago["efficiency"][i] = neighbor_eff_t[
1641
            i
1642
        ].values.tolist()
1643
    for i in ev_p_max_pu.columns:
1644
        neighbor_link_t_etrago["p_max_pu"][i] = ev_p_max_pu[i].values.tolist()
1645
1646
    neighbor_link_t_etrago.to_sql(
1647
        "egon_etrago_link_timeseries",
1648
        engine,
1649
        schema="grid",
1650
        if_exists="append",
1651
        index=True,
1652
        index_label="link_id",
1653
    )
1654
1655
    # writing neighboring generator_t p_max_pu to etrago tables
1656
    neighbor_gens_t_etrago = pd.DataFrame(
1657
        columns=["scn_name", "temp_id", "p_max_pu"],
1658
        index=neighbor_gens_t.columns,
1659
    )
1660
    neighbor_gens_t_etrago["scn_name"] = "eGon100RE"
1661
    neighbor_gens_t_etrago["temp_id"] = 1
1662
    for i in neighbor_gens_t.columns:
1663
        neighbor_gens_t_etrago["p_max_pu"][i] = neighbor_gens_t[
1664
            i
1665
        ].values.tolist()
1666
1667
    neighbor_gens_t_etrago.to_sql(
1668
        "egon_etrago_generator_timeseries",
1669
        engine,
1670
        schema="grid",
1671
        if_exists="append",
1672
        index=True,
1673
        index_label="generator_id",
1674
    )
1675
1676
    # writing neighboring stores_t e_min_pu to etrago tables
1677
    neighbor_stores_t_etrago = pd.DataFrame(
1678
        columns=["scn_name", "temp_id", "e_min_pu"],
1679
        index=neighbor_stores_t.columns,
1680
    )
1681
    neighbor_stores_t_etrago["scn_name"] = "eGon100RE"
1682
    neighbor_stores_t_etrago["temp_id"] = 1
1683
    for i in neighbor_stores_t.columns:
1684
        neighbor_stores_t_etrago["e_min_pu"][i] = neighbor_stores_t[
1685
            i
1686
        ].values.tolist()
1687
1688
    neighbor_stores_t_etrago.to_sql(
1689
        "egon_etrago_store_timeseries",
1690
        engine,
1691
        schema="grid",
1692
        if_exists="append",
1693
        index=True,
1694
        index_label="store_id",
1695
    )
1696
1697
    # writing neighboring storage_units inflow to etrago tables
1698
    neighbor_storage_t_etrago = pd.DataFrame(
1699
        columns=["scn_name", "temp_id", "inflow"],
1700
        index=neighbor_storage_t.columns,
1701
    )
1702
    neighbor_storage_t_etrago["scn_name"] = "eGon100RE"
1703
    neighbor_storage_t_etrago["temp_id"] = 1
1704
    for i in neighbor_storage_t.columns:
1705
        neighbor_storage_t_etrago["inflow"][i] = neighbor_storage_t[
1706
            i
1707
        ].values.tolist()
1708
1709
    neighbor_storage_t_etrago.to_sql(
1710
        "egon_etrago_storage_timeseries",
1711
        engine,
1712
        schema="grid",
1713
        if_exists="append",
1714
        index=True,
1715
        index_label="storage_id",
1716
    )
1717
1718
    # writing neighboring lines_t s_max_pu to etrago tables
1719
    if not network_solved.lines_t["s_max_pu"].empty:
1720
        neighbor_lines_t_etrago = pd.DataFrame(
1721
            columns=["scn_name", "s_max_pu"], index=neighbor_lines_t.columns
1722
        )
1723
        neighbor_lines_t_etrago["scn_name"] = "eGon100RE"
1724
1725
        for i in neighbor_lines_t.columns:
1726
            neighbor_lines_t_etrago["s_max_pu"][i] = neighbor_lines_t[
1727
                i
1728
            ].values.tolist()
1729
1730
        neighbor_lines_t_etrago.to_sql(
1731
            "egon_etrago_line_timeseries",
1732
            engine,
1733
            schema="grid",
1734
            if_exists="append",
1735
            index=True,
1736
            index_label="line_id",
1737
        )
1738
1739
def prepared_network(planning_horizon=3):
    """Load the prepared (pre-solve) pypsa-eur network.

    Parameters
    ----------
    planning_horizon : int, optional
        Index into the ``planning_horizons`` list of the pypsa-eur
        scenario configuration (default: 3). Only relevant when
        pypsa-eur is actually run.

    Returns
    -------
    pypsa.Network
        The prenetwork read from disk.
    """
    run_pypsa_eur = egon.data.config.settings()["egon-data"]["--run-pypsa-eur"]

    if not run_pypsa_eur:
        # pypsa-eur was not executed: fall back to the pre-manipulated
        # network shipped in the data bundle.
        target_file = (
            Path(".")
            / "data_bundle_egon_data"
            / "pypsa_eur"
            / "prenetworks"
            / "prenetwork_post-manipulate_pre-solve"
            / "base_s_39_lc1.25__cb40ex0-T-H-I-B-solar+p3-dist1_2045.nc"
        )
        return pypsa.Network(target_file.absolute().as_posix())

    with open(
        __path__[0] + "/datasets/pypsaeur/config_prepare.yaml", "r"
    ) as stream:
        data_config = yaml.safe_load(stream)

    # File name follows the pypsa-eur naming scheme for prenetworks.
    scenario = data_config["scenario"]
    file_name = (
        f"base_s_{scenario['clusters'][0]}"
        f"_l{scenario['ll'][0]}"
        f"_{scenario['opts'][0]}"
        f"_{scenario['sector_opts'][0]}"
        f"_{scenario['planning_horizons'][planning_horizon]}.nc"
    )
    target_file = (
        Path(".")
        / "run-pypsa-eur"
        / "pypsa-eur"
        / "results"
        / data_config["run"]["name"]
        / "prenetworks"
        / file_name
    )
    return pypsa.Network(target_file.absolute().as_posix())
1773
1774
def overwrite_H2_pipeline_share():
    """Overwrite retrofitted_CH4pipeline-to-H2pipeline_share value

    Overwrite retrofitted_CH4pipeline-to-H2pipeline_share in the
    scenario parameter table if p-e-s is run.
    This function write in the database and has no return.

    """
    scn_name = "eGon100RE"
    # Select source and target from dataset configuration
    target = egon.data.config.datasets()["pypsa-eur-sec"]["target"]

    n = read_network(planning_horizon=2045)

    retrofitted = n.links[n.links["carrier"] == "H2 pipeline retrofitted"]
    ch4_pipes = n.links[n.links["carrier"] == "gas pipeline"]

    # Mean of the pipeline-wise capacity ratios (retrofitted H2 capacity
    # over original CH4 capacity); assumes both selections line up
    # pipeline by pipeline.
    ratios = [
        h2_cap / ch4_cap
        for h2_cap, ch4_cap in zip(
            retrofitted.p_nom_opt.to_list(), ch4_pipes.p_nom.to_list()
        )
    ]
    H2_pipes_share = np.mean(ratios)
    logger.info(
        "retrofitted_CH4pipeline-to-H2pipeline_share = " + str(H2_pipes_share)
    )

    # Fetch the current scenario parameter row for eGon100RE.
    parameters = db.select_dataframe(
        f"""
        SELECT *
        FROM {target['scenario_parameters']['schema']}.{target['scenario_parameters']['table']}
        WHERE name = '{scn_name}'
        """
    )

    gas_param = parameters.loc[0, "gas_parameters"]
    gas_param["retrofitted_CH4pipeline-to-H2pipeline_share"] = H2_pipes_share
    gas_param = json.dumps(gas_param)

    # Update data in db
    db.execute_sql(
        f"""
    UPDATE {target['scenario_parameters']['schema']}.{target['scenario_parameters']['table']}
    SET gas_parameters = '{gas_param}'
    WHERE name = '{scn_name}';
    """
    )
1822
1823
1824
def update_electrical_timeseries_germany(network):
    """Replace electrical demand time series in Germany with data from egon-data

    For target years before 2037 the annual demand is linearly
    interpolated between status2019 values and NEP 2023 scenario B
    values for 2037, and the eGon100RE profile shape is scaled to that
    annual demand. For 2045 the eGon100RE profiles are used unchanged.
    Other years are not implemented and leave the network untouched.

    Parameters
    ----------
    network : pypsa.Network
        Network including demand time series from pypsa-eur

    Returns
    -------
    network : pypsa.Network
        Network including electrical demand time series in Germany from egon-data

    """
    year = network.year
    # Temporal resolution: the objective weighting of the first snapshot
    # (e.g. 1 for hourly data) is used as step size into the hourly csv.
    skip = network.snapshot_weightings.objective.iloc[0].astype("int")
    df = pd.read_csv(
        "input-pypsa-eur-sec/electrical_demand_timeseries_DE_eGon100RE.csv"
    )

    annual_demand = pd.Series(index=[2019, 2037], dtype=float)
    annual_demand_industry = pd.Series(index=[2019, 2037], dtype=float)
    # Define values from status2019 for interpolation
    # Residential and service (in TWh)
    annual_demand.loc[2019] = 124.71 + 143.26
    # Industry (in TWh)
    annual_demand_industry.loc[2019] = 241.925

    # Define values from NEP 2023 scenario B 2037 for interpolation
    # Residential and service (in TWh)
    annual_demand.loc[2037] = 104 + 153.1
    # Industry (in TWh)
    annual_demand_industry.loc[2037] = 334.0

    # Set interpolated demands for years between 2019 and 2045
    if year < 2037:
        # Calculate annual demands for year by linearly interpolating
        # between 2019 and 2037.
        # Done separately for industry and residential/service to fit
        # pypsa-eur's load structure.
        annual_rate = (annual_demand.loc[2037] - annual_demand.loc[2019]) / (
            2037 - 2019
        )
        annual_demand_year = annual_demand.loc[2019] + annual_rate * (
            year - 2019
        )

        annual_rate_industry = (
            annual_demand_industry.loc[2037] - annual_demand_industry.loc[2019]
        ) / (2037 - 2019)
        annual_demand_year_industry = annual_demand_industry.loc[
            2019
        ] + annual_rate_industry * (year - 2019)

        # Scale time series for 100% scenario with the annual demands
        # The shape of the curve is taken from the 100% scenario since the
        # same weather and calendar year is used there
        network.loads_t.p_set.loc[:, "DE0 0"] = (
            df["residential_and_service"].loc[::skip]
            / df["residential_and_service"].sum()
            * annual_demand_year
            * 1e6
        ).values

        network.loads_t.p_set.loc[:, "DE0 0 industry electricity"] = (
            df["industry"].loc[::skip]
            / df["industry"].sum()
            * annual_demand_year_industry
            * 1e6
        ).values

    elif year == 2045:
        # Bugfix: use .values here as well — assigning the raw Series
        # would align the csv's RangeIndex against the network's snapshot
        # index and fill the column with NaN.
        network.loads_t.p_set.loc[:, "DE0 0"] = (
            df["residential_and_service"].loc[::skip].values
        )

        network.loads_t.p_set.loc[:, "DE0 0 industry electricity"] = (
            df["industry"].loc[::skip].values
        )

    else:
        print(
            "Scaling not implemented for years between 2037 and 2045 and beyond."
        )
        # Return the unmodified network (instead of None) so chained
        # manipulator calls in execute() keep a valid network object.
        return network

    # The static industry load is superseded by the time series above.
    network.loads.loc["DE0 0 industry electricity", "p_set"] = 0.0

    return network
1913
1914
1915
def geothermal_district_heating(network):
    """Add the option to build geothermal power plants in district heating in Germany

    One extendable generator per row of the cost/potential table is
    attached to the German urban central heat bus.

    Parameters
    ----------
    network : pypsa.Network
        Network from pypsa-eur without geothermal generators

    Returns
    -------
    network : pypsa.Network
        Updated network with geothermal generators

    """
    costs_and_potentials = pd.read_csv(
        "input-pypsa-eur-sec/geothermal_potential_germany.csv"
    )

    network.add("Carrier", "urban central geo thermal")

    # Lifetime of a geothermal plant is set to 30 years based on:
    # "Ableitung eines Korridors fuer den Ausbau der erneuerbaren Waerme
    # im Gebaeudebereich", Beuth Hochschule fuer Technik Berlin /
    # ifeu Institut fuer Energie- und Umweltforschung Heidelberg GmbH,
    # February 2017.
    lifetime_geothermal = 30

    for idx, entry in costs_and_potentials.iterrows():
        network.add(
            "Generator",
            f"DE0 0 urban central geo thermal {idx}",
            bus="DE0 0 urban central heat",
            carrier="urban central geo thermal",
            p_nom_extendable=True,
            p_nom_max=entry["potential [MW]"],
            capital_cost=annualize_capital_costs(
                entry["cost [EUR/kW]"] * 1e6, lifetime_geothermal, 0.07
            ),
        )
    return network
1955
1956
1957
def h2_overground_stores(network):
    """Add hydrogen overground stores to each hydrogen node

    In pypsa-eur, only countries without underground-hydrogen-storage
    potential get overground hydrogen tanks. Tanks are more expensive
    but not limited by geology, so an extendable tank is added to every
    node that currently only has a capacity-limited underground store.
    The tank cost is the mean capital cost of the existing (unlimited)
    overground stores.

    Parameters
    ----------
    network : pypsa.Network
        Network without hydrogen overground stores at each hydrogen node

    Returns
    -------
    network : pypsa.Network
        Network with hydrogen overground stores at each hydrogen node

    """
    is_h2_store = network.stores.carrier == "H2 Store"

    # Capacity-limited stores are underground caverns; unlimited ones
    # are the already-present overground tanks.
    limited = network.stores[is_h2_store & (network.stores.e_nom_max != np.inf)]
    unlimited = network.stores[is_h2_store & (network.stores.e_nom_max == np.inf)]

    network.madd(
        "Store",
        limited.bus + " overground Store",
        bus=limited.bus.values,
        e_nom_extendable=True,
        e_cyclic=True,
        carrier="H2 Store",
        capital_cost=unlimited.capital_cost.mean(),
    )

    return network
2000
2001
2002
def update_heat_timeseries_germany(network):
    """Replace German heat demand time series with data from egon-data

    Parameters
    ----------
    network : pypsa.Network
        Network including heat demand time series from pypsa-eur

    Returns
    -------
    network : pypsa.Network
        Network with German heat demand curves from egon-data

    """
    # (Removed a stray no-op statement `network.loads` that had no effect.)

    # Import heat demand curves for Germany from eGon-data
    df_egon_heat_demand = pd.read_csv(
        "input-pypsa-eur-sec/heat_demand_timeseries_DE_eGon100RE.csv"
    )

    # Replace heat demand curves in Germany with values from eGon-data.
    # NOTE(review): bus ids use the prefix "DE1 0" here while other
    # functions in this module address "DE0 0" — confirm the clustering
    # naming is consistent with the network this is applied to.
    network.loads_t.p_set.loc[:, "DE1 0 rural heat"] = (
        df_egon_heat_demand.loc[:, "residential rural"].values
        + df_egon_heat_demand.loc[:, "service rural"].values
    )

    network.loads_t.p_set.loc[:, "DE1 0 urban central heat"] = (
        df_egon_heat_demand.loc[:, "urban central"].values
    )

    return network
2020
2021
2022
def drop_biomass(network):
    """Remove every component whose index contains 'biomass'."""
    for component in network.iterate_components():
        matches = component.df.index.str.contains("biomass")
        network.mremove(component.name, component.df[matches].index)
    return network
2028
2029
2030
def postprocessing_biomass_2045():
    """Strip biomass components from the solved 2045 network and re-export it."""
    config_path = __path__[0] + "/datasets/pypsaeur/config_solve.yaml"
    with open(config_path, "r") as stream:
        data_config = yaml.safe_load(stream)

    network = drop_biomass(read_network(planning_horizon=2045))

    # Target file follows the pypsa-eur postnetwork naming scheme; index 3
    # selects the last planning horizon (2045).
    scenario = data_config["scenario"]
    file_name = (
        f"base_s_{scenario['clusters'][0]}"
        f"_l{scenario['ll'][0]}"
        f"_{scenario['opts'][0]}"
        f"_{scenario['sector_opts'][0]}"
        f"_{scenario['planning_horizons'][3]}.nc"
    )
    target_file = (
        Path(".")
        / "run-pypsa-eur"
        / "pypsa-eur"
        / "results"
        / data_config["run"]["name"]
        / "postnetworks"
        / file_name
    )
    network.export_to_netcdf(target_file)
2055
2056
2057
def drop_urban_decentral_heat(network):
    """Dissolve the 'urban decentral heat' sector into rural heat.

    Urban decentral heat demand is added onto the corresponding rural
    heat loads, remaining loads on urban decentral buses are re-pointed
    to the rural bus, and every component whose index contains
    'urban decentral' is removed.

    Parameters
    ----------
    network : pypsa.Network
        Network including urban decentral heat components.

    Returns
    -------
    network : pypsa.Network
        Network without urban decentral heat components.
    """
    carrier = "urban decentral heat"

    # Add each urban decentral heat demand onto the rural heat demand of
    # the same node. `country` is the 5-character node prefix (e.g. "DE0 0")
    # taken from the load's bus name.
    for country in network.loads.loc[
        network.loads.carrier == carrier, "bus"
    ].str[:5]:

        if f"{country} {carrier}" in network.loads_t.p_set.columns:
            network.loads_t.p_set[
                f"{country} rural heat"
            ] += network.loads_t.p_set[f"{country} {carrier}"]
        else:
            # No time series: fall back to the static p_set value, added
            # as a constant onto the rural heat time series.
            print(
                f"""No time series available for {country} {carrier}.
                  Using static p_set."""
            )

            network.loads_t.p_set[
                f"{country} rural heat"
            ] += network.loads.loc[f"{country} {carrier}", "p_set"]

    # In some cases low-temperature heat for industry is connected to the urban
    # decentral heat bus since there is no urban central heat bus.
    # These loads are connected to the representative rural heat bus:
    network.loads.loc[
        (network.loads.bus.str.contains(carrier))
        & (~network.loads.carrier.str.contains(carrier.replace(" heat", ""))),
        "bus",
    ] = network.loads.loc[
        (network.loads.bus.str.contains(carrier))
        & (~network.loads.carrier.str.contains(carrier.replace(" heat", ""))),
        "bus",
    ].str.replace(
        "urban decentral", "rural"
    )

    # Drop all components attached to urban decentral heat.
    for c in network.iterate_components():
        network.mremove(
            c.name, c.df[c.df.index.str.contains("urban decentral")].index
        )

    return network
2101
2102
2103
def district_heating_shares(network):
    """Split heat demand into district heating and rural heat per node.

    Country-specific district heating shares from the data bundle are
    applied: for each node, the summed heat demand time series is split
    into 'urban central heat' (share) and 'rural heat' (1 - share).
    Afterwards links with undefined buses or empty carriers are dropped.

    Parameters
    ----------
    network : pypsa.Network
        Network with the default pypsa-eur heat demand split.

    Returns
    -------
    network : pypsa.Network
        Network with the adjusted heat demand split.
    """
    df = pd.read_csv(
        "data_bundle_powerd_data/district_heating_shares_egon.csv"
    ).set_index("country_code")

    # Sum all heat loads that have a time series, grouped by the
    # 5-character node prefix of their bus (e.g. "DE0 0").
    # NOTE(review): DataFrame.groupby(..., axis=1) is deprecated in
    # recent pandas releases — migrate to a transpose-based groupby when
    # upgrading pandas.
    heat_demand_per_country = (
        network.loads_t.p_set[
            network.loads[
                (network.loads.carrier.str.contains("heat"))
                & network.loads.index.isin(network.loads_t.p_set.columns)
            ].index
        ]
        .groupby(network.loads.bus.str[:5], axis=1)
        .sum()
    )

    # country[:2] is the two-letter country code used to look up the share.
    for country in heat_demand_per_country.columns:
        network.loads_t.p_set[f"{country} urban central heat"] = (
            heat_demand_per_country.loc[:, country].mul(
                df.loc[country[:2]].values[0]
            )
        )
        network.loads_t.p_set[f"{country} rural heat"] = (
            heat_demand_per_country.loc[:, country].mul(
                (1 - df.loc[country[:2]].values[0])
            )
        )

    # Drop links with undefined buses or carrier
    network.mremove(
        "Link",
        network.links[
            ~network.links.bus0.isin(network.buses.index.values)
        ].index,
    )
    network.mremove(
        "Link",
        network.links[network.links.carrier == ""].index,
    )

    return network
2144
2145
2146
def drop_new_gas_pipelines(network):
    """Remove all newly buildable gas pipeline links from the network."""
    new_pipelines = network.links.index.str.contains("gas pipeline new")
    network.mremove("Link", network.links[new_pipelines].index)
    return network
2155
2156
2157
def drop_fossil_gas(network):
    """Remove fossil gas generators from the network."""
    fossil_gas = network.generators.carrier == "gas"
    network.mremove("Generator", network.generators[fossil_gas].index)
    return network
2164
2165
2166
def drop_conventional_power_plants(network):
    """Remove German lignite and hard coal power plant links."""
    links = network.links
    # Drop lignite and coal power plants in Germany
    german_coal = links.carrier.isin(["coal", "lignite"]) & links.bus1.str.startswith(
        "DE"
    )
    network.mremove("Link", links[german_coal].index)

    return network
2178
2179
2180
def rual_heat_technologies(network):
    """Drop rural gas boilers and rural solar thermal generators.

    NOTE(review): the function name keeps its historic misspelling
    ("rual") because callers in this module reference it by this name.
    """
    boilers = network.links.index.str.contains("rural gas boiler")
    network.mremove("Link", network.links[boilers].index)

    solar_thermal = network.generators.carrier.str.contains("rural solar thermal")
    network.mremove("Generator", network.generators[solar_thermal].index)

    return network
2196
2197
2198
def coal_exit_D():
    """Advance the retirement of German coal power plants to 2034.

    Reads the pypsa-eur power plant list, caps the retirement year
    (``DateOut``) of German lignite and hard coal units at 2034 and
    writes the table back in place. Rows with missing ``DateOut`` are
    left untouched (NaN >= 2035 is False).
    """
    path = "run-pypsa-eur/pypsa-eur/resources/powerplants_s_39.csv"
    df = pd.read_csv(path, index_col=0)

    # Work on an explicit copy to avoid pandas SettingWithCopy issues
    # when mutating the filtered selection.
    df_de_coal = df[
        (df.Country == "DE")
        & ((df.Fueltype == "Lignite") | (df.Fueltype == "Hard Coal"))
    ].copy()
    df_de_coal.loc[df_de_coal.DateOut.values >= 2035, "DateOut"] = 2034

    # Write the capped rows back into the full table.
    df.loc[df_de_coal.index] = df_de_coal

    df.to_csv(path)
2211
2212
2213
def offwind_potential_D(network, capacity_per_sqkm=4):
    """Raise the German offshore wind potential.

    Sets ``p_nom_max`` for German offshore generators based on a
    capacity density of ``capacity_per_sqkm`` = 4 MW/km² instead of the
    default of 2 which is applied for the rest of Europe.

    Parameters
    ----------
    network : pypsa.Network
        Network with default German offshore potentials.
    capacity_per_sqkm : int or float, optional
        Installable capacity density in MW/km². Default: 4.

    Returns
    -------
    network : pypsa.Network
        Network with updated German offshore potentials.
    """
    # Per-carrier scaling factors from the original implementation —
    # presumably the eligible area per carrier in km²; TODO confirm
    # against the pypsa-eur resource assessment.
    factors = {
        "offwind-ac": 1942,
        "offwind-dc": 10768,
        "offwind-float": 134,
    }

    # Collapse the previously triplicated assignment into one loop.
    for carrier, factor in factors.items():
        network.generators.loc[
            (network.generators.bus == "DE0 0")
            & (network.generators.carrier == carrier),
            "p_nom_max",
        ] = (
            factor * capacity_per_sqkm
        )

    return network
2243
2244
2245
def additional_grid_expansion_2045(network):
    """Relax the line-cost expansion limit by five percent."""
    constant = network.global_constraints.at["lc_limit", "constant"]
    network.global_constraints.at["lc_limit", "constant"] = constant * 1.05
    return network
2250
2251
2252
def execute():
    """Manipulate the pypsa-eur prenetworks before solving.

    Depending on the configured foresight mode, loads each prenetwork,
    applies the scenario-specific manipulation functions from this
    module and writes the networks back in place. Does nothing when
    ``--run-pypsa-eur`` is disabled in the egon-data settings.
    """
    if egon.data.config.settings()["egon-data"]["--run-pypsa-eur"]:
        with open(
            __path__[0] + "/datasets/pypsaeur/config.yaml", "r"
        ) as stream:
            data_config = yaml.safe_load(stream)

        if data_config["foresight"] == "myopic":

            print("Adjusting scenarios on the myopic pathway...")

            # Cap retirement of German coal plants before networks are built.
            coal_exit_D()

            # NOTE(review): pd.Series() without data/dtype and the private
            # pandas API `Series._append` are fragile — consider
            # accumulating file names in a plain list instead.
            networks = pd.Series()

            # One prenetwork file name per configured planning horizon.
            for i in range(
                0, len(data_config["scenario"]["planning_horizons"])
            ):
                nc_file = pd.Series(
                    f"base_s_{data_config['scenario']['clusters'][0]}"
                    f"_l{data_config['scenario']['ll'][0]}"
                    f"_{data_config['scenario']['opts'][0]}"
                    f"_{data_config['scenario']['sector_opts'][0]}"
                    f"_{data_config['scenario']['planning_horizons'][i]}.nc"
                )
                networks = networks._append(nc_file)

            # Table mapping each scenario year to its prenetwork file and
            # the list of manipulation functions to apply.
            scn_path = pd.DataFrame(
                index=["2025", "2030", "2035", "2045"],
                columns=["prenetwork", "functions"],
            )

            # NOTE(review): `.values` stores a numpy array (expected to
            # hold exactly one matching file name) in each cell.
            for year in scn_path.index:
                scn_path.at[year, "prenetwork"] = networks[
                    networks.str.contains(year)
                ].values

            for year in ["2025", "2030", "2035"]:
                scn_path.loc[year, "functions"] = [
                    # drop_urban_decentral_heat,
                    update_electrical_timeseries_germany,
                    geothermal_district_heating,
                    h2_overground_stores,
                    drop_new_gas_pipelines,
                    offwind_potential_D,
                ]

            # 2045 additionally drops biomass and fossil gas and relaxes
            # the grid expansion limit.
            scn_path.loc["2045", "functions"] = [
                drop_biomass,
                # drop_urban_decentral_heat,
                update_electrical_timeseries_germany,
                geothermal_district_heating,
                h2_overground_stores,
                drop_new_gas_pipelines,
                drop_fossil_gas,
                offwind_potential_D,
                additional_grid_expansion_2045,
                # drop_conventional_power_plants,
                # rual_heat_technologies, #To be defined
            ]

            network_path = (
                Path(".")
                / "run-pypsa-eur"
                / "pypsa-eur"
                / "results"
                / data_config["run"]["name"]
                / "prenetworks"
            )

            # Apply the chain of manipulators to each network and write it
            # back to the same file.
            for scn in scn_path.index:
                path = network_path / scn_path.at[scn, "prenetwork"]
                network = pypsa.Network(path)
                network.year = int(scn)
                for manipulator in scn_path.at[scn, "functions"]:
                    network = manipulator(network)
                network.export_to_netcdf(path)

        elif (data_config["foresight"] == "overnight") & (
            int(data_config["scenario"]["planning_horizons"][0]) > 2040
        ):

            print("Adjusting overnight long-term scenario...")

            network_path = (
                Path(".")
                / "run-pypsa-eur"
                / "pypsa-eur"
                / "results"
                / data_config["run"]["name"]
                / "prenetworks"
                / f"elec_s_{data_config['scenario']['clusters'][0]}"
                f"_l{data_config['scenario']['ll'][0]}"
                f"_{data_config['scenario']['opts'][0]}"
                f"_{data_config['scenario']['sector_opts'][0]}"
                f"_{data_config['scenario']['planning_horizons'][0]}.nc"
            )

            network = pypsa.Network(network_path)

            network = drop_biomass(network)

            network = drop_urban_decentral_heat(network)

            network = district_heating_shares(network)

            network = update_heat_timeseries_germany(network)

            network = update_electrical_timeseries_germany(network)

            network = geothermal_district_heating(network)

            network = h2_overground_stores(network)

            network = drop_new_gas_pipelines(network)

            network = drop_fossil_gas(network)

            network = rual_heat_technologies(network)

            network.export_to_netcdf(network_path)

        else:
            # NOTE(review): this f-string is malformed — the intended
            # interpolation `int(data_config['scenario']['planning_horizons'][0])`
            # is missing braces and a closing parenthesis, so the literal
            # text is printed instead of the year.
            print(
                f"""Adjustments on prenetworks are not implemented for
                foresight option {data_config['foresight']} and
                year int(data_config['scenario']['planning_horizons'][0].
                Please check the pypsaeur.execute function.
                """
            )
    else:
        print("Pypsa-eur is not executed due to the settings of egon-data")
2384