data.datasets.pypsaeur.download() (rating: F)

Complexity
    Conditions: 12

Size
    Total Lines: 188
    Code Lines: 111

Duplication
    Lines: 0
    Ratio: 0 %

Importance
    Changes: 0

Metric  Value
eloc    111
dl      0
loc     188
rs      3.36
c       0
b       0
f       0
cc      12
nop     0
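The report does not name its analyzer, so treat this as a hedged illustration only: if cc is a standard cyclomatic-complexity count, a radon-based check can reproduce the figure locally (radon as tooling and the file path are assumptions, not stated by the report):

from radon.complexity import cc_visit

# Hypothetical path to the module under analysis.
with open("src/egon/data/datasets/pypsaeur.py") as f:
    source = f.read()

# cc_visit() parses the source and returns one block per function or
# method, each carrying a cyclomatic-complexity score.
for block in cc_visit(source):
    if block.name == "download":
        # Should be close to the reported cc of 12, assuming the same
        # analyzer conventions.
        print(block.name, block.complexity)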

How to fix

Long Method

Small methods make your code easier to understand, in particular if combined with a good name. Besides, if your method is small, finding a good name is usually much easier.

For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.

Commonly applied refactorings include: Extract Method.
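The download() function analyzed here already labels its steps with comments, so a minimal Extract Method sketch (the helper names are hypothetical, not part of the code below) would turn each commented block into a call:

def download():
    cwd = Path(".")
    filepath = cwd / "run-pypsa-eur"
    filepath.mkdir(parents=True, exist_ok=True)

    if not config.settings()["egon-data"]["--run-pypsa-eur"]:
        print("Pypsa-eur is not executed due to the settings of egon-data")
        return

    # Hypothetical helpers, each extracted from one commented block:
    clone_pypsa_eur(filepath / "pypsa-eur")  # git clone, checkout, config copy
    copy_era5_weather_data(filepath)  # "# Copy era5 weather data ..." block
    download_workaround_files(filepath)  # natura, shipdensity, GEM downloads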

Complexity

Complex code like data.datasets.pypsaeur.download() often does a lot of different things. To break such a unit down, we need to identify a cohesive component within it. A common approach to finding such a component is to look for fields/methods that share the same prefixes or suffixes.

Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.
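A minimal sketch of that heuristic (all names hypothetical): when one class carries fields with two distinct prefixes, each prefix group is a candidate component:

# Before: one class mixes bus-related and line-related state.
class NetworkExport:
    def __init__(self, bus_ids, bus_geoms, line_ids, line_topos):
        self.bus_ids = bus_ids
        self.bus_geoms = bus_geoms
        self.line_ids = line_ids
        self.line_topos = line_topos

# After Extract Class: each prefix group becomes its own cohesive class.
class BusExport:
    def __init__(self, ids, geoms):
        self.ids = ids
        self.geoms = geoms

class LineExport:
    def __init__(self, ids, topos):
        self.ids = ids
        self.topos = topos

class NetworkExport:
    def __init__(self, buses, lines):
        self.buses = buses
        self.lines = lines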

"""The central module containing all code dealing with importing data from
the pypsa-eur-sec scenario parameter creation
"""

from pathlib import Path
from urllib.request import urlretrieve
import json
import shutil

from shapely.geometry import LineString
import geopandas as gpd
import importlib_resources as resources
import numpy as np
import pandas as pd
import pypsa
import requests
import yaml

from egon.data import __path__, config, db, logger
from egon.data.datasets import Dataset
from egon.data.datasets.scenario_parameters import get_sector_parameters
from egon.data.datasets.scenario_parameters.parameters import (
    annualize_capital_costs,
)
import egon.data.config
import egon.data.subprocess as subproc


class PreparePypsaEur(Dataset):
    def __init__(self, dependencies):
        super().__init__(
            name="PreparePypsaEur",
            version="0.0.42",
            dependencies=dependencies,
            tasks=(
                download,
                prepare_network,
            ),
        )


class RunPypsaEur(Dataset):
    def __init__(self, dependencies):
        super().__init__(
            name="SolvePypsaEur",
            version="0.0.42",
            dependencies=dependencies,
            tasks=(
                prepare_network_2,
                execute,
                solve_network,
                clean_database,
                electrical_neighbours_egon100,
                h2_neighbours_egon2035,
                # Dropped until we decide how to deal with the H2 grid
                # overwrite_H2_pipeline_share,
            ),
        )


def countries_list():
    """Return the list of country codes covered by the analysis."""
    return [
        "DE",
        "AT",
        "CH",
        "CZ",
        "PL",
        "SE",
        "NO",
        "DK",
        "GB",
        "NL",
        "BE",
        "FR",
        "LU",
    ]


def h2_neighbours_egon2035():
    """
    Load the PyPSA-Eur network for eGon2035, process the H2
    buses and insert them into the grid.egon_etrago_bus table.

    Returns
    -------
    None.

    """
    if "eGon2035" in config.settings()["egon-data"]["--scenarios"]:
        # Delete buses from previous executions
        db.execute_sql(
            """
            DELETE FROM grid.egon_etrago_bus WHERE carrier = 'H2'
            AND scn_name = 'eGon2035'
            AND country <> 'DE'
            """
        )

        # Load calculated network for eGon2035
        n = read_network(planning_horizon=2035)

        # Filter only H2 buses in selected foreign countries
        h2_bus = n.buses[(n.buses.country != "DE") & (n.buses.carrier == "H2")]
        wanted_countries = countries_list()
        h2_bus = h2_bus[
            (h2_bus.country.isin(wanted_countries))
            & (~h2_bus.index.str.contains("FR6"))
        ]

        # Add geometry column
        h2_bus = (
            gpd.GeoDataFrame(
                h2_bus, geometry=gpd.points_from_xy(h2_bus.x, h2_bus.y)
            )
            .rename_geometry("geom")
            .set_crs(4326)
        )

        # Adjust dataframe to the database table format
        h2_bus["scn_name"] = "eGon2035"

        bus_id = db.next_etrago_id("bus")  # will be changed in PR1287
        ### Delete when PR1287 is merged ###
        bus_id = range(bus_id, bus_id + len(h2_bus.index))
        ####################################
        h2_bus["bus_id"] = bus_id

        h2_bus.drop(
            columns=[
                "unit",
                "control",
                "generator",
                "location",
                "substation_off",
                "substation_lv",
                "sub_network",
            ],
            inplace=True,
        )

        # Connect to local database and write results
        engine = db.engine()

        h2_bus.to_postgis(
            "egon_etrago_bus",
            engine,
            schema="grid",
            if_exists="append",
            index=False,
        )
    else:
        print("eGon2035 is not in the list of scenarios")


def download():
    """Clone PyPSA-Eur and fetch its input data if the run is enabled."""
    cwd = Path(".")
    filepath = cwd / "run-pypsa-eur"
    filepath.mkdir(parents=True, exist_ok=True)

    pypsa_eur_repos = filepath / "pypsa-eur"
    if config.settings()["egon-data"]["--run-pypsa-eur"]:
        if not pypsa_eur_repos.exists():
            subproc.run(
                [
                    "git",
                    "clone",
                    "--branch",
                    "master",
                    "https://github.com/PyPSA/pypsa-eur.git",
                    pypsa_eur_repos,
                ]
            )

            subproc.run(
                [
                    "git",
                    "checkout",
                    "2119f4cee05c256509f48d4e9fe0d8fd9e9e3632",
                ],
                cwd=pypsa_eur_repos,
            )

            # Add gurobi solver to environment:
            # Read YAML file
            # path_to_env = pypsa_eur_repos / "envs" / "environment.yaml"
            # with open(path_to_env, "r") as stream:
            #    env = yaml.safe_load(stream)

            # The version of gurobipy has to fit to the version of gurobi.
            # Since we mainly use gurobi 10.0 this is set here.
            # env["dependencies"][-1]["pip"].append("gurobipy==10.0.0")

            # Set python version to <3.12
            # Python<=3.12 needs gurobipy>=11.0, in case gurobipy is updated,
            # this can be removed
            # env["dependencies"] = [
            #    "python>=3.8,<3.12" if x == "python>=3.8" else x
            #    for x in env["dependencies"]
            # ]

            # Limit geopandas version
            # our pypsa-eur version is not compatible to geopandas>1
            # env["dependencies"] = [
            #    "geopandas>=0.11.0,<1" if x == "geopandas>=0.11.0" else x
            #    for x in env["dependencies"]
            # ]

            # Write YAML file
            # with open(path_to_env, "w", encoding="utf8") as outfile:
            #    yaml.dump(
            #        env, outfile, default_flow_style=False, allow_unicode=True
            #    )

            # Copy config file for egon-data to pypsa-eur directory
            shutil.copy(
                Path(
                    __path__[0], "datasets", "pypsaeur", "config_prepare.yaml"
                ),
                pypsa_eur_repos / "config" / "config.yaml",
            )

            # Copy custom_extra_functionality.py for egon-data to the
            # pypsa-eur directory
            shutil.copy(
                Path(
                    __path__[0],
                    "datasets",
                    "pypsaeur",
                    "custom_extra_functionality.py",
                ),
                pypsa_eur_repos / "data",
            )

            with open(filepath / "Snakefile", "w") as snakefile:
                snakefile.write(
                    resources.read_text(
                        "egon.data.datasets.pypsaeur", "Snakefile"
                    )
                )

        # Copy era5 weather data to folder for pypsaeur
        era5_pypsaeur_path = filepath / "pypsa-eur" / "cutouts"

        if not era5_pypsaeur_path.exists():
            era5_pypsaeur_path.mkdir(parents=True, exist_ok=True)
            copy_from = config.datasets()["era5_weather_data"]["targets"][
                "weather_data"
            ]["path"]
            filename = "europe-2011-era5.nc"
            shutil.copy(
                copy_from + "/" + filename, era5_pypsaeur_path / filename
            )

        # Workaround to download natura, shipdensity and globalenergymonitor
        # data, which is not working in the regular snakemake workflow.
        # The same files are downloaded from the same directory as in
        # pypsa-eur version 0.10 here. They are stored in the folders
        # expected by pypsa-eur.
        if not (filepath / "pypsa-eur" / "resources").exists():
            (filepath / "pypsa-eur" / "resources").mkdir(
                parents=True, exist_ok=True
            )
        urlretrieve(
            "https://zenodo.org/record/4706686/files/natura.tiff",
            filepath / "pypsa-eur" / "resources" / "natura.tiff",
        )

        if not (filepath / "pypsa-eur" / "data").exists():
            (filepath / "pypsa-eur" / "data").mkdir(
                parents=True, exist_ok=True
            )
        urlretrieve(
            "https://zenodo.org/record/13757228/files/shipdensity_global.zip",
            filepath / "pypsa-eur" / "data" / "shipdensity_global.zip",
        )

        if not (
            filepath
            / "pypsa-eur"
            / "zenodo.org"
            / "records"
            / "13757228"
            / "files"
        ).exists():
            (
                filepath
                / "pypsa-eur"
                / "zenodo.org"
                / "records"
                / "13757228"
                / "files"
            ).mkdir(parents=True, exist_ok=True)

        urlretrieve(
            "https://zenodo.org/records/10356004/files/ENSPRESO_BIOMASS.xlsx",
            filepath
            / "pypsa-eur"
            / "zenodo.org"
            / "records"
            / "13757228"
            / "files"
            / "ENSPRESO_BIOMASS.xlsx",
        )

        if not (filepath / "pypsa-eur" / "data" / "gem").exists():
            (filepath / "pypsa-eur" / "data" / "gem").mkdir(
                parents=True, exist_ok=True
            )

        r = requests.get(
            "https://tubcloud.tu-berlin.de/s/LMBJQCsN6Ez5cN2/download/"
            "Europe-Gas-Tracker-2024-05.xlsx"
        )
        with open(
            filepath
            / "pypsa-eur"
            / "data"
            / "gem"
            / "Europe-Gas-Tracker-2024-05.xlsx",
            "wb",
        ) as outfile:
            outfile.write(r.content)

        if not (filepath / "pypsa-eur" / "data" / "gem").exists():
            (filepath / "pypsa-eur" / "data" / "gem").mkdir(
                parents=True, exist_ok=True
            )

        r = requests.get(
            "https://tubcloud.tu-berlin.de/s/Aqebo3rrQZWKGsG/download/"
            "Global-Steel-Plant-Tracker-April-2024-Standard-Copy-V1.xlsx"
        )
        with open(
            filepath
            / "pypsa-eur"
            / "data"
            / "gem"
            / "Global-Steel-Plant-Tracker-April-2024-Standard-Copy-V1.xlsx",
            "wb",
        ) as outfile:
            outfile.write(r.content)

    else:
        print("Pypsa-eur is not executed due to the settings of egon-data")


def prepare_network():
    """Run the PyPSA-Eur 'prepare' rule and back up the prenetworks."""
    cwd = Path(".")
    filepath = cwd / "run-pypsa-eur"

    if config.settings()["egon-data"]["--run-pypsa-eur"]:
        subproc.run(
            [
                "snakemake",
                "-j1",
                "--directory",
                filepath,
                "--snakefile",
                filepath / "Snakefile",
                "--use-conda",
                "--conda-frontend=conda",
                "--cores",
                "8",
                "prepare",
            ]
        )
        execute()

        path = filepath / "pypsa-eur" / "results" / "prenetworks"

        path_2 = path / "prenetwork_post-manipulate_pre-solve"
        path_2.mkdir(parents=True, exist_ok=True)

        with open(
            __path__[0] + "/datasets/pypsaeur/config_prepare.yaml", "r"
        ) as stream:
            data_config = yaml.safe_load(stream)

        for i in range(len(data_config["scenario"]["planning_horizons"])):
            nc_file = (
                f"base_s_{data_config['scenario']['clusters'][0]}"
                f"_l{data_config['scenario']['ll'][0]}"
                f"_{data_config['scenario']['opts'][0]}"
                f"_{data_config['scenario']['sector_opts'][0]}"
                f"_{data_config['scenario']['planning_horizons'][i]}.nc"
            )

            shutil.copy(Path(path, nc_file), path_2)

    else:
        print("Pypsa-eur is not executed due to the settings of egon-data")


def prepare_network_2():
    """Copy the solve config into PyPSA-Eur and rerun the 'prepare' rule."""
    cwd = Path(".")
    filepath = cwd / "run-pypsa-eur"

    if config.settings()["egon-data"]["--run-pypsa-eur"]:
        shutil.copy(
            Path(__path__[0], "datasets", "pypsaeur", "config_solve.yaml"),
            filepath / "pypsa-eur" / "config" / "config.yaml",
        )

        subproc.run(
            [
                "snakemake",
                "-j1",
                "--directory",
                filepath,
                "--snakefile",
                filepath / "Snakefile",
                "--use-conda",
                "--conda-frontend=conda",
                "--cores",
                "8",
                "prepare",
            ]
        )
    else:
        print("Pypsa-eur is not executed due to the settings of egon-data")


def solve_network():
    """Run the 'solve' and 'summary' rules of the PyPSA-Eur workflow."""
    cwd = Path(".")
    filepath = cwd / "run-pypsa-eur"

    if config.settings()["egon-data"]["--run-pypsa-eur"]:
        subproc.run(
            [
                "snakemake",
                "-j1",
                "--cores",
                "8",
                "--directory",
                filepath,
                "--snakefile",
                filepath / "Snakefile",
                "--use-conda",
                "--conda-frontend=conda",
                "solve",
            ]
        )

        postprocessing_biomass_2045()

        subproc.run(
            [
                "snakemake",
                "-j1",
                "--directory",
                filepath,
                "--snakefile",
                filepath / "Snakefile",
                "--use-conda",
                "--conda-frontend=conda",
                "summary",
            ]
        )
    else:
        print("Pypsa-eur is not executed due to the settings of egon-data")


def read_network(planning_horizon=2045):
    """Load the solved PyPSA-Eur network for the given planning horizon.

    Reads the locally calculated postnetwork if the pypsa-eur run is
    enabled in the egon-data settings, otherwise falls back to the
    pre-calculated network from the data bundle.
    """
    if config.settings()["egon-data"]["--run-pypsa-eur"]:
        with open(
            __path__[0] + "/datasets/pypsaeur/config_solve.yaml", "r"
        ) as stream:
            data_config = yaml.safe_load(stream)

        target_file = (
            Path(".")
            / "run-pypsa-eur"
            / "pypsa-eur"
            / "results"
            / data_config["run"]["name"]
            / "postnetworks"
            / f"base_s_{data_config['scenario']['clusters'][0]}"
            f"_l{data_config['scenario']['ll'][0]}"
            f"_{data_config['scenario']['opts'][0]}"
            f"_{data_config['scenario']['sector_opts'][0]}"
            f"_{planning_horizon}.nc"
        )

    else:
        target_file = (
            Path(".")
            / "data_bundle_egon_data"
            / "pypsa_eur"
            / "postnetworks"
            / f"base_s_39_lc1.25__cb40ex0-T-H-I-B-solar+p3-dist1_{planning_horizon}.nc"
        )

    return pypsa.Network(target_file)


def clean_database():
    """Remove all eGon100RE components abroad from the database

    Remove all components abroad and their associated time series
    from the database for the scenario 'eGon100RE'.

    Parameters
    ----------
    None

    Returns
    -------
    None

    """
    scn_name = "eGon100RE"

    comp_one_port = ["load", "generator", "store", "storage"]

    # delete existing components and associated timeseries
    for comp in comp_one_port:
        db.execute_sql(
            f"""
            DELETE FROM {"grid.egon_etrago_" + comp + "_timeseries"}
            WHERE {comp + "_id"} IN (
                SELECT {comp + "_id"} FROM {"grid.egon_etrago_" + comp}
                WHERE bus IN (
                    SELECT bus_id FROM grid.egon_etrago_bus
                    WHERE country != 'DE'
                    AND scn_name = '{scn_name}')
                AND scn_name = '{scn_name}'
            );

            DELETE FROM {"grid.egon_etrago_" + comp}
            WHERE bus IN (
                SELECT bus_id FROM grid.egon_etrago_bus
                WHERE country != 'DE'
                AND scn_name = '{scn_name}')
            AND scn_name = '{scn_name}';"""
        )

    comp_2_ports = [
        "line",
        "link",
    ]

    for comp, id in zip(comp_2_ports, ["line_id", "link_id"]):
        db.execute_sql(
            f"""
            DELETE FROM {"grid.egon_etrago_" + comp + "_timeseries"}
            WHERE scn_name = '{scn_name}'
            AND {id} IN (
                SELECT {id} FROM {"grid.egon_etrago_" + comp}
            WHERE "bus0" IN (
            SELECT bus_id FROM grid.egon_etrago_bus
                WHERE country != 'DE'
                AND scn_name = '{scn_name}'
                AND bus_id NOT IN (SELECT bus_i FROM osmtgmod_results.bus_data))
            AND "bus1" IN (
            SELECT bus_id FROM grid.egon_etrago_bus
                WHERE country != 'DE'
                AND scn_name = '{scn_name}'
                AND bus_id NOT IN (SELECT bus_i FROM osmtgmod_results.bus_data))
            );


            DELETE FROM {"grid.egon_etrago_" + comp}
            WHERE scn_name = '{scn_name}'
            AND "bus0" IN (
            SELECT bus_id FROM grid.egon_etrago_bus
                WHERE country != 'DE'
                AND scn_name = '{scn_name}'
                AND bus_id NOT IN (SELECT bus_i FROM osmtgmod_results.bus_data))
            AND "bus1" IN (
            SELECT bus_id FROM grid.egon_etrago_bus
                WHERE country != 'DE'
                AND scn_name = '{scn_name}'
                AND bus_id NOT IN (SELECT bus_i FROM osmtgmod_results.bus_data))
            ;"""
        )

    db.execute_sql(
        f"""
        DELETE FROM grid.egon_etrago_bus
        WHERE scn_name = '{scn_name}'
        AND country <> 'DE'
        AND carrier <> 'AC'
        """
    )


def electrical_neighbours_egon100():
    """Run neighbor_reduction if the eGon100RE scenario is created."""
    if "eGon100RE" in egon.data.config.settings()["egon-data"]["--scenarios"]:
        neighbor_reduction()

    else:
        print(
            "eGon100RE is not in the list of created scenarios, this task is skipped."
        )


def combine_decentral_and_rural_heat(network_solved, network_prepared):
    """Merge 'urban decentral' heat components abroad into 'rural' ones."""

    for comp in network_solved.iterate_components():

        if comp.name in ["Bus", "Link", "Store"]:
            urban_decentral = comp.df[
                comp.df.carrier.str.contains("urban decentral")
            ]
            rural = comp.df[comp.df.carrier.str.contains("rural")]
            for i, row in urban_decentral.iterrows():
                if "DE" not in i:
                    if comp.name in ["Bus"]:
                        network_solved.remove("Bus", i)
                    if comp.name in ["Link", "Generator"]:
                        if (
                            i.replace("urban decentral", "rural")
                            in rural.index
                        ):
                            rural.loc[
                                i.replace("urban decentral", "rural"),
                                "p_nom_opt",
                            ] += urban_decentral.loc[i, "p_nom_opt"]
                            rural.loc[
                                i.replace("urban decentral", "rural"), "p_nom"
                            ] += urban_decentral.loc[i, "p_nom"]
                            network_solved.remove(comp.name, i)
                        else:
                            print(i)
                            comp.df.loc[i, "bus0"] = comp.df.loc[
                                i, "bus0"
                            ].replace("urban decentral", "rural")
                            comp.df.loc[i, "bus1"] = comp.df.loc[
                                i, "bus1"
                            ].replace("urban decentral", "rural")
                            comp.df.loc[i, "carrier"] = comp.df.loc[
                                i, "carrier"
                            ].replace("urban decentral", "rural")
                    if comp.name in ["Store"]:
                        if (
                            i.replace("urban decentral", "rural")
                            in rural.index
                        ):
                            rural.loc[
                                i.replace("urban decentral", "rural"),
                                "e_nom_opt",
                            ] += urban_decentral.loc[i, "e_nom_opt"]
                            rural.loc[
                                i.replace("urban decentral", "rural"), "e_nom"
                            ] += urban_decentral.loc[i, "e_nom"]
                            network_solved.remove(comp.name, i)

                        else:
                            print(i)
                            network_solved.stores.loc[i, "bus"] = (
                                network_solved.stores.loc[i, "bus"].replace(
                                    "urban decentral", "rural"
                                )
                            )
                            network_solved.stores.loc[i, "carrier"] = (
                                "rural water tanks"
                            )

    urban_decentral_loads = network_prepared.loads[
        network_prepared.loads.carrier.str.contains("urban decentral")
    ]

    for i, row in urban_decentral_loads.iterrows():
        if i in network_prepared.loads_t.p_set.columns:
            network_prepared.loads_t.p_set[
                i.replace("urban decentral", "rural")
            ] += network_prepared.loads_t.p_set[i]
    network_prepared.mremove("Load", urban_decentral_loads.index)

    return network_prepared, network_solved


def neighbor_reduction():
    """Write components of neighboring countries to the eTraGo tables.

    Reads the solved and prepared PyPSA-Eur networks, drops everything
    outside the wanted countries and inserts the remaining foreign
    components into the grid.egon_etrago_* tables for 'eGon100RE'.
    """
    network_solved = read_network(planning_horizon=2045)
    network_prepared = prepared_network(planning_horizon="2045")

    # network.links.drop("pipe_retrofit", axis="columns", inplace=True)

    wanted_countries = countries_list()

    foreign_buses = network_solved.buses[
        (~network_solved.buses.index.str.contains("|".join(wanted_countries)))
        | (network_solved.buses.index.str.contains("FR6"))
    ]
    network_solved.buses = network_solved.buses.drop(
        network_solved.buses.loc[foreign_buses.index].index
    )

    # Add H2 demand of Fischer-Tropsch process and methanolisation
    # to industrial H2 demands
    industrial_hydrogen = network_prepared.loads.loc[
        network_prepared.loads.carrier == "H2 for industry"
    ]
    fischer_tropsch = (
        network_solved.links_t.p0[
            network_solved.links.loc[
                network_solved.links.carrier == "Fischer-Tropsch"
            ].index
        ]
        .mul(network_solved.snapshot_weightings.generators, axis=0)
        .sum()
    )
    methanolisation = (
        network_solved.links_t.p0[
            network_solved.links.loc[
                network_solved.links.carrier == "methanolisation"
            ].index
        ]
        .mul(network_solved.snapshot_weightings.generators, axis=0)
        .sum()
    )
    for i, row in industrial_hydrogen.iterrows():
        network_prepared.loads.loc[i, "p_set"] += (
            fischer_tropsch[
                fischer_tropsch.index.str.startswith(row.bus[:5])
            ].sum()
            / 8760
        )
        network_prepared.loads.loc[i, "p_set"] += (
            methanolisation[
                methanolisation.index.str.startswith(row.bus[:5])
            ].sum()
            / 8760
        )

    # drop foreign lines and links from the 2nd row
    network_solved.lines = network_solved.lines.drop(
        network_solved.lines[
            (~network_solved.lines["bus0"].isin(network_solved.buses.index))
            & (~network_solved.lines["bus1"].isin(network_solved.buses.index))
        ].index
    )

    # select all lines which have at bus1 the bus which is kept
    lines_cb_1 = network_solved.lines[
        ~network_solved.lines["bus0"].isin(network_solved.buses.index)
    ]

    # create a load at bus1 with the line's hourly loading
    for i, k in zip(lines_cb_1.bus1.values, lines_cb_1.index):

        # Copy loading of lines into hourly resolution
        pset = pd.Series(
            index=network_prepared.snapshots,
            data=network_solved.lines_t.p1[k].resample("H").ffill(),
        )
        pset["2011-12-31 22:00:00"] = pset["2011-12-31 21:00:00"]
        pset["2011-12-31 23:00:00"] = pset["2011-12-31 21:00:00"]

        # Loads are all imported from the prepared network in the end
        network_prepared.add(
            "Load",
            "slack_fix " + i + " " + k,
            bus=i,
            p_set=pset,
            carrier=lines_cb_1.loc[k, "carrier"],
        )

    # select all lines which have at bus0 the bus which is kept
    lines_cb_0 = network_solved.lines[
        ~network_solved.lines["bus1"].isin(network_solved.buses.index)
    ]

    # create a load at bus0 with the line's hourly loading
    for i, k in zip(lines_cb_0.bus0.values, lines_cb_0.index):
        # Copy loading of lines into hourly resolution
        pset = pd.Series(
            index=network_prepared.snapshots,
            data=network_solved.lines_t.p0[k].resample("H").ffill(),
        )
        pset["2011-12-31 22:00:00"] = pset["2011-12-31 21:00:00"]
        pset["2011-12-31 23:00:00"] = pset["2011-12-31 21:00:00"]

        network_prepared.add(
            "Load",
            "slack_fix " + i + " " + k,
            bus=i,
            p_set=pset,
            carrier=lines_cb_0.loc[k, "carrier"],
        )

    # do the same for links
    network_solved.mremove(
        "Link",
        network_solved.links[
            (~network_solved.links.bus0.isin(network_solved.buses.index))
            | (~network_solved.links.bus1.isin(network_solved.buses.index))
        ].index,
    )

    # select all links which have at bus1 the bus which is kept
    links_cb_1 = network_solved.links[
        ~network_solved.links["bus0"].isin(network_solved.buses.index)
    ]

    # create a load at bus1 with the link's hourly loading
    for i, k in zip(links_cb_1.bus1.values, links_cb_1.index):
        pset = pd.Series(
            index=network_prepared.snapshots,
            data=network_solved.links_t.p1[k].resample("H").ffill(),
        )
        pset["2011-12-31 22:00:00"] = pset["2011-12-31 21:00:00"]
        pset["2011-12-31 23:00:00"] = pset["2011-12-31 21:00:00"]

        network_prepared.add(
            "Load",
            "slack_fix_links " + i + " " + k,
            bus=i,
            p_set=pset,
            carrier=links_cb_1.loc[k, "carrier"],
        )

    # select all links which have at bus0 the bus which is kept
    links_cb_0 = network_solved.links[
        ~network_solved.links["bus1"].isin(network_solved.buses.index)
    ]

    # create a load at bus0 with the link's hourly loading
    for i, k in zip(links_cb_0.bus0.values, links_cb_0.index):
        pset = pd.Series(
            index=network_prepared.snapshots,
            data=network_solved.links_t.p0[k].resample("H").ffill(),
        )
        pset["2011-12-31 22:00:00"] = pset["2011-12-31 21:00:00"]
        pset["2011-12-31 23:00:00"] = pset["2011-12-31 21:00:00"]

        network_prepared.add(
            "Load",
            "slack_fix_links " + i + " " + k,
            bus=i,
            p_set=pset,
            carrier=links_cb_0.carrier[k],
        )

    # drop remaining foreign components
    for comp in network_solved.iterate_components():
        if "bus0" in comp.df.columns:
            network_solved.mremove(
                comp.name,
                comp.df[~comp.df.bus0.isin(network_solved.buses.index)].index,
            )
            network_solved.mremove(
                comp.name,
                comp.df[~comp.df.bus1.isin(network_solved.buses.index)].index,
            )
        elif "bus" in comp.df.columns:
            network_solved.mremove(
                comp.name,
                comp.df[~comp.df.bus.isin(network_solved.buses.index)].index,
            )

    # Combine urban decentral and rural heat
    network_prepared, network_solved = combine_decentral_and_rural_heat(
        network_solved, network_prepared
    )

    # writing components of neighboring countries to etrago tables

    # Set country tag for all buses
    network_solved.buses.country = network_solved.buses.index.str[:2]
    neighbors = network_solved.buses[network_solved.buses.country != "DE"]

    neighbors["new_index"] = db.next_etrago_id("bus", len(neighbors.index))

    # Use index of AC buses created by electrical_neighbours
    foreign_ac_buses = db.select_dataframe(
        """
        SELECT * FROM grid.egon_etrago_bus
        WHERE carrier = 'AC' AND v_nom = 380
        AND country != 'DE' AND scn_name = 'eGon100RE'
        AND bus_id NOT IN (SELECT bus_i FROM osmtgmod_results.bus_data)
        """
    )
    buses_with_defined_id = neighbors[
        (neighbors.carrier == "AC")
        & (neighbors.country.isin(foreign_ac_buses.country.values))
    ].index
    neighbors.loc[buses_with_defined_id, "new_index"] = (
        foreign_ac_buses.set_index("x")
        .loc[neighbors.loc[buses_with_defined_id, "x"]]
        .bus_id.values
    )

    # lines, the foreign crossborder lines
    # (without crossborder lines to Germany!)

    neighbor_lines = network_solved.lines[
        network_solved.lines.bus0.isin(neighbors.index)
        & network_solved.lines.bus1.isin(neighbors.index)
    ]
    if not network_solved.lines_t["s_max_pu"].empty:
        neighbor_lines_t = network_prepared.lines_t["s_max_pu"][
            neighbor_lines.index
        ]

    neighbor_lines.reset_index(inplace=True)
    neighbor_lines.bus0 = (
        neighbors.loc[neighbor_lines.bus0, "new_index"].reset_index().new_index
    )
    neighbor_lines.bus1 = (
        neighbors.loc[neighbor_lines.bus1, "new_index"].reset_index().new_index
    )
    neighbor_lines.index = db.next_etrago_id("line", len(neighbor_lines.index))

    # neighbor_lines_t is only defined when s_max_pu is not empty, so the
    # same condition guards its use here.
    if not network_solved.lines_t["s_max_pu"].empty:
        for i in neighbor_lines_t.columns:
            new_index = neighbor_lines[neighbor_lines["name"] == i].index
            neighbor_lines_t.rename(columns={i: new_index[0]}, inplace=True)

    # links
    neighbor_links = network_solved.links[
        network_solved.links.bus0.isin(neighbors.index)
        & network_solved.links.bus1.isin(neighbors.index)
    ]

    neighbor_links.reset_index(inplace=True)
    neighbor_links.bus0 = (
        neighbors.loc[neighbor_links.bus0, "new_index"].reset_index().new_index
    )
    neighbor_links.bus1 = (
        neighbors.loc[neighbor_links.bus1, "new_index"].reset_index().new_index
    )
    neighbor_links.index = db.next_etrago_id("link", len(neighbor_links.index))

    # generators
    neighbor_gens = network_solved.generators[
        network_solved.generators.bus.isin(neighbors.index)
    ]
    neighbor_gens_t = network_prepared.generators_t["p_max_pu"][
        neighbor_gens[
            neighbor_gens.index.isin(
                network_prepared.generators_t["p_max_pu"].columns
            )
        ].index
    ]

    gen_time = [
        "solar",
        "onwind",
        "solar rooftop",
        "offwind-ac",
        "offwind-dc",
        "solar-hsat",
        "urban central solar thermal",
        "rural solar thermal",
        "offwind-float",
    ]

    missing_gent = neighbor_gens[
        neighbor_gens["carrier"].isin(gen_time)
        & ~neighbor_gens.index.isin(neighbor_gens_t.columns)
    ].index

    gen_timeseries = network_prepared.generators_t["p_max_pu"].copy()
    for mgt in missing_gent:  # mgt: missing generator timeseries
        try:
            neighbor_gens_t[mgt] = gen_timeseries.loc[:, mgt[0:-5]]
        except KeyError:
            print(f"There are no timeseries for {mgt}")

    neighbor_gens.reset_index(inplace=True)
    neighbor_gens.bus = (
        neighbors.loc[neighbor_gens.bus, "new_index"].reset_index().new_index
    )
    neighbor_gens.index = db.next_etrago_id(
        "generator", len(neighbor_gens.index)
    )

    for i in neighbor_gens_t.columns:
        new_index = neighbor_gens[neighbor_gens["Generator"] == i].index
        neighbor_gens_t.rename(columns={i: new_index[0]}, inplace=True)

    # loads
    # imported from prenetwork in 1h-resolution
    neighbor_loads = network_prepared.loads[
        network_prepared.loads.bus.isin(neighbors.index)
    ]
    neighbor_loads_t_index = neighbor_loads.index[
        neighbor_loads.index.isin(network_prepared.loads_t.p_set.columns)
    ]
    neighbor_loads_t = network_prepared.loads_t["p_set"][
        neighbor_loads_t_index
    ]

    neighbor_loads.reset_index(inplace=True)
    neighbor_loads.bus = (
        neighbors.loc[neighbor_loads.bus, "new_index"].reset_index().new_index
    )
    neighbor_loads.index = db.next_etrago_id("load", len(neighbor_loads.index))

    for i in neighbor_loads_t.columns:
        new_index = neighbor_loads[neighbor_loads["Load"] == i].index
        neighbor_loads_t.rename(columns={i: new_index[0]}, inplace=True)

    # stores
    neighbor_stores = network_solved.stores[
        network_solved.stores.bus.isin(neighbors.index)
    ]
    neighbor_stores_t_index = neighbor_stores.index[
        neighbor_stores.index.isin(network_solved.stores_t.e_min_pu.columns)
    ]
    neighbor_stores_t = network_prepared.stores_t["e_min_pu"][
        neighbor_stores_t_index
    ]

    neighbor_stores.reset_index(inplace=True)
    neighbor_stores.bus = (
        neighbors.loc[neighbor_stores.bus, "new_index"].reset_index().new_index
    )
    neighbor_stores.index = db.next_etrago_id(
        "store", len(neighbor_stores.index)
    )

    for i in neighbor_stores_t.columns:
        new_index = neighbor_stores[neighbor_stores["Store"] == i].index
        neighbor_stores_t.rename(columns={i: new_index[0]}, inplace=True)

    # storage_units
    neighbor_storage = network_solved.storage_units[
        network_solved.storage_units.bus.isin(neighbors.index)
    ]
    neighbor_storage_t_index = neighbor_storage.index[
        neighbor_storage.index.isin(
            network_solved.storage_units_t.inflow.columns
        )
    ]
    neighbor_storage_t = network_prepared.storage_units_t["inflow"][
        neighbor_storage_t_index
    ]

    neighbor_storage.reset_index(inplace=True)
    neighbor_storage.bus = (
        neighbors.loc[neighbor_storage.bus, "new_index"]
        .reset_index()
        .new_index
    )
    neighbor_storage.index = db.next_etrago_id(
        "storage", len(neighbor_storage.index)
    )

    for i in neighbor_storage_t.columns:
        new_index = neighbor_storage[
            neighbor_storage["StorageUnit"] == i
        ].index
        neighbor_storage_t.rename(columns={i: new_index[0]}, inplace=True)

    # Connect to local database
    engine = db.engine()

    neighbors["scn_name"] = "eGon100RE"
    neighbors.index = neighbors["new_index"]

    # Correct geometry for non AC buses
    carriers = set(neighbors.carrier.to_list())
    carriers = [e for e in carriers if e not in ("AC",)]
    non_AC_neighbors = pd.DataFrame()
    for c in carriers:
        c_neighbors = neighbors[neighbors.carrier == c].set_index(
            "location", drop=False
        )
        for i in ["x", "y"]:
            c_neighbors = c_neighbors.drop(i, axis=1)
        coordinates = neighbors[neighbors.carrier == "AC"][
            ["location", "x", "y"]
        ].set_index("location")
        c_neighbors = pd.concat([coordinates, c_neighbors], axis=1).set_index(
            "new_index", drop=False
        )
        non_AC_neighbors = pd.concat([non_AC_neighbors, c_neighbors])

    neighbors = pd.concat(
        [neighbors[neighbors.carrier == "AC"], non_AC_neighbors]
    )

    for i in [
        "new_index",
        "control",
        "generator",
        "location",
        "sub_network",
        "unit",
        "substation_lv",
        "substation_off",
    ]:
        neighbors = neighbors.drop(i, axis=1)

    # Add geometry column
    neighbors = (
        gpd.GeoDataFrame(
            neighbors, geometry=gpd.points_from_xy(neighbors.x, neighbors.y)
        )
        .rename_geometry("geom")
        .set_crs(4326)
    )

    # Unify carrier names
    neighbors.carrier = neighbors.carrier.str.replace(" ", "_")
    neighbors.carrier.replace(
        {
            "gas": "CH4",
            "gas_for_industry": "CH4_for_industry",
            "urban_central_heat": "central_heat",
            "EV_battery": "Li_ion",
            "urban_central_water_tanks": "central_heat_store",
            "rural_water_tanks": "rural_heat_store",
        },
        inplace=True,
    )

    neighbors[~neighbors.carrier.isin(["AC"])].to_postgis(
        "egon_etrago_bus",
        engine,
        schema="grid",
        if_exists="append",
        index=True,
        index_label="bus_id",
    )

    # prepare and write neighboring crossborder lines to etrago tables
    def lines_to_etrago(neighbor_lines=neighbor_lines, scn="eGon100RE"):
        neighbor_lines["scn_name"] = scn
        neighbor_lines["cables"] = 3 * neighbor_lines["num_parallel"].astype(
            int
        )
        neighbor_lines["s_nom"] = neighbor_lines["s_nom_min"]

        for i in [
            "Line",
            "x_pu_eff",
            "r_pu_eff",
            "sub_network",
            "x_pu",
            "r_pu",
            "g_pu",
            "b_pu",
            "s_nom_opt",
            "i_nom",
            "dc",
        ]:
            neighbor_lines = neighbor_lines.drop(i, axis=1)

        # Define geometry and add to lines dataframe as 'topo'
        gdf = gpd.GeoDataFrame(index=neighbor_lines.index)
        gdf["geom_bus0"] = neighbors.geom[neighbor_lines.bus0].values
        gdf["geom_bus1"] = neighbors.geom[neighbor_lines.bus1].values
        gdf["geometry"] = gdf.apply(
            lambda x: LineString([x["geom_bus0"], x["geom_bus1"]]), axis=1
        )

        neighbor_lines = (
            gpd.GeoDataFrame(neighbor_lines, geometry=gdf["geometry"])
            .rename_geometry("topo")
            .set_crs(4326)
        )

        neighbor_lines["lifetime"] = get_sector_parameters("electricity", scn)[
            "lifetime"
        ]["ac_ehv_overhead_line"]

        neighbor_lines.to_postgis(
            "egon_etrago_line",
            engine,
            schema="grid",
            if_exists="append",
            index=True,
            index_label="line_id",
        )

    lines_to_etrago(neighbor_lines=neighbor_lines, scn="eGon100RE")

    def links_to_etrago(neighbor_links, scn="eGon100RE", extendable=True):
        """Prepare and write neighboring crossborder links to the eTraGo table

        This function prepares the neighboring crossborder links
        generated by the PyPSA-eur-sec (p-e-s) run by:
          * Deleting the useless columns
          * If extendable is false (non-default case):
              * Replacing p_nom = 0 with the p_nom_opt values (arising
                from the p-e-s optimisation)
              * Setting p_nom_extendable to false
          * Adding geometry to the links: 'geom' and 'topo' columns
          * Changing the carrier names to be consistent with eGon-data

        The function then inserts the links into the eTraGo table and has
        no return value.

        Parameters
        ----------
        neighbor_links : pandas.DataFrame
            Dataframe containing the neighboring crossborder links
        scn : str
            Name of the scenario
        extendable : bool
            Boolean expressing if the links should be extendable or not

        Returns
        -------
        None

        """
        neighbor_links["scn_name"] = scn

        dropped_carriers = [
            "Link",
            "geometry",
            "tags",
            "under_construction",
            "underground",
            "underwater_fraction",
            "bus2",
            "bus3",
            "bus4",
            "efficiency2",
            "efficiency3",
            "efficiency4",
            "lifetime",
            "pipe_retrofit",
            "committable",
            "start_up_cost",
            "shut_down_cost",
            "min_up_time",
            "min_down_time",
            "up_time_before",
            "down_time_before",
            "ramp_limit_up",
            "ramp_limit_down",
            "ramp_limit_start_up",
            "ramp_limit_shut_down",
            "length_original",
            "reversed",
            "location",
            "project_status",
            "dc",
            "voltage",
        ]

        if extendable:
            dropped_carriers.append("p_nom_opt")
            neighbor_links = neighbor_links.drop(
                columns=dropped_carriers,
                errors="ignore",
            )

        else:
            dropped_carriers.append("p_nom")
            dropped_carriers.append("p_nom_extendable")
            neighbor_links = neighbor_links.drop(
                columns=dropped_carriers,
                errors="ignore",
            )
            neighbor_links = neighbor_links.rename(
                columns={"p_nom_opt": "p_nom"}
            )
            neighbor_links["p_nom_extendable"] = False

        if neighbor_links.empty:
            print("No links selected")
            return

        # Define geometry and add to links dataframe as 'topo'
        gdf = gpd.GeoDataFrame(
            index=neighbor_links.index,
            data={
                "geom_bus0": neighbors.loc[neighbor_links.bus0, "geom"].values,
                "geom_bus1": neighbors.loc[neighbor_links.bus1, "geom"].values,
            },
        )

        gdf["geometry"] = gdf.apply(
            lambda x: LineString([x["geom_bus0"], x["geom_bus1"]]), axis=1
        )

        neighbor_links = (
            gpd.GeoDataFrame(neighbor_links, geometry=gdf["geometry"])
            .rename_geometry("topo")
            .set_crs(4326)
        )

        # Unify carrier names
        neighbor_links.carrier = neighbor_links.carrier.str.replace(" ", "_")

        neighbor_links.carrier.replace(
            {
                "H2_Electrolysis": "power_to_H2",
                "H2_Fuel_Cell": "H2_to_power",
                "H2_pipeline_retrofitted": "H2_retrofit",
                "SMR": "CH4_to_H2",
                "Sabatier": "H2_to_CH4",
                "gas_for_industry": "CH4_for_industry",
                "gas_pipeline": "CH4",
                "urban_central_gas_boiler": "central_gas_boiler",
                "urban_central_resistive_heater": "central_resistive_heater",
                "urban_central_water_tanks_charger": "central_heat_store_charger",
                "urban_central_water_tanks_discharger": "central_heat_store_discharger",
                "rural_water_tanks_charger": "rural_heat_store_charger",
                "rural_water_tanks_discharger": "rural_heat_store_discharger",
                "urban_central_gas_CHP": "central_gas_CHP",
                "urban_central_air_heat_pump": "central_heat_pump",
                "rural_ground_heat_pump": "rural_heat_pump",
            },
            inplace=True,
        )

        H2_links = {
            "H2_to_CH4": "H2_to_CH4",
            "H2_to_power": "H2_to_power",
            "power_to_H2": "power_to_H2_system",
            "CH4_to_H2": "CH4_to_H2",
        }

        for c in H2_links.keys():
            neighbor_links.loc[
                (neighbor_links.carrier == c),
                "lifetime",
            ] = get_sector_parameters("gas", "eGon100RE")["lifetime"][
                H2_links[c]
            ]

        neighbor_links.to_postgis(
            "egon_etrago_link",
            engine,
            schema="grid",
            if_exists="append",
            index=True,
            index_label="link_id",
        )

    extendable_links_carriers = [
1346
        "battery charger",
1347
        "battery discharger",
1348
        "home battery charger",
1349
        "home battery discharger",
1350
        "rural water tanks charger",
1351
        "rural water tanks discharger",
1352
        "urban central water tanks charger",
1353
        "urban central water tanks discharger",
1354
        "urban decentral water tanks charger",
1355
        "urban decentral water tanks discharger",
1356
        "H2 Electrolysis",
1357
        "H2 Fuel Cell",
1358
        "SMR",
1359
        "Sabatier",
1360
    ]
1361
1362
    # delete unwanted carriers for eTraGo
1363
    excluded_carriers = [
1364
        "gas for industry CC",
1365
        "SMR CC",
1366
        "DAC",
1367
    ]
1368
    neighbor_links = neighbor_links[
1369
        ~neighbor_links.carrier.isin(excluded_carriers)
1370
    ]
1371
1372
    # Combine CHP_CC and CHP
1373
    chp_cc = neighbor_links[
1374
        neighbor_links.carrier == "urban central gas CHP CC"
1375
    ]
1376
    for index, row in chp_cc.iterrows():
1377
        neighbor_links.loc[
1378
            neighbor_links.Link == row.Link.replace("CHP CC", "CHP"),
1379
            "p_nom_opt",
1380
        ] += row.p_nom_opt
1381
        neighbor_links.loc[
1382
            neighbor_links.Link == row.Link.replace("CHP CC", "CHP"), "p_nom"
1383
        ] += row.p_nom
1384
        neighbor_links.drop(index, inplace=True)
1385
1386
    # Combine heat pumps
1387
    # Like in Germany, there are air heat pumps in central heat grids
1388
    # and ground heat pumps in rural areas
1389
    rural_air = neighbor_links[neighbor_links.carrier == "rural air heat pump"]
1390
    for index, row in rural_air.iterrows():
1391
        neighbor_links.loc[
1392
            neighbor_links.Link == row.Link.replace("air", "ground"),
1393
            "p_nom_opt",
1394
        ] += row.p_nom_opt
1395
        neighbor_links.loc[
1396
            neighbor_links.Link == row.Link.replace("air", "ground"), "p_nom"
1397
        ] += row.p_nom
1398
        neighbor_links.drop(index, inplace=True)
1399
    links_to_etrago(
1400
        neighbor_links[neighbor_links.carrier.isin(extendable_links_carriers)],
1401
        "eGon100RE",
1402
    )
1403
    links_to_etrago(
1404
        neighbor_links[
1405
            ~neighbor_links.carrier.isin(extendable_links_carriers)
1406
        ],
1407
        "eGon100RE",
1408
        extendable=False,
1409
    )
1410
    # Include links time-series
1411
    # For heat_pumps
1412
    hp = neighbor_links[neighbor_links["carrier"].str.contains("heat pump")]
1413
1414
    neighbor_eff_t = network_prepared.links_t["efficiency"][
1415
        hp[hp.Link.isin(network_prepared.links_t["efficiency"].columns)].index
1416
    ]
1417
1418
    missing_hp = hp[~hp["Link"].isin(neighbor_eff_t.columns)].Link
1419
1420
    eff_timeseries = network_prepared.links_t["efficiency"].copy()
1421
    for met in missing_hp:  # met: missing efficiency timeseries
1422
        try:
1423
            neighbor_eff_t[met] = eff_timeseries.loc[:, met[0:-5]]
1424
        except:
1425
            print(f"There are not timeseries for heat_pump {met}")
1426
1427
    for i in neighbor_eff_t.columns:
1428
        new_index = neighbor_links[neighbor_links["Link"] == i].index
1429
        neighbor_eff_t.rename(columns={i: new_index[0]}, inplace=True)
1430
1431
    # Include links time-series
1432
    # For ev_chargers
1433
    ev = neighbor_links[neighbor_links["carrier"].str.contains("BEV charger")]
1434
1435
    ev_p_max_pu = network_prepared.links_t["p_max_pu"][
1436
        ev[ev.Link.isin(network_prepared.links_t["p_max_pu"].columns)].index
1437
    ]
1438
1439
    missing_ev = ev[~ev["Link"].isin(ev_p_max_pu.columns)].Link
1440
1441
    ev_p_max_pu_timeseries = network_prepared.links_t["p_max_pu"].copy()
1442
    for mct in missing_ev:  # evt: missing charger timeseries
1443
        try:
1444
            ev_p_max_pu[mct] = ev_p_max_pu_timeseries.loc[:, mct[0:-5]]
1445
        except:
1446
            print(f"There are not timeseries for EV charger {mct}")
1447
1448
    for i in ev_p_max_pu.columns:
1449
        new_index = neighbor_links[neighbor_links["Link"] == i].index
1450
        ev_p_max_pu.rename(columns={i: new_index[0]}, inplace=True)
1451
1452
    # prepare neighboring generators for etrago tables
1453
    neighbor_gens["scn_name"] = "eGon100RE"
1454
    neighbor_gens["p_nom"] = neighbor_gens["p_nom_opt"]
1455
    neighbor_gens["p_nom_extendable"] = False
1456
1457
    # Unify carrier names
1458
    neighbor_gens.carrier = neighbor_gens.carrier.str.replace(" ", "_")
1459
1460
    neighbor_gens.carrier.replace(
1461
        {
1462
            "onwind": "wind_onshore",
1463
            "ror": "run_of_river",
1464
            "offwind-ac": "wind_offshore",
1465
            "offwind-dc": "wind_offshore",
1466
            "offwind-float": "wind_offshore",
1467
            "urban_central_solar_thermal": "urban_central_solar_thermal_collector",
1468
            "residential_rural_solar_thermal": "residential_rural_solar_thermal_collector",
1469
            "services_rural_solar_thermal": "services_rural_solar_thermal_collector",
1470
            "solar-hsat": "solar",
1471
        },
1472
        inplace=True,
1473
    )
1474
1475
    for i in [
1476
        "Generator",
1477
        "weight",
1478
        "lifetime",
1479
        "p_set",
1480
        "q_set",
1481
        "p_nom_opt",
1482
        "e_sum_min",
1483
        "e_sum_max",
1484
    ]:
1485
        neighbor_gens = neighbor_gens.drop(i, axis=1)
1486
1487
    neighbor_gens.to_sql(
1488
        "egon_etrago_generator",
1489
        engine,
1490
        schema="grid",
1491
        if_exists="append",
1492
        index=True,
1493
        index_label="generator_id",
1494
    )
1495
1496

    # prepare neighboring loads for etrago tables
    neighbor_loads["scn_name"] = "eGon100RE"

    # Unify carrier names
    neighbor_loads.carrier = neighbor_loads.carrier.str.replace(" ", "_")

    neighbor_loads.carrier.replace(
        {
            "electricity": "AC",
            "DC": "AC",
            "industry_electricity": "AC",
            "H2_pipeline_retrofitted": "H2_system_boundary",
            "gas_pipeline": "CH4_system_boundary",
            "gas_for_industry": "CH4_for_industry",
            "urban_central_heat": "central_heat",
        },
        inplace=True,
    )

    neighbor_loads = neighbor_loads.drop(
        columns=["Load"],
        errors="ignore",
    )

    neighbor_loads.to_sql(
        "egon_etrago_load",
        engine,
        schema="grid",
        if_exists="append",
        index=True,
        index_label="load_id",
    )

    # prepare neighboring stores for etrago tables
    neighbor_stores["scn_name"] = "eGon100RE"

    # Unify carrier names
    neighbor_stores.carrier = neighbor_stores.carrier.str.replace(" ", "_")

    neighbor_stores.carrier.replace(
        {
            "Li_ion": "battery",
            "gas": "CH4",
            "urban_central_water_tanks": "central_heat_store",
            "rural_water_tanks": "rural_heat_store",
            "EV_battery": "battery_storage",
        },
        inplace=True,
    )
    neighbor_stores.loc[
        (
            (neighbor_stores.e_nom_max <= 1e9)
            & (neighbor_stores.carrier == "H2_Store")
        ),
        "carrier",
    ] = "H2_underground"
    neighbor_stores.loc[
        (
            (neighbor_stores.e_nom_max > 1e9)
            & (neighbor_stores.carrier == "H2_Store")
        ),
        "carrier",
    ] = "H2_overground"

    neighbor_stores = neighbor_stores.drop(
        columns=[
            "Store",
            "p_set",
            "q_set",
            "e_nom_opt",
            "lifetime",
            "e_initial_per_period",
            "e_cyclic_per_period",
            "location",
        ],
        errors="ignore",
    )

    for c in ["H2_underground", "H2_overground"]:
        neighbor_stores.loc[
            (neighbor_stores.carrier == c),
            "lifetime",
        ] = get_sector_parameters("gas", "eGon100RE")["lifetime"][c]

    neighbor_stores.to_sql(
        "egon_etrago_store",
        engine,
        schema="grid",
        if_exists="append",
        index=True,
        index_label="store_id",
    )

    # prepare neighboring storage_units for etrago tables
    neighbor_storage["scn_name"] = "eGon100RE"

    # Unify carrier names
    neighbor_storage.carrier = neighbor_storage.carrier.str.replace(" ", "_")

    neighbor_storage.carrier.replace(
        {"PHS": "pumped_hydro", "hydro": "reservoir"}, inplace=True
    )

    neighbor_storage = neighbor_storage.drop(
        columns=[
            "StorageUnit",
            "p_nom_opt",
            "state_of_charge_initial_per_period",
            "cyclic_state_of_charge_per_period",
        ],
        errors="ignore",
    )

    neighbor_storage.to_sql(
        "egon_etrago_storage",
        engine,
        schema="grid",
        if_exists="append",
        index=True,
        index_label="storage_id",
    )

    # writing neighboring loads_t p_sets to etrago tables

    neighbor_loads_t_etrago = pd.DataFrame(
        columns=["scn_name", "temp_id", "p_set"],
        index=neighbor_loads_t.columns,
    )
    neighbor_loads_t_etrago["scn_name"] = "eGon100RE"
    neighbor_loads_t_etrago["temp_id"] = 1
    for i in neighbor_loads_t.columns:
        neighbor_loads_t_etrago.at[i, "p_set"] = neighbor_loads_t[
            i
        ].values.tolist()

    neighbor_loads_t_etrago.to_sql(
        "egon_etrago_load_timeseries",
        engine,
        schema="grid",
        if_exists="append",
        index=True,
        index_label="load_id",
    )
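
    # Sketch of the resulting table layout (illustrative values): each row
    # stores the whole annual profile of one component as an array, keyed
    # by its component id, e.g.
    #
    #   load_id | scn_name  | temp_id | p_set
    #   --------+-----------+---------+-------------------------------
    #   123     | eGon100RE | 1       | [53.2, 51.8, ...]
    #
    # The same pattern is used for the link, generator, store, storage and
    # line time series below.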

    # writing neighboring link_t efficiency and p_max_pu to etrago tables
    neighbor_link_t_etrago = pd.DataFrame(
        columns=["scn_name", "temp_id", "p_max_pu", "efficiency"],
        index=neighbor_eff_t.columns.to_list() + ev_p_max_pu.columns.to_list(),
    )
    neighbor_link_t_etrago["scn_name"] = "eGon100RE"
    neighbor_link_t_etrago["temp_id"] = 1
    for i in neighbor_eff_t.columns:
        neighbor_link_t_etrago.at[i, "efficiency"] = neighbor_eff_t[
            i
        ].values.tolist()
    for i in ev_p_max_pu.columns:
        neighbor_link_t_etrago.at[i, "p_max_pu"] = ev_p_max_pu[
            i
        ].values.tolist()

    neighbor_link_t_etrago.to_sql(
        "egon_etrago_link_timeseries",
        engine,
        schema="grid",
        if_exists="append",
        index=True,
        index_label="link_id",
    )

    # writing neighboring generator_t p_max_pu to etrago tables
    neighbor_gens_t_etrago = pd.DataFrame(
        columns=["scn_name", "temp_id", "p_max_pu"],
        index=neighbor_gens_t.columns,
    )
    neighbor_gens_t_etrago["scn_name"] = "eGon100RE"
    neighbor_gens_t_etrago["temp_id"] = 1
    for i in neighbor_gens_t.columns:
        neighbor_gens_t_etrago.at[i, "p_max_pu"] = neighbor_gens_t[
            i
        ].values.tolist()

    neighbor_gens_t_etrago.to_sql(
        "egon_etrago_generator_timeseries",
        engine,
        schema="grid",
        if_exists="append",
        index=True,
        index_label="generator_id",
    )

    # writing neighboring stores_t e_min_pu to etrago tables
    neighbor_stores_t_etrago = pd.DataFrame(
        columns=["scn_name", "temp_id", "e_min_pu"],
        index=neighbor_stores_t.columns,
    )
    neighbor_stores_t_etrago["scn_name"] = "eGon100RE"
    neighbor_stores_t_etrago["temp_id"] = 1
    for i in neighbor_stores_t.columns:
        neighbor_stores_t_etrago.at[i, "e_min_pu"] = neighbor_stores_t[
            i
        ].values.tolist()

    neighbor_stores_t_etrago.to_sql(
        "egon_etrago_store_timeseries",
        engine,
        schema="grid",
        if_exists="append",
        index=True,
        index_label="store_id",
    )

    # writing neighboring storage_units inflow to etrago tables
    neighbor_storage_t_etrago = pd.DataFrame(
        columns=["scn_name", "temp_id", "inflow"],
        index=neighbor_storage_t.columns,
    )
    neighbor_storage_t_etrago["scn_name"] = "eGon100RE"
    neighbor_storage_t_etrago["temp_id"] = 1
    for i in neighbor_storage_t.columns:
        neighbor_storage_t_etrago.at[i, "inflow"] = neighbor_storage_t[
            i
        ].values.tolist()

    neighbor_storage_t_etrago.to_sql(
        "egon_etrago_storage_timeseries",
        engine,
        schema="grid",
        if_exists="append",
        index=True,
        index_label="storage_id",
    )
1721
1722
    # writing neighboring lines_t s_max_pu to etrago tables
1723
    if not network_solved.lines_t["s_max_pu"].empty:
1724
        neighbor_lines_t_etrago = pd.DataFrame(
1725
            columns=["scn_name", "s_max_pu"], index=neighbor_lines_t.columns
1726
        )
1727
        neighbor_lines_t_etrago["scn_name"] = "eGon100RE"
1728
1729
        for i in neighbor_lines_t.columns:
1730
            neighbor_lines_t_etrago["s_max_pu"][i] = neighbor_lines_t[
1731
                i
1732
            ].values.tolist()
1733
1734
        neighbor_lines_t_etrago.to_sql(
1735
            "egon_etrago_line_timeseries",
1736
            engine,
1737
            schema="grid",
1738
            if_exists="append",
1739
            index=True,
1740
            index_label="line_id",
1741
        )
1742
1743
1744 View Code Duplication


def prepared_network(planning_horizon=3):
    """Load the prepared (pre-solve) pypsa-eur network.

    Depending on the egon-data settings, the network is taken either from
    the local pypsa-eur run or from the data bundle.

    Parameters
    ----------
    planning_horizon : int, optional
        Index of the planning horizon in the pypsa-eur scenario config.
        The default is 3.

    Returns
    -------
    pypsa.Network

    """
    if egon.data.config.settings()["egon-data"]["--run-pypsa-eur"]:
        with open(
            __path__[0] + "/datasets/pypsaeur/config_prepare.yaml", "r"
        ) as stream:
            data_config = yaml.safe_load(stream)

        target_file = (
            Path(".")
            / "run-pypsa-eur"
            / "pypsa-eur"
            / "results"
            / data_config["run"]["name"]
            / "prenetworks"
            / f"base_s_{data_config['scenario']['clusters'][0]}"
            f"_l{data_config['scenario']['ll'][0]}"
            f"_{data_config['scenario']['opts'][0]}"
            f"_{data_config['scenario']['sector_opts'][0]}"
            f"_{data_config['scenario']['planning_horizons'][planning_horizon]}.nc"
        )

    else:
        target_file = (
            Path(".")
            / "data_bundle_egon_data"
            / "pypsa_eur"
            / "prenetworks"
            / "prenetwork_post-manipulate_pre-solve"
            / "base_s_39_lc1.25__cb40ex0-T-H-I-B-solar+p3-dist1_2045.nc"
        )

    return pypsa.Network(target_file.absolute().as_posix())
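
# Example (illustrative config values, not the project defaults): with run
# name "egon100re", clusters=[39], ll=["vopt"], opts=[""],
# sector_opts=["none"] and planning_horizons=["2025", "2030", "2035", "2045"],
# prepared_network(planning_horizon=3) resolves to
# run-pypsa-eur/pypsa-eur/results/egon100re/prenetworks/
# base_s_39_lvopt__none_2045.nc and loads it as a pypsa.Network.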


def overwrite_H2_pipeline_share():
    """Overwrite retrofitted_CH4pipeline-to-H2pipeline_share value

    Overwrite retrofitted_CH4pipeline-to-H2pipeline_share in the
    scenario parameter table if pypsa-eur-sec is run.
    This function writes to the database and has no return value.

    """
    scn_name = "eGon100RE"
    # Select source and target from dataset configuration
    target = egon.data.config.datasets()["pypsa-eur-sec"]["target"]

    n = read_network(planning_horizon=2045)

    H2_pipelines = n.links[n.links["carrier"] == "H2 pipeline retrofitted"]
    CH4_pipelines = n.links[n.links["carrier"] == "gas pipeline"]
    # Mean ratio of retrofitted H2 capacity to the parallel CH4 capacity;
    # assumes both selections are ordered pairwise
    H2_pipes_share = np.mean(
        [
            (i / j)
            for i, j in zip(
                H2_pipelines.p_nom_opt.to_list(), CH4_pipelines.p_nom.to_list()
            )
        ]
    )
    logger.info(
        "retrofitted_CH4pipeline-to-H2pipeline_share = " + str(H2_pipes_share)
    )

    parameters = db.select_dataframe(
        f"""
        SELECT *
        FROM {target['scenario_parameters']['schema']}.{target['scenario_parameters']['table']}
        WHERE name = '{scn_name}'
        """
    )

    gas_param = parameters.loc[0, "gas_parameters"]
    gas_param["retrofitted_CH4pipeline-to-H2pipeline_share"] = H2_pipes_share
    gas_param = json.dumps(gas_param)

    # Update data in db
    db.execute_sql(
        f"""
    UPDATE {target['scenario_parameters']['schema']}.{target['scenario_parameters']['table']}
    SET gas_parameters = '{gas_param}'
    WHERE name = '{scn_name}';
    """
    )
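
# A minimal, self-contained sketch of the share calculation above with toy
# numbers (purely illustrative, not project data):
#
#     >>> import numpy as np
#     >>> h2_p_nom_opt = [10.0, 0.0, 5.0]  # retrofitted H2 pipelines (MW)
#     >>> ch4_p_nom = [100.0, 80.0, 50.0]  # parallel CH4 pipelines (MW)
#     >>> float(np.mean([h2 / ch4 for h2, ch4 in zip(h2_p_nom_opt, ch4_p_nom)]))
#     0.06666666666666667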


def update_electrical_timeseries_germany(network):
    """Replace electrical demand time series in Germany with data from egon-data

    Parameters
    ----------
    network : pypsa.Network
        Network including demand time series from pypsa-eur

    Returns
    -------
    network : pypsa.Network
        Network including electrical demand time series in Germany from egon-data

    """
    year = network.year
    skip = network.snapshot_weightings.objective.iloc[0].astype("int")
    df = pd.read_csv(
        "input-pypsa-eur-sec/electrical_demand_timeseries_DE_eGon100RE.csv"
    )

    annual_demand = pd.Series(index=[2019, 2037])
    annual_demand_industry = pd.Series(index=[2019, 2037])
    # Define values from status2019 for interpolation
    # Residential and service (in TWh)
    annual_demand.loc[2019] = 124.71 + 143.26
    # Industry (in TWh)
    annual_demand_industry.loc[2019] = 241.925

    # Define values from NEP 2023 scenario B 2037 for interpolation
    # Residential and service (in TWh)
    annual_demand.loc[2037] = 104 + 153.1
    # Industry (in TWh)
    annual_demand_industry.loc[2037] = 334.0

    # Set linearly interpolated demands for years before 2037
    if year < 2037:
        # Calculate annual demands for the year by linearly interpolating
        # between 2019 and 2037.
        # Done separately for industry and for residential and service to
        # fit pypsa-eur's structure.
        annual_rate = (annual_demand.loc[2037] - annual_demand.loc[2019]) / (
            2037 - 2019
        )
        annual_demand_year = annual_demand.loc[2019] + annual_rate * (
            year - 2019
        )

        annual_rate_industry = (
            annual_demand_industry.loc[2037] - annual_demand_industry.loc[2019]
        ) / (2037 - 2019)
        annual_demand_year_industry = annual_demand_industry.loc[
            2019
        ] + annual_rate_industry * (year - 2019)
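
        # Worked example with the constants above: for year = 2030,
        #   residential + service: 267.97 + (257.1 - 267.97) / 18 * 11
        #                          ≈ 261.33 TWh
        #   industry:              241.925 + (334.0 - 241.925) / 18 * 11
        #                          ≈ 298.19 TWh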

        # Scale time series for 100% scenario with the annual demands
        # The shape of the curve is taken from the 100% scenario since the
        # same weather and calendar year is used there
        network.loads_t.p_set.loc[:, "DE0 0"] = (
            df["residential_and_service"].loc[::skip]
            / df["residential_and_service"].sum()
            * annual_demand_year
            * 1e6
        ).values

        network.loads_t.p_set.loc[:, "DE0 0 industry electricity"] = (
            df["industry"].loc[::skip]
            / df["industry"].sum()
            * annual_demand_year_industry
            * 1e6
        ).values

    elif year == 2045:
        # Use .values to avoid aligning the csv's integer index with the
        # snapshot index of the network
        network.loads_t.p_set.loc[:, "DE0 0"] = (
            df["residential_and_service"].loc[::skip].values
        )

        network.loads_t.p_set.loc[:, "DE0 0 industry electricity"] = (
            df["industry"].loc[::skip].values
        )

    else:
        print(
            "Scaling is not implemented for the years 2037 to 2044 "
            "or beyond 2045."
        )
        return

    network.loads.loc["DE0 0 industry electricity", "p_set"] = 0.0

    return network


def geothermal_district_heating(network):
    """Add the option to build geothermal power plants in district heating in Germany

    Parameters
    ----------
    network : pypsa.Network
        Network from pypsa-eur without geothermal generators

    Returns
    -------
    network : pypsa.Network
        Updated network with geothermal generators

    """

    costs_and_potentials = pd.read_csv(
        "input-pypsa-eur-sec/geothermal_potential_germany.csv"
    )

    network.add("Carrier", "urban central geo thermal")

    # Set lifetime of geothermal plants to 30 years based on:
    # Ableitung eines Korridors für den Ausbau der erneuerbaren Wärme im
    # Gebäudebereich, Beuth Hochschule für Technik, Berlin; ifeu – Institut
    # für Energie- und Umweltforschung Heidelberg GmbH, Februar 2017
    lifetime_geothermal = 30

    for i, row in costs_and_potentials.iterrows():
        network.add(
            "Generator",
            f"DE0 0 urban central geo thermal {i}",
            bus="DE0 0 urban central heat",
            carrier="urban central geo thermal",
            p_nom_extendable=True,
            p_nom_max=row["potential [MW]"],
            capital_cost=annualize_capital_costs(
                row["cost [EUR/kW]"] * 1e6, lifetime_geothermal, 0.07
            ),
        )
    return network
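
# Note: annualize_capital_costs is assumed here to implement the standard
# annuity formula a = r / (1 - (1 + r) ** -n). With r = 0.07 and n = 30
# years this yields an annuity factor of roughly 0.0806, i.e. about 8 % of
# the overnight cost per year.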


def h2_overground_stores(network):
    """Add hydrogen overground stores to each hydrogen node

    In pypsa-eur, only countries without the potential of underground
    hydrogen stores have the option to build overground hydrogen tanks.
    Overground stores are more expensive, but are not restricted by the
    geological potential. To allow higher hydrogen store capacities in each
    country, optional hydrogen overground tanks are also added to nodes with
    a potential for underground stores.

    Parameters
    ----------
    network : pypsa.Network
        Network without hydrogen overground stores at each hydrogen node

    Returns
    -------
    network : pypsa.Network
        Network with hydrogen overground stores at each hydrogen node

    """

    underground_h2_stores = network.stores[
        (network.stores.carrier == "H2 Store")
        & (network.stores.e_nom_max != np.inf)
    ]

    overground_h2_stores = network.stores[
        (network.stores.carrier == "H2 Store")
        & (network.stores.e_nom_max == np.inf)
    ]

    network.madd(
        "Store",
        underground_h2_stores.bus + " overground Store",
        bus=underground_h2_stores.bus.values,
        e_nom_extendable=True,
        e_cyclic=True,
        carrier="H2 Store",
        capital_cost=overground_h2_stores.capital_cost.mean(),
    )

    return network
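
# Example (hypothetical bus name): for an underground store at bus
# "NO0 0 H2", the madd call above adds a store "NO0 0 H2 overground Store"
# with unrestricted e_nom_max and the mean capital cost of the existing
# overground tanks.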


def update_heat_timeseries_germany(network):
    """Replace heat demand time series in Germany with data from egon-data"""
    # Import heat demand curves for Germany from eGon-data
    df_egon_heat_demand = pd.read_csv(
        "input-pypsa-eur-sec/heat_demand_timeseries_DE_eGon100RE.csv"
    )

    # Replace heat demand curves in Germany with values from eGon-data
    network.loads_t.p_set.loc[:, "DE1 0 rural heat"] = (
        df_egon_heat_demand.loc[:, "residential rural"].values
        + df_egon_heat_demand.loc[:, "service rural"].values
    )

    network.loads_t.p_set.loc[:, "DE1 0 urban central heat"] = (
        df_egon_heat_demand.loc[:, "urban central"].values
    )

    return network


def drop_biomass(network):
    carrier = "biomass"

    for c in network.iterate_components():
        network.mremove(c.name, c.df[c.df.index.str.contains(carrier)].index)
    return network


def postprocessing_biomass_2045():

    network = read_network(planning_horizon=2045)
    network = drop_biomass(network)

    with open(
        __path__[0] + "/datasets/pypsaeur/config_solve.yaml", "r"
    ) as stream:
        data_config = yaml.safe_load(stream)

    target_file = (
        Path(".")
        / "run-pypsa-eur"
        / "pypsa-eur"
        / "results"
        / data_config["run"]["name"]
        / "postnetworks"
        / f"base_s_{data_config['scenario']['clusters'][0]}"
        f"_l{data_config['scenario']['ll'][0]}"
        f"_{data_config['scenario']['opts'][0]}"
        f"_{data_config['scenario']['sector_opts'][0]}"
        f"_{data_config['scenario']['planning_horizons'][3]}.nc"
    )

    network.export_to_netcdf(target_file)


def drop_urban_decentral_heat(network):
    carrier = "urban decentral heat"

    # Add urban decentral heat demand to rural heat demand
    # The first five characters of the bus name identify the region,
    # e.g. "DE1 0"
    for country in network.loads.loc[
        network.loads.carrier == carrier, "bus"
    ].str[:5]:

        if f"{country} {carrier}" in network.loads_t.p_set.columns:
            network.loads_t.p_set[
                f"{country} rural heat"
            ] += network.loads_t.p_set[f"{country} {carrier}"]
        else:
            print(
                f"""No time series available for {country} {carrier}.
                  Using static p_set."""
            )

            network.loads_t.p_set[
                f"{country} rural heat"
            ] += network.loads.loc[f"{country} {carrier}", "p_set"]

    # In some cases low-temperature heat for industry is connected to the
    # urban decentral heat bus since there is no urban central heat bus.
    # These loads are connected to the representative rural heat bus:
    network.loads.loc[
        (network.loads.bus.str.contains(carrier))
        & (~network.loads.carrier.str.contains(carrier.replace(" heat", ""))),
        "bus",
    ] = network.loads.loc[
        (network.loads.bus.str.contains(carrier))
        & (~network.loads.carrier.str.contains(carrier.replace(" heat", ""))),
        "bus",
    ].str.replace(
        "urban decentral", "rural"
    )

    # Drop components attached to urban decentral heat
    for c in network.iterate_components():
        network.mremove(
            c.name, c.df[c.df.index.str.contains("urban decentral")].index
        )

    return network
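
# Example (illustrative): a load "AT0 0 low-temperature heat for industry"
# attached to bus "AT0 0 urban decentral heat" keeps its carrier but is
# re-attached to "AT0 0 rural heat" before all remaining "urban decentral"
# components are removed.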


def district_heating_shares(network):
    df = pd.read_csv(
        "data_bundle_powerd_data/district_heating_shares_egon.csv"
    ).set_index("country_code")

    heat_demand_per_country = (
        network.loads_t.p_set[
            network.loads[
                (network.loads.carrier.str.contains("heat"))
                & network.loads.index.isin(network.loads_t.p_set.columns)
            ].index
        ]
        .groupby(network.loads.bus.str[:5], axis=1)
        .sum()
    )

    for country in heat_demand_per_country.columns:
        network.loads_t.p_set[f"{country} urban central heat"] = (
            heat_demand_per_country.loc[:, country].mul(
                df.loc[country[:2]].values[0]
            )
        )
        network.loads_t.p_set[f"{country} rural heat"] = (
            heat_demand_per_country.loc[:, country].mul(
                (1 - df.loc[country[:2]].values[0])
            )
        )

    # Drop links with undefined buses or carrier
    network.mremove(
        "Link",
        network.links[
            ~network.links.bus0.isin(network.buses.index.values)
        ].index,
    )
    network.mremove(
        "Link",
        network.links[network.links.carrier == ""].index,
    )

    return network
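
# Example (hypothetical share value): if district_heating_shares_egon.csv
# lists a share of 0.14 for "DE", the summed heat demand of each German bus
# is split into 14 % urban central heat and 86 % rural heat.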


def drop_new_gas_pipelines(network):
    network.mremove(
        "Link",
        network.links[
            network.links.index.str.contains("gas pipeline new")
        ].index,
    )

    return network


def drop_fossil_gas(network):
    network.mremove(
        "Generator",
        network.generators[network.generators.carrier == "gas"].index,
    )

    return network


def drop_conventional_power_plants(network):

    # Drop lignite and coal power plants in Germany
    network.mremove(
        "Link",
        network.links[
            (network.links.carrier.isin(["coal", "lignite"]))
            & (network.links.bus1.str.startswith("DE"))
        ].index,
    )

    return network


def rural_heat_technologies(network):
    network.mremove(
        "Link",
        network.links[
            network.links.index.str.contains("rural gas boiler")
        ].index,
    )

    network.mremove(
        "Generator",
        network.generators[
            network.generators.carrier.str.contains("rural solar thermal")
        ].index,
    )

    return network


def coal_exit_D():
    """Bring the retirement of German coal plants forward to 2034"""

    df = pd.read_csv(
        "run-pypsa-eur/pypsa-eur/resources/powerplants_s_39.csv", index_col=0
    )
    df_de_coal = df[
        (df.Country == "DE")
        & ((df.Fueltype == "Lignite") | (df.Fueltype == "Hard Coal"))
    ].copy()
    # Pull every retirement date at or after 2035 forward to 2034 so that no
    # coal capacity remains in the 2035 planning horizon
    df_de_coal.loc[df_de_coal.DateOut.values >= 2035, "DateOut"] = 2034
    df.loc[df_de_coal.index] = df_de_coal

    df.to_csv("run-pypsa-eur/pypsa-eur/resources/powerplants_s_39.csv")


def offwind_potential_D(network, capacity_per_sqkm=4):

    # Eligible offshore area per carrier; presumably in km², taken from the
    # pypsa-eur resource assessment
    offwind_ac_factor = 1942
    offwind_dc_factor = 10768
    offwind_float_factor = 134

    # Set p_nom_max for German offshore wind with respect to
    # capacity_per_sqkm = 4 instead of the default 2 (which is applied for
    # the rest of Europe)
    network.generators.loc[
        (network.generators.bus == "DE0 0")
        & (network.generators.carrier == "offwind-ac"),
        "p_nom_max",
    ] = (
        offwind_ac_factor * capacity_per_sqkm
    )
    network.generators.loc[
        (network.generators.bus == "DE0 0")
        & (network.generators.carrier == "offwind-dc"),
        "p_nom_max",
    ] = (
        offwind_dc_factor * capacity_per_sqkm
    )
    network.generators.loc[
        (network.generators.bus == "DE0 0")
        & (network.generators.carrier == "offwind-float"),
        "p_nom_max",
    ] = (
        offwind_float_factor * capacity_per_sqkm
    )

    return network
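
# Worked example: with capacity_per_sqkm=4, the German offwind-ac potential
# becomes 1942 * 4 = 7768 MW (instead of 1942 * 2 = 3884 MW with the
# European default), offwind-dc 10768 * 4 = 43072 MW and offwind-float
# 134 * 4 = 536 MW.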


def additional_grid_expansion_2045(network):

    # Relax the global line expansion constraint by 5 %
    network.global_constraints.loc["lc_limit", "constant"] *= 1.05

    return network
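
# Note: "lc_limit" is assumed to be the global constraint created by the
# "lc1.25" line-cost wildcard in the scenario config; scaling its constant
# by 1.05 therefore permits about 5 % more grid expansion cost in 2045.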


def execute():
    if egon.data.config.settings()["egon-data"]["--run-pypsa-eur"]:
        with open(
            __path__[0] + "/datasets/pypsaeur/config.yaml", "r"
        ) as stream:
            data_config = yaml.safe_load(stream)

        if data_config["foresight"] == "myopic":

            print("Adjusting scenarios on the myopic pathway...")

            coal_exit_D()

            networks = pd.Series(
                [
                    f"base_s_{data_config['scenario']['clusters'][0]}"
                    f"_l{data_config['scenario']['ll'][0]}"
                    f"_{data_config['scenario']['opts'][0]}"
                    f"_{data_config['scenario']['sector_opts'][0]}"
                    f"_{planning_horizon}.nc"
                    for planning_horizon in data_config["scenario"][
                        "planning_horizons"
                    ]
                ]
            )

            scn_path = pd.DataFrame(
                index=["2025", "2030", "2035", "2045"],
                columns=["prenetwork", "functions"],
            )

            for year in scn_path.index:
                scn_path.at[year, "prenetwork"] = networks[
                    networks.str.contains(year)
                ].values[0]

            for year in ["2025", "2030", "2035"]:
                scn_path.at[year, "functions"] = [
                    # drop_urban_decentral_heat,
                    update_electrical_timeseries_germany,
                    geothermal_district_heating,
                    h2_overground_stores,
                    drop_new_gas_pipelines,
                    offwind_potential_D,
                ]

            scn_path.at["2045", "functions"] = [
                drop_biomass,
                # drop_urban_decentral_heat,
                update_electrical_timeseries_germany,
                geothermal_district_heating,
                h2_overground_stores,
                drop_new_gas_pipelines,
                drop_fossil_gas,
                offwind_potential_D,
                additional_grid_expansion_2045,
                # drop_conventional_power_plants,
                # rural_heat_technologies, # To be defined
            ]

            network_path = (
                Path(".")
                / "run-pypsa-eur"
                / "pypsa-eur"
                / "results"
                / data_config["run"]["name"]
                / "prenetworks"
            )

            for scn in scn_path.index:
                path = network_path / scn_path.at[scn, "prenetwork"]
                network = pypsa.Network(path)
                network.year = int(scn)
                for manipulator in scn_path.at[scn, "functions"]:
                    network = manipulator(network)
                network.export_to_netcdf(path)

        elif (data_config["foresight"] == "overnight") and (
            int(data_config["scenario"]["planning_horizons"][0]) > 2040
        ):

            print("Adjusting overnight long-term scenario...")

            network_path = (
                Path(".")
                / "run-pypsa-eur"
                / "pypsa-eur"
                / "results"
                / data_config["run"]["name"]
                / "prenetworks"
                / f"elec_s_{data_config['scenario']['clusters'][0]}"
                f"_l{data_config['scenario']['ll'][0]}"
                f"_{data_config['scenario']['opts'][0]}"
                f"_{data_config['scenario']['sector_opts'][0]}"
                f"_{data_config['scenario']['planning_horizons'][0]}.nc"
            )

            network = pypsa.Network(network_path)

            network = drop_biomass(network)
            network = drop_urban_decentral_heat(network)
            network = district_heating_shares(network)
            network = update_heat_timeseries_germany(network)
            network = update_electrical_timeseries_germany(network)
            network = geothermal_district_heating(network)
            network = h2_overground_stores(network)
            network = drop_new_gas_pipelines(network)
            network = drop_fossil_gas(network)
            network = rural_heat_technologies(network)

            network.export_to_netcdf(network_path)

        else:
            print(
                f"""Adjustments on prenetworks are not implemented for
                foresight option {data_config['foresight']} and
                year {data_config['scenario']['planning_horizons'][0]}.
                Please check the pypsaeur.execute function.
                """
            )
    else:
        print("pypsa-eur is not executed due to the settings of egon-data")