Passed
Pull Request — dev (#1324)
by
unknown
02:21
created

data.datasets.pypsaeur.h2_neighbours_egon2035()   B

Complexity

Conditions 2

Size

Total Lines 81
Code Lines 43

Duplication

Lines 0
Ratio 0 %

Importance

Changes 0
Metric Value
eloc 43
dl 0
loc 81
rs 8.8478
c 0
b 0
f 0
cc 2
nop 0

How to fix   Long Method   

Long Method

Small methods make your code easier to understand, in particular if combined with a good name. Besides, if your method is small, finding a good name is usually much easier.

For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.

Commonly applied refactorings include:

1
"""The central module containing all code dealing with importing data from
2
the pysa-eur-sec scenario parameter creation
3
"""
4
5
from pathlib import Path
6
from urllib.request import urlretrieve
7
import json
8
import shutil
9
10
from shapely.geometry import LineString
11
import geopandas as gpd
12
import importlib_resources as resources
13
import numpy as np
14
import pandas as pd
15
import pypsa
16
import requests
17
import yaml
18
19
from egon.data import __path__, config, db, logger
20
from egon.data.datasets import Dataset
21
from egon.data.datasets.scenario_parameters import get_sector_parameters
22
from egon.data.datasets.scenario_parameters.parameters import (
23
    annualize_capital_costs,
24
)
25
import egon.data.config
26
import egon.data.subprocess as subproc
27
28
29
class PreparePypsaEur(Dataset):
    """Dataset that downloads pypsa-eur and prepares its network."""

    def __init__(self, dependencies):
        # Tasks run in order: fetch the repository and inputs, then
        # build the prenetworks.
        task_chain = (download, prepare_network)
        super().__init__(
            name="PreparePypsaEur",
            version="0.0.42",
            dependencies=dependencies,
            tasks=task_chain,
        )
40
41
42
class RunPypsaEur(Dataset):
    """Dataset that solves the pypsa-eur network and imports the results."""

    def __init__(self, dependencies):
        # NOTE(review): the registered dataset name is "SolvePypsaEur",
        # which differs from the class name; kept as-is because other
        # code may reference the dataset by this string.
        task_chain = (
            prepare_network_2,
            execute,
            solve_network,
            clean_database,
            electrical_neighbours_egon100,
            h2_neighbours_egon2035,
            # Dropped until we decided how we deal with the H2 grid
            # overwrite_H2_pipeline_share,
        )
        super().__init__(
            name="SolvePypsaEur",
            version="0.0.42",
            dependencies=dependencies,
            tasks=task_chain,
        )
59
60
def countries_list():
    """Return the ISO 3166-1 alpha-2 codes of the considered countries.

    Germany plus the neighbouring countries that are kept in the
    reduced pypsa-eur network.
    """
    return "DE AT CH CZ PL SE NO DK GB NL BE FR LU".split()
76
77
78
def h2_neighbours_egon2035():
    """
    Load the pypsa_eur network for eGon2035, process the foreign H2
    buses and insert them into the grid.egon_etrago_bus table.

    The task is a no-op unless the 'eGon2035' scenario is activated in
    the egon-data settings.

    Returns
    -------
    None.

    """
    # Guard clause instead of nesting the whole body in an if-block
    # (the original trailing `else: return` was redundant).
    if "eGon2035" not in config.settings()["egon-data"]["--scenarios"]:
        return

    _delete_previous_h2_buses()
    h2_bus = _load_foreign_h2_buses()
    h2_bus = _format_h2_buses(h2_bus)
    _write_h2_buses(h2_bus)


def _delete_previous_h2_buses():
    """Delete foreign H2 buses written by previous executions."""
    db.execute_sql(
        """
        DELETE FROM grid.egon_etrago_bus WHERE carrier = 'H2'
        AND scn_name = 'eGon2035'
        AND country <> 'DE'
        """
    )


def _load_foreign_h2_buses():
    """Load the calculated eGon2035 network and return its foreign H2 buses.

    Returns
    -------
    pandas.DataFrame
        H2 buses in the wanted foreign countries (FR6 buses excluded).
    """
    target_file = (
        Path(".")
        / "data_bundle_egon_data"
        / "pypsa_eur"
        / "postnetworks"
        / "base_s_39_lc1.25__cb40ex0-T-H-I-B-solar+p3-dist1_2035.nc"
    )
    n = pypsa.Network(target_file)

    # Filter only H2 buses in selected foreign countries
    h2_bus = n.buses[(n.buses.country != "DE") & (n.buses.carrier == "H2")]
    wanted_countries = countries_list()
    return h2_bus[
        (h2_bus.country.isin(wanted_countries))
        & (~h2_bus.index.str.contains("FR6"))
    ]


def _format_h2_buses(h2_bus):
    """Adjust the bus dataframe to the grid.egon_etrago_bus table format.

    Adds geometry, scenario name and new bus ids, and drops the
    pypsa-only columns that do not exist in the database table.
    """
    # Add geometry column
    h2_bus = (
        gpd.GeoDataFrame(
            h2_bus, geometry=gpd.points_from_xy(h2_bus.x, h2_bus.y)
        )
        .rename_geometry("geom")
        .set_crs(4326)
    )

    h2_bus["scn_name"] = "eGon2035"

    bus_id = db.next_etrago_id("bus")  # will be change in PR1287
    ### Delete when PR1287 is merged ###
    bus_id = range(bus_id, bus_id + len(h2_bus.index))
    ####################################
    h2_bus["bus_id"] = bus_id

    # Columns coming from pypsa that have no counterpart in the table
    h2_bus.drop(
        columns=[
            "unit",
            "control",
            "generator",
            "location",
            "substation_off",
            "substation_lv",
            "sub_network",
        ],
        inplace=True,
    )
    return h2_bus


def _write_h2_buses(h2_bus):
    """Append the prepared H2 buses to grid.egon_etrago_bus."""
    engine = db.engine()
    h2_bus.to_postgis(
        "egon_etrago_bus",
        engine,
        schema="grid",
        if_exists="append",
        index=False,
    )
159
160
161
def download():
    """Set up the pypsa-eur working copy and fetch its external input data.

    If ``--run-pypsa-eur`` is enabled in the egon-data settings: clones
    pypsa-eur at a pinned commit (first run only), copies egon-data's
    config, ``custom_extra_functionality.py`` and Snakefile into it,
    copies the era5 weather cutout, and downloads natura, ship density,
    ENSPRESO biomass and Global Energy Monitor files that the regular
    snakemake workflow fails to fetch. Otherwise only a message is
    printed.
    """
    cwd = Path(".")
    filepath = cwd / "run-pypsa-eur"
    filepath.mkdir(parents=True, exist_ok=True)

    pypsa_eur_repos = filepath / "pypsa-eur"
    if config.settings()["egon-data"]["--run-pypsa-eur"]:
        # Clone and pin the repository only once
        if not pypsa_eur_repos.exists():
            subproc.run(
                [
                    "git",
                    "clone",
                    "--branch",
                    "master",
                    "https://github.com/PyPSA/pypsa-eur.git",
                    pypsa_eur_repos,
                ]
            )

            # Pin the working copy to a fixed, tested commit
            subproc.run(
                [
                    "git",
                    "checkout",
                    "2119f4cee05c256509f48d4e9fe0d8fd9e9e3632",
                ],
                cwd=pypsa_eur_repos,
            )

            # Add gurobi solver to environment:
            # Read YAML file
            # path_to_env = pypsa_eur_repos / "envs" / "environment.yaml"
            # with open(path_to_env, "r") as stream:
            #    env = yaml.safe_load(stream)

            # The version of gurobipy has to fit to the version of gurobi.
            # Since we mainly use gurobi 10.0 this is set here.
            # env["dependencies"][-1]["pip"].append("gurobipy==10.0.0")

            # Set python version to <3.12
            # Python<=3.12 needs gurobipy>=11.0, in case gurobipy is updated,
            # this can be removed
            # env["dependencies"] = [
            #    "python>=3.8,<3.12" if x == "python>=3.8" else x
            #    for x in env["dependencies"]
            # ]

            # Limit geopandas version
            # our pypsa-eur version is not compatible to geopandas>1
            # env["dependencies"] = [
            #    "geopandas>=0.11.0,<1" if x == "geopandas>=0.11.0" else x
            #    for x in env["dependencies"]
            # ]

            # Write YAML file
            # with open(path_to_env, "w", encoding="utf8") as outfile:
            #    yaml.dump(
            #        env, outfile, default_flow_style=False, allow_unicode=True
            #    )

            # Copy config file for egon-data to pypsa-eur directory
            shutil.copy(
                Path(
                    __path__[0], "datasets", "pypsaeur", "config_prepare.yaml"
                ),
                pypsa_eur_repos / "config" / "config.yaml",
            )

            # Copy custom_extra_functionality.py file for egon-data to pypsa-eur directory
            shutil.copy(
                Path(
                    __path__[0],
                    "datasets",
                    "pypsaeur",
                    "custom_extra_functionality.py",
                ),
                pypsa_eur_repos / "data",
            )

            # Write the Snakefile shipped with this package next to the repo
            with open(filepath / "Snakefile", "w") as snakefile:
                snakefile.write(
                    resources.read_text(
                        "egon.data.datasets.pypsaeur", "Snakefile"
                    )
                )

        # Copy era5 weather data to folder for pypsaeur
        era5_pypsaeur_path = filepath / "pypsa-eur" / "cutouts"

        if not era5_pypsaeur_path.exists():
            era5_pypsaeur_path.mkdir(parents=True, exist_ok=True)
            copy_from = config.datasets()["era5_weather_data"]["targets"][
                "weather_data"
            ]["path"]
            filename = "europe-2011-era5.nc"
            shutil.copy(
                copy_from + "/" + filename, era5_pypsaeur_path / filename
            )

        # Workaround to download natura, shipdensity and globalenergymonitor
        # data, which is not working in the regular snakemake workflow.
        # The same files are downloaded from the same directory as in pypsa-eur
        # version 0.10 here. It is stored in the folders from pypsa-eur.
        if not (filepath / "pypsa-eur" / "resources").exists():
            (filepath / "pypsa-eur" / "resources").mkdir(
                parents=True, exist_ok=True
            )
        urlretrieve(
            "https://zenodo.org/record/4706686/files/natura.tiff",
            filepath / "pypsa-eur" / "resources" / "natura.tiff",
        )

        if not (filepath / "pypsa-eur" / "data").exists():
            (filepath / "pypsa-eur" / "data").mkdir(
                parents=True, exist_ok=True
            )
        urlretrieve(
            "https://zenodo.org/record/13757228/files/shipdensity_global.zip",
            filepath / "pypsa-eur" / "data" / "shipdensity_global.zip",
        )

        if not (
            filepath
            / "pypsa-eur"
            / "zenodo.org"
            / "records"
            / "13757228"
            / "files"
        ).exists():
            (
                filepath
                / "pypsa-eur"
                / "zenodo.org"
                / "records"
                / "13757228"
                / "files"
            ).mkdir(parents=True, exist_ok=True)

        # NOTE(review): downloaded from record 10356004 but stored below a
        # folder named 13757228 — presumably the path the snakemake workflow
        # expects; confirm before changing either id.
        urlretrieve(
            "https://zenodo.org/records/10356004/files/ENSPRESO_BIOMASS.xlsx",
            filepath
            / "pypsa-eur"
            / "zenodo.org"
            / "records"
            / "13757228"
            / "files"
            / "ENSPRESO_BIOMASS.xlsx",
        )

        if not (filepath / "pypsa-eur" / "data" / "gem").exists():
            (filepath / "pypsa-eur" / "data" / "gem").mkdir(
                parents=True, exist_ok=True
            )

        r = requests.get(
            "https://tubcloud.tu-berlin.de/s/LMBJQCsN6Ez5cN2/download/"
            "Europe-Gas-Tracker-2024-05.xlsx"
        )
        with open(
            filepath
            / "pypsa-eur"
            / "data"
            / "gem"
            / "Europe-Gas-Tracker-2024-05.xlsx",
            "wb",
        ) as outfile:
            outfile.write(r.content)

        # NOTE(review): this existence check duplicates the one above and is
        # always False at this point — candidate for removal.
        if not (filepath / "pypsa-eur" / "data" / "gem").exists():
            (filepath / "pypsa-eur" / "data" / "gem").mkdir(
                parents=True, exist_ok=True
            )

        r = requests.get(
            "https://tubcloud.tu-berlin.de/s/Aqebo3rrQZWKGsG/download/"
            "Global-Steel-Plant-Tracker-April-2024-Standard-Copy-V1.xlsx"
        )
        with open(
            filepath
            / "pypsa-eur"
            / "data"
            / "gem"
            / "Global-Steel-Plant-Tracker-April-2024-Standard-Copy-V1.xlsx",
            "wb",
        ) as outfile:
            outfile.write(r.content)

    else:
        print("Pypsa-eur is not executed due to the settings of egon-data")
349
350
351
def prepare_network():
    """Run the pypsa-eur 'prepare' snakemake rule and back up its output.

    After the rule (and the manipulation step in ``execute``) has run,
    the resulting prenetwork files are copied into a separate folder so
    the state before solving is preserved.
    """
    filepath = Path(".") / "run-pypsa-eur"

    if not config.settings()["egon-data"]["--run-pypsa-eur"]:
        print("Pypsa-eur is not executed due to the settings of egon-data")
        return

    subproc.run(
        [
            "snakemake",
            "-j1",
            "--directory",
            filepath,
            "--snakefile",
            filepath / "Snakefile",
            "--use-conda",
            "--conda-frontend=conda",
            "--cores",
            "8",
            "prepare",
        ]
    )
    execute()

    results_path = filepath / "pypsa-eur" / "results" / "prenetworks"

    backup_path = results_path / "prenetwork_post-manipulate_pre-solve"
    backup_path.mkdir(parents=True, exist_ok=True)

    with open(
        __path__[0] + "/datasets/pypsaeur/config_prepare.yaml", "r"
    ) as stream:
        data_config = yaml.safe_load(stream)

    # One prenetwork file per configured planning horizon
    scenario = data_config["scenario"]
    for horizon in scenario["planning_horizons"]:
        nc_file = (
            f"base_s_{scenario['clusters'][0]}"
            f"_l{scenario['ll'][0]}"
            f"_{scenario['opts'][0]}"
            f"_{scenario['sector_opts'][0]}"
            f"_{horizon}.nc"
        )
        shutil.copy(Path(results_path, nc_file), backup_path)
396
397
398
def prepare_network_2():
    """Copy the solve configuration into pypsa-eur and re-run 'prepare'."""
    filepath = Path(".") / "run-pypsa-eur"

    if not config.settings()["egon-data"]["--run-pypsa-eur"]:
        print("Pypsa-eur is not executed due to the settings of egon-data")
        return

    # Replace the prepare config with the solve config
    shutil.copy(
        Path(__path__[0], "datasets", "pypsaeur", "config_solve.yaml"),
        filepath / "pypsa-eur" / "config" / "config.yaml",
    )

    snakemake_call = [
        "snakemake",
        "-j1",
        "--directory",
        filepath,
        "--snakefile",
        filepath / "Snakefile",
        "--use-conda",
        "--conda-frontend=conda",
        "--cores",
        "8",
        "prepare",
    ]
    subproc.run(snakemake_call)
425
426
427
def solve_network():
    """Run the pypsa-eur 'solve' rule, post-process biomass, then 'summary'."""
    filepath = Path(".") / "run-pypsa-eur"

    if not config.settings()["egon-data"]["--run-pypsa-eur"]:
        print("Pypsa-eur is not executed due to the settings of egon-data")
        return

    solve_call = [
        "snakemake",
        "-j1",
        "--cores",
        "8",
        "--directory",
        filepath,
        "--snakefile",
        filepath / "Snakefile",
        "--use-conda",
        "--conda-frontend=conda",
        "solve",
    ]
    subproc.run(solve_call)

    postprocessing_biomass_2045()

    summary_call = [
        "snakemake",
        "-j1",
        "--directory",
        filepath,
        "--snakefile",
        filepath / "Snakefile",
        "--use-conda",
        "--conda-frontend=conda",
        "summary",
    ]
    subproc.run(summary_call)
465
466
467 View Code Duplication
def read_network(planning_horizon=3):
    """Return a solved pypsa-eur post-network.

    Parameters
    ----------
    planning_horizon : int
        Index into the configured planning horizons; selects which
        post-network file is loaded when pypsa-eur was executed.
        Ignored when the pre-calculated data bundle is used.

    Returns
    -------
    pypsa.Network
    """
    if not config.settings()["egon-data"]["--run-pypsa-eur"]:
        # Fall back to the pre-calculated 2045 network from the data bundle
        target_file = (
            Path(".")
            / "data_bundle_egon_data"
            / "pypsa_eur"
            / "postnetworks"
            / "base_s_39_lc1.25__cb40ex0-T-H-I-B-solar+p3-dist1_2045.nc"
        )
        return pypsa.Network(target_file)

    with open(
        __path__[0] + "/datasets/pypsaeur/config_solve.yaml", "r"
    ) as stream:
        data_config = yaml.safe_load(stream)

    scenario = data_config["scenario"]
    filename = (
        f"base_s_{scenario['clusters'][0]}"
        f"_l{scenario['ll'][0]}"
        f"_{scenario['opts'][0]}"
        f"_{scenario['sector_opts'][0]}"
        f"_{scenario['planning_horizons'][planning_horizon]}.nc"
    )
    target_file = (
        Path(".")
        / "run-pypsa-eur"
        / "pypsa-eur"
        / "results"
        / data_config["run"]["name"]
        / "postnetworks"
        / filename
    )
    return pypsa.Network(target_file)
498
499
500
def clean_database():
    """Remove all components abroad for eGon100RE from the database.

    Deletes every one-port (load, generator, store, storage) and
    two-port (line, link) component outside of Germany together with
    its associated time series, and finally the foreign non-AC buses
    themselves, for the scenario 'eGon100RE'.

    Returns
    -------
    None

    """
    scn_name = "eGon100RE"

    # one-port components: drop time series first, then the components
    for comp in ("load", "generator", "store", "storage"):
        table = f"grid.egon_etrago_{comp}"
        db.execute_sql(
            f"""
            DELETE FROM {table}_timeseries
            WHERE {comp}_id IN (
                SELECT {comp}_id FROM {table}
                WHERE bus IN (
                    SELECT bus_id FROM grid.egon_etrago_bus
                    WHERE country != 'DE'
                    AND scn_name = '{scn_name}')
                AND scn_name = '{scn_name}'
            );

            DELETE FROM {table}
            WHERE bus IN (
                SELECT bus_id FROM grid.egon_etrago_bus
                WHERE country != 'DE'
                AND scn_name = '{scn_name}')
            AND scn_name = '{scn_name}';"""
        )

    # two-port components: only rows whose both buses are foreign and
    # not part of the osmtgmod results are removed
    for comp, id_col in (("line", "line_id"), ("link", "link_id")):
        table = f"grid.egon_etrago_{comp}"
        db.execute_sql(
            f"""
            DELETE FROM {table}_timeseries
            WHERE scn_name = '{scn_name}'
            AND {id_col} IN (
                SELECT {id_col} FROM {table}
            WHERE "bus0" IN (
            SELECT bus_id FROM grid.egon_etrago_bus
                WHERE country != 'DE'
                AND scn_name = '{scn_name}'
                AND bus_id NOT IN (SELECT bus_i FROM osmtgmod_results.bus_data))
            AND "bus1" IN (
            SELECT bus_id FROM grid.egon_etrago_bus
                WHERE country != 'DE'
                AND scn_name = '{scn_name}'
                AND bus_id NOT IN (SELECT bus_i FROM osmtgmod_results.bus_data))
            );


            DELETE FROM {table}
            WHERE scn_name = '{scn_name}'
            AND "bus0" IN (
            SELECT bus_id FROM grid.egon_etrago_bus
                WHERE country != 'DE'
                AND scn_name = '{scn_name}'
                AND bus_id NOT IN (SELECT bus_i FROM osmtgmod_results.bus_data))
            AND "bus1" IN (
            SELECT bus_id FROM grid.egon_etrago_bus
                WHERE country != 'DE'
                AND scn_name = '{scn_name}'
                AND bus_id NOT IN (SELECT bus_i FROM osmtgmod_results.bus_data))
            ;"""
        )

    # finally remove the foreign non-AC buses themselves
    db.execute_sql(
        f"""
        DELETE FROM grid.egon_etrago_bus
        WHERE scn_name = '{scn_name}'
        AND country <> 'DE'
        AND carrier <> 'AC'
        """
    )
589
590
591
def electrical_neighbours_egon100():
    """Run the neighbour reduction when the eGon100RE scenario is active."""
    active_scenarios = egon.data.config.settings()["egon-data"]["--scenarios"]
    if "eGon100RE" not in active_scenarios:
        print(
            "eGon100RE is not in the list of created scenarios, this task is skipped."
        )
        return
    neighbor_reduction()
599
600
601
def combine_decentral_and_rural_heat(network_solved, network_prepared):
    """Merge foreign 'urban decentral' heat components into 'rural' ones.

    For every Bus, Link and Store in the solved network whose carrier
    contains "urban decentral" and whose index does not contain "DE":
    buses are removed; links and stores are either added onto the
    matching "rural" component (when one exists) and removed, or have
    their bus/carrier names rewritten from "urban decentral" to
    "rural". Urban decentral loads in the prepared network are folded
    into the corresponding rural load time series and removed.

    Parameters
    ----------
    network_prepared : pypsa.Network
        Pre-solve network; its loads and load time series are modified
        in place.
    network_solved : pypsa.Network
        Solved network; its components are modified in place.

    Returns
    -------
    tuple
        (network_prepared, network_solved), both modified in place.
    """

    for comp in network_solved.iterate_components():

        if comp.name in ["Bus", "Link", "Store"]:
            urban_decentral = comp.df[
                comp.df.carrier.str.contains("urban decentral")
            ]
            # NOTE(review): `rural` is a filtered copy of comp.df; the
            # `.loc[...] +=` updates below may not write back to the
            # network (pandas copy semantics) — verify intended.
            rural = comp.df[comp.df.carrier.str.contains("rural")]
            for i, row in urban_decentral.iterrows():
                # only components outside Germany ("DE" not in the index)
                if not "DE" in i:
                    if comp.name in ["Bus"]:
                        network_solved.remove("Bus", i)
                    # NOTE(review): only "Link" can reach this branch —
                    # iterate_components is filtered to Bus/Link/Store above,
                    # so the "Generator" check is redundant here.
                    if comp.name in ["Link", "Generator"]:
                        if (
                            i.replace("urban decentral", "rural")
                            in rural.index
                        ):
                            # add capacities onto the existing rural component
                            rural.loc[
                                i.replace("urban decentral", "rural"),
                                "p_nom_opt",
                            ] += urban_decentral.loc[i, "p_nom_opt"]
                            rural.loc[
                                i.replace("urban decentral", "rural"), "p_nom"
                            ] += urban_decentral.loc[i, "p_nom"]
                            network_solved.remove(comp.name, i)
                        else:
                            # no rural counterpart: rename in place
                            print(i)
                            comp.df.loc[i, "bus0"] = comp.df.loc[
                                i, "bus0"
                            ].replace("urban decentral", "rural")
                            comp.df.loc[i, "bus1"] = comp.df.loc[
                                i, "bus1"
                            ].replace("urban decentral", "rural")
                            comp.df.loc[i, "carrier"] = comp.df.loc[
                                i, "carrier"
                            ].replace("urban decentral", "rural")
                    if comp.name in ["Store"]:
                        if (
                            i.replace("urban decentral", "rural")
                            in rural.index
                        ):
                            # add energy capacities onto the rural store
                            rural.loc[
                                i.replace("urban decentral", "rural"),
                                "e_nom_opt",
                            ] += urban_decentral.loc[i, "e_nom_opt"]
                            rural.loc[
                                i.replace("urban decentral", "rural"), "e_nom"
                            ] += urban_decentral.loc[i, "e_nom"]
                            network_solved.remove(comp.name, i)

                        else:
                            # no rural counterpart: re-attach and re-label
                            print(i)
                            network_solved.stores.loc[i, "bus"] = (
                                network_solved.stores.loc[i, "bus"].replace(
                                    "urban decentral", "rural"
                                )
                            )
                            network_solved.stores.loc[i, "carrier"] = (
                                "rural water tanks"
                            )

    # Fold urban decentral loads of the prepared network into the rural
    # load time series, then drop them
    urban_decentral_loads = network_prepared.loads[
        network_prepared.loads.carrier.str.contains("urban decentral")
    ]

    for i, row in urban_decentral_loads.iterrows():
        if i in network_prepared.loads_t.p_set.columns:
            network_prepared.loads_t.p_set[
                i.replace("urban decentral", "rural")
            ] += network_prepared.loads_t.p_set[i]
    network_prepared.mremove("Load", urban_decentral_loads.index)

    return network_prepared, network_solved
675
676
677
def neighbor_reduction():
678
    network_solved = read_network()
679
    network_prepared = prepared_network(planning_horizon="2045")
680
681
    # network.links.drop("pipe_retrofit", axis="columns", inplace=True)
682
683
    wanted_countries = countries_list()
684
685
    foreign_buses = network_solved.buses[
686
        (~network_solved.buses.index.str.contains("|".join(wanted_countries)))
687
        | (network_solved.buses.index.str.contains("FR6"))
688
    ]
689
    network_solved.buses = network_solved.buses.drop(
690
        network_solved.buses.loc[foreign_buses.index].index
691
    )
692
693
    # Add H2 demand of Fischer-Tropsch process and methanolisation
694
    # to industrial H2 demands
695
    industrial_hydrogen = network_prepared.loads.loc[
696
        network_prepared.loads.carrier == "H2 for industry"
697
    ]
698
    fischer_tropsch = (
699
        network_solved.links_t.p0[
700
            network_solved.links.loc[
701
                network_solved.links.carrier == "Fischer-Tropsch"
702
            ].index
703
        ]
704
        .mul(network_solved.snapshot_weightings.generators, axis=0)
705
        .sum()
706
    )
707
    methanolisation = (
708
        network_solved.links_t.p0[
709
            network_solved.links.loc[
710
                network_solved.links.carrier == "methanolisation"
711
            ].index
712
        ]
713
        .mul(network_solved.snapshot_weightings.generators, axis=0)
714
        .sum()
715
    )
716
    for i, row in industrial_hydrogen.iterrows():
717
        network_prepared.loads.loc[i, "p_set"] += (
718
            fischer_tropsch[
719
                fischer_tropsch.index.str.startswith(row.bus[:5])
720
            ].sum()
721
            / 8760
722
        )
723
        network_prepared.loads.loc[i, "p_set"] += (
724
            methanolisation[
725
                methanolisation.index.str.startswith(row.bus[:5])
726
            ].sum()
727
            / 8760
728
        )
729
    # drop foreign lines and links from the 2nd row
730
731
    network_solved.lines = network_solved.lines.drop(
732
        network_solved.lines[
733
            (
734
                network_solved.lines["bus0"].isin(network_solved.buses.index)
735
                == False
736
            )
737
            & (
738
                network_solved.lines["bus1"].isin(network_solved.buses.index)
739
                == False
740
            )
741
        ].index
742
    )
743
744
    # select all lines which have at bus1 the bus which is kept
745
    lines_cb_1 = network_solved.lines[
746
        (
747
            network_solved.lines["bus0"].isin(network_solved.buses.index)
748
            == False
749
        )
750
    ]
751
752
    # create a load at bus1 with the line's hourly loading
753
    for i, k in zip(lines_cb_1.bus1.values, lines_cb_1.index):
754
755
        # Copy loading of lines into hourly resolution
756
        pset = pd.Series(
757
            index=network_prepared.snapshots,
758
            data=network_solved.lines_t.p1[k].resample("H").ffill(),
759
        )
760
        pset["2011-12-31 22:00:00"] = pset["2011-12-31 21:00:00"]
761
        pset["2011-12-31 23:00:00"] = pset["2011-12-31 21:00:00"]
762
763
        # Loads are all imported from the prepared network in the end
764
        network_prepared.add(
765
            "Load",
766
            "slack_fix " + i + " " + k,
767
            bus=i,
768
            p_set=pset,
769
            carrier=lines_cb_1.loc[k, "carrier"],
770
        )
771
772
    # select all lines which have at bus0 the bus which is kept
773
    lines_cb_0 = network_solved.lines[
774
        (
775
            network_solved.lines["bus1"].isin(network_solved.buses.index)
776
            == False
777
        )
778
    ]
779
780
    # create a load at bus0 with the line's hourly loading
781
    for i, k in zip(lines_cb_0.bus0.values, lines_cb_0.index):
782
        # Copy loading of lines into hourly resolution
783
        pset = pd.Series(
784
            index=network_prepared.snapshots,
785
            data=network_solved.lines_t.p0[k].resample("H").ffill(),
786
        )
787
        pset["2011-12-31 22:00:00"] = pset["2011-12-31 21:00:00"]
788
        pset["2011-12-31 23:00:00"] = pset["2011-12-31 21:00:00"]
789
790
        network_prepared.add(
791
            "Load",
792
            "slack_fix " + i + " " + k,
793
            bus=i,
794
            p_set=pset,
795
            carrier=lines_cb_0.loc[k, "carrier"],
796
        )
797
798
    # do the same for links
799
    network_solved.mremove(
800
        "Link",
801
        network_solved.links[
802
            (~network_solved.links.bus0.isin(network_solved.buses.index))
803
            | (~network_solved.links.bus1.isin(network_solved.buses.index))
804
        ].index,
805
    )
806
807
    # select all links which have at bus1 the bus which is kept
808
    links_cb_1 = network_solved.links[
809
        (
810
            network_solved.links["bus0"].isin(network_solved.buses.index)
811
            == False
812
        )
813
    ]
814
815
    # create a load at bus1 with the link's hourly loading
816
    for i, k in zip(links_cb_1.bus1.values, links_cb_1.index):
817
        pset = pd.Series(
818
            index=network_prepared.snapshots,
819
            data=network_solved.links_t.p1[k].resample("H").ffill(),
820
        )
821
        pset["2011-12-31 22:00:00"] = pset["2011-12-31 21:00:00"]
822
        pset["2011-12-31 23:00:00"] = pset["2011-12-31 21:00:00"]
823
824
        network_prepared.add(
825
            "Load",
826
            "slack_fix_links " + i + " " + k,
827
            bus=i,
828
            p_set=pset,
829
            carrier=links_cb_1.loc[k, "carrier"],
830
        )
831
832
    # select all links which have at bus0 the bus which is kept
833
    links_cb_0 = network_solved.links[
834
        (
835
            network_solved.links["bus1"].isin(network_solved.buses.index)
836
            == False
837
        )
838
    ]
839
840
    # create a load at bus0 with the link's hourly loading
841
    for i, k in zip(links_cb_0.bus0.values, links_cb_0.index):
842
        pset = pd.Series(
843
            index=network_prepared.snapshots,
844
            data=network_solved.links_t.p0[k].resample("H").ffill(),
845
        )
846
        pset["2011-12-31 22:00:00"] = pset["2011-12-31 21:00:00"]
847
        pset["2011-12-31 23:00:00"] = pset["2011-12-31 21:00:00"]
848
849
        network_prepared.add(
850
            "Load",
851
            "slack_fix_links " + i + " " + k,
852
            bus=i,
853
            p_set=pset,
854
            carrier=links_cb_0.carrier[k],
855
        )
856
857
    # drop remaining foreign components
858
    for comp in network_solved.iterate_components():
859
        if "bus0" in comp.df.columns:
860
            network_solved.mremove(
861
                comp.name,
862
                comp.df[~comp.df.bus0.isin(network_solved.buses.index)].index,
863
            )
864
            network_solved.mremove(
865
                comp.name,
866
                comp.df[~comp.df.bus1.isin(network_solved.buses.index)].index,
867
            )
868
        elif "bus" in comp.df.columns:
869
            network_solved.mremove(
870
                comp.name,
871
                comp.df[~comp.df.bus.isin(network_solved.buses.index)].index,
872
            )
873
874
    # Combine urban decentral and rural heat
875
    network_prepared, network_solved = combine_decentral_and_rural_heat(
876
        network_solved, network_prepared
877
    )
878
879
    # writing components of neighboring countries to etrago tables
880
881
    # Set country tag for all buses
882
    network_solved.buses.country = network_solved.buses.index.str[:2]
883
    neighbors = network_solved.buses[network_solved.buses.country != "DE"]
884
885
    neighbors["new_index"] = (
886
        db.next_etrago_id("bus") + neighbors.reset_index().index
887
    )
888
889
    # Use index of AC buses created by electrical_neigbors
890
    foreign_ac_buses = db.select_dataframe(
891
        """
892
        SELECT * FROM grid.egon_etrago_bus
893
        WHERE carrier = 'AC' AND v_nom = 380
894
        AND country!= 'DE' AND scn_name ='eGon100RE'
895
        AND bus_id NOT IN (SELECT bus_i FROM osmtgmod_results.bus_data)
896
        """
897
    )
898
    buses_with_defined_id = neighbors[
899
        (neighbors.carrier == "AC")
900
        & (neighbors.country.isin(foreign_ac_buses.country.values))
901
    ].index
902
    neighbors.loc[buses_with_defined_id, "new_index"] = (
903
        foreign_ac_buses.set_index("x")
904
        .loc[neighbors.loc[buses_with_defined_id, "x"]]
905
        .bus_id.values
906
    )
907
908
    # lines, the foreign crossborder lines
909
    # (without crossborder lines to Germany!)
910
911
    neighbor_lines = network_solved.lines[
912
        network_solved.lines.bus0.isin(neighbors.index)
913
        & network_solved.lines.bus1.isin(neighbors.index)
914
    ]
915
    if not network_solved.lines_t["s_max_pu"].empty:
916
        neighbor_lines_t = network_prepared.lines_t["s_max_pu"][
917
            neighbor_lines.index
918
        ]
919
920
    neighbor_lines.reset_index(inplace=True)
921
    neighbor_lines.bus0 = (
922
        neighbors.loc[neighbor_lines.bus0, "new_index"].reset_index().new_index
923
    )
924
    neighbor_lines.bus1 = (
925
        neighbors.loc[neighbor_lines.bus1, "new_index"].reset_index().new_index
926
    )
927
    neighbor_lines.index += db.next_etrago_id("line")
928
929
    if not network_solved.lines_t["s_max_pu"].empty:
930
        for i in neighbor_lines_t.columns:
0 ignored issues
show
introduced by
The variable neighbor_lines_t does not seem to be defined in case BooleanNotNode on line 915 is False. Are you sure this can never be the case?
Loading history...
931
            new_index = neighbor_lines[neighbor_lines["name"] == i].index
932
            neighbor_lines_t.rename(columns={i: new_index[0]}, inplace=True)
933
934
    # links
935
    neighbor_links = network_solved.links[
936
        network_solved.links.bus0.isin(neighbors.index)
937
        & network_solved.links.bus1.isin(neighbors.index)
938
    ]
939
940
    neighbor_links.reset_index(inplace=True)
941
    neighbor_links.bus0 = (
942
        neighbors.loc[neighbor_links.bus0, "new_index"].reset_index().new_index
943
    )
944
    neighbor_links.bus1 = (
945
        neighbors.loc[neighbor_links.bus1, "new_index"].reset_index().new_index
946
    )
947
    neighbor_links.index += db.next_etrago_id("link")
948
949
    # generators
950
    neighbor_gens = network_solved.generators[
951
        network_solved.generators.bus.isin(neighbors.index)
952
    ]
953
    neighbor_gens_t = network_prepared.generators_t["p_max_pu"][
954
        neighbor_gens[
955
            neighbor_gens.index.isin(
956
                network_prepared.generators_t["p_max_pu"].columns
957
            )
958
        ].index
959
    ]
960
961
    gen_time = [
962
        "solar",
963
        "onwind",
964
        "solar rooftop",
965
        "offwind-ac",
966
        "offwind-dc",
967
        "solar-hsat",
968
        "urban central solar thermal",
969
        "rural solar thermal",
970
        "offwind-float",
971
    ]
972
973
    missing_gent = neighbor_gens[
974
        neighbor_gens["carrier"].isin(gen_time)
975
        & ~neighbor_gens.index.isin(neighbor_gens_t.columns)
976
    ].index
977
978
    gen_timeseries = network_prepared.generators_t["p_max_pu"].copy()
979
    for mgt in missing_gent:  # mgt: missing generator timeseries
980
        try:
981
            neighbor_gens_t[mgt] = gen_timeseries.loc[:, mgt[0:-5]]
982
        except:
983
            print(f"There are not timeseries for {mgt}")
984
985
    neighbor_gens.reset_index(inplace=True)
986
    neighbor_gens.bus = (
987
        neighbors.loc[neighbor_gens.bus, "new_index"].reset_index().new_index
988
    )
989
    neighbor_gens.index += db.next_etrago_id("generator")
990
991
    for i in neighbor_gens_t.columns:
992
        new_index = neighbor_gens[neighbor_gens["Generator"] == i].index
993
        neighbor_gens_t.rename(columns={i: new_index[0]}, inplace=True)
994
995
    # loads
996
    # imported from prenetwork in 1h-resolution
997
    neighbor_loads = network_prepared.loads[
998
        network_prepared.loads.bus.isin(neighbors.index)
999
    ]
1000
    neighbor_loads_t_index = neighbor_loads.index[
1001
        neighbor_loads.index.isin(network_prepared.loads_t.p_set.columns)
1002
    ]
1003
    neighbor_loads_t = network_prepared.loads_t["p_set"][
1004
        neighbor_loads_t_index
1005
    ]
1006
1007
    neighbor_loads.reset_index(inplace=True)
1008
    neighbor_loads.bus = (
1009
        neighbors.loc[neighbor_loads.bus, "new_index"].reset_index().new_index
1010
    )
1011
    neighbor_loads.index += db.next_etrago_id("load")
1012
1013
    for i in neighbor_loads_t.columns:
1014
        new_index = neighbor_loads[neighbor_loads["Load"] == i].index
1015
        neighbor_loads_t.rename(columns={i: new_index[0]}, inplace=True)
1016
1017
    # stores
1018
    neighbor_stores = network_solved.stores[
1019
        network_solved.stores.bus.isin(neighbors.index)
1020
    ]
1021
    neighbor_stores_t_index = neighbor_stores.index[
1022
        neighbor_stores.index.isin(network_solved.stores_t.e_min_pu.columns)
1023
    ]
1024
    neighbor_stores_t = network_prepared.stores_t["e_min_pu"][
1025
        neighbor_stores_t_index
1026
    ]
1027
1028
    neighbor_stores.reset_index(inplace=True)
1029
    neighbor_stores.bus = (
1030
        neighbors.loc[neighbor_stores.bus, "new_index"].reset_index().new_index
1031
    )
1032
    neighbor_stores.index += db.next_etrago_id("store")
1033
1034
    for i in neighbor_stores_t.columns:
1035
        new_index = neighbor_stores[neighbor_stores["Store"] == i].index
1036
        neighbor_stores_t.rename(columns={i: new_index[0]}, inplace=True)
1037
1038
    # storage_units
1039
    neighbor_storage = network_solved.storage_units[
1040
        network_solved.storage_units.bus.isin(neighbors.index)
1041
    ]
1042
    neighbor_storage_t_index = neighbor_storage.index[
1043
        neighbor_storage.index.isin(
1044
            network_solved.storage_units_t.inflow.columns
1045
        )
1046
    ]
1047
    neighbor_storage_t = network_prepared.storage_units_t["inflow"][
1048
        neighbor_storage_t_index
1049
    ]
1050
1051
    neighbor_storage.reset_index(inplace=True)
1052
    neighbor_storage.bus = (
1053
        neighbors.loc[neighbor_storage.bus, "new_index"]
1054
        .reset_index()
1055
        .new_index
1056
    )
1057
    neighbor_storage.index += db.next_etrago_id("storage")
1058
1059
    for i in neighbor_storage_t.columns:
1060
        new_index = neighbor_storage[
1061
            neighbor_storage["StorageUnit"] == i
1062
        ].index
1063
        neighbor_storage_t.rename(columns={i: new_index[0]}, inplace=True)
1064
1065
    # Connect to local database
1066
    engine = db.engine()
1067
1068
    neighbors["scn_name"] = "eGon100RE"
1069
    neighbors.index = neighbors["new_index"]
1070
1071
    # Correct geometry for non AC buses
1072
    carriers = set(neighbors.carrier.to_list())
1073
    carriers = [e for e in carriers if e not in ("AC")]
1074
    non_AC_neighbors = pd.DataFrame()
1075
    for c in carriers:
1076
        c_neighbors = neighbors[neighbors.carrier == c].set_index(
1077
            "location", drop=False
1078
        )
1079
        for i in ["x", "y"]:
1080
            c_neighbors = c_neighbors.drop(i, axis=1)
1081
        coordinates = neighbors[neighbors.carrier == "AC"][
1082
            ["location", "x", "y"]
1083
        ].set_index("location")
1084
        c_neighbors = pd.concat([coordinates, c_neighbors], axis=1).set_index(
1085
            "new_index", drop=False
1086
        )
1087
        non_AC_neighbors = pd.concat([non_AC_neighbors, c_neighbors])
1088
1089
    neighbors = pd.concat(
1090
        [neighbors[neighbors.carrier == "AC"], non_AC_neighbors]
1091
    )
1092
1093
    for i in [
1094
        "new_index",
1095
        "control",
1096
        "generator",
1097
        "location",
1098
        "sub_network",
1099
        "unit",
1100
        "substation_lv",
1101
        "substation_off",
1102
    ]:
1103
        neighbors = neighbors.drop(i, axis=1)
1104
1105
    # Add geometry column
1106
    neighbors = (
1107
        gpd.GeoDataFrame(
1108
            neighbors, geometry=gpd.points_from_xy(neighbors.x, neighbors.y)
1109
        )
1110
        .rename_geometry("geom")
1111
        .set_crs(4326)
1112
    )
1113
1114
    # Unify carrier names
1115
    neighbors.carrier = neighbors.carrier.str.replace(" ", "_")
1116
    neighbors.carrier.replace(
1117
        {
1118
            "gas": "CH4",
1119
            "gas_for_industry": "CH4_for_industry",
1120
            "urban_central_heat": "central_heat",
1121
            "EV_battery": "Li_ion",
1122
            "urban_central_water_tanks": "central_heat_store",
1123
            "rural_water_tanks": "rural_heat_store",
1124
        },
1125
        inplace=True,
1126
    )
1127
1128
    neighbors[~neighbors.carrier.isin(["AC"])].to_postgis(
1129
        "egon_etrago_bus",
1130
        engine,
1131
        schema="grid",
1132
        if_exists="append",
1133
        index=True,
1134
        index_label="bus_id",
1135
    )
1136
1137
    # prepare and write neighboring crossborder lines to etrago tables
1138
    def lines_to_etrago(neighbor_lines=neighbor_lines, scn="eGon100RE"):
        """Write neighboring cross-border AC lines to the eTraGo line table.

        Adds scenario metadata and cable count, drops PyPSA-internal
        columns, builds a line geometry ('topo') from the bus
        coordinates and inserts the result into ``grid.egon_etrago_line``.

        Parameters
        ----------
        neighbor_lines : pandas.DataFrame
            Cross-border lines between neighboring countries
            (mutated in place for the metadata columns).
        scn : str
            Scenario name written to the ``scn_name`` column.

        Returns
        -------
        None

        """
        neighbor_lines["scn_name"] = scn
        # 3 cables per parallel AC circuit
        neighbor_lines["cables"] = 3 * neighbor_lines["num_parallel"].astype(
            int
        )
        neighbor_lines["s_nom"] = neighbor_lines["s_nom_min"]

        # Drop PyPSA-internal columns without a counterpart in eTraGo
        neighbor_lines = neighbor_lines.drop(
            columns=[
                "Line",
                "x_pu_eff",
                "r_pu_eff",
                "sub_network",
                "x_pu",
                "r_pu",
                "g_pu",
                "b_pu",
                "s_nom_opt",
                "i_nom",
                "dc",
            ]
        )

        # Define geometry and add to lines dataframe as 'topo'
        gdf = gpd.GeoDataFrame(index=neighbor_lines.index)
        gdf["geom_bus0"] = neighbors.geom[neighbor_lines.bus0].values
        gdf["geom_bus1"] = neighbors.geom[neighbor_lines.bus1].values
        gdf["geometry"] = gdf.apply(
            lambda x: LineString([x["geom_bus0"], x["geom_bus1"]]), axis=1
        )

        neighbor_lines = (
            gpd.GeoDataFrame(neighbor_lines, geometry=gdf["geometry"])
            .rename_geometry("topo")
            .set_crs(4326)
        )

        neighbor_lines["lifetime"] = get_sector_parameters("electricity", scn)[
            "lifetime"
        ]["ac_ehv_overhead_line"]

        neighbor_lines.to_postgis(
            "egon_etrago_line",
            engine,
            schema="grid",
            if_exists="append",
            index=True,
            index_label="line_id",
        )
1186
1187
    lines_to_etrago(neighbor_lines=neighbor_lines, scn="eGon100RE")
1188
1189
    def links_to_etrago(neighbor_links, scn="eGon100RE", extendable=True):
        """Prepare and write neighboring crossborder links to eTraGo table

        This function prepares the neighboring crossborder links
        generated by the PyPSA-eur-sec (p-e-s) run by:
          * Deleting the useless columns
          * If extendable is false only (non default case):
              * Replacing p_nom = 0 with the p_nom_opt values (arising
                from the p-e-s optimisation)
              * Setting p_nom_extendable to false
          * Adding geometry to the links: 'geom' and 'topo' columns
          * Changing the name of the carriers to be consistent with
            eGon-data

        The function then inserts the links into the eTraGo table and
        has no return.

        Parameters
        ----------
        neighbor_links : pandas.DataFrame
            Dataframe containing the neighboring crossborder links
        scn : str
            Name of the scenario
        extendable : bool
            Boolean expressing if the links should be extendable or not

        Returns
        -------
        None

        """
        neighbor_links["scn_name"] = scn

        # PyPSA-internal / unused columns without a counterpart in eTraGo
        dropped_carriers = [
            "Link",
            "geometry",
            "tags",
            "under_construction",
            "underground",
            "underwater_fraction",
            "bus2",
            "bus3",
            "bus4",
            "efficiency2",
            "efficiency3",
            "efficiency4",
            "lifetime",
            "pipe_retrofit",
            "committable",
            "start_up_cost",
            "shut_down_cost",
            "min_up_time",
            "min_down_time",
            "up_time_before",
            "down_time_before",
            "ramp_limit_up",
            "ramp_limit_down",
            "ramp_limit_start_up",
            "ramp_limit_shut_down",
            "length_original",
            "reversed",
            "location",
            "project_status",
            "dc",
            "voltage",
        ]

        if extendable:
            dropped_carriers.append("p_nom_opt")
            neighbor_links = neighbor_links.drop(
                columns=dropped_carriers,
                errors="ignore",
            )

        else:
            # Fix the capacities to the p-e-s optimisation results
            dropped_carriers.append("p_nom")
            dropped_carriers.append("p_nom_extendable")
            neighbor_links = neighbor_links.drop(
                columns=dropped_carriers,
                errors="ignore",
            )
            neighbor_links = neighbor_links.rename(
                columns={"p_nom_opt": "p_nom"}
            )
            neighbor_links["p_nom_extendable"] = False

        if neighbor_links.empty:
            print("No links selected")
            return

        # Define geometry and add to lines dataframe as 'topo'
        gdf = gpd.GeoDataFrame(
            index=neighbor_links.index,
            data={
                "geom_bus0": neighbors.loc[neighbor_links.bus0, "geom"].values,
                "geom_bus1": neighbors.loc[neighbor_links.bus1, "geom"].values,
            },
        )

        gdf["geometry"] = gdf.apply(
            lambda x: LineString([x["geom_bus0"], x["geom_bus1"]]), axis=1
        )

        neighbor_links = (
            gpd.GeoDataFrame(neighbor_links, geometry=gdf["geometry"])
            .rename_geometry("topo")
            .set_crs(4326)
        )

        # Unify carrier names
        neighbor_links.carrier = neighbor_links.carrier.str.replace(" ", "_")

        neighbor_links.carrier.replace(
            {
                "H2_Electrolysis": "power_to_H2",
                "H2_Fuel_Cell": "H2_to_power",
                "H2_pipeline_retrofitted": "H2_retrofit",
                "SMR": "CH4_to_H2",
                "Sabatier": "H2_to_CH4",
                "gas_for_industry": "CH4_for_industry",
                "gas_pipeline": "CH4",
                "urban_central_gas_boiler": "central_gas_boiler",
                "urban_central_resistive_heater": "central_resistive_heater",
                "urban_central_water_tanks_charger": "central_heat_store_charger",
                "urban_central_water_tanks_discharger": "central_heat_store_discharger",
                "rural_water_tanks_charger": "rural_heat_store_charger",
                "rural_water_tanks_discharger": "rural_heat_store_discharger",
                "urban_central_gas_CHP": "central_gas_CHP",
                "urban_central_air_heat_pump": "central_heat_pump",
                "rural_ground_heat_pump": "rural_heat_pump",
            },
            inplace=True,
        )

        # Map hydrogen carriers to their lifetime parameter names
        H2_links = {
            "H2_to_CH4": "H2_to_CH4",
            "H2_to_power": "H2_to_power",
            "power_to_H2": "power_to_H2_system",
            "CH4_to_H2": "CH4_to_H2",
        }

        for carrier, parameter in H2_links.items():
            neighbor_links.loc[
                (neighbor_links.carrier == carrier),
                "lifetime",
            ] = get_sector_parameters("gas", "eGon100RE")["lifetime"][
                parameter
            ]

        neighbor_links.to_postgis(
            "egon_etrago_link",
            engine,
            schema="grid",
            if_exists="append",
            index=True,
            index_label="link_id",
        )
1347
1348
    extendable_links_carriers = [
1349
        "battery charger",
1350
        "battery discharger",
1351
        "home battery charger",
1352
        "home battery discharger",
1353
        "rural water tanks charger",
1354
        "rural water tanks discharger",
1355
        "urban central water tanks charger",
1356
        "urban central water tanks discharger",
1357
        "urban decentral water tanks charger",
1358
        "urban decentral water tanks discharger",
1359
        "H2 Electrolysis",
1360
        "H2 Fuel Cell",
1361
        "SMR",
1362
        "Sabatier",
1363
    ]
1364
1365
    # delete unwanted carriers for eTraGo
1366
    excluded_carriers = [
1367
        "gas for industry CC",
1368
        "SMR CC",
1369
        "DAC",
1370
    ]
1371
    neighbor_links = neighbor_links[
1372
        ~neighbor_links.carrier.isin(excluded_carriers)
1373
    ]
1374
1375
    # Combine CHP_CC and CHP
1376
    chp_cc = neighbor_links[
1377
        neighbor_links.carrier == "urban central gas CHP CC"
1378
    ]
1379
    for index, row in chp_cc.iterrows():
1380
        neighbor_links.loc[
1381
            neighbor_links.Link == row.Link.replace("CHP CC", "CHP"),
1382
            "p_nom_opt",
1383
        ] += row.p_nom_opt
1384
        neighbor_links.loc[
1385
            neighbor_links.Link == row.Link.replace("CHP CC", "CHP"), "p_nom"
1386
        ] += row.p_nom
1387
        neighbor_links.drop(index, inplace=True)
1388
1389
    # Combine heat pumps
1390
    # Like in Germany, there are air heat pumps in central heat grids
1391
    # and ground heat pumps in rural areas
1392
    rural_air = neighbor_links[neighbor_links.carrier == "rural air heat pump"]
1393
    for index, row in rural_air.iterrows():
1394
        neighbor_links.loc[
1395
            neighbor_links.Link == row.Link.replace("air", "ground"),
1396
            "p_nom_opt",
1397
        ] += row.p_nom_opt
1398
        neighbor_links.loc[
1399
            neighbor_links.Link == row.Link.replace("air", "ground"), "p_nom"
1400
        ] += row.p_nom
1401
        neighbor_links.drop(index, inplace=True)
1402
    links_to_etrago(
1403
        neighbor_links[neighbor_links.carrier.isin(extendable_links_carriers)],
1404
        "eGon100RE",
1405
    )
1406
    links_to_etrago(
1407
        neighbor_links[
1408
            ~neighbor_links.carrier.isin(extendable_links_carriers)
1409
        ],
1410
        "eGon100RE",
1411
        extendable=False,
1412
    )
1413
    # Include links time-series
1414
    # For heat_pumps
1415
    hp = neighbor_links[neighbor_links["carrier"].str.contains("heat pump")]
1416
1417
    neighbor_eff_t = network_prepared.links_t["efficiency"][
1418
        hp[hp.Link.isin(network_prepared.links_t["efficiency"].columns)].index
1419
    ]
1420
1421
    missing_hp = hp[~hp["Link"].isin(neighbor_eff_t.columns)].Link
1422
1423
    eff_timeseries = network_prepared.links_t["efficiency"].copy()
1424
    for met in missing_hp:  # met: missing efficiency timeseries
1425
        try:
1426
            neighbor_eff_t[met] = eff_timeseries.loc[:, met[0:-5]]
1427
        except:
1428
            print(f"There are not timeseries for heat_pump {met}")
1429
1430
    for i in neighbor_eff_t.columns:
1431
        new_index = neighbor_links[neighbor_links["Link"] == i].index
1432
        neighbor_eff_t.rename(columns={i: new_index[0]}, inplace=True)
1433
1434
    # Include links time-series
1435
    # For ev_chargers
1436
    ev = neighbor_links[neighbor_links["carrier"].str.contains("BEV charger")]
1437
1438
    ev_p_max_pu = network_prepared.links_t["p_max_pu"][
1439
        ev[ev.Link.isin(network_prepared.links_t["p_max_pu"].columns)].index
1440
    ]
1441
1442
    missing_ev = ev[~ev["Link"].isin(ev_p_max_pu.columns)].Link
1443
1444
    ev_p_max_pu_timeseries = network_prepared.links_t["p_max_pu"].copy()
1445
    for mct in missing_ev:  # evt: missing charger timeseries
1446
        try:
1447
            ev_p_max_pu[mct] = ev_p_max_pu_timeseries.loc[:, mct[0:-5]]
1448
        except:
1449
            print(f"There are not timeseries for EV charger {mct}")
1450
1451
    for i in ev_p_max_pu.columns:
1452
        new_index = neighbor_links[neighbor_links["Link"] == i].index
1453
        ev_p_max_pu.rename(columns={i: new_index[0]}, inplace=True)
1454
1455
    # prepare neighboring generators for etrago tables
1456
    neighbor_gens["scn_name"] = "eGon100RE"
1457
    neighbor_gens["p_nom"] = neighbor_gens["p_nom_opt"]
1458
    neighbor_gens["p_nom_extendable"] = False
1459
1460
    # Unify carrier names
1461
    neighbor_gens.carrier = neighbor_gens.carrier.str.replace(" ", "_")
1462
1463
    neighbor_gens.carrier.replace(
1464
        {
1465
            "onwind": "wind_onshore",
1466
            "ror": "run_of_river",
1467
            "offwind-ac": "wind_offshore",
1468
            "offwind-dc": "wind_offshore",
1469
            "offwind-float": "wind_offshore",
1470
            "urban_central_solar_thermal": "urban_central_solar_thermal_collector",
1471
            "residential_rural_solar_thermal": "residential_rural_solar_thermal_collector",
1472
            "services_rural_solar_thermal": "services_rural_solar_thermal_collector",
1473
            "solar-hsat": "solar",
1474
        },
1475
        inplace=True,
1476
    )
1477
1478
    for i in [
1479
        "Generator",
1480
        "weight",
1481
        "lifetime",
1482
        "p_set",
1483
        "q_set",
1484
        "p_nom_opt",
1485
        "e_sum_min",
1486
        "e_sum_max",
1487
    ]:
1488
        neighbor_gens = neighbor_gens.drop(i, axis=1)
1489
1490
    neighbor_gens.to_sql(
1491
        "egon_etrago_generator",
1492
        engine,
1493
        schema="grid",
1494
        if_exists="append",
1495
        index=True,
1496
        index_label="generator_id",
1497
    )
1498
1499
    # prepare neighboring loads for etrago tables
1500
    neighbor_loads["scn_name"] = "eGon100RE"
1501
1502
    # Unify carrier names
1503
    neighbor_loads.carrier = neighbor_loads.carrier.str.replace(" ", "_")
1504
1505
    neighbor_loads.carrier.replace(
1506
        {
1507
            "electricity": "AC",
1508
            "DC": "AC",
1509
            "industry_electricity": "AC",
1510
            "H2_pipeline_retrofitted": "H2_system_boundary",
1511
            "gas_pipeline": "CH4_system_boundary",
1512
            "gas_for_industry": "CH4_for_industry",
1513
            "urban_central_heat": "central_heat",
1514
        },
1515
        inplace=True,
1516
    )
1517
1518
    neighbor_loads = neighbor_loads.drop(
1519
        columns=["Load"],
1520
        errors="ignore",
1521
    )
1522
1523
    neighbor_loads.to_sql(
1524
        "egon_etrago_load",
1525
        engine,
1526
        schema="grid",
1527
        if_exists="append",
1528
        index=True,
1529
        index_label="load_id",
1530
    )
1531
1532
    # prepare neighboring stores for etrago tables
1533
    neighbor_stores["scn_name"] = "eGon100RE"
1534
1535
    # Unify carrier names
1536
    neighbor_stores.carrier = neighbor_stores.carrier.str.replace(" ", "_")
1537
1538
    neighbor_stores.carrier.replace(
1539
        {
1540
            "Li_ion": "battery",
1541
            "gas": "CH4",
1542
            "urban_central_water_tanks": "central_heat_store",
1543
            "rural_water_tanks": "rural_heat_store",
1544
            "EV_battery": "battery_storage",
1545
        },
1546
        inplace=True,
1547
    )
1548
    neighbor_stores.loc[
1549
        (
1550
            (neighbor_stores.e_nom_max <= 1e9)
1551
            & (neighbor_stores.carrier == "H2_Store")
1552
        ),
1553
        "carrier",
1554
    ] = "H2_underground"
1555
    neighbor_stores.loc[
1556
        (
1557
            (neighbor_stores.e_nom_max > 1e9)
1558
            & (neighbor_stores.carrier == "H2_Store")
1559
        ),
1560
        "carrier",
1561
    ] = "H2_overground"
1562
1563
    for i in [
1564
        "Store",
1565
        "p_set",
1566
        "q_set",
1567
        "e_nom_opt",
1568
        "lifetime",
1569
        "e_initial_per_period",
1570
        "e_cyclic_per_period",
1571
        "location",
1572
    ]:
1573
        neighbor_stores = neighbor_stores.drop(i, axis=1, errors="ignore")
1574
1575
    for c in ["H2_underground", "H2_overground"]:
1576
        neighbor_stores.loc[
1577
            (neighbor_stores.carrier == c),
1578
            "lifetime",
1579
        ] = get_sector_parameters("gas", "eGon100RE")["lifetime"][c]
1580
1581
    neighbor_stores.to_sql(
1582
        "egon_etrago_store",
1583
        engine,
1584
        schema="grid",
1585
        if_exists="append",
1586
        index=True,
1587
        index_label="store_id",
1588
    )
1589
1590
    # prepare neighboring storage_units for etrago tables
1591
    neighbor_storage["scn_name"] = "eGon100RE"
1592
1593
    # Unify carrier names
1594
    neighbor_storage.carrier = neighbor_storage.carrier.str.replace(" ", "_")
1595
1596
    neighbor_storage.carrier.replace(
1597
        {"PHS": "pumped_hydro", "hydro": "reservoir"}, inplace=True
1598
    )
1599
1600
    for i in [
1601
        "StorageUnit",
1602
        "p_nom_opt",
1603
        "state_of_charge_initial_per_period",
1604
        "cyclic_state_of_charge_per_period",
1605
    ]:
1606
        neighbor_storage = neighbor_storage.drop(i, axis=1, errors="ignore")
1607
1608
    neighbor_storage.to_sql(
1609
        "egon_etrago_storage",
1610
        engine,
1611
        schema="grid",
1612
        if_exists="append",
1613
        index=True,
1614
        index_label="storage_id",
1615
    )
1616
1617
    # writing neighboring loads_t p_sets to etrago tables
1618
1619
    neighbor_loads_t_etrago = pd.DataFrame(
1620
        columns=["scn_name", "temp_id", "p_set"],
1621
        index=neighbor_loads_t.columns,
1622
    )
1623
    neighbor_loads_t_etrago["scn_name"] = "eGon100RE"
1624
    neighbor_loads_t_etrago["temp_id"] = 1
1625
    for i in neighbor_loads_t.columns:
1626
        neighbor_loads_t_etrago["p_set"][i] = neighbor_loads_t[
1627
            i
1628
        ].values.tolist()
1629
1630
    neighbor_loads_t_etrago.to_sql(
1631
        "egon_etrago_load_timeseries",
1632
        engine,
1633
        schema="grid",
1634
        if_exists="append",
1635
        index=True,
1636
        index_label="load_id",
1637
    )
1638
1639
    # writing neighboring link_t efficiency and p_max_pu to etrago tables
1640
    neighbor_link_t_etrago = pd.DataFrame(
1641
        columns=["scn_name", "temp_id", "p_max_pu", "efficiency"],
1642
        index=neighbor_eff_t.columns.to_list() + ev_p_max_pu.columns.to_list(),
1643
    )
1644
    neighbor_link_t_etrago["scn_name"] = "eGon100RE"
1645
    neighbor_link_t_etrago["temp_id"] = 1
1646
    for i in neighbor_eff_t.columns:
1647
        neighbor_link_t_etrago["efficiency"][i] = neighbor_eff_t[
1648
            i
1649
        ].values.tolist()
1650
    for i in ev_p_max_pu.columns:
1651
        neighbor_link_t_etrago["p_max_pu"][i] = ev_p_max_pu[i].values.tolist()
1652
1653
    neighbor_link_t_etrago.to_sql(
1654
        "egon_etrago_link_timeseries",
1655
        engine,
1656
        schema="grid",
1657
        if_exists="append",
1658
        index=True,
1659
        index_label="link_id",
1660
    )
1661
1662
    # writing neighboring generator_t p_max_pu to etrago tables
1663
    neighbor_gens_t_etrago = pd.DataFrame(
1664
        columns=["scn_name", "temp_id", "p_max_pu"],
1665
        index=neighbor_gens_t.columns,
1666
    )
1667
    neighbor_gens_t_etrago["scn_name"] = "eGon100RE"
1668
    neighbor_gens_t_etrago["temp_id"] = 1
1669
    for i in neighbor_gens_t.columns:
1670
        neighbor_gens_t_etrago["p_max_pu"][i] = neighbor_gens_t[
1671
            i
1672
        ].values.tolist()
1673
1674
    neighbor_gens_t_etrago.to_sql(
1675
        "egon_etrago_generator_timeseries",
1676
        engine,
1677
        schema="grid",
1678
        if_exists="append",
1679
        index=True,
1680
        index_label="generator_id",
1681
    )
1682
1683
    # writing neighboring stores_t e_min_pu to etrago tables
1684
    neighbor_stores_t_etrago = pd.DataFrame(
1685
        columns=["scn_name", "temp_id", "e_min_pu"],
1686
        index=neighbor_stores_t.columns,
1687
    )
1688
    neighbor_stores_t_etrago["scn_name"] = "eGon100RE"
1689
    neighbor_stores_t_etrago["temp_id"] = 1
1690
    for i in neighbor_stores_t.columns:
1691
        neighbor_stores_t_etrago["e_min_pu"][i] = neighbor_stores_t[
1692
            i
1693
        ].values.tolist()
1694
1695
    neighbor_stores_t_etrago.to_sql(
1696
        "egon_etrago_store_timeseries",
1697
        engine,
1698
        schema="grid",
1699
        if_exists="append",
1700
        index=True,
1701
        index_label="store_id",
1702
    )
1703
1704
    # writing neighboring storage_units inflow to etrago tables
1705
    neighbor_storage_t_etrago = pd.DataFrame(
1706
        columns=["scn_name", "temp_id", "inflow"],
1707
        index=neighbor_storage_t.columns,
1708
    )
1709
    neighbor_storage_t_etrago["scn_name"] = "eGon100RE"
1710
    neighbor_storage_t_etrago["temp_id"] = 1
1711
    for i in neighbor_storage_t.columns:
1712
        neighbor_storage_t_etrago["inflow"][i] = neighbor_storage_t[
1713
            i
1714
        ].values.tolist()
1715
1716
    neighbor_storage_t_etrago.to_sql(
1717
        "egon_etrago_storage_timeseries",
1718
        engine,
1719
        schema="grid",
1720
        if_exists="append",
1721
        index=True,
1722
        index_label="storage_id",
1723
    )
1724
1725
    # writing neighboring lines_t s_max_pu to etrago tables
1726
    if not network_solved.lines_t["s_max_pu"].empty:
1727
        neighbor_lines_t_etrago = pd.DataFrame(
1728
            columns=["scn_name", "s_max_pu"], index=neighbor_lines_t.columns
1729
        )
1730
        neighbor_lines_t_etrago["scn_name"] = "eGon100RE"
1731
1732
        for i in neighbor_lines_t.columns:
1733
            neighbor_lines_t_etrago["s_max_pu"][i] = neighbor_lines_t[
1734
                i
1735
            ].values.tolist()
1736
1737
        neighbor_lines_t_etrago.to_sql(
1738
            "egon_etrago_line_timeseries",
1739
            engine,
1740
            schema="grid",
1741
            if_exists="append",
1742
            index=True,
1743
            index_label="line_id",
1744
        )
1745
1746
1747 View Code Duplication
def prepared_network(planning_horizon=3):
    """Load a prepared (pre-solve) pypsa-eur network.

    Parameters
    ----------
    planning_horizon : int, optional
        Position within the configured ``planning_horizons`` list whose
        prenetwork is loaded. Only relevant when pypsa-eur is executed;
        otherwise the pre-built 2045 network from the data bundle is used.

    Returns
    -------
    pypsa.Network
        The prenetwork before solving.
    """
    run_pypsa_eur = egon.data.config.settings()["egon-data"]["--run-pypsa-eur"]

    if run_pypsa_eur:
        with open(
            __path__[0] + "/datasets/pypsaeur/config_prepare.yaml", "r"
        ) as stream:
            data_config = yaml.safe_load(stream)

        scenario = data_config["scenario"]
        # File name follows pypsa-eur's naming scheme for prenetworks
        file_name = (
            f"base_s_{scenario['clusters'][0]}"
            f"_l{scenario['ll'][0]}"
            f"_{scenario['opts'][0]}"
            f"_{scenario['sector_opts'][0]}"
            f"_{scenario['planning_horizons'][planning_horizon]}.nc"
        )
        target_file = (
            Path(".")
            / "run-pypsa-eur"
            / "pypsa-eur"
            / "results"
            / data_config["run"]["name"]
            / "prenetworks"
            / file_name
        )

    else:
        # pypsa-eur was not run: fall back to the pre-built network
        # shipped with the egon data bundle.
        target_file = (
            Path(".")
            / "data_bundle_egon_data"
            / "pypsa_eur"
            / "prenetworks"
            / "prenetwork_post-manipulate_pre-solve"
            / "base_s_39_lc1.25__cb40ex0-T-H-I-B-solar+p3-dist1_2045.nc"
        )

    return pypsa.Network(target_file.absolute().as_posix())
1779
1780
1781
def overwrite_H2_pipeline_share():
    """Overwrite retrofitted_CH4pipeline-to-H2pipeline_share value

    Overwrite retrofitted_CH4pipeline-to-H2pipeline_share in the
    scenario parameter table if p-e-s is run.
    This function writes into the database and has no return.

    """
    scn_name = "eGon100RE"
    # Select source and target from dataset configuration
    target = egon.data.config.datasets()["pypsa-eur-sec"]["target"]

    n = read_network()

    H2_pipelines = n.links[n.links["carrier"] == "H2 pipeline retrofitted"]
    CH4_pipelines = n.links[n.links["carrier"] == "gas pipeline"]
    # Mean share of retrofitted H2 capacity relative to CH4 pipeline capacity.
    # Pairs are matched positionally — assumes both selections are ordered
    # consistently (TODO confirm). Pipelines with zero CH4 capacity are
    # skipped to avoid a ZeroDivisionError.
    H2_pipes_share = np.mean(
        [
            (i / j)
            for i, j in zip(
                H2_pipelines.p_nom_opt.to_list(), CH4_pipelines.p_nom.to_list()
            )
            if j != 0
        ]
    )
    logger.info(
        "retrofitted_CH4pipeline-to-H2pipeline_share = " + str(H2_pipes_share)
    )

    parameters = db.select_dataframe(
        f"""
        SELECT *
        FROM {target['scenario_parameters']['schema']}.{target['scenario_parameters']['table']}
        WHERE name = '{scn_name}'
        """
    )

    # gas_parameters is stored as JSON; update the share and write it back
    gas_param = parameters.loc[0, "gas_parameters"]
    gas_param["retrofitted_CH4pipeline-to-H2pipeline_share"] = H2_pipes_share
    gas_param = json.dumps(gas_param)

    # Update data in db
    db.execute_sql(
        f"""
    UPDATE {target['scenario_parameters']['schema']}.{target['scenario_parameters']['table']}
    SET gas_parameters = '{gas_param}'
    WHERE name = '{scn_name}';
    """
    )
1829
1830
1831
def update_electrical_timeseries_germany(network):
    """Replace electrical demand time series in Germany with data from egon-data

    Parameters
    ----------
    network : pypsa.Network
        Network including demand time series from pypsa-eur.
        ``network.year`` must be set by the caller.

    Returns
    -------
    network : pypsa.Network
        Network including electrical demand time series in Germany from egon-data

    """
    year = network.year
    # Temporal resolution: number of original snapshots aggregated into one
    skip = network.snapshot_weightings.objective.iloc[0].astype("int")
    df = pd.read_csv(
        "input-pypsa-eur-sec/electrical_demand_timeseries_DE_eGon100RE.csv"
    )

    annual_demand = pd.Series(index=[2019, 2037])
    annual_demand_industry = pd.Series(index=[2019, 2037])
    # Define values from status2019 for interpolation
    # Residential and service (in TWh)
    annual_demand.loc[2019] = 124.71 + 143.26
    # Industry (in TWh)
    annual_demand_industry.loc[2019] = 241.925

    # Define values from NEP 2023 scenario B 2037 for interpolation
    # Residential and service (in TWh)
    annual_demand.loc[2037] = 104 + 153.1
    # Industry (in TWh)
    annual_demand_industry.loc[2037] = 334.0

    # Set interpolated demands for years between 2019 and 2045
    if year < 2037:
        # Calculate annual demands for year by linearly interpolating between
        # 2019 and 2037.
        # Done separately for industry and residential and service to fit
        # to pypsa-eur's structure.
        annual_rate = (annual_demand.loc[2037] - annual_demand.loc[2019]) / (
            2037 - 2019
        )
        annual_demand_year = annual_demand.loc[2019] + annual_rate * (
            year - 2019
        )

        annual_rate_industry = (
            annual_demand_industry.loc[2037] - annual_demand_industry.loc[2019]
        ) / (2037 - 2019)
        annual_demand_year_industry = annual_demand_industry.loc[
            2019
        ] + annual_rate_industry * (year - 2019)

        # Scale time series for 100% scenario with the annual demands
        # The shape of the curve is taken from the 100% scenario since the
        # same weather and calendar year is used there
        network.loads_t.p_set.loc[:, "DE0 0"] = (
            df["residential_and_service"].loc[::skip]
            / df["residential_and_service"].sum()
            * annual_demand_year
            * 1e6
        ).values

        network.loads_t.p_set.loc[:, "DE0 0 industry electricity"] = (
            df["industry"].loc[::skip]
            / df["industry"].sum()
            * annual_demand_year_industry
            * 1e6
        ).values

    elif year == 2045:
        # Bug fix: assign raw values. Assigning the pandas Series directly
        # aligns the CSV's integer index against the snapshot index and
        # would fill the column with NaN (the industry assignment below
        # already used .values).
        network.loads_t.p_set.loc[:, "DE0 0"] = (
            df["residential_and_service"].loc[::skip].values
        )

        network.loads_t.p_set.loc[:, "DE0 0 industry electricity"] = (
            df["industry"].loc[::skip].values
        )

    else:
        print(
            "Scaling not implemented for years between 2037 and 2045 and beyond."
        )
        # Bug fix: return the unchanged network instead of None so callers
        # chaining `network = manipulator(network)` keep a valid object.
        return network

    # The static industry load is replaced by the time series set above
    network.loads.loc["DE0 0 industry electricity", "p_set"] = 0.0

    return network
1920
1921
1922
def geothermal_district_heating(network):
    """Add the option to build geothermal power plants in district heating in Germany

    Parameters
    ----------
    network : pypsa.Network
        Network from pypsa-eur without geothermal generators

    Returns
    -------
    network : pypsa.Network
        Updated network with geothermal generators

    """
    # Lifetime of a geothermal plant is set to 30 years based on:
    # Ableitung eines Korridors für den Ausbau der erneuerbaren Wärme im
    # Gebäudebereich, Beuth Hochschule für Technik, Berlin / ifeu – Institut
    # für Energie- und Umweltforschung Heidelberg GmbH, Februar 2017
    lifetime_geothermal = 30

    costs_and_potentials = pd.read_csv(
        "input-pypsa-eur-sec/geothermal_potential_germany.csv"
    )

    network.add("Carrier", "urban central geo thermal")

    for idx, plant in costs_and_potentials.iterrows():
        # NOTE(review): 'cost [EUR/kW]' is scaled by 1e6 before annualizing —
        # verify the unit conversion against the input file.
        annualized_cost = annualize_capital_costs(
            plant["cost [EUR/kW]"] * 1e6, lifetime_geothermal, 0.07
        )

        network.add(
            "Generator",
            f"DE0 0 urban central geo thermal {idx}",
            bus="DE0 0 urban central heat",
            carrier="urban central geo thermal",
            p_nom_extendable=True,
            p_nom_max=plant["potential [MW]"],
            capital_cost=annualized_cost,
        )
    return network
1962
1963
1964
def h2_overground_stores(network):
    """Add hydrogen overground stores to each hydrogen node

    In pypsa-eur, only countries without the potential of underground hydrogen
    stores have the option to build overground hydrogen tanks.
    Overground stores are more expensive, but are not restricted by the
    geological potential. To allow higher hydrogen store capacities in each
    country, optional hydrogen overground tanks are also added to nodes with
    a potential for underground stores.

    Parameters
    ----------
    network : pypsa.Network
        Network without hydrogen overground stores at each hydrogen node

    Returns
    -------
    network : pypsa.Network
        Network with hydrogen overground stores at each hydrogen node

    """
    h2_stores = network.stores[network.stores.carrier == "H2 Store"]

    # Stores with a finite e_nom_max are geologically limited underground
    # caverns; those with infinite e_nom_max are already overground tanks.
    underground_h2_stores = h2_stores[h2_stores.e_nom_max != np.inf]
    overground_h2_stores = h2_stores[h2_stores.e_nom_max == np.inf]

    # Add an (unlimited, more expensive) overground tank next to every
    # underground store, priced at the mean cost of existing overground tanks.
    network.madd(
        "Store",
        underground_h2_stores.bus + " overground Store",
        bus=underground_h2_stores.bus.values,
        e_nom_extendable=True,
        e_cyclic=True,
        carrier="H2 Store",
        capital_cost=overground_h2_stores.capital_cost.mean(),
    )

    return network
2007
2008
2009
def update_heat_timeseries_germany(network):
    """Replace heat demand time series in Germany with data from egon-data

    Parameters
    ----------
    network : pypsa.Network
        Network including heat demand time series from pypsa-eur

    Returns
    -------
    network : pypsa.Network
        Network with German heat demand time series from egon-data

    """
    # (Removed a dead no-op statement `network.loads` that had no effect.)
    # Import heat demand curves for Germany from eGon-data
    df_egon_heat_demand = pd.read_csv(
        "input-pypsa-eur-sec/heat_demand_timeseries_DE_eGon100RE.csv"
    )

    # Replace heat demand curves in Germany with values from eGon-data.
    # NOTE(review): the German bus is addressed as "DE1 0" here while the
    # other adjustment functions in this module use "DE0 0" — confirm
    # against the clustering naming convention.
    network.loads_t.p_set.loc[:, "DE1 0 rural heat"] = (
        df_egon_heat_demand.loc[:, "residential rural"].values
        + df_egon_heat_demand.loc[:, "service rural"].values
    )

    network.loads_t.p_set.loc[:, "DE1 0 urban central heat"] = (
        df_egon_heat_demand.loc[:, "urban central"].values
    )

    return network
2027
2028
2029
def drop_biomass(network):
    """Remove every component whose name contains 'biomass' from the network.

    Parameters
    ----------
    network : pypsa.Network
        Network possibly containing biomass components.

    Returns
    -------
    network : pypsa.Network
        Network stripped of all biomass-related components.
    """
    carrier = "biomass"

    for component in network.iterate_components():
        matches = component.df.index.str.contains(carrier)
        network.mremove(component.name, component.df.index[matches])
    return network
2035
2036
2037
def postprocessing_biomass_2045():
    """Strip biomass components from the solved network and re-export it.

    Reads the solved network, removes all biomass-related components and
    writes the result to the postnetworks folder defined by the solve
    configuration (last planning horizon, index 3).
    """
    with open(
        __path__[0] + "/datasets/pypsaeur/config_solve.yaml", "r"
    ) as stream:
        data_config = yaml.safe_load(stream)

    network = drop_biomass(read_network())

    scenario = data_config["scenario"]
    # File name follows pypsa-eur's postnetwork naming scheme
    file_name = (
        f"base_s_{scenario['clusters'][0]}"
        f"_l{scenario['ll'][0]}"
        f"_{scenario['opts'][0]}"
        f"_{scenario['sector_opts'][0]}"
        f"_{scenario['planning_horizons'][3]}.nc"
    )
    target_file = (
        Path(".")
        / "run-pypsa-eur"
        / "pypsa-eur"
        / "results"
        / data_config["run"]["name"]
        / "postnetworks"
        / file_name
    )

    network.export_to_netcdf(target_file)
2062
2063
2064
def drop_urban_decentral_heat(network):
    """Dissolve the 'urban decentral heat' sector into rural heat.

    Moves urban decentral heat demand onto the rural heat loads, re-wires
    loads that sit on an urban decentral bus to the corresponding rural bus,
    and finally removes all remaining 'urban decentral' components.

    Parameters
    ----------
    network : pypsa.Network
        Network including an urban decentral heat sector.

    Returns
    -------
    network : pypsa.Network
        Network without urban decentral heat components.
    """
    carrier = "urban decentral heat"

    # Add urban decentral heat demand to the rural heat demand of the same
    # node. `country` is the first 5 characters of the bus name (e.g. "DE0 0"),
    # i.e. a node identifier rather than only a country code.
    for country in network.loads.loc[
        network.loads.carrier == carrier, "bus"
    ].str[:5]:

        if f"{country} {carrier}" in network.loads_t.p_set.columns:
            network.loads_t.p_set[
                f"{country} rural heat"
            ] += network.loads_t.p_set[f"{country} {carrier}"]
        else:
            # No time series: fall back to the static p_set value, which is
            # added uniformly to every snapshot of the rural heat series.
            print(
                f"""No time series available for {country} {carrier}.
                  Using static p_set."""
            )

            network.loads_t.p_set[
                f"{country} rural heat"
            ] += network.loads.loc[f"{country} {carrier}", "p_set"]

    # In some cases low-temperature heat for industry is connected to the urban
    # decentral heat bus since there is no urban central heat bus.
    # These loads are re-wired to the representative rural heat bus
    # (the filter keeps loads on an urban decentral bus whose own carrier is
    # NOT urban decentral, e.g. industry heat):
    network.loads.loc[
        (network.loads.bus.str.contains(carrier))
        & (~network.loads.carrier.str.contains(carrier.replace(" heat", ""))),
        "bus",
    ] = network.loads.loc[
        (network.loads.bus.str.contains(carrier))
        & (~network.loads.carrier.str.contains(carrier.replace(" heat", ""))),
        "bus",
    ].str.replace(
        "urban decentral", "rural"
    )

    # Drop all components still attached to urban decentral heat
    for c in network.iterate_components():
        network.mremove(
            c.name, c.df[c.df.index.str.contains("urban decentral")].index
        )

    return network
2108
2109
2110
def district_heating_shares(network):
    """Split nodal heat demand into district heating and rural heat.

    Re-distributes each node's total heat demand time series onto the
    'urban central heat' and 'rural heat' loads according to country-level
    district heating shares from the eGon data bundle, then drops links
    whose buses or carrier are undefined.

    Parameters
    ----------
    network : pypsa.Network
        Network with heat demand time series.

    Returns
    -------
    network : pypsa.Network
        Network with heat demand split by district heating share.
    """
    # District heating share per country, indexed by two-letter country code
    df = pd.read_csv(
        "data_bundle_powerd_data/district_heating_shares_egon.csv"
    ).set_index("country_code")

    # Total heat demand per node: sum all heat loads that have a time series,
    # grouped by the first 5 characters of the bus name (node id, e.g. "DE0 0").
    # NOTE(review): groupby(..., axis=1) is deprecated in pandas >= 2.1.
    heat_demand_per_country = (
        network.loads_t.p_set[
            network.loads[
                (network.loads.carrier.str.contains("heat"))
                & network.loads.index.isin(network.loads_t.p_set.columns)
            ].index
        ]
        .groupby(network.loads.bus.str[:5], axis=1)
        .sum()
    )

    # Split the nodal total by the country's district heating share
    # (country[:2] maps the node id back to the two-letter country code)
    for country in heat_demand_per_country.columns:
        network.loads_t.p_set[f"{country} urban central heat"] = (
            heat_demand_per_country.loc[:, country].mul(
                df.loc[country[:2]].values[0]
            )
        )
        network.loads_t.p_set[f"{country} rural heat"] = (
            heat_demand_per_country.loc[:, country].mul(
                (1 - df.loc[country[:2]].values[0])
            )
        )

    # Drop links with undefined buses or carrier
    network.mremove(
        "Link",
        network.links[
            ~network.links.bus0.isin(network.buses.index.values)
        ].index,
    )
    network.mremove(
        "Link",
        network.links[network.links.carrier == ""].index,
    )

    return network
2151
2152
2153
def drop_new_gas_pipelines(network):
    """Remove all newly-buildable gas pipeline links from the network.

    Parameters
    ----------
    network : pypsa.Network
        Network possibly containing 'gas pipeline new' links.

    Returns
    -------
    network : pypsa.Network
        Network without new gas pipeline links.
    """
    new_pipeline_mask = network.links.index.str.contains("gas pipeline new")
    network.mremove("Link", network.links.index[new_pipeline_mask])

    return network
2162
2163
2164
def drop_fossil_gas(network):
    """Remove fossil gas generators from the network.

    Parameters
    ----------
    network : pypsa.Network
        Network possibly containing generators with carrier 'gas'.

    Returns
    -------
    network : pypsa.Network
        Network without fossil gas generators.
    """
    fossil_gas_mask = network.generators.carrier == "gas"
    network.mremove("Generator", network.generators.index[fossil_gas_mask])

    return network
2171
2172
2173
def drop_conventional_power_plants(network):
    """Remove German lignite and hard coal power plant links.

    Parameters
    ----------
    network : pypsa.Network
        Network possibly containing German coal/lignite links.

    Returns
    -------
    network : pypsa.Network
        Network without German coal and lignite power plants.
    """
    # Drop lignite and coal power plants in Germany (bus1 is the electrical
    # bus the plant feeds into)
    is_coal = network.links.carrier.isin(["coal", "lignite"])
    in_germany = network.links.bus1.str.startswith("DE")
    network.mremove("Link", network.links.index[is_coal & in_germany])

    return network
2185
2186
2187
def rual_heat_technologies(network):
    """Remove rural gas boilers and rural solar thermal from the network.

    NOTE(review): the function name contains a typo ('rual' instead of
    'rural') but is kept unchanged because callers reference it.

    Parameters
    ----------
    network : pypsa.Network
        Network possibly containing rural heat technologies.

    Returns
    -------
    network : pypsa.Network
        Network without rural gas boilers and rural solar thermal.
    """
    boiler_mask = network.links.index.str.contains("rural gas boiler")
    network.mremove("Link", network.links.index[boiler_mask])

    solar_thermal_mask = network.generators.carrier.str.contains(
        "rural solar thermal"
    )
    network.mremove("Generator", network.generators.index[solar_thermal_mask])

    return network
2203
2204
2205
def coal_exit_D():
    """Enforce an early coal exit for Germany in the powerplants resource.

    Caps the decommissioning year (``DateOut``) of German lignite and hard
    coal power plants at 2034 in pypsa-eur's powerplants resource file,
    so no German coal capacity survives past 2034. Reads and rewrites the
    CSV in place; no return value.
    """
    powerplants_path = "run-pypsa-eur/pypsa-eur/resources/powerplants_s_39.csv"

    df = pd.read_csv(powerplants_path, index_col=0)
    # Work on an explicit copy to avoid pandas' SettingWithCopyWarning when
    # assigning into the boolean-mask selection below.
    df_de_coal = df[
        (df.Country == "DE")
        & ((df.Fueltype == "Lignite") | (df.Fueltype == "Hard Coal"))
    ].copy()
    df_de_coal.loc[df_de_coal.DateOut.values >= 2035, "DateOut"] = 2034
    # Write the adjusted rows back into the full table
    df.loc[df_de_coal.index] = df_de_coal

    df.to_csv(powerplants_path)
2218
2219
2220
def offwind_potential_D(network, capacity_per_sqkm=4):
    """Raise the German offshore wind potential to a higher area density.

    Sets p_nom_max for German offshore generators based on
    ``capacity_per_sqkm`` = 4 MW/km² instead of the default 2 MW/km²
    applied to the rest of Europe.

    Parameters
    ----------
    network : pypsa.Network
        Network with default German offshore potentials.
    capacity_per_sqkm : int, optional
        Assumed installable capacity density in MW/km². The default is 4.

    Returns
    -------
    network : pypsa.Network
        Network with updated German offshore potentials.
    """
    # Eligible area per offshore carrier (factors in km², derived from the
    # default p_nom_max at 2 MW/km²)
    area_per_carrier = {
        "offwind-ac": 1942,
        "offwind-dc": 10768,
        "offwind-float": 134,
    }

    for carrier, area in area_per_carrier.items():
        mask = (network.generators.bus == "DE0 0") & (
            network.generators.carrier == carrier
        )
        network.generators.loc[mask, "p_nom_max"] = area * capacity_per_sqkm

    return network
2250
2251
2252
def additional_grid_expansion_2045(network):
    """Relax the line-cost expansion limit by 5 %.

    Parameters
    ----------
    network : pypsa.Network
        Network with a 'lc_limit' global constraint.

    Returns
    -------
    network : pypsa.Network
        Network with a 5 % higher expansion limit.
    """
    constraints = network.global_constraints
    constraints.loc["lc_limit", "constant"] = (
        constraints.loc["lc_limit", "constant"] * 1.05
    )

    return network
2257
2258
2259
def execute():
    """Adjust pypsa-eur prenetworks before the solve step.

    Depending on the configured foresight mode, applies this module's
    manipulation functions (demand updates, technology additions/removals,
    potential adjustments) to the prenetwork files written by pypsa-eur and
    overwrites them in place. Does nothing when pypsa-eur execution is
    disabled in the egon-data settings.
    """
    if egon.data.config.settings()["egon-data"]["--run-pypsa-eur"]:
        with open(
            __path__[0] + "/datasets/pypsaeur/config.yaml", "r"
        ) as stream:
            data_config = yaml.safe_load(stream)

        if data_config["foresight"] == "myopic":

            print("Adjusting scenarios on the myopic pathway...")

            # Enforce the German coal exit in the powerplants resource first
            coal_exit_D()

            networks = pd.Series()

            # Collect the prenetwork file names for all planning horizons
            for i in range(
                0, len(data_config["scenario"]["planning_horizons"])
            ):
                nc_file = pd.Series(
                    f"base_s_{data_config['scenario']['clusters'][0]}"
                    f"_l{data_config['scenario']['ll'][0]}"
                    f"_{data_config['scenario']['opts'][0]}"
                    f"_{data_config['scenario']['sector_opts'][0]}"
                    f"_{data_config['scenario']['planning_horizons'][i]}.nc"
                )
                networks = networks._append(nc_file)

            # Map each scenario year to its prenetwork file and the list of
            # manipulation functions applied to it
            scn_path = pd.DataFrame(
                index=["2025", "2030", "2035", "2045"],
                columns=["prenetwork", "functions"],
            )

            for year in scn_path.index:
                scn_path.at[year, "prenetwork"] = networks[
                    networks.str.contains(year)
                ].values

            for year in ["2025", "2030", "2035"]:
                scn_path.loc[year, "functions"] = [
                    # drop_urban_decentral_heat,
                    update_electrical_timeseries_germany,
                    geothermal_district_heating,
                    h2_overground_stores,
                    drop_new_gas_pipelines,
                    offwind_potential_D,
                ]

            # 2045 additionally drops biomass and fossil gas and allows
            # extra grid expansion
            scn_path.loc["2045", "functions"] = [
                drop_biomass,
                # drop_urban_decentral_heat,
                update_electrical_timeseries_germany,
                geothermal_district_heating,
                h2_overground_stores,
                drop_new_gas_pipelines,
                drop_fossil_gas,
                offwind_potential_D,
                additional_grid_expansion_2045,
                # drop_conventional_power_plants,
                # rual_heat_technologies, #To be defined
            ]

            network_path = (
                Path(".")
                / "run-pypsa-eur"
                / "pypsa-eur"
                / "results"
                / data_config["run"]["name"]
                / "prenetworks"
            )

            # Apply the manipulation functions and overwrite each file
            for scn in scn_path.index:
                path = network_path / scn_path.at[scn, "prenetwork"]
                network = pypsa.Network(path)
                # `year` is read by update_electrical_timeseries_germany
                network.year = int(scn)
                for manipulator in scn_path.at[scn, "functions"]:
                    network = manipulator(network)
                network.export_to_netcdf(path)

        elif (data_config["foresight"] == "overnight") & (
            int(data_config["scenario"]["planning_horizons"][0]) > 2040
        ):

            print("Adjusting overnight long-term scenario...")

            network_path = (
                Path(".")
                / "run-pypsa-eur"
                / "pypsa-eur"
                / "results"
                / data_config["run"]["name"]
                / "prenetworks"
                / f"elec_s_{data_config['scenario']['clusters'][0]}"
                f"_l{data_config['scenario']['ll'][0]}"
                f"_{data_config['scenario']['opts'][0]}"
                f"_{data_config['scenario']['sector_opts'][0]}"
                f"_{data_config['scenario']['planning_horizons'][0]}.nc"
            )

            network = pypsa.Network(network_path)

            network = drop_biomass(network)

            network = drop_urban_decentral_heat(network)

            network = district_heating_shares(network)

            network = update_heat_timeseries_germany(network)

            network = update_electrical_timeseries_germany(network)

            network = geothermal_district_heating(network)

            network = h2_overground_stores(network)

            network = drop_new_gas_pipelines(network)

            network = drop_fossil_gas(network)

            network = rual_heat_technologies(network)

            network.export_to_netcdf(network_path)

        else:
            # Bug fix: the planning-horizon year was previously printed as
            # literal text instead of being interpolated into the f-string.
            print(
                f"""Adjustments on prenetworks are not implemented for
                foresight option {data_config['foresight']} and
                year {int(data_config['scenario']['planning_horizons'][0])}.
                Please check the pypsaeur.execute function.
                """
            )
    else:
        print("Pypsa-eur is not executed due to the settings of egon-data")
2391