Passed
Pull Request — dev (#885), created by unknown, 01:44

data.datasets.pypsaeursec.clean_database() — rated A

Complexity

Conditions 3

Size

Total Lines 79
Code Lines 15

Duplication

Lines 0
Ratio 0 %

Importance

Changes 0
Metric   Value
eloc     15
dl       0
loc      79
rs       9.65
c        0
b        0
f        0
cc       3
nop      0

How to fix: Long Method

Small methods make your code easier to understand, in particular if combined with a good name. Besides, if your method is small, finding a good name is usually much easier.

For example, if you find yourself adding comments to a method's body, this is usually a good sign that you should extract the commented part into a new method and use the comment as a starting point when coming up with a good name for this new method.

Commonly applied refactorings include:

- Extract Method (see the sketch below)
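To make this concrete, here is a minimal sketch of Extract Method; the function names, URL, and bundle file are invented for illustration and are not taken from the code under review. The commented download step of a long method becomes a helper whose name is derived from the comment:

    from pathlib import Path
    from urllib.request import urlretrieve
    import tarfile


    # Before: one long method, with a comment announcing each step.
    def prepare_workspace_before(workdir: Path) -> None:
        workdir.mkdir(parents=True, exist_ok=True)
        # download and unpack the data bundle
        datafile = "example-data-bundle.tar.gz"
        datapath = workdir / datafile
        if not datapath.exists():
            urlretrieve(f"https://example.org/files/{datafile}", datapath)
            with tarfile.open(datapath) as tar:
                tar.extractall(workdir / "data")


    # After: the commented block is extracted; the comment became the name.
    def download_and_unpack_bundle(url: str, datapath: Path, target: Path) -> None:
        if not datapath.exists():
            urlretrieve(url, datapath)
            with tarfile.open(datapath) as tar:
                tar.extractall(target)


    def prepare_workspace_after(workdir: Path) -> None:
        workdir.mkdir(parents=True, exist_ok=True)
        datafile = "example-data-bundle.tar.gz"
        download_and_unpack_bundle(
            f"https://example.org/files/{datafile}",
            workdir / datafile,
            workdir / "data",
        )

The same pattern would apply to run_pypsa_eur_sec() below, where repository cloning, environment patching, and bundle downloads are each announced by a comment.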

"""The central module containing all code dealing with importing data from
the pypsa-eur-sec scenario parameter creation
"""

from pathlib import Path
from urllib.request import urlretrieve
import os
import tarfile

from shapely.geometry import LineString
import geopandas as gpd
import importlib_resources as resources
import pandas as pd
import pypsa
import yaml

from egon.data import __path__, db
from egon.data.datasets import Dataset
from egon.data.datasets.scenario_parameters import get_sector_parameters
import egon.data.config
import egon.data.subprocess as subproc


def run_pypsa_eur_sec():
    """Set up the pypsa-eur-sec working directory, fetch the input data
    and run the snakemake workflow."""

    cwd = Path(".")
    filepath = cwd / "run-pypsa-eur-sec"
    filepath.mkdir(parents=True, exist_ok=True)

    pypsa_eur_repos = filepath / "pypsa-eur"
    pypsa_eur_repos_data = pypsa_eur_repos / "data"
    technology_data_repos = filepath / "technology-data"
    pypsa_eur_sec_repos = filepath / "pypsa-eur-sec"
    pypsa_eur_sec_repos_data = pypsa_eur_sec_repos / "data"

    if not pypsa_eur_repos.exists():
        subproc.run(
            [
                "git",
                "clone",
                "--branch",
                "v0.4.0",
                "https://github.com/PyPSA/pypsa-eur.git",
                pypsa_eur_repos,
            ]
        )

        # subproc.run(
        #     ["git", "checkout", "4e44822514755cdd0289687556547100fba6218b"],
        #     cwd=pypsa_eur_repos,
        # )

        file_to_copy = os.path.join(
            __path__[0], "datasets", "pypsaeursec", "pypsaeur", "Snakefile"
        )

        subproc.run(["cp", file_to_copy, pypsa_eur_repos])

        # Read YAML file
        path_to_env = pypsa_eur_repos / "envs" / "environment.yaml"
        with open(path_to_env, "r") as stream:
            env = yaml.safe_load(stream)

        env["dependencies"].append("gurobi")

        # Write YAML file
        with open(path_to_env, "w", encoding="utf8") as outfile:
            yaml.dump(
                env, outfile, default_flow_style=False, allow_unicode=True
            )

        datafile = "pypsa-eur-data-bundle.tar.xz"
        datapath = pypsa_eur_repos / datafile
        if not datapath.exists():
            urlretrieve(
                f"https://zenodo.org/record/3517935/files/{datafile}", datapath
            )
            tar = tarfile.open(datapath)
            tar.extractall(pypsa_eur_repos_data)

    if not technology_data_repos.exists():
        subproc.run(
            [
                "git",
                "clone",
                "--branch",
                "v0.3.0",
                "https://github.com/PyPSA/technology-data.git",
                technology_data_repos,
            ]
        )

    if not pypsa_eur_sec_repos.exists():
        subproc.run(
            [
                "git",
                "clone",
                "https://github.com/openego/pypsa-eur-sec.git",
                pypsa_eur_sec_repos,
            ]
        )

    datafile = "pypsa-eur-sec-data-bundle.tar.gz"
    datapath = pypsa_eur_sec_repos_data / datafile
    if not datapath.exists():
        urlretrieve(
            f"https://zenodo.org/record/5824485/files/{datafile}", datapath
        )
        tar = tarfile.open(datapath)
        tar.extractall(pypsa_eur_sec_repos_data)

    with open(filepath / "Snakefile", "w") as snakefile:
        snakefile.write(
            resources.read_text("egon.data.datasets.pypsaeursec", "Snakefile")
        )

    subproc.run(
        [
            "snakemake",
            "-j1",
            "--directory",
            filepath,
            "--snakefile",
            filepath / "Snakefile",
            "--use-conda",
            "--conda-frontend=conda",
            "Main",
        ]
    )


def read_network():
    """Read the resulting pypsa-eur-sec network, either from the workflow
    results or, as long as the workflow is not executed, from the data
    bundle, and return it as a pypsa.Network."""

    # Set execute_pypsa_eur_sec to False until optional task is implemented
    execute_pypsa_eur_sec = False
    cwd = Path(".")

    if execute_pypsa_eur_sec:
        filepath = cwd / "run-pypsa-eur-sec"
        pypsa_eur_sec_repos = filepath / "pypsa-eur-sec"
        # Read YAML file
        pes_egonconfig = pypsa_eur_sec_repos / "config_egon.yaml"
        with open(pes_egonconfig, "r") as stream:
            data_config = yaml.safe_load(stream)

        simpl = data_config["scenario"]["simpl"][0]
        clusters = data_config["scenario"]["clusters"][0]
        lv = data_config["scenario"]["lv"][0]
        opts = data_config["scenario"]["opts"][0]
        sector_opts = data_config["scenario"]["sector_opts"][0]
        planning_horizons = data_config["scenario"]["planning_horizons"][0]
        file = "elec_s{simpl}_{clusters}_lv{lv}_{opts}_{sector_opts}_{planning_horizons}.nc".format(
            simpl=simpl,
            clusters=clusters,
            opts=opts,
            lv=lv,
            sector_opts=sector_opts,
            planning_horizons=planning_horizons,
        )

        target_file = (
            pypsa_eur_sec_repos
            / "results"
            / data_config["run"]
            / "postnetworks"
            / file
        )

    else:
        target_file = (
            cwd
            / "data_bundle_egon_data"
            / "pypsa_eur_sec"
            / "2022-05-04-egondata-integration"
            / "postnetworks"
            / "elec_s_37_lv2.0__Co2L0-3H-T-H-B-I-dist1_2050.nc"
        )

    return pypsa.Network(str(target_file))


def clean_database():
    """Remove all components abroad for the eGon100RE scenario from the
    database.

    Remove all components abroad and their associated time series from
    the database for the scenario 'eGon100RE'.

    Parameters
    ----------
    None

    Returns
    -------
    None

    """
    scn_name = "eGon100RE"

    comp_one_port = ["load", "generator", "store", "storage"]

    # delete existing components and associated timeseries
    for comp in comp_one_port:
        db.execute_sql(
            f"""
            DELETE FROM {"grid.egon_etrago_" + comp + "_timeseries"}
            WHERE {comp + "_id"} IN (
                SELECT {comp + "_id"} FROM {"grid.egon_etrago_" + comp}
                WHERE bus IN (
                    SELECT bus_id FROM grid.egon_etrago_bus
                    WHERE country != 'DE'
                    AND scn_name = '{scn_name}')
                AND scn_name = '{scn_name}'
            );

            DELETE FROM {"grid.egon_etrago_" + comp}
            WHERE bus IN (
                SELECT bus_id FROM grid.egon_etrago_bus
                WHERE country != 'DE'
                AND scn_name = '{scn_name}')
            AND scn_name = '{scn_name}';"""
        )

    comp_2_ports = [
        "line",
        "transformer",
        "link",
    ]

    for comp, id in zip(comp_2_ports, ["line_id", "trafo_id", "link_id"]):
        db.execute_sql(
            f"""
            DELETE FROM {"grid.egon_etrago_" + comp + "_timeseries"}
            WHERE scn_name = '{scn_name}'
            AND {id} IN (
                SELECT {id} FROM {"grid.egon_etrago_" + comp}
            WHERE "bus0" IN (
            SELECT bus_id FROM grid.egon_etrago_bus
                WHERE country != 'DE'
                AND scn_name = '{scn_name}')
            AND "bus1" IN (
            SELECT bus_id FROM grid.egon_etrago_bus
                WHERE country != 'DE'
                AND scn_name = '{scn_name}')
            );

            DELETE FROM {"grid.egon_etrago_" + comp}
            WHERE scn_name = '{scn_name}'
            AND "bus0" IN (
            SELECT bus_id FROM grid.egon_etrago_bus
                WHERE country != 'DE'
                AND scn_name = '{scn_name}')
            AND "bus1" IN (
            SELECT bus_id FROM grid.egon_etrago_bus
                WHERE country != 'DE'
                AND scn_name = '{scn_name}')
            ;"""
        )

    # delete buses located abroad
    db.execute_sql(
        "DELETE FROM grid.egon_etrago_bus "
        f"WHERE scn_name = '{scn_name}' "
        "AND country <> 'DE'"
    )


def neighbor_reduction():
    """Reduce the imported network to Germany and its electrical
    neighbours and write the neighbouring components to the eTraGo
    tables."""

    network = read_network()

    wanted_countries = [
        "DE",
        "AT",
        "CH",
        "CZ",
        "PL",
        "SE",
        "NO",
        "DK",
        "GB",
        "NL",
        "BE",
        "FR",
        "LU",
    ]
    foreign_buses = network.buses[
        ~network.buses.index.str.contains("|".join(wanted_countries))
    ]
    network.buses = network.buses.drop(
        network.buses.loc[foreign_buses.index].index
    )

    # drop foreign lines and links where both ends lie on dropped buses

    network.lines = network.lines.drop(
        network.lines[
            (network.lines["bus0"].isin(network.buses.index) == False)
            & (network.lines["bus1"].isin(network.buses.index) == False)
        ].index
    )

    # select all lines whose bus1 is kept (their bus0 was dropped)
    lines_cb_1 = network.lines[
        (network.lines["bus0"].isin(network.buses.index) == False)
    ]

    # create a load at bus1 with the line's hourly loading
    for i, k in zip(lines_cb_1.bus1.values, lines_cb_1.index):
        network.add(
            "Load",
            "slack_fix " + i + " " + k,
            bus=i,
            p_set=network.lines_t.p1[k],
        )
        network.loads.carrier.loc[
            "slack_fix " + i + " " + k
        ] = lines_cb_1.carrier[k]

    # select all lines whose bus0 is kept (their bus1 was dropped)
    lines_cb_0 = network.lines[
        (network.lines["bus1"].isin(network.buses.index) == False)
    ]

    # create a load at bus0 with the line's hourly loading
    for i, k in zip(lines_cb_0.bus0.values, lines_cb_0.index):
        network.add(
            "Load",
            "slack_fix " + i + " " + k,
            bus=i,
            p_set=network.lines_t.p0[k],
        )
        network.loads.carrier.loc[
            "slack_fix " + i + " " + k
        ] = lines_cb_0.carrier[k]

    # do the same for links

    network.links = network.links.drop(
        network.links[
            (network.links["bus0"].isin(network.buses.index) == False)
            & (network.links["bus1"].isin(network.buses.index) == False)
        ].index
    )

    # select all links whose bus1 is kept (their bus0 was dropped)
    links_cb_1 = network.links[
        (network.links["bus0"].isin(network.buses.index) == False)
    ]

    # create a load at bus1 with the link's hourly loading
    for i, k in zip(links_cb_1.bus1.values, links_cb_1.index):
        network.add(
            "Load",
            "slack_fix_links " + i + " " + k,
            bus=i,
            p_set=network.links_t.p1[k],
        )
        network.loads.carrier.loc[
            "slack_fix_links " + i + " " + k
        ] = links_cb_1.carrier[k]

    # select all links whose bus0 is kept (their bus1 was dropped)
    links_cb_0 = network.links[
        (network.links["bus1"].isin(network.buses.index) == False)
    ]

    # create a load at bus0 with the link's hourly loading
    for i, k in zip(links_cb_0.bus0.values, links_cb_0.index):
        network.add(
            "Load",
            "slack_fix_links " + i + " " + k,
            bus=i,
            p_set=network.links_t.p0[k],
        )
        network.loads.carrier.loc[
            "slack_fix_links " + i + " " + k
        ] = links_cb_0.carrier[k]

    # drop remaining foreign components

    network.lines = network.lines.drop(
        network.lines[
            (network.lines["bus0"].isin(network.buses.index) == False)
            | (network.lines["bus1"].isin(network.buses.index) == False)
        ].index
    )

    network.links = network.links.drop(
        network.links[
            (network.links["bus0"].isin(network.buses.index) == False)
            | (network.links["bus1"].isin(network.buses.index) == False)
        ].index
    )

    network.transformers = network.transformers.drop(
        network.transformers[
            (network.transformers["bus0"].isin(network.buses.index) == False)
            | (network.transformers["bus1"].isin(network.buses.index) == False)
        ].index
    )
    network.generators = network.generators.drop(
        network.generators[
            (network.generators["bus"].isin(network.buses.index) == False)
        ].index
    )

    network.loads = network.loads.drop(
        network.loads[
            (network.loads["bus"].isin(network.buses.index) == False)
        ].index
    )

    network.storage_units = network.storage_units.drop(
        network.storage_units[
            (network.storage_units["bus"].isin(network.buses.index) == False)
        ].index
    )

    components = [
        "loads",
        "generators",
        "lines",
        "buses",
        "transformers",
        "links",
    ]
    for g in components:  # loads_t
        h = g + "_t"
        nw = getattr(network, h)  # network.loads_t
        for i in nw.keys():  # network.loads_t.p
            cols = [
                j
                for j in getattr(nw, i).columns
                if j not in getattr(network, g).index
            ]
            for k in cols:
                del getattr(nw, i)[k]

    # writing components of neighboring countries to etrago tables

    # Set country tag for all buses
    network.buses.country = network.buses.index.str[:2]
    neighbors = network.buses[network.buses.country != "DE"]

    neighbors["new_index"] = (
        db.next_etrago_id("bus") + neighbors.reset_index().index
    )

    # lines, the foreign crossborder lines
    # (without crossborder lines to Germany!)

    neighbor_lines = network.lines[
        network.lines.bus0.isin(neighbors.index)
        & network.lines.bus1.isin(neighbors.index)
    ]
    if not network.lines_t["s_max_pu"].empty:
        neighbor_lines_t = network.lines_t["s_max_pu"][neighbor_lines.index]

    neighbor_lines.reset_index(inplace=True)
    neighbor_lines.bus0 = (
        neighbors.loc[neighbor_lines.bus0, "new_index"].reset_index().new_index
    )
    neighbor_lines.bus1 = (
        neighbors.loc[neighbor_lines.bus1, "new_index"].reset_index().new_index
    )
    neighbor_lines.index += db.next_etrago_id("line")

    if not network.lines_t["s_max_pu"].empty:
        for i in neighbor_lines_t.columns:
Issue introduced by this pull request: the variable neighbor_lines_t does not seem to be defined in case the `if not network.lines_t["s_max_pu"].empty:` check above is False. Are you sure this can never be the case?
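A minimal way to address this warning (a reviewer sketch, not part of this pull request) would be to bind the name unconditionally before the first guard:

    neighbor_lines_t = pd.DataFrame()  # empty default, so the name is always bound
    if not network.lines_t["s_max_pu"].empty:
        neighbor_lines_t = network.lines_t["s_max_pu"][neighbor_lines.index]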
            new_index = neighbor_lines[neighbor_lines["name"] == i].index
            neighbor_lines_t.rename(columns={i: new_index[0]}, inplace=True)

    # links
    neighbor_links = network.links[
        network.links.bus0.isin(neighbors.index)
        & network.links.bus1.isin(neighbors.index)
    ]

    neighbor_links.reset_index(inplace=True)
    neighbor_links.bus0 = (
        neighbors.loc[neighbor_links.bus0, "new_index"].reset_index().new_index
    )
    neighbor_links.bus1 = (
        neighbors.loc[neighbor_links.bus1, "new_index"].reset_index().new_index
    )
    neighbor_links.index += db.next_etrago_id("link")

    # generators
    neighbor_gens = network.generators[
        network.generators.bus.isin(neighbors.index)
    ]
    neighbor_gens_t = network.generators_t["p_max_pu"][
        neighbor_gens[
            neighbor_gens.index.isin(network.generators_t["p_max_pu"].columns)
        ].index
    ]

    neighbor_gens.reset_index(inplace=True)
    neighbor_gens.bus = (
        neighbors.loc[neighbor_gens.bus, "new_index"].reset_index().new_index
    )
    neighbor_gens.index += db.next_etrago_id("generator")

    for i in neighbor_gens_t.columns:
        new_index = neighbor_gens[neighbor_gens["name"] == i].index
        neighbor_gens_t.rename(columns={i: new_index[0]}, inplace=True)

    # loads

    neighbor_loads = network.loads[network.loads.bus.isin(neighbors.index)]
    neighbor_loads_t_index = neighbor_loads.index[
        neighbor_loads.index.isin(network.loads_t.p_set.columns)
    ]
    neighbor_loads_t = network.loads_t["p_set"][neighbor_loads_t_index]

    neighbor_loads.reset_index(inplace=True)
    neighbor_loads.bus = (
        neighbors.loc[neighbor_loads.bus, "new_index"].reset_index().new_index
    )
    neighbor_loads.index += db.next_etrago_id("load")

    for i in neighbor_loads_t.columns:
        new_index = neighbor_loads[neighbor_loads["index"] == i].index
        neighbor_loads_t.rename(columns={i: new_index[0]}, inplace=True)

    # stores
    neighbor_stores = network.stores[network.stores.bus.isin(neighbors.index)]
    neighbor_stores_t_index = neighbor_stores.index[
        neighbor_stores.index.isin(network.stores_t.e_min_pu.columns)
    ]
    neighbor_stores_t = network.stores_t["e_min_pu"][neighbor_stores_t_index]

    neighbor_stores.reset_index(inplace=True)
    neighbor_stores.bus = (
        neighbors.loc[neighbor_stores.bus, "new_index"].reset_index().new_index
    )
    neighbor_stores.index += db.next_etrago_id("store")

    for i in neighbor_stores_t.columns:
        new_index = neighbor_stores[neighbor_stores["name"] == i].index
        neighbor_stores_t.rename(columns={i: new_index[0]}, inplace=True)

    # storage_units
    neighbor_storage = network.storage_units[
        network.storage_units.bus.isin(neighbors.index)
    ]
    neighbor_storage_t_index = neighbor_storage.index[
        neighbor_storage.index.isin(network.storage_units_t.inflow.columns)
    ]
    neighbor_storage_t = network.storage_units_t["inflow"][
        neighbor_storage_t_index
    ]

    neighbor_storage.reset_index(inplace=True)
    neighbor_storage.bus = (
        neighbors.loc[neighbor_storage.bus, "new_index"]
        .reset_index()
        .new_index
    )
    neighbor_storage.index += db.next_etrago_id("storage")

    for i in neighbor_storage_t.columns:
        new_index = neighbor_storage[neighbor_storage["name"] == i].index
        neighbor_storage_t.rename(columns={i: new_index[0]}, inplace=True)

    # Connect to local database
    engine = db.engine()

    neighbors["scn_name"] = "eGon100RE"
    neighbors.index = neighbors["new_index"]

    # Correct geometry for non AC buses
    carriers = set(neighbors.carrier.to_list())
    carriers.remove("AC")
    non_AC_neighbors = pd.DataFrame()
    for c in carriers:
        c_neighbors = neighbors[neighbors.carrier == c].set_index(
            "location", drop=False
        )
        for i in ["x", "y"]:
            c_neighbors = c_neighbors.drop(i, axis=1)
        coordinates = neighbors[neighbors.carrier == "AC"][
            ["location", "x", "y"]
        ].set_index("location")
        c_neighbors = pd.concat([coordinates, c_neighbors], axis=1).set_index(
            "new_index", drop=False
        )
        non_AC_neighbors = non_AC_neighbors.append(c_neighbors)
    neighbors = neighbors[neighbors.carrier == "AC"].append(non_AC_neighbors)

    for i in ["new_index", "control", "generator", "location", "sub_network"]:
        neighbors = neighbors.drop(i, axis=1)

    # Add geometry column
    neighbors = (
        gpd.GeoDataFrame(
            neighbors, geometry=gpd.points_from_xy(neighbors.x, neighbors.y)
        )
        .rename_geometry("geom")
        .set_crs(4326)
    )

    # Unify carrier names
    neighbors.carrier = neighbors.carrier.str.replace(" ", "_")
    neighbors.carrier.replace(
        {
            "gas": "CH4",
            "gas_for_industry": "CH4_for_industry",
        },
        inplace=True,
    )

    neighbors.to_postgis(
        "egon_etrago_bus",
        engine,
        schema="grid",
        if_exists="append",
        index=True,
        index_label="bus_id",
    )

    # prepare and write neighboring crossborder lines to etrago tables
    def lines_to_etrago(neighbor_lines=neighbor_lines, scn="eGon100RE"):
        neighbor_lines["scn_name"] = scn
        neighbor_lines["cables"] = 3 * neighbor_lines["num_parallel"].astype(
            int
        )
        neighbor_lines["s_nom"] = neighbor_lines["s_nom_min"]

        for i in [
            "name",
            "x_pu_eff",
            "r_pu_eff",
            "sub_network",
            "x_pu",
            "r_pu",
            "g_pu",
            "b_pu",
            "s_nom_opt",
        ]:
            neighbor_lines = neighbor_lines.drop(i, axis=1)

        # Define geometry and add to lines dataframe as 'topo'
        gdf = gpd.GeoDataFrame(index=neighbor_lines.index)
        gdf["geom_bus0"] = neighbors.geom[neighbor_lines.bus0].values
        gdf["geom_bus1"] = neighbors.geom[neighbor_lines.bus1].values
        gdf["geometry"] = gdf.apply(
            lambda x: LineString([x["geom_bus0"], x["geom_bus1"]]), axis=1
        )

        neighbor_lines = (
            gpd.GeoDataFrame(neighbor_lines, geometry=gdf["geometry"])
            .rename_geometry("topo")
            .set_crs(4326)
        )

        neighbor_lines["lifetime"] = get_sector_parameters("electricity", scn)[
            "lifetime"
        ]["ac_ehv_overhead_line"]

        neighbor_lines.to_postgis(
            "egon_etrago_line",
            engine,
            schema="grid",
            if_exists="append",
            index=True,
            index_label="line_id",
        )

    lines_to_etrago(neighbor_lines=neighbor_lines, scn="eGon100RE")
    lines_to_etrago(neighbor_lines=neighbor_lines, scn="eGon2035")

    # prepare and write neighboring crossborder links to etrago tables
    def links_to_etrago(neighbor_links, scn="eGon100RE", extendable=True):
        neighbor_links["scn_name"] = scn

        if extendable == True:
            for i in [
                "name",
                "geometry",
                "tags",
                "under_construction",
                "underground",
                "underwater_fraction",
                "bus2",
                "bus3",
                "bus4",
                "efficiency2",
                "efficiency3",
                "efficiency4",
                "lifetime",
                "p_nom_opt",
                # "pipe_retrofit",
            ]:
                neighbor_links = neighbor_links.drop(i, axis=1)

        elif extendable == False:
            for i in [
                "name",
                "geometry",
                "tags",
                "under_construction",
                "underground",
                "underwater_fraction",
                "bus2",
                "bus3",
                "bus4",
                "efficiency2",
                "efficiency3",
                "efficiency4",
                "lifetime",
                "p_nom",
                # "pipe_retrofit",
                "p_nom_extendable",
            ]:
                neighbor_links = neighbor_links.drop(i, axis=1)
            neighbor_links = neighbor_links.rename(
                columns={"p_nom_opt": "p_nom"}
            )
            neighbor_links["p_nom_extendable"] = False

        # Define geometry and add to lines dataframe as 'topo'
        gdf = gpd.GeoDataFrame(index=neighbor_links.index)
        gdf["geom_bus0"] = neighbors.geom[neighbor_links.bus0].values
        gdf["geom_bus1"] = neighbors.geom[neighbor_links.bus1].values
        gdf["geometry"] = gdf.apply(
            lambda x: LineString([x["geom_bus0"], x["geom_bus1"]]), axis=1
        )

        neighbor_links = (
            gpd.GeoDataFrame(neighbor_links, geometry=gdf["geometry"])
            .rename_geometry("topo")
            .set_crs(4326)
        )

        # Unify carrier names
        neighbor_links.carrier = neighbor_links.carrier.str.replace(" ", "_")

        neighbor_links.carrier.replace(
            {
                "H2_Electrolysis": "power_to_H2",
                "H2_Fuel_Cell": "H2_to_power",
                "H2_pipeline_retrofitted": "H2_retrofit",
                "SMR": "CH4_to_H2",
                "Sabatier": "H2_to_CH4",
                "gas_for_industry": "CH4_for_industry",
                "gas_pipeline": "CH4",
            },
            inplace=True,
        )

        neighbor_links.to_postgis(
            "egon_etrago_link",
            engine,
            schema="grid",
            if_exists="append",
            index=True,
            index_label="link_id",
        )

    non_extendable_links_carriers = [
        "H2 pipeline retrofitted",
        "gas pipeline",
        "biogas to gas",
    ]

    # delete unwanted carriers for eTraGo
    excluded_carriers = ["gas for industry CC", "SMR CC"]
    neighbor_links = neighbor_links[
        ~neighbor_links.carrier.isin(excluded_carriers)
    ]

    links_to_etrago(
        neighbor_links[
            ~neighbor_links.carrier.isin(non_extendable_links_carriers)
        ],
        "eGon100RE",
    )
    links_to_etrago(
        neighbor_links[
            neighbor_links.carrier.isin(non_extendable_links_carriers)
        ],
        "eGon100RE",
        extendable=False,
    )

    links_to_etrago(neighbor_links[neighbor_links.carrier == "DC"], "eGon2035")

    # prepare neighboring generators for etrago tables
    neighbor_gens["scn_name"] = "eGon100RE"
    neighbor_gens["p_nom"] = neighbor_gens["p_nom_opt"]
    neighbor_gens["p_nom_extendable"] = False

    # Unify carrier names
    neighbor_gens.carrier = neighbor_gens.carrier.str.replace(" ", "_")

    neighbor_gens.carrier.replace(
        {
            "onwind": "wind_onshore",
            "ror": "run_of_river",
            "offwind-ac": "wind_offshore",
            "offwind-dc": "wind_offshore",
            "urban_central_solar_thermal": "urban_central_solar_thermal_collector",
            "residential_rural_solar_thermal": "residential_rural_solar_thermal_collector",
            "services_rural_solar_thermal": "services_rural_solar_thermal_collector",
        },
        inplace=True,
    )

    for i in ["name", "weight", "lifetime", "p_set", "q_set", "p_nom_opt"]:
        neighbor_gens = neighbor_gens.drop(i, axis=1)

    neighbor_gens.to_sql(
        "egon_etrago_generator",
        engine,
        schema="grid",
        if_exists="append",
        index=True,
        index_label="generator_id",
    )

    # prepare neighboring loads for etrago tables
    neighbor_loads["scn_name"] = "eGon100RE"

    # Unify carrier names
    neighbor_loads.carrier = neighbor_loads.carrier.str.replace(" ", "_")

    neighbor_loads.carrier.replace(
        {
            "electricity": "AC",
            "DC": "AC",
            "industry_electricity": "AC",
            "H2_pipeline": "H2_system_boundary",
            "gas_for_industry": "CH4_for_industry",
        },
        inplace=True,
    )

    for i in ["index", "p_set", "q_set"]:
        neighbor_loads = neighbor_loads.drop(i, axis=1)

    neighbor_loads.to_sql(
        "egon_etrago_load",
        engine,
        schema="grid",
        if_exists="append",
        index=True,
        index_label="load_id",
    )

    # prepare neighboring stores for etrago tables
    neighbor_stores["scn_name"] = "eGon100RE"

    # Unify carrier names
    neighbor_stores.carrier = neighbor_stores.carrier.str.replace(" ", "_")

    neighbor_stores.carrier.replace(
        {
            "Li_ion": "battery",
            "gas": "CH4",
        },
        inplace=True,
    )
    neighbor_stores.loc[
        (
            (neighbor_stores.e_nom_max <= 1e9)
            & (neighbor_stores.carrier == "H2")
        ),
        "carrier",
    ] = "H2_underground"
    neighbor_stores.loc[
        (
            (neighbor_stores.e_nom_max > 1e9)
            & (neighbor_stores.carrier == "H2")
        ),
        "carrier",
    ] = "H2_overground"

    for i in ["name", "p_set", "q_set", "e_nom_opt", "lifetime"]:
        neighbor_stores = neighbor_stores.drop(i, axis=1)

    neighbor_stores.to_sql(
        "egon_etrago_store",
        engine,
        schema="grid",
        if_exists="append",
        index=True,
        index_label="store_id",
    )

    # prepare neighboring storage_units for etrago tables
    neighbor_storage["scn_name"] = "eGon100RE"

    # Unify carrier names
    neighbor_storage.carrier = neighbor_storage.carrier.str.replace(" ", "_")

    neighbor_storage.carrier.replace(
        {"PHS": "pumped_hydro", "hydro": "reservoir"}, inplace=True
    )

    for i in ["name", "p_nom_opt"]:
        neighbor_storage = neighbor_storage.drop(i, axis=1)

    neighbor_storage.to_sql(
        "egon_etrago_storage",
        engine,
        schema="grid",
        if_exists="append",
        index=True,
        index_label="storage_id",
    )

    # writing neighboring loads_t p_sets to etrago tables

    neighbor_loads_t_etrago = pd.DataFrame(
        columns=["scn_name", "temp_id", "p_set"],
        index=neighbor_loads_t.columns,
    )
    neighbor_loads_t_etrago["scn_name"] = "eGon100RE"
    neighbor_loads_t_etrago["temp_id"] = 1
    for i in neighbor_loads_t.columns:
        neighbor_loads_t_etrago["p_set"][i] = neighbor_loads_t[
            i
        ].values.tolist()

    neighbor_loads_t_etrago.to_sql(
        "egon_etrago_load_timeseries",
        engine,
        schema="grid",
        if_exists="append",
        index=True,
        index_label="load_id",
    )

    # writing neighboring generator_t p_max_pu to etrago tables
    neighbor_gens_t_etrago = pd.DataFrame(
        columns=["scn_name", "temp_id", "p_max_pu"],
        index=neighbor_gens_t.columns,
    )
    neighbor_gens_t_etrago["scn_name"] = "eGon100RE"
    neighbor_gens_t_etrago["temp_id"] = 1
    for i in neighbor_gens_t.columns:
        neighbor_gens_t_etrago["p_max_pu"][i] = neighbor_gens_t[
            i
        ].values.tolist()

    neighbor_gens_t_etrago.to_sql(
        "egon_etrago_generator_timeseries",
        engine,
        schema="grid",
        if_exists="append",
        index=True,
        index_label="generator_id",
    )

    # writing neighboring stores_t e_min_pu to etrago tables
    neighbor_stores_t_etrago = pd.DataFrame(
        columns=["scn_name", "temp_id", "e_min_pu"],
        index=neighbor_stores_t.columns,
    )
    neighbor_stores_t_etrago["scn_name"] = "eGon100RE"
    neighbor_stores_t_etrago["temp_id"] = 1
    for i in neighbor_stores_t.columns:
        neighbor_stores_t_etrago["e_min_pu"][i] = neighbor_stores_t[
            i
        ].values.tolist()

    neighbor_stores_t_etrago.to_sql(
        "egon_etrago_store_timeseries",
        engine,
        schema="grid",
        if_exists="append",
        index=True,
        index_label="store_id",
    )

    # writing neighboring storage_units inflow to etrago tables
    neighbor_storage_t_etrago = pd.DataFrame(
        columns=["scn_name", "temp_id", "inflow"],
        index=neighbor_storage_t.columns,
    )
    neighbor_storage_t_etrago["scn_name"] = "eGon100RE"
    neighbor_storage_t_etrago["temp_id"] = 1
    for i in neighbor_storage_t.columns:
        neighbor_storage_t_etrago["inflow"][i] = neighbor_storage_t[
            i
        ].values.tolist()

    neighbor_storage_t_etrago.to_sql(
        "egon_etrago_storage_timeseries",
        engine,
        schema="grid",
        if_exists="append",
        index=True,
        index_label="storage_id",
    )

    # writing neighboring lines_t s_max_pu to etrago tables
    if not network.lines_t["s_max_pu"].empty:
        neighbor_lines_t_etrago = pd.DataFrame(
            columns=["scn_name", "s_max_pu"], index=neighbor_lines_t.columns
        )
        neighbor_lines_t_etrago["scn_name"] = "eGon100RE"

        for i in neighbor_lines_t.columns:
            neighbor_lines_t_etrago["s_max_pu"][i] = neighbor_lines_t[
                i
            ].values.tolist()

        neighbor_lines_t_etrago.to_sql(
            "egon_etrago_line_timeseries",
            engine,
            schema="grid",
            if_exists="append",
            index=True,
            index_label="line_id",
        )


# Skip execution of pypsa-eur-sec by default until optional task is implemented
execute_pypsa_eur_sec = False

if execute_pypsa_eur_sec:
    tasks = (run_pypsa_eur_sec, clean_database, neighbor_reduction)
else:
    tasks = (clean_database, neighbor_reduction)


class PypsaEurSec(Dataset):
    def __init__(self, dependencies):
        super().__init__(
            name="PypsaEurSec",
            version="0.0.8",
            dependencies=dependencies,
            tasks=tasks,
        )