calculate_H2_capacity()   Rating: B
last analyzed

Complexity
  Conditions: 8

Size
  Total lines: 71
  Code lines: 40

Duplication
  Duplicated lines: 0
  Ratio: 0 %

Importance
  Changes: 0

Metric   Value
eloc     40
dl       0
loc      71
rs       7.0533
c        0
b        0
f        0
cc       8
nop      2
How to fix: Long Method

Small methods make your code easier to understand, in particular when combined with a good name. Besides, if your method is small, finding a good name is usually much easier.

For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part into a new method, and to use the comment as a starting point when coming up with a good name for this new method.

Commonly applied refactorings include Extract Method; a sketch follows below.
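As an illustration only, the duplicated range-parsing logic in calculate_H2_capacity (listed further down) could be pulled into a small, well-named helper. The helper name _average_from_range is made up for this sketch and is not part of the module:

def _average_from_range(value, fallback):
    """Return value as float, averaging 'a-b' or 'a/b' ranges (sketch only)."""
    value = str(value).replace(",", ".")
    for sep in ("-", "/"):
        if sep in value:
            low, high = value.split(sep)[:2]
            try:
                return (float(low) + float(high)) / 2
            except ValueError:
                return fallback
    try:
        return float(value)
    except ValueError:
        return fallback

# calculate_H2_capacity(pressure, diameter) could then start with
#     diameter = _average_from_range(diameter, fallback=400)
#     pressure = _average_from_range(pressure, fallback=70)

Extracting the two nearly identical parsing blocks would also lower the condition count (cc = 8) reported above.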

"""
The central module containing all code dealing with the H2 grid in eGon100RE

"""

from itertools import count
from pathlib import Path
from urllib.request import urlretrieve
import math
import os
import re

from fuzzywuzzy import process
from geoalchemy2.types import Geometry
from scipy.spatial import cKDTree
from shapely import wkb
from shapely.geometry import LineString, MultiLineString, Point
import geopandas as gpd
import numpy as np
import pandas as pd

from egon.data import config, db
from egon.data.datasets.scenario_parameters import get_sector_parameters
from egon.data.datasets.scenario_parameters.parameters import (
    annualize_capital_costs,
)


def insert_h2_pipelines(scn_name):
    "Insert H2_grid based on input data from FNB-Gas"

    download_h2_grid_data()
    H2_grid_Neubau, H2_grid_Umstellung, H2_grid_Erweiterung = (
        read_h2_excel_sheets()
    )
    h2_bus_location = pd.read_csv(
        Path(".")
        / "data_bundle_egon_data"
        / "hydrogen_network"
        / "h2_grid_nodes.csv"
    )
    con = db.engine()

    sources = config.datasets()["etrago_hydrogen"]["sources"]
    target = config.datasets()["etrago_hydrogen"]["targets"]["hydrogen_links"]

    h2_buses_df = pd.read_sql(
        f"""
    SELECT bus_id, x, y FROM {sources["buses"]["schema"]}.{sources["buses"]["table"]}
    WHERE carrier in ('H2_grid')
    AND scn_name = '{scn_name}'
    """,
        con,
    )

    # Delete old entries
    db.execute_sql(
        f"""
        DELETE FROM {target["schema"]}.{target["table"]}
        WHERE "carrier" = 'H2_grid'
        AND scn_name = '{scn_name}' AND bus0 IN (
          SELECT bus_id
          FROM {sources["buses"]["schema"]}.{sources["buses"]["table"]}
          WHERE country = 'DE'
        )
        """
    )

    target = config.datasets()["etrago_hydrogen"]["targets"]["hydrogen_links"]

    for df in [H2_grid_Neubau, H2_grid_Umstellung, H2_grid_Erweiterung]:

        if df is H2_grid_Neubau:
            df.rename(
                columns={
                    "Planerische \nInbetriebnahme": "Planerische Inbetriebnahme"
                },
                inplace=True,
            )
            df.loc[
                df["Endpunkt\n(Ort)"] == "AQD Anlandung", "Endpunkt\n(Ort)"
            ] = "Schillig"
            df.loc[
                df["Endpunkt\n(Ort)"] == "Hallendorf", "Endpunkt\n(Ort)"
            ] = "Salzgitter"

        if df is H2_grid_Erweiterung:
            df.rename(
                columns={
                    "Umstellungsdatum/ Planerische Inbetriebnahme": "Planerische Inbetriebnahme",
                    "Nenndurchmesser (DN)": "Nenndurchmesser \n(DN)",
                    "Investitionskosten\n(Mio. Euro),\nKostenschätzung": "Investitionskosten*\n(Mio. Euro)",
                },
                inplace=True,
            )
            df = df[
                df["Berücksichtigung im Kernnetz \n[ja/nein/zurückgezogen]"]
                .str.strip()
                .str.lower()
                == "ja"
            ]
            df.loc[
                df["Endpunkt\n(Ort)"] == "Osdorfer Straße", "Endpunkt\n(Ort)"
            ] = "Berlin- Lichterfelde"

        h2_bus_location["Ort"] = h2_bus_location["Ort"].astype(str).str.strip()
        df["Anfangspunkt\n(Ort)"] = (
            df["Anfangspunkt\n(Ort)"].astype(str).str.strip()
        )
        df["Endpunkt\n(Ort)"] = df["Endpunkt\n(Ort)"].astype(str).str.strip()

        df = df[
            [
                "Anfangspunkt\n(Ort)",
                "Endpunkt\n(Ort)",
                "Nenndurchmesser \n(DN)",
                "Druckstufe (DP)\n[mind. 30 barg]",
                "Investitionskosten*\n(Mio. Euro)",
                "Planerische Inbetriebnahme",
                "Länge \n(km)",
            ]
        ]

        # matching start- and endpoint of each pipeline with georeferenced data
        df["Anfangspunkt_matched"] = fuzzy_match(
            df, h2_bus_location, "Anfangspunkt\n(Ort)"
        )
        df["Endpunkt_matched"] = fuzzy_match(
            df, h2_bus_location, "Endpunkt\n(Ort)"
        )

        # manual adjustments based on the Detailmaßnahmenkarte (detailed
        # measures map) of FNB-Gas
        # [https://fnb-gas.de/wasserstoffnetz-wasserstoff-kernnetz/]
        df = fix_h2_grid_infrastructure(df)

        df_merged = pd.merge(
            df,
            h2_bus_location[["Ort", "geom", "x", "y"]],
            how="left",
            left_on="Anfangspunkt_matched",
            right_on="Ort",
        ).rename(
            columns={"geom": "geom_start", "x": "x_start", "y": "y_start"}
        )
        df_merged = pd.merge(
            df_merged,
            h2_bus_location[["Ort", "geom", "x", "y"]],
            how="left",
            left_on="Endpunkt_matched",
            right_on="Ort",
        ).rename(columns={"geom": "geom_end", "x": "x_end", "y": "y_end"})

        H2_grid_df = df_merged.dropna(subset=["geom_start", "geom_end"])
        H2_grid_df = H2_grid_df[
            H2_grid_df["geom_start"] != H2_grid_df["geom_end"]
        ]
        H2_grid_df = pd.merge(
            H2_grid_df,
            h2_buses_df,
            how="left",
            left_on=["x_start", "y_start"],
            right_on=["x", "y"],
        ).rename(columns={"bus_id": "bus0"})
        H2_grid_df = pd.merge(
            H2_grid_df,
            h2_buses_df,
            how="left",
            left_on=["x_end", "y_end"],
            right_on=["x", "y"],
        ).rename(columns={"bus_id": "bus1"})
        H2_grid_df[["bus0", "bus1"]] = H2_grid_df[["bus0", "bus1"]].astype(
            "Int64"
        )

        H2_grid_df["geom_start"] = H2_grid_df["geom_start"].apply(
            lambda x: wkb.loads(bytes.fromhex(x))
        )
        H2_grid_df["geom_end"] = H2_grid_df["geom_end"].apply(
            lambda x: wkb.loads(bytes.fromhex(x))
        )
        H2_grid_df["topo"] = H2_grid_df.apply(
            lambda row: LineString([row["geom_start"], row["geom_end"]]),
            axis=1,
        )
        H2_grid_df["geom"] = H2_grid_df.apply(
            lambda row: MultiLineString(
                [LineString([row["geom_start"], row["geom_end"]])]
            ),
            axis=1,
        )
        H2_grid_gdf = gpd.GeoDataFrame(H2_grid_df, geometry="geom", crs=4326)

        scn_params = get_sector_parameters("gas", scn_name)
        next_link_id = db.next_etrago_id("link")

        H2_grid_gdf["link_id"] = range(
            next_link_id, next_link_id + len(H2_grid_gdf)
        )
        H2_grid_gdf["scn_name"] = scn_name
        H2_grid_gdf["carrier"] = "H2_grid"
        H2_grid_gdf["Planerische Inbetriebnahme"] = (
            H2_grid_gdf["Planerische Inbetriebnahme"]
            .astype(str)
            .apply(
                lambda x: (
                    int(re.findall(r"\d{4}", x)[-1])
                    if re.findall(r"\d{4}", x)
                    else (
                        int(re.findall(r"\d{2}\.\d{2}\.(\d{4})", x)[-1])
                        if re.findall(r"\d{2}\.\d{2}\.(\d{4})", x)
                        else None
                    )
                )
            )
        )
        H2_grid_gdf["build_year"] = H2_grid_gdf[
            "Planerische Inbetriebnahme"
        ].astype("Int64")
        H2_grid_gdf["p_nom"] = H2_grid_gdf.apply(
            lambda row: calculate_H2_capacity(
                row["Druckstufe (DP)\n[mind. 30 barg]"],
                row["Nenndurchmesser \n(DN)"],
            ),
            axis=1,
        )
        H2_grid_gdf["p_nom_min"] = H2_grid_gdf["p_nom"]
        H2_grid_gdf["p_nom_max"] = float("Inf")
        H2_grid_gdf["p_nom_extendable"] = False
        H2_grid_gdf["lifetime"] = scn_params["lifetime"]["H2_pipeline"]
        H2_grid_gdf["capital_cost"] = H2_grid_gdf.apply(
            lambda row: annualize_capital_costs(
                (
                    (
                        float(row["Investitionskosten*\n(Mio. Euro)"])
                        * 10**6
                        / row["p_nom"]
                    )
                    if pd.notna(row["Investitionskosten*\n(Mio. Euro)"])
                    and str(row["Investitionskosten*\n(Mio. Euro)"])
                    .replace(",", "")
                    .replace(".", "")
                    .isdigit()
                    and float(row["Investitionskosten*\n(Mio. Euro)"]) != 0
                    else scn_params["overnight_cost"]["H2_pipeline"]
                    # Analysis note: the variable scn_params does not seem to
                    # be defined in case the for loop on line 71 (the loop
                    # over the three input sheets) is not entered. Are you
                    # sure this can never be the case?
                    * row["Länge \n(km)"]
                ),
                row["lifetime"],
                0.05,
            ),
            axis=1,
        )
        H2_grid_gdf["p_min_pu"] = -1

        selected_columns = [
            "scn_name",
            "link_id",
            "bus0",
            "bus1",
            "build_year",
            "p_nom",
            "p_nom_min",
            "p_nom_extendable",
            "capital_cost",
            "geom",
            "topo",
            "carrier",
            "p_nom_max",
            "p_min_pu",
        ]

        H2_grid_final = H2_grid_gdf[selected_columns]

        # Insert data to db
        H2_grid_final.to_postgis(
            target["table"],
            con,
            schema=target["schema"],
            if_exists="append",
            dtype={"geom": Geometry()},
        )

    # connect saltcaverns to H2_grid
    connect_saltcavern_to_h2_grid(scn_name)

    # connect neighbour countries to H2_grid
    connect_h2_grid_to_neighbour_countries(scn_name)


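One possible way to address the scn_params note flagged above would be to fetch the sector parameters once, before the loop over the three sheets. This is only a sketch under that assumption (the function name insert_h2_pipelines_sketch is invented), not the project's actual fix:

def insert_h2_pipelines_sketch(scn_name):
    # sketch only: scn_params is bound before the per-sheet loop, so the
    # lambdas that reference it can never see an unbound name
    scn_params = get_sector_parameters("gas", scn_name)
    for df in read_h2_excel_sheets():
        ...  # per-sheet processing as in insert_h2_pipelines above
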
def replace_pipeline(df, start, end, intermediate):
    """
    Method for adjusting pipelines manually by splitting a pipeline at an
    intermediate point.

    Parameters
    ----------
    df : pandas.core.frame.DataFrame
        dataframe to be adjusted
    start: str
        startpoint of pipeline
    end: str
        endpoint of pipeline
    intermediate: str
        new intermediate point for splitting the given pipeline

    Returns
    -------
    df : pandas.core.frame.DataFrame
        adjusted dataframe

    """
    # Find rows where the start and end points match
    mask = (
        (df["Anfangspunkt_matched"] == start) & (df["Endpunkt_matched"] == end)
    ) | (
        (df["Anfangspunkt_matched"] == end) & (df["Endpunkt_matched"] == start)
    )

    # Separate the rows to replace
    if mask.any():
        df_replacement = df[~mask].copy()
        row_replaced = df[mask].iloc[0]

        # Add new rows for the split pipelines
        new_rows = pd.DataFrame(
            {
                "Anfangspunkt_matched": [start, intermediate],
                "Endpunkt_matched": [intermediate, end],
                "Nenndurchmesser \n(DN)": [
                    row_replaced["Nenndurchmesser \n(DN)"],
                    row_replaced["Nenndurchmesser \n(DN)"],
                ],
                "Druckstufe (DP)\n[mind. 30 barg]": [
                    row_replaced["Druckstufe (DP)\n[mind. 30 barg]"],
                    row_replaced["Druckstufe (DP)\n[mind. 30 barg]"],
                ],
                "Investitionskosten*\n(Mio. Euro)": [
                    row_replaced["Investitionskosten*\n(Mio. Euro)"],
                    row_replaced["Investitionskosten*\n(Mio. Euro)"],
                ],
                "Planerische Inbetriebnahme": [
                    row_replaced["Planerische Inbetriebnahme"],
                    row_replaced["Planerische Inbetriebnahme"],
                ],
                "Länge \n(km)": [
                    row_replaced["Länge \n(km)"],
                    row_replaced["Länge \n(km)"],
                ],
            }
        )

        df_replacement = pd.concat(
            [df_replacement, new_rows], ignore_index=True
        )
        return df_replacement
    else:
        return df


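A minimal, made-up illustration of the splitting behaviour (a sketch only; the values are invented and it relies on the module's pandas import and the replace_pipeline definition above):

toy = pd.DataFrame(
    {
        "Anfangspunkt_matched": ["Lubmin"],
        "Endpunkt_matched": ["Uckermark"],
        "Nenndurchmesser \n(DN)": [400],
        "Druckstufe (DP)\n[mind. 30 barg]": [70],
        "Investitionskosten*\n(Mio. Euro)": [10],
        "Planerische Inbetriebnahme": [2030],
        "Länge \n(km)": [50],
    }
)
toy = replace_pipeline(toy, "Lubmin", "Uckermark", "Wrangelsburg")
# toy now holds two rows, Lubmin-Wrangelsburg and Wrangelsburg-Uckermark,
# each copying the diameter, pressure, cost, commissioning year and length
# of the replaced pipeline
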
def fuzzy_match(df1, df2, column_to_match, threshold=80):
    """
    Method for matching input data of the H2_grid with georeferenced data
    (even if the strings are not exactly the same)

    Parameters
    ----------
    df1 : pandas.core.frame.DataFrame
        input dataframe
    df2 : pandas.core.frame.DataFrame
        georeferenced dataframe with h2_buses
    column_to_match: str
        matching column
    threshold: float
        matching percentage for a successful comparison

    Returns
    -------
    matched : list
        list with all matched location names

    """
    options = df2["Ort"].unique()
    matched = []

    # Compare every location name in df1 with the location names in df2
    for value in df1[column_to_match]:
        match, score = process.extractOne(value, options)
        if score >= threshold:
            matched.append(match)
        else:
            matched.append(None)

    return matched


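For orientation, a small usage sketch (the strings are invented and the exact score depends on the fuzzywuzzy scorer):

from fuzzywuzzy import process

match, score = process.extractOne("Wilhelmshafen", ["Wilhelmshaven", "Emden"])
# match == "Wilhelmshaven"; the score is well above the default threshold of
# 80, so fuzzy_match would accept it, while a score below 80 yields None
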
def calculate_H2_capacity(pressure, diameter):
    """
    Method for calculating the capacity of pipelines based on data input
    from FNB-Gas

    Parameters
    ----------
    pressure : float
        input for pressure of pipeline
    diameter: float
        input for diameter of pipeline

    Returns
    -------
    energy_flow: float
        transmission capacity of pipeline

    """

    pressure = str(pressure).replace(",", ".")
    diameter = str(diameter)

    def convert_to_float(value):
        try:
            return float(value)
        except ValueError:
            # average value from the data source, because the capacities of
            # some lines are not fixed yet
            return 400

    # in case a range is given, use the average value
    if "-" in diameter:
        diameters = diameter.split("-")
        diameter = (
            convert_to_float(diameters[0]) + convert_to_float(diameters[1])
        ) / 2
    elif "/" in diameter:
        diameters = diameter.split("/")
        diameter = (
            convert_to_float(diameters[0]) + convert_to_float(diameters[1])
        ) / 2
    else:
        try:
            diameter = float(diameter)
        except ValueError:
            diameter = 400  # average value from the data source

    if "-" in pressure:
        pressures = pressure.split("-")
        pressure = (float(pressures[0]) + float(pressures[1])) / 2
    elif "/" in pressure:
        pressures = pressure.split("/")
        pressure = (float(pressures[0]) + float(pressures[1])) / 2
    else:
        try:
            pressure = float(pressure)
        except ValueError:
            pressure = 70  # average value from the data source

    velocity = 40  # source: L. Koops (2023): GAS PIPELINE VERSUS LIQUID HYDROGEN TRANSPORT – PERSPECTIVES FOR TECHNOLOGIES, ENERGY DEMAND AND TRANSPORT CAPACITY, AND IMPLICATIONS FOR AVIATION
    temperature = (
        10 + 273.15
    )  # source: see above (L. Koops 2023)
    density = (
        pressure * 10**5 / (4.1243 * 10**3 * temperature)
    )  # gas constant of H2 = 4.1243 [kJ/(kg*K)]
    mass_flow = density * math.pi * ((diameter / 10**3) / 2) ** 2 * velocity
    energy_flow = mass_flow * 119.988  # lower heating value of H2 = 119.988 [MJ/kg]

    return energy_flow


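As a plausibility check only (values chosen to match the fallbacks above, DN 400 at 70 barg), the formula yields a capacity of roughly 3.6 GW:

import math

pressure, diameter = 70, 400  # barg and mm, the fallback values above
velocity, temperature = 40, 10 + 273.15
density = pressure * 10**5 / (4.1243 * 10**3 * temperature)  # ~6.0 kg/m^3
mass_flow = density * math.pi * ((diameter / 10**3) / 2) ** 2 * velocity  # ~30 kg/s
energy_flow = mass_flow * 119.988  # ~3,600 MW
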
def download_h2_grid_data():
    """
    Download input data for the H2_grid from FNB-Gas
    (https://fnb-gas.de/wasserstoffnetz-wasserstoff-kernnetz/)

    The following data for H2 are downloaded into the folder
    ./datasets/h2_data:
      * Links (file Anlage_3_Wasserstoffkernnetz_Neubau.xlsx,
                    Anlage_4_Wasserstoffkernnetz_Umstellung.xlsx,
                    Anlage_2_Wasserstoffkernetz_weitere_Leitungen.xlsx)

    Returns
    -------
    None

    """
    path = Path("datasets/h2_data")
    os.makedirs(path, exist_ok=True)

    download_config = config.datasets()["etrago_hydrogen"]["sources"][
        "H2_grid"
    ]
    target_file_Um = path / download_config["converted_ch4_pipes"]["path"]
    target_file_Neu = path / download_config["new_constructed_pipes"]["path"]
    target_file_Erw = (
        path / download_config["pipes_of_further_h2_grid_operators"]["path"]
    )

    for target_file in [target_file_Neu, target_file_Um, target_file_Erw]:
        if target_file is target_file_Um:
            url = download_config["converted_ch4_pipes"]["url"]
        elif target_file is target_file_Neu:
            url = download_config["new_constructed_pipes"]["url"]
        else:
            url = download_config["pipes_of_further_h2_grid_operators"]["url"]

        if not os.path.isfile(target_file):
            urlretrieve(url, target_file)


def read_h2_excel_sheets():
    """
    Read the downloaded Excel files with location names for future H2 pipelines

    Returns
    -------
    df_Neu : pandas.core.frame.DataFrame
    df_Um : pandas.core.frame.DataFrame
    df_Erw : pandas.core.frame.DataFrame

    """

    path = Path(".") / "datasets" / "h2_data"
    download_config = config.datasets()["etrago_hydrogen"]["sources"][
        "H2_grid"
    ]
    excel_file_Um = pd.ExcelFile(
        f'{path}/{download_config["converted_ch4_pipes"]["path"]}'
    )
    excel_file_Neu = pd.ExcelFile(
        f'{path}/{download_config["new_constructed_pipes"]["path"]}'
    )
    excel_file_Erw = pd.ExcelFile(
        f'{path}/{download_config["pipes_of_further_h2_grid_operators"]["path"]}'
    )

    df_Um = pd.read_excel(excel_file_Um, header=3)
    df_Neu = pd.read_excel(excel_file_Neu, header=3)
    df_Erw = pd.read_excel(excel_file_Erw, header=2)

    return df_Neu, df_Um, df_Erw


def fix_h2_grid_infrastructure(df):
    """
    Manual adjustments for a more accurate grid topology, based on the
    Detailmaßnahmenkarte (detailed measures map) of FNB-Gas
    [https://fnb-gas.de/wasserstoffnetz-wasserstoff-kernnetz/]

    Returns
    -------
    df : pandas.core.frame.DataFrame

    """

    df = replace_pipeline(df, "Lubmin", "Uckermark", "Wrangelsburg")
    df = replace_pipeline(df, "Wrangelsburg", "Uckermark", "Schönermark")
    df = replace_pipeline(
        df, "Hemmingstedt", "Ascheberg (Holstein)", "Remmels Nord"
    )
    df = replace_pipeline(df, "Heidenau", "Elbe-Süd", "Weißenfelde")
    df = replace_pipeline(df, "Weißenfelde", "Elbe-Süd", "Stade")
    df = replace_pipeline(df, "Stade AOS", "KW Schilling", "Abzweig Stade")
    df = replace_pipeline(df, "Rosengarten (Sottorf)", "Moorburg", "Leversen")
    df = replace_pipeline(df, "Leversen", "Moorburg", "Hamburg Süd")
    df = replace_pipeline(df, "Achim", "Folmhusen", "Wardenburg")
    df = replace_pipeline(df, "Achim", "Wardenburg", "Sandkrug")
    df = replace_pipeline(df, "Dykhausen", "Bunde", "Emden")
    df = replace_pipeline(df, "Emden", "Nüttermoor", "Jemgum")
    df = replace_pipeline(df, "Rostock", "Glasewitz", "Fliegerhorst Laage")
    df = replace_pipeline(df, "Wilhelmshaven", "Dykhausen", "Sande")
    df = replace_pipeline(
        df, "Wilhelmshaven Süd", "Wilhelmshaven Nord", "Wilhelmshaven"
    )
    df = replace_pipeline(df, "Sande", "Jemgum", "Westerstede")
    df = replace_pipeline(df, "Kalle", "Ochtrup", "Frensdorfer Bruchgraben")
    df = replace_pipeline(
        df, "Frensdorfer Bruchgraben", "Ochtrup", "Bad Bentheim"
    )
    df = replace_pipeline(df, "Bunde", "Wettringen", "Emsbüren")
    df = replace_pipeline(df, "Emsbüren", "Dorsten", "Ochtrup")
    df = replace_pipeline(df, "Ochtrup", "Dorsten", "Heek")
    df = replace_pipeline(df, "Lemförde", "Drohne", "Reiningen")
    df = replace_pipeline(df, "Edesbüttel", "Bobbau", "Uhrsleben")
    df = replace_pipeline(df, "Sixdorf", "Wiederitzsch", "Cörmigk")
    df = replace_pipeline(df, "Schkeuditz", "Plaußig", "Wiederitzsch")
    df = replace_pipeline(df, "Wiederitzsch", "Plaußig", "Mockau Nord")
    df = replace_pipeline(df, "Bobbau", "Rückersdorf", "Nempitz")
    df = replace_pipeline(df, "Räpitz", "Böhlen", "Kleindalzig")
    df = replace_pipeline(df, "Buchholz", "Friedersdorf", "Werben")
    df = replace_pipeline(df, "Radeland", "Uckermark", "Friedersdorf")
    df = replace_pipeline(df, "Friedersdorf", "Uckermark", "Herzfelde")
    df = replace_pipeline(df, "Blumberg", "Berlin-Mitte", "Berlin-Marzahn")
    df = replace_pipeline(df, "Radeland", "Zethau", "Coswig")
    df = replace_pipeline(df, "Leuna", "Böhlen", "Räpitz")
    df = replace_pipeline(df, "Dürrengleina", "Stadtroda", "Zöllnitz")
    df = replace_pipeline(df, "Mailing", "Kötz", "Wertingen")
    df = replace_pipeline(df, "Lampertheim", "Rüsselsheim", "Gernsheim-Nord")
    df = replace_pipeline(df, "Birlinghoven", "Rüsselsheim", "Wiesbaden")
    df = replace_pipeline(df, "Medelsheim", "Mittelbrunn", "Seyweiler")
    df = replace_pipeline(df, "Seyweiler", "Dillingen", "Fürstenhausen")
    df = replace_pipeline(df, "Reckrod", "Wolfsbehringen", "Eisenach")
    df = replace_pipeline(df, "Elten", "St. Hubert", "Hüthum")
    df = replace_pipeline(df, "St. Hubert", "Hüthum", "Uedener Bruch")
    df = replace_pipeline(df, "Wallach", "Möllen", "Spellen")
    df = replace_pipeline(df, "St. Hubert", "Glehn", "Krefeld")
    df = replace_pipeline(df, "Neumühl", "Werne", "Bottrop")
    df = replace_pipeline(df, "Bottrop", "Werne", "Recklinghausen")
    df = replace_pipeline(df, "Werne", "Eisenach", "Arnsberg-Bruchhausen")
    df = replace_pipeline(df, "Dorsten", "Gescher", "Gescher Süd")
    df = replace_pipeline(df, "Dorsten", "Hamborn", "Averbruch")
    df = replace_pipeline(df, "Neumühl", "Bruckhausen", "Hamborn")
    df = replace_pipeline(df, "Werne", "Paffrath", "Westhofen")
    df = replace_pipeline(df, "Glehn", "Voigtslach", "Dormagen")
    df = replace_pipeline(df, "Voigtslach", "Paffrath", "Leverkusen")
    df = replace_pipeline(df, "Glehn", "Ludwigshafen", "Wesseling")
    df = replace_pipeline(df, "Rothenstadt", "Rimpar", "Reutles")

    return df


def connect_saltcavern_to_h2_grid(scn_name):
    """
    Connect each salt cavern to the nearest H2 bus of the H2 grid and insert
    the links into the database

    Returns
    -------
    None

    """

    targets = config.datasets()["etrago_hydrogen"]["targets"]
    sources = config.datasets()["etrago_hydrogen"]["sources"]
    engine = db.engine()

    db.execute_sql(
        f"""
           DELETE FROM {targets["hydrogen_links"]["schema"]}.{targets["hydrogen_links"]["table"]}
           WHERE "carrier" in ('H2_saltcavern')
           AND scn_name = '{scn_name}';
           """
    )
    h2_buses_query = f"""SELECT bus_id, x, y, ST_Transform(geom, 32632) as geom
                        FROM {sources["buses"]["schema"]}.{sources["buses"]["table"]}
                        WHERE carrier = 'H2_grid' AND scn_name = '{scn_name}'
                    """
    h2_buses = gpd.read_postgis(h2_buses_query, engine)

    salt_caverns_query = f"""SELECT bus_id, x, y, ST_Transform(geom, 32632) as geom
                            FROM {sources["buses"]["schema"]}.{sources["buses"]["table"]}
                            WHERE carrier = 'H2_saltcavern' AND scn_name = '{scn_name}'
                        """
    salt_caverns = gpd.read_postgis(salt_caverns_query, engine)

    max_link_id = db.next_etrago_id("link")
    next_link_id = count(start=max_link_id, step=1)
    scn_params = get_sector_parameters("gas", scn_name)

    H2_coords = np.array([(point.x, point.y) for point in h2_buses.geometry])
    H2_tree = cKDTree(H2_coords)
    links = []
    for idx, bus_saltcavern in salt_caverns.iterrows():
        saltcavern_coords = [
            bus_saltcavern["geom"].x,
            bus_saltcavern["geom"].y,
        ]

        dist, nearest_idx = H2_tree.query(saltcavern_coords, k=1)
        nearest_h2_bus = h2_buses.iloc[nearest_idx]

        link = {
            "scn_name": scn_name,
            "bus0": nearest_h2_bus["bus_id"],
            "bus1": bus_saltcavern["bus_id"],
            "link_id": next(next_link_id),
            "carrier": "H2_saltcavern",
            "lifetime": 25,
            "p_nom_extendable": True,
            "p_min_pu": -1,
            "capital_cost": scn_params["overnight_cost"]["H2_pipeline"]
            * dist
            / 1000,
            "geom": MultiLineString(
                [
                    LineString(
                        [
                            (nearest_h2_bus["x"], nearest_h2_bus["y"]),
                            (bus_saltcavern["x"], bus_saltcavern["y"]),
                        ]
                    )
                ]
            ),
        }
        links.append(link)

    links_df = gpd.GeoDataFrame(links, geometry="geom", crs=4326)

    links_df.to_postgis(
        targets["hydrogen_links"]["table"],
        engine,
        schema=targets["hydrogen_links"]["schema"],
        index=False,
        if_exists="append",
        dtype={"geom": Geometry()},
    )


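The nearest-neighbour lookup above boils down to a cKDTree query; a self-contained sketch with made-up coordinates:

import numpy as np
from scipy.spatial import cKDTree

h2_coords = np.array([[0.0, 0.0], [100.0, 0.0], [0.0, 200.0]])
tree = cKDTree(h2_coords)
dist, nearest_idx = tree.query([30.0, 40.0], k=1)
# dist == 50.0 and nearest_idx == 0: a salt cavern at (30, 40) would be linked
# to the first H2 bus, with capital_cost scaled by dist / 1000
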
def connect_h2_grid_to_neighbour_countries(scn_name):
    """
    Connect the German H2_grid with neighbouring countries. The German H2
    buses which were planned as connection points for the import/export of
    hydrogen to the corresponding neighbouring country are based on the
    publication of FNB-Gas
    (https://fnb-gas.de/wasserstoffnetz-wasserstoff-kernnetz/).

    Returns
    -------
    None

    """
    engine = db.engine()
    targets = config.datasets()["etrago_hydrogen"]["targets"]
    sources = config.datasets()["etrago_hydrogen"]["sources"]

    h2_buses_df = gpd.read_postgis(
        f"""
    SELECT bus_id, x, y, geom
    FROM {sources["buses"]["schema"]}.{sources["buses"]["table"]}
    WHERE carrier in ('H2_grid')
    AND scn_name = '{scn_name}'

    """,
        engine,
    )

    h2_links_df = pd.read_sql(
        f"""
    SELECT link_id, bus0, bus1, p_nom
    FROM {sources["links"]["schema"]}.{sources["links"]["table"]}
    WHERE carrier in ('H2_grid')
    AND scn_name = '{scn_name}'

    """,
        engine,
    )

    abroad_buses_df = gpd.read_postgis(
        f"""
        SELECT bus_id, x, y, geom, country
        FROM {sources["buses"]["schema"]}.{sources["buses"]["table"]}
        WHERE carrier = 'H2' AND scn_name = '{scn_name}' AND country != 'DE'
        """,
        engine,
    )

    abroad_con_buses = [
        ("Greifenhagen", "PL"),
        ("Fürstenberg (PL)", "PL"),
        ("Eynatten", "BE"),
        ("Überackern", "AT"),
        ("Vlieghuis", "NL"),
        ("Oude", "NL"),
        ("Oude Statenzijl", "NL"),
        ("Vreden", "NL"),
        ("Elten", "NL"),
        ("Leidingen", "FR"),
        ("Carling", "FR"),
        ("Medelsheim", "FR"),
        ("Waidhaus", "CZ"),
        ("Deutschneudorf", "CZ"),
        ("Grenzach", "CH"),
        ("AWZ", "DK"),
        ("AWZ", "SE"),
        ("AQD Offshore SEN 1", "GB"),
        ("AQD Offshore SEN 1", "NO"),
        ("AQD Offshore SEN 1", "DK"),
        ("AQD Offshore SEN 1", "NL"),
        ("Fessenheim", "FR"),
        ("Ellund", "DK"),
    ]

    h2_bus_location = pd.read_csv(
        Path(".")
        / "data_bundle_egon_data"
        / "hydrogen_network"
        / "h2_grid_nodes.csv"
    )

    ### prepare data for connecting abroad_buses
    matched_locations = h2_bus_location[
        h2_bus_location["Ort"].isin([name for name, _ in abroad_con_buses])
    ]
    matched_buses = matched_locations.merge(
        h2_buses_df, left_on=["x", "y"], right_on=["x", "y"], how="inner"
    )

    final_matched_buses = matched_buses[
        ["bus_id", "Ort", "x", "y", "geom_y"]
    ].rename(columns={"geom_y": "geom"})

    abroad_links = h2_links_df[
        (h2_links_df["bus0"].isin(final_matched_buses["bus_id"]))
        | (h2_links_df["bus1"].isin(final_matched_buses["bus_id"]))
    ]
    abroad_links_bus0 = abroad_links.merge(
        final_matched_buses, left_on="bus0", right_on="bus_id", how="inner"
    )
    abroad_links_bus1 = abroad_links.merge(
        final_matched_buses, left_on="bus1", right_on="bus_id", how="inner"
    )
    abroad_con_df = pd.concat([abroad_links_bus1, abroad_links_bus0])

    connection_links = []
    max_link_id = db.next_etrago_id("link")
    next_max_link_id = count(start=max_link_id, step=1)

    for inland_name, country_code in abroad_con_buses:
        # filter out German h2_buses for connecting neighbouring countries
        inland_bus = abroad_con_df[abroad_con_df["Ort"] == inland_name]
        if inland_bus.empty:
            print(f"Warning: No Inland-Bus found for {inland_name}.")
            continue

        # filter out the corresponding abroad_bus for connecting neighbouring
        # countries
        abroad_bus = abroad_buses_df[
            abroad_buses_df["country"] == country_code
        ]
        if abroad_bus.empty:
            print(f"Warning: No Abroad-Bus found for {country_code}.")
            continue

        for _, i_bus in inland_bus.iterrows():
            abroad_bus["distance"] = abroad_bus["geom"].apply(
                lambda g: i_bus["geom"].distance(g)
                # Analysis note: the variable i_bus does not seem to be
                # defined for all execution paths.
            )

            nearest_abroad_bus = abroad_bus.loc[
                abroad_bus["distance"].idxmin()
            ]
            relevant_buses = inland_bus[
                inland_bus["bus_id"] == i_bus["bus_id"]
            ]
            p_nom_value = relevant_buses["p_nom"].sum()

        connection_links.append(
            {
                "scn_name": scn_name,
                "carrier": "H2_grid",
                "link_id": next(next_max_link_id),
                "bus0": i_bus["bus_id"],
                "bus1": nearest_abroad_bus["bus_id"],
                "p_nom": p_nom_value,
                # Analysis note: the variables i_bus, nearest_abroad_bus and
                # p_nom_value do not seem to be defined for all execution
                # paths.
                "p_min_pu": -1,
                "geom": MultiLineString(
                    [
                        LineString(
                            [
                                (i_bus["geom"].x, i_bus["geom"].y),
                                (
                                    nearest_abroad_bus["geom"].x,
                                    nearest_abroad_bus["geom"].y,
                                ),
                            ]
                        )
                    ]
                ),
            }
        )
    connection_links_df = gpd.GeoDataFrame(
        connection_links, geometry="geom", crs="EPSG:4326"
    )

    connection_links_df.to_postgis(
        name=targets["hydrogen_links"]["table"],
        con=engine,
        schema=targets["hydrogen_links"]["schema"],
        if_exists="append",
        index=False,
    )
    print("Neighbour countries are successfully connected to H2-grid")