Passed
Pull Request — dev (#917), created by unknown (01:36)

data.datasets.gas_neighbours.eGon2035 (Rating: A)

Complexity
    Total Complexity: 29

Size / Duplication
    Total Lines: 1350
    Duplicated Lines: 7.48 %

Importance
    Changes: 0

Metric    Value
wmc       29
eloc      767
dl        101
loc       1350
rs        9.833
c         0
b         0
f         0

16 Functions

Rating   Name   Duplication   Size   Complexity  
A insert_generators() 0 63 1
B insert_power_to_h2_demand() 0 134 2
A import_power_to_h2_demandTS() 48 48 2
B read_LNG_capacities() 0 65 2
B calc_global_power_to_h2_demand() 0 87 1
A grid() 0 11 1
C calculate_ch4_grid_capacities() 0 275 7
A import_ch4_demandTS() 53 53 2
A insert_storage() 0 40 1
B calc_capacity_per_year() 0 87 1
A tyndp_gas_generation() 0 13 1
B calc_ch4_storage_capacities() 0 86 3
B insert_ch4_demand() 0 118 2
A tyndp_gas_demand() 0 16 1
B calc_global_ch4_demand() 0 73 1
B calc_capacities() 0 99 1

How to fix: Duplicated Code

Duplicate code is one of the most pungent code smells. A rule that is often used is to restructure code once it is duplicated in three or more places.

Common duplication problems usually share one solution: extract the duplicated logic into a single shared function or module and call it from every place that needs it.
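One way to apply that rule to this module is sketched below for the two functions flagged above (import_ch4_demandTS() and import_power_to_h2_demandTS()), which both load the same PyPSA-eur-sec network and select the same 'residential rural heat' load profiles. The helper name and signature are illustrative only and are not part of the reviewed code; paths, carriers and countries mirror the module.

# Illustrative sketch only: a possible shared helper for the two duplicated
# functions. The name _load_neighbor_heat_profiles and its signature are
# hypothetical; paths, carriers and countries mirror the reviewed module.
from pathlib import Path

import pypsa


def _load_neighbor_heat_profiles(countries, drop_duplicate_countries=False):
    """Return hourly 'residential rural heat' load profiles of neighbouring countries."""
    target_file = (
        Path(".")
        / "data_bundle_egon_data"
        / "pypsa_eur_sec"
        / "2022-07-26-egondata-integration"
        / "postnetworks"
        / "elec_s_37_lv2.0__Co2L0-1H-T-H-B-I-dist1_2050.nc"
    )
    network = pypsa.Network(str(target_file))

    # Tag buses with a country code and keep foreign residential-rural-heat buses
    network.buses.country = network.buses.index.str[:2]
    neighbors = network.buses[network.buses.country != "DE"]
    neighbors = neighbors[
        neighbors["country"].isin(countries)
        & (neighbors["carrier"] == "residential rural heat")
    ]
    if drop_duplicate_countries:
        neighbors = neighbors.drop_duplicates(subset="country")

    # Select the corresponding load time series
    loads = network.loads[network.loads.bus.isin(neighbors.index)]
    cols = loads.index[loads.index.isin(network.loads_t.p_set.columns)]
    return network.loads_t["p_set"][cols]


# A caller could then keep only its specific post-processing, e.g.:
#     profiles = _load_neighbor_heat_profiles(countries, drop_duplicate_countries=True)
#     norway_heat_demand = profiles["NO3 0 residential rural heat"].sum()
#     normalized = profiles / profiles.sum()

Restructuring along these lines would remove the 48- and 53-line duplicated blocks reported for the two functions above.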

"""Central module containing code dealing with gas neighbours for eGon2035
"""

from pathlib import Path
from urllib.request import urlretrieve
import ast
import zipfile

from geoalchemy2.types import Geometry
from shapely.geometry import LineString, MultiLineString
import geopandas as gpd
import numpy as np
import pandas as pd
import pypsa

from egon.data import config, db
from egon.data.datasets import Dataset
from egon.data.datasets.electrical_neighbours import (
    get_foreign_bus_id,
    get_map_buses,
)
from egon.data.datasets.gas_neighbours.gas_abroad import (
    insert_gas_grid_capacities,
    get_foreign_gas_bus_id,
)
from egon.data.datasets.scenario_parameters import get_sector_parameters

countries = [
    "AT",
    "BE",
    "CH",
    "CZ",
    "DK",
    "FR",
    "GB",
    "LU",
    "NL",
    "NO",
    "PL",
    "RU",
    "SE",
    "UK",
]

def read_LNG_capacities():
    lng_file = "datasets/gas_data/data/IGGIELGN_LNGs.csv"
    IGGIELGN_LNGs = gpd.read_file(lng_file)

    map_countries_scigrid = {
        "BE": "BE00",
        "EE": "EE00",
        "EL": "GR00",
        "ES": "ES00",
        "FI": "FI00",
        "FR": "FR00",
        "GB": "UK00",
        "IT": "ITCN",
        "LT": "LT00",
        "LV": "LV00",
        "MT": "MT00",
        "NL": "NL00",
        "PL": "PL00",
        "PT": "PT00",
        "SE": "SE01",
    }

    # 437.5 MWh/h per (1e6 m3/day) corresponds to ~10.5 kWh/m3
    # (437.5 MWh/h * 24 h = 10500 MWh per M_m3)
    conversion_factor = 437.5  # MCM/day to MWh/h
    c2 = 24 / 1000  # MWh/h to GWh/d
    p_nom = []

    for index, row in IGGIELGN_LNGs.iterrows():
        param = ast.literal_eval(row["param"])
        p_nom.append(
            param["max_cap_store2pipe_M_m3_per_d"] * conversion_factor * c2
        )

    IGGIELGN_LNGs["LNG max_cap_store2pipe_M_m3_per_d (in GWh/d)"] = p_nom

    IGGIELGN_LNGs.drop(
        [
            "uncertainty",
            "method",
            "param",
            "comment",
            "tags",
            "source_id",
            "lat",
            "long",
            "geometry",
            "id",
            "name",
            "node_id",
        ],
        axis=1,
        inplace=True,
    )

    IGGIELGN_LNGs["Country"] = IGGIELGN_LNGs["country_code"].map(
        map_countries_scigrid
    )
    IGGIELGN_LNGs = (
        IGGIELGN_LNGs.groupby(["Country"])[
            "LNG max_cap_store2pipe_M_m3_per_d (in GWh/d)"
        ]
        .sum()
        .sort_index()
    )

    return IGGIELGN_LNGs

def calc_capacities():
    """Calculates gas production capacities from TYNDP data

    Returns
    -------
    pandas.DataFrame
        Gas production capacities per foreign node and energy carrier

    """

    sources = config.datasets()["gas_neighbours"]["sources"]

    # insert installed capacities
    file = zipfile.ZipFile(f"tyndp/{sources['tyndp_capacities']}")
    df = pd.read_excel(
        file.open("TYNDP-2020-Scenario-Datafile.xlsx").read(),
        sheet_name="Gas Data",
    )

    df = (
        df.query(
            'Scenario == "Distributed Energy" & '
            '(Case == "Peak" | Case == "Average") &'  # Case: 2 Week/Average/DF/Peak
            'Category == "Production"'
        )
        .drop(
            columns=[
                "Generator_ID",
                "Climate Year",
                "Simulation_ID",
                "Node 1",
                "Path",
                "Direct/Indirect",
                "Sector",
                "Note",
                "Category",
                "Scenario",
            ]
        )
        .set_index("Node/Line")
        .sort_index()
    )

    lng = read_LNG_capacities()
    df_2030 = calc_capacity_per_year(df, lng, 2030)
    df_2040 = calc_capacity_per_year(df, lng, 2040)

    # Conversion GWh/d to MWh/h
    conversion_factor = 1000 / 24

    df_2035 = pd.concat([df_2040, df_2030], axis=1)
    df_2035 = df_2035.drop(
        columns=[
            "Value_conv_2040",
            "Value_conv_2030",
            "Value_bio_2040",
            "Value_bio_2030",
        ]
    )
    df_2035["cap_2035"] = (df_2035["CH4_2030"] + df_2035["CH4_2040"]) / 2
    df_2035["e_nom_max"] = (
        ((df_2035["e_nom_max_2030"] + df_2035["e_nom_max_2040"]) / 2)
        * conversion_factor
        * 8760
    )
    df_2035["ratioConv_2035"] = (
        df_2035["ratioConv_2030"] + df_2035["ratioConv_2040"]
    ) / 2
    grouped_capacities = df_2035.drop(
        columns=[
            "ratioConv_2030",
            "ratioConv_2040",
            "CH4_2040",
            "CH4_2030",
            "e_nom_max_2030",
            "e_nom_max_2040",
        ]
    ).reset_index()

    grouped_capacities["cap_2035"] = (
        grouped_capacities["cap_2035"] * conversion_factor
    )

    # Add generator in Russia of infinite capacity
    grouped_capacities = grouped_capacities.append(
        {
            "cap_2035": 1e9,
            "e_nom_max": np.inf,
            "ratioConv_2035": 1,
            "index": "RU",
        },
        ignore_index=True,
    )

    # choose capacities for considered countries
    grouped_capacities = grouped_capacities[
        grouped_capacities["index"].str[:2].isin(countries)
    ]
    return grouped_capacities

def calc_capacity_per_year(df, lng, year):
    """Calculates gas production capacities from TYNDP data for a specified year

    Parameters
    ----------
    df : pandas.DataFrame
        DataFrame containing all TYNDP data.

    lng : geopandas.GeoDataFrame
        Georeferenced LNG terminal capacities.

    year : int
        Year to calculate gas production capacity for.

    Returns
    -------
    pandas.DataFrame
        Gas production capacities per foreign node and energy carrier
    """
    df_conv_peak = (
        df[
            (df["Parameter"] == "Conventional")
            & (df["Year"] == year)
            & (df["Case"] == "Peak")
        ]
        .rename(columns={"Value": f"Value_conv_{year}_peak"})
        .drop(columns=["Parameter", "Year", "Case"])
    )
    df_conv_average = (
        df[
            (df["Parameter"] == "Conventional")
            & (df["Year"] == year)
            & (df["Case"] == "Average")
        ]
        .rename(columns={"Value": f"Value_conv_{year}_average"})
        .drop(columns=["Parameter", "Year", "Case"])
    )
    df_bioch4 = (
        df[
            (df["Parameter"] == "Biomethane")
            & (df["Year"] == year)
            & (
                df["Case"] == "Peak"
            )  # Peak and Average have the same values for biogas production in 2030 and 2040
        ]
        .rename(columns={"Value": f"Value_bio_{year}"})
        .drop(columns=["Parameter", "Year", "Case"])
    )

    # Some values are duplicated (DE00 in 2030)
    df_conv_peak = df_conv_peak[~df_conv_peak.index.duplicated(keep="first")]
    df_conv_average = df_conv_average[
        ~df_conv_average.index.duplicated(keep="first")
    ]

    df_year = pd.concat(
        [df_conv_peak, df_conv_average, df_bioch4, lng], axis=1
    ).fillna(0)
    df_year = df_year[
        ~(
            (df_year[f"Value_conv_{year}_peak"] == 0)
            & (df_year[f"Value_bio_{year}"] == 0)
            & (df_year["LNG max_cap_store2pipe_M_m3_per_d (in GWh/d)"] == 0)
        )
    ]
    df_year[f"Value_conv_{year}"] = (
        df_year[f"Value_conv_{year}_peak"]
        + df_year["LNG max_cap_store2pipe_M_m3_per_d (in GWh/d)"]
    )
    df_year[f"CH4_{year}"] = (
        df_year[f"Value_conv_{year}"] + df_year[f"Value_bio_{year}"]
    )
    df_year[f"ratioConv_{year}"] = (
        df_year[f"Value_conv_{year}_peak"] / df_year[f"CH4_{year}"]
    )
    df_year[f"e_nom_max_{year}"] = (
        df_year[f"Value_conv_{year}_average"] + df_year[f"Value_bio_{year}"]
    )
    df_year = df_year.drop(
        columns=[
            "LNG max_cap_store2pipe_M_m3_per_d (in GWh/d)",
            f"Value_conv_{year}_average",
            f"Value_conv_{year}_peak",
        ]
    )

    return df_year

def insert_generators(gen):
    """Insert gas generators for foreign countries based on TYNDP data

    Parameters
    ----------
    gen : pandas.DataFrame
        Gas production capacities per foreign node and energy carrier

    Returns
    -------
    None.

    """
    sources = config.datasets()["gas_neighbours"]["sources"]
    targets = config.datasets()["gas_neighbours"]["targets"]
    map_buses = get_map_buses()
    scn_params = get_sector_parameters("gas", "eGon2035")

    # Delete existing data
    db.execute_sql(
        f"""
        DELETE FROM
        {targets['generators']['schema']}.{targets['generators']['table']}
        WHERE bus IN (
            SELECT bus_id FROM
            {sources['buses']['schema']}.{sources['buses']['table']}
            WHERE country != 'DE'
            AND scn_name = 'eGon2035')
        AND scn_name = 'eGon2035'
        AND carrier = 'CH4';
        """
    )

    # Set bus_id
    gen.loc[gen[gen["index"].isin(map_buses.keys())].index, "index"] = gen.loc[
        gen[gen["index"].isin(map_buses.keys())].index, "index"
    ].map(map_buses)
    gen.loc[:, "bus"] = (
        get_foreign_gas_bus_id().loc[gen.loc[:, "index"]].values
    )

    # Add missing columns
    c = {"scn_name": "eGon2035", "carrier": "CH4"}
    gen = gen.assign(**c)

    new_id = db.next_etrago_id("generator")
    gen["generator_id"] = range(new_id, new_id + len(gen))
    gen["p_nom"] = gen["cap_2035"]
    gen["marginal_cost"] = (
        gen["ratioConv_2035"] * scn_params["marginal_cost"]["CH4"]
        + (1 - gen["ratioConv_2035"]) * scn_params["marginal_cost"]["biogas"]
    )

    # Remove useless columns
    gen = gen.drop(columns=["index", "ratioConv_2035", "cap_2035"])

    # Insert data to db
    gen.to_sql(
        targets["generators"]["table"],
        db.engine(),
        schema=targets["generators"]["schema"],
        index=False,
        if_exists="append",
    )

def calc_global_ch4_demand(Norway_global_demand_1y):
    """Calculates global gas demands from TYNDP data

    Returns
    -------
    pandas.DataFrame
        Global (yearly) CH4 final demand per foreign node

    """

    sources = config.datasets()["gas_neighbours"]["sources"]

    file = zipfile.ZipFile(f"tyndp/{sources['tyndp_capacities']}")
    df = pd.read_excel(
        file.open("TYNDP-2020-Scenario-Datafile.xlsx").read(),
        sheet_name="Gas Data",
    )

    df = (
        df.query(
            'Scenario == "Distributed Energy" & '
            'Case == "Average" &'  # Case: 2 Week/Average/DF/Peak
            'Category == "Demand"'
        )
        .drop(
            columns=[
                "Generator_ID",
                "Climate Year",
                "Simulation_ID",
                "Node 1",
                "Path",
                "Direct/Indirect",
                "Sector",
                "Note",
                "Category",
                "Case",
                "Scenario",
            ]
        )
        .set_index("Node/Line")
    )

    df_2030 = (
        df[(df["Parameter"] == "Final demand") & (df["Year"] == 2030)]
        .rename(columns={"Value": "Value_2030"})
        .drop(columns=["Parameter", "Year"])
    )

    df_2040 = (
        df[(df["Parameter"] == "Final demand") & (df["Year"] == 2040)]
        .rename(columns={"Value": "Value_2040"})
        .drop(columns=["Parameter", "Year"])
    )

    # Conversion GWh/d to MWh/h
    conversion_factor = 1000 / 24

    df_2035 = pd.concat([df_2040, df_2030], axis=1)
    df_2035["GlobD_2035"] = (
        (df_2035["Value_2030"] + df_2035["Value_2040"]) / 2
    ) * conversion_factor
    df_2035.loc["NOS0"] = [
        0,
        0,
        Norway_global_demand_1y / 8760,
    ]  # Manually add Norway demand
    grouped_demands = df_2035.drop(
        columns=["Value_2030", "Value_2040"]
    ).reset_index()

    # choose demands for considered countries
    return grouped_demands[
        grouped_demands["Node/Line"].str[:2].isin(countries)
    ]

def import_ch4_demandTS():
    """Import from the PyPSA-eur-sec run the time series of
    residential rural heat per neighbor country.
    This time series is used to calculate:
    - the global (yearly) heat demand of Norway (that will be supplied by CH4)
    - the normalized CH4 hourly resolved demand profile

    Parameters
    ----------
    None.

    Returns
    -------
    Norway_global_demand: Float
        Yearly heat demand of Norway in MWh
    neighbor_loads_t: pandas.DataFrame
        Normalized CH4 hourly resolved demand profiles per neighbor country

    """

    cwd = Path(".")
    target_file = (
        cwd
        / "data_bundle_egon_data"
        / "pypsa_eur_sec"
        / "2022-07-26-egondata-integration"
        / "postnetworks"
        / "elec_s_37_lv2.0__Co2L0-1H-T-H-B-I-dist1_2050.nc"
    )

    network = pypsa.Network(str(target_file))

    # Set country tag for all buses
    network.buses.country = network.buses.index.str[:2]
    neighbors = network.buses[network.buses.country != "DE"]
    neighbors = neighbors[
        (neighbors["country"].isin(countries))
        & (neighbors["carrier"] == "residential rural heat")
    ].drop_duplicates(subset="country")

    neighbor_loads = network.loads[network.loads.bus.isin(neighbors.index)]
    neighbor_loads_t_index = neighbor_loads.index[
        neighbor_loads.index.isin(network.loads_t.p_set.columns)
    ]
    neighbor_loads_t = network.loads_t["p_set"][neighbor_loads_t_index]
    Norway_global_demand = neighbor_loads_t[
        "NO3 0 residential rural heat"
    ].sum()

    for i in neighbor_loads_t.columns:
        neighbor_loads_t[i] = neighbor_loads_t[i] / neighbor_loads_t[i].sum()

    return Norway_global_demand, neighbor_loads_t

def import_power_to_h2_demandTS():
    """Import from the PyPSA-eur-sec run the time series of
    industry heat demand per neighbor country and normalize it
    in order to model the power-to-H2 hourly resolved demand profile.

    Parameters
    ----------
    None.

    Returns
    -------
    neighbor_loads_t: pandas.DataFrame
        Normalized hourly resolved power-to-H2 demand profiles per neighbor country

    """

    cwd = Path(".")
    target_file = (
        cwd
        / "data_bundle_egon_data"
        / "pypsa_eur_sec"
        / "2022-07-26-egondata-integration"
        / "postnetworks"
        / "elec_s_37_lv2.0__Co2L0-1H-T-H-B-I-dist1_2050.nc"
    )

    network = pypsa.Network(str(target_file))

    # Set country tag for all buses
    network.buses.country = network.buses.index.str[:2]
    neighbors = network.buses[network.buses.country != "DE"]
    neighbors = neighbors[
        (neighbors["country"].isin(countries))
        & (
            neighbors["carrier"] == "residential rural heat"
        )  # no industry profile available for now, so another time series is used
    ]  # .drop_duplicates(subset="country")

    neighbor_loads = network.loads[network.loads.bus.isin(neighbors.index)]
    neighbor_loads_t_index = neighbor_loads.index[
        neighbor_loads.index.isin(network.loads_t.p_set.columns)
    ]
    neighbor_loads_t = network.loads_t["p_set"][neighbor_loads_t_index]

    for i in neighbor_loads_t.columns:
        neighbor_loads_t[i] = neighbor_loads_t[i] / neighbor_loads_t[i].sum()

    return neighbor_loads_t

def insert_ch4_demand(global_demand, normalized_ch4_demandTS):
    """Insert gas final demands for foreign countries

    Parameters
    ----------
    global_demand : pandas.DataFrame
        Global gas demand per foreign node in 1 year
    normalized_ch4_demandTS : pandas.DataFrame
        Normalized time series of the demand per foreign country

    Returns
    -------
    None.

    """
    sources = config.datasets()["gas_neighbours"]["sources"]
    targets = config.datasets()["gas_neighbours"]["targets"]
    map_buses = get_map_buses()

    # Delete existing data

    db.execute_sql(
        f"""
        DELETE FROM
        {targets['load_timeseries']['schema']}.{targets['load_timeseries']['table']}
        WHERE "load_id" IN (
            SELECT load_id FROM
            {targets['loads']['schema']}.{targets['loads']['table']}
            WHERE bus IN (
                SELECT bus_id FROM
                {sources['buses']['schema']}.{sources['buses']['table']}
                WHERE country != 'DE'
                AND scn_name = 'eGon2035')
            AND scn_name = 'eGon2035'
            AND carrier = 'CH4'
        );
        """
    )

    db.execute_sql(
        f"""
        DELETE FROM
        {targets['loads']['schema']}.{targets['loads']['table']}
        WHERE bus IN (
            SELECT bus_id FROM
            {sources['buses']['schema']}.{sources['buses']['table']}
            WHERE country != 'DE'
            AND scn_name = 'eGon2035')
        AND scn_name = 'eGon2035'
        AND carrier = 'CH4'
        """
    )

    # Set bus_id
    global_demand.loc[
        global_demand[global_demand["Node/Line"].isin(map_buses.keys())].index,
        "Node/Line",
    ] = global_demand.loc[
        global_demand[global_demand["Node/Line"].isin(map_buses.keys())].index,
        "Node/Line",
    ].map(
        map_buses
    )
    global_demand.loc[:, "bus"] = (
        get_foreign_gas_bus_id().loc[global_demand.loc[:, "Node/Line"]].values
    )

    # Add missing columns
    c = {"scn_name": "eGon2035", "carrier": "CH4"}
    global_demand = global_demand.assign(**c)

    new_id = db.next_etrago_id("load")
    global_demand["load_id"] = range(new_id, new_id + len(global_demand))

    ch4_demand_TS = global_demand.copy()
    # Remove useless columns
    global_demand = global_demand.drop(columns=["Node/Line", "GlobD_2035"])

    # Insert data to db
    global_demand.to_sql(
        targets["loads"]["table"],
        db.engine(),
        schema=targets["loads"]["schema"],
        index=False,
        if_exists="append",
    )

    # Insert time series
    ch4_demand_TS["Node/Line"] = ch4_demand_TS["Node/Line"].replace(
        ["UK00"], "GB"
    )

    p_set = []
    for index, row in ch4_demand_TS.iterrows():
        normalized_TS_df = normalized_ch4_demandTS.loc[
            :,
            normalized_ch4_demandTS.columns.str.contains(row["Node/Line"][:2]),
        ]
        p_set.append(
            (
                normalized_TS_df[normalized_TS_df.columns[0]]
                * row["GlobD_2035"]
            ).tolist()
        )

    ch4_demand_TS["p_set"] = p_set
    ch4_demand_TS["temp_id"] = 1
    ch4_demand_TS = ch4_demand_TS.drop(
        columns=["Node/Line", "GlobD_2035", "bus", "carrier"]
    )

    # Insert data to DB
    ch4_demand_TS.to_sql(
        targets["load_timeseries"]["table"],
        db.engine(),
        schema=targets["load_timeseries"]["schema"],
        index=False,
        if_exists="append",
    )

def calc_ch4_storage_capacities():
    """Calculate CH4 storage capacities per neighbouring country from the
    IGGIELGN storage data."""
    target_file = (
        Path(".") / "datasets" / "gas_data" / "data" / "IGGIELGN_Storages.csv"
    )

    ch4_storage_capacities = pd.read_csv(
        target_file,
        delimiter=";",
        decimal=".",
        usecols=["country_code", "param"],
    )

    ch4_storage_capacities = ch4_storage_capacities[
        ch4_storage_capacities["country_code"].isin(countries)
    ]

    map_countries_scigrid = {
        "AT": "AT00",
        "BE": "BE00",
        "CZ": "CZ00",
        "DK": "DKE1",
        "EE": "EE00",
        "EL": "GR00",
        "ES": "ES00",
        "FI": "FI00",
        "FR": "FR00",
        "GB": "UK00",
        "IT": "ITCN",
        "LT": "LT00",
        "LV": "LV00",
        "MT": "MT00",
        "NL": "NL00",
        "PL": "PL00",
        "PT": "PT00",
        "SE": "SE01",
    }

    # Define new columns
    max_workingGas_M_m3 = []
    end_year = []

    for index, row in ch4_storage_capacities.iterrows():
        param = ast.literal_eval(row["param"])
        end_year.append(param["end_year"])
        max_workingGas_M_m3.append(param["max_workingGas_M_m3"])

    end_year = [float("inf") if x is None else x for x in end_year]
    ch4_storage_capacities = ch4_storage_capacities.assign(end_year=end_year)
    ch4_storage_capacities = ch4_storage_capacities[
        ch4_storage_capacities["end_year"] >= 2035
    ]

    # Calculate e_nom
    # 39 MJ/m3 * 1e6 m3 / (3600 MJ/MWh) is roughly 10833 MWh per M_m3
    conv_factor = (
        10830  # M_m3 to MWh - gross calorific value = 39 MJ/m3 (eurogas.org)
    )
    ch4_storage_capacities["e_nom"] = [
        conv_factor * i for i in max_workingGas_M_m3
    ]

    ch4_storage_capacities.drop(
        ["param", "end_year"],
        axis=1,
        inplace=True,
    )

    ch4_storage_capacities["Country"] = ch4_storage_capacities[
        "country_code"
    ].map(map_countries_scigrid)
    ch4_storage_capacities = ch4_storage_capacities.groupby(
        ["country_code"]
    ).agg(
        {
            "e_nom": "sum",
            "Country": "first",
        },
    )

    ch4_storage_capacities = ch4_storage_capacities.drop(["RU"])
    ch4_storage_capacities.loc[:, "bus"] = (
        get_foreign_gas_bus_id()
        .loc[ch4_storage_capacities.loc[:, "Country"]]
        .values
    )

    return ch4_storage_capacities


def insert_storage(ch4_storage_capacities):
    """Insert CH4 stores for foreign countries into the database."""
    sources = config.datasets()["gas_neighbours"]["sources"]
    targets = config.datasets()["gas_neighbours"]["targets"]

    # Clean table
    db.execute_sql(
        f"""
        DELETE FROM {targets['stores']['schema']}.{targets['stores']['table']}
        WHERE "carrier" = 'CH4'
        AND scn_name = 'eGon2035'
        AND bus IN (
            SELECT bus_id FROM {sources['buses']['schema']}.{sources['buses']['table']}
            WHERE scn_name = 'eGon2035'
            AND country != 'DE'
            );
        """
    )
    # Add missing columns
    c = {"scn_name": "eGon2035", "carrier": "CH4"}
    ch4_storage_capacities = ch4_storage_capacities.assign(**c)

    new_id = db.next_etrago_id("store")
    ch4_storage_capacities["store_id"] = range(
        new_id, new_id + len(ch4_storage_capacities)
    )

    ch4_storage_capacities.drop(
        ["Country"],
        axis=1,
        inplace=True,
    )

    ch4_storage_capacities = ch4_storage_capacities.reset_index(drop=True)
    # Insert data to db
    ch4_storage_capacities.to_sql(
        targets["stores"]["table"],
        db.engine(),
        schema=targets["stores"]["schema"],
        index=False,
        if_exists="append",
    )


def calc_global_power_to_h2_demand():
    """Calculates global power demand linked to H2 production from TYNDP data

    Returns
    -------
    pandas.DataFrame
        Global power-to-H2 demand per foreign node

    """
    sources = config.datasets()["gas_neighbours"]["sources"]

    file = zipfile.ZipFile(f"tyndp/{sources['tyndp_capacities']}")
    df = pd.read_excel(
        file.open("TYNDP-2020-Scenario-Datafile.xlsx").read(),
        sheet_name="Gas Data",
    )

    df = (
        df.query(
            'Scenario == "Distributed Energy" & '
            'Case == "Average" &'  # Case: 2 Week/Average/DF/Peak
            'Parameter == "P2H2"'
        )
        .drop(
            columns=[
                "Generator_ID",
                "Climate Year",
                "Simulation_ID",
                "Node 1",
                "Path",
                "Direct/Indirect",
                "Sector",
                "Note",
                "Category",
                "Case",
                "Scenario",
                "Parameter",
            ]
        )
        .set_index("Node/Line")
    )

    df_2030 = (
        df[df["Year"] == 2030]
        .rename(columns={"Value": "Value_2030"})
        .drop(columns=["Year"])
    )
    df_2040 = (
        df[df["Year"] == 2040]
        .rename(columns={"Value": "Value_2040"})
        .drop(columns=["Year"])
    )

    # Conversion GWh/d to MWh/h
    conversion_factor = 1000 / 24

    df_2035 = pd.concat([df_2040, df_2030], axis=1)
    df_2035["GlobD_2035"] = (
        (df_2035["Value_2030"] + df_2035["Value_2040"]) / 2
    ) * conversion_factor

    global_power_to_h2_demand = df_2035.drop(
        columns=["Value_2030", "Value_2040"]
    )

    # choose demands for considered countries
    global_power_to_h2_demand = global_power_to_h2_demand[
        (global_power_to_h2_demand.index.str[:2].isin(countries))
        & (global_power_to_h2_demand["GlobD_2035"] != 0)
    ]

    # Split the demands for DK and UK in two
    global_power_to_h2_demand.loc["DKW1"] = (
        global_power_to_h2_demand.loc["DKE1"] / 2
    )
    global_power_to_h2_demand.loc["DKE1"] = (
        global_power_to_h2_demand.loc["DKE1"] / 2
    )
    global_power_to_h2_demand.loc["UKNI"] = (
        global_power_to_h2_demand.loc["UK00"] / 2
    )
    global_power_to_h2_demand.loc["UK00"] = (
        global_power_to_h2_demand.loc["UK00"] / 2
    )
    global_power_to_h2_demand = global_power_to_h2_demand.reset_index()

    return global_power_to_h2_demand

def insert_power_to_h2_demand(
    global_power_to_h2_demand, normalized_power_to_h2_demandTS
):
    """Insert power-to-H2 demands for foreign countries into the database."""
    sources = config.datasets()["gas_neighbours"]["sources"]
    targets = config.datasets()["gas_neighbours"]["targets"]
    map_buses = get_map_buses()

    # Delete existing data

    db.execute_sql(
        f"""
        DELETE FROM
        {targets['load_timeseries']['schema']}.{targets['load_timeseries']['table']}
        WHERE "load_id" IN (
            SELECT load_id FROM
            {targets['loads']['schema']}.{targets['loads']['table']}
            WHERE bus IN (
                SELECT bus_id FROM
                {sources['buses']['schema']}.{sources['buses']['table']}
                WHERE country != 'DE'
                AND scn_name = 'eGon2035')
            AND scn_name = 'eGon2035'
            AND carrier = 'H2 for industry'
        );
        """
    )

    db.execute_sql(
        f"""
        DELETE FROM
        {targets['loads']['schema']}.{targets['loads']['table']}
        WHERE bus IN (
            SELECT bus_id FROM
            {sources['buses']['schema']}.{sources['buses']['table']}
            WHERE country != 'DE'
            AND scn_name = 'eGon2035')
        AND scn_name = 'eGon2035'
        AND carrier = 'H2 for industry'
        """
    )

    # Set bus_id
    global_power_to_h2_demand.loc[
        global_power_to_h2_demand[
            global_power_to_h2_demand["Node/Line"].isin(map_buses.keys())
        ].index,
        "Node/Line",
    ] = global_power_to_h2_demand.loc[
        global_power_to_h2_demand[
            global_power_to_h2_demand["Node/Line"].isin(map_buses.keys())
        ].index,
        "Node/Line",
    ].map(
        map_buses
    )
    global_power_to_h2_demand.loc[:, "bus"] = (
        get_foreign_bus_id()
        .loc[global_power_to_h2_demand.loc[:, "Node/Line"]]
        .values
    )

    # Add missing columns
    c = {"scn_name": "eGon2035", "carrier": "H2 for industry"}
    global_power_to_h2_demand = global_power_to_h2_demand.assign(**c)

    new_id = db.next_etrago_id("load")
    global_power_to_h2_demand["load_id"] = range(
        new_id, new_id + len(global_power_to_h2_demand)
    )

    power_to_h2_demand_TS = global_power_to_h2_demand.copy()
    # Remove useless columns
    global_power_to_h2_demand = global_power_to_h2_demand.drop(
        columns=["Node/Line", "GlobD_2035"]
    )

    # Insert data to db
    global_power_to_h2_demand.to_sql(
        targets["loads"]["table"],
        db.engine(),
        schema=targets["loads"]["schema"],
        index=False,
        if_exists="append",
    )

    # Insert time series
    normalized_power_to_h2_demandTS = normalized_power_to_h2_demandTS.drop(
        columns=[
            "NO3 0 residential rural heat",
            "CH0 0 residential rural heat",
            "LU0 0 residential rural heat",
        ]
    )

    power_to_h2_demand_TS = power_to_h2_demand_TS.replace(
        {
            "Node/Line": {
                "UK00": "GB4",
                "UKNI": "GB5",
                "DKW1": "DK3",
                "DKE1": "DK0",
                "SE02": "SE3",
            }
        }
    )

    p_set = []
    for index, row in power_to_h2_demand_TS.iterrows():
        normalized_TS_df = normalized_power_to_h2_demandTS.loc[
            :,
            normalized_power_to_h2_demandTS.columns.str.contains(
                row["Node/Line"][:3]
            ),
        ]
        p_set.append(
            (
                normalized_TS_df[normalized_TS_df.columns[0]]
                * row["GlobD_2035"]
            ).tolist()
        )

    power_to_h2_demand_TS["p_set"] = p_set
    power_to_h2_demand_TS["temp_id"] = 1
    power_to_h2_demand_TS = power_to_h2_demand_TS.drop(
        columns=["Node/Line", "GlobD_2035", "bus", "carrier"]
    )

    # Insert data to db
    power_to_h2_demand_TS.to_sql(
        targets["load_timeseries"]["table"],
        db.engine(),
        schema=targets["load_timeseries"]["schema"],
        index=False,
        if_exists="append",
    )

def calculate_ch4_grid_capacities():
    """Calculates CH4 grid capacities for foreign countries based on TYNDP data

    Parameters
    ----------
    None.

    Returns
    -------
    Neighbouring_pipe_capacities_list : pandas.DataFrame

    """
    sources = config.datasets()["gas_neighbours"]["sources"]

    # Download file
    basename = "ENTSOG_TYNDP_2020_Annex_C2_Capacities_per_country.xlsx"
    url = "https://www.entsog.eu/sites/default/files/2021-07/" + basename
    target_file = Path(".") / "datasets" / "gas_data" / basename

    urlretrieve(url, target_file)
    map_pipelines = {
        "NORDSTREAM": "RU00",
        "NORDSTREAM 2": "RU00",
        "OPAL": "DE",
        "YAMAL (BY)": "RU00",
        "Denmark": "DKE1",
        "Belgium": "BE00",
        "Netherlands": "NL00",
        "Norway": "NOM1",
        "Switzerland": "CH00",
        "Poland": "PL00",
        "United Kingdom": "UK00",
        "Germany": "DE",
        "Austria": "AT00",
        "France": "FR00",
        "Czechia": "CZ00",
        "Russia": "RU00",
        "Luxemburg": "LUB1",
    }

    grid_countries = [
        "NORDSTREAM",
        "NORDSTREAM 2",
        "OPAL",
        "YAMAL (BY)",
        "Denmark",
        "Belgium",
        "Netherlands",
        "Norway",
        "Switzerland",
        "Poland",
        "United Kingdom",
        "Germany",
        "Austria",
        "France",
        "Czechia",
        "Russia",
        "Luxemburg",
    ]

    # Read in the data from the Excel file
    pipe_capacities_list = pd.read_excel(
        target_file,
        sheet_name="Transmission Peak Capacity",
        skiprows=range(4),
    )
    pipe_capacities_list = pipe_capacities_list[
        ["To Country", "Unnamed: 3", "From Country", 2035]
    ].rename(
        columns={
            "Unnamed: 3": "Scenario",
            "To Country": "To_Country",
            "From Country": "From_Country",
        }
    )
    pipe_capacities_list["To_Country"] = pd.Series(
        pipe_capacities_list["To_Country"]
    ).fillna(method="ffill")
    pipe_capacities_list["From_Country"] = pd.Series(
        pipe_capacities_list["From_Country"]
    ).fillna(method="ffill")
    pipe_capacities_list = pipe_capacities_list[
        pipe_capacities_list["Scenario"] == "Advanced"
    ].drop(columns={"Scenario"})
    pipe_capacities_list = pipe_capacities_list[
        (
            (pipe_capacities_list["To_Country"].isin(grid_countries))
            & (pipe_capacities_list["From_Country"].isin(grid_countries))
        )
        & (pipe_capacities_list[2035] != 0)
    ]
    pipe_capacities_list["To_Country"] = pipe_capacities_list[
        "To_Country"
    ].map(map_pipelines)
    pipe_capacities_list["From_Country"] = pipe_capacities_list[
        "From_Country"
    ].map(map_pipelines)
    pipe_capacities_list["countrycombination"] = pipe_capacities_list[
        ["To_Country", "From_Country"]
    ].apply(
        lambda x: tuple(sorted([str(x.To_Country), str(x.From_Country)])),
        axis=1,
    )

    pipeline_strategies = {
        "To_Country": "first",
        "From_Country": "first",
        2035: sum,
    }

    pipe_capacities_list = pipe_capacities_list.groupby(
        ["countrycombination"]
    ).agg(pipeline_strategies)

    # Manually add the DK-SE and AT-CH pipes (SciGRID gas data)
    pipe_capacities_list.loc["(DKE1, SE02)"] = ["DKE1", "SE02", 651]
    pipe_capacities_list.loc["(AT00, CH00)"] = ["AT00", "CH00", 651]

    # Conversion GWh/d to MWh/h
    pipe_capacities_list["p_nom"] = pipe_capacities_list[2035] * (1000 / 24)

    # Border crossing CH4 pipelines between foreign countries

    Neighbouring_pipe_capacities_list = pipe_capacities_list[
        (pipe_capacities_list["To_Country"] != "DE")
        & (pipe_capacities_list["From_Country"] != "DE")
    ].reset_index()

    Neighbouring_pipe_capacities_list.loc[:, "bus0"] = (
        get_foreign_gas_bus_id()
        .loc[Neighbouring_pipe_capacities_list.loc[:, "To_Country"]]
        .values
    )
    Neighbouring_pipe_capacities_list.loc[:, "bus1"] = (
        get_foreign_gas_bus_id()
        .loc[Neighbouring_pipe_capacities_list.loc[:, "From_Country"]]
        .values
    )

    # Adjust columns
    Neighbouring_pipe_capacities_list = Neighbouring_pipe_capacities_list.drop(
        columns=[
            "To_Country",
            "From_Country",
            "countrycombination",
            2035,
        ]
    )

    new_id = db.next_etrago_id("link")
    Neighbouring_pipe_capacities_list["link_id"] = range(
        new_id, new_id + len(Neighbouring_pipe_capacities_list)
    )

    # Border crossing CH4 pipelines between DE and neighbouring countries
    DE_pipe_capacities_list = pipe_capacities_list[
        (pipe_capacities_list["To_Country"] == "DE")
        | (pipe_capacities_list["From_Country"] == "DE")
    ].reset_index()

    dict_cross_pipes_DE = {
        ("AT00", "DE"): "AT",
        ("BE00", "DE"): "BE",
        ("CH00", "DE"): "CH",
        ("CZ00", "DE"): "CZ",
        ("DE", "DKE1"): "DK",
        ("DE", "FR00"): "FR",
        ("DE", "LUB1"): "LU",
        ("DE", "NL00"): "NL",
        ("DE", "NOM1"): "NO",
        ("DE", "PL00"): "PL",
        ("DE", "RU00"): "RU",
    }

    DE_pipe_capacities_list["country_code"] = DE_pipe_capacities_list[
        "countrycombination"
    ].map(dict_cross_pipes_DE)
    DE_pipe_capacities_list = DE_pipe_capacities_list.set_index("country_code")

    for country_code in [e for e in countries if e not in ("GB", "SE", "UK")]:

        # Select cross-border links
        cap_DE = db.select_dataframe(
            f"""SELECT link_id, bus0, bus1
                FROM {sources['links']['schema']}.{sources['links']['table']}
                    WHERE scn_name = 'eGon2035'
                    AND carrier = 'CH4'
                    AND (("bus0" IN (
                        SELECT bus_id FROM {sources['buses']['schema']}.{sources['buses']['table']}
                            WHERE country = 'DE'
                            AND carrier = 'CH4'
                            AND scn_name = 'eGon2035')
                        AND "bus1" IN (SELECT bus_id FROM {sources['buses']['schema']}.{sources['buses']['table']}
                            WHERE country = '{country_code}'
                            AND carrier = 'CH4'
                            AND scn_name = 'eGon2035')
                    )
                    OR ("bus0" IN (
                        SELECT bus_id FROM {sources['buses']['schema']}.{sources['buses']['table']}
                            WHERE country = '{country_code}'
                            AND carrier = 'CH4'
                            AND scn_name = 'eGon2035')
                        AND "bus1" IN (SELECT bus_id FROM {sources['buses']['schema']}.{sources['buses']['table']}
                            WHERE country = 'DE'
                            AND carrier = 'CH4'
                            AND scn_name = 'eGon2035'))
                    )
            ;"""
        )

        cap_DE["p_nom"] = DE_pipe_capacities_list.at[
            country_code, "p_nom"
        ] / len(cap_DE.index)
        Neighbouring_pipe_capacities_list = (
            Neighbouring_pipe_capacities_list.append(cap_DE)
        )

    # Add topo, geom and length
    bus_geom = db.select_geodataframe(
        """SELECT bus_id, geom
        FROM grid.egon_etrago_bus
        WHERE scn_name = 'eGon2035'
        AND carrier = 'CH4'
        """,
        epsg=4326,
    ).set_index("bus_id")

    coordinates_bus0 = []
    coordinates_bus1 = []

    for index, row in Neighbouring_pipe_capacities_list.iterrows():
        coordinates_bus0.append(bus_geom["geom"].loc[int(row["bus0"])])
        coordinates_bus1.append(bus_geom["geom"].loc[int(row["bus1"])])

    Neighbouring_pipe_capacities_list["coordinates_bus0"] = coordinates_bus0
    Neighbouring_pipe_capacities_list["coordinates_bus1"] = coordinates_bus1

    Neighbouring_pipe_capacities_list[
        "topo"
    ] = Neighbouring_pipe_capacities_list.apply(
        lambda row: LineString(
            [row["coordinates_bus0"], row["coordinates_bus1"]]
        ),
        axis=1,
    )
    Neighbouring_pipe_capacities_list[
        "geom"
    ] = Neighbouring_pipe_capacities_list.apply(
        lambda row: MultiLineString([row["topo"]]), axis=1
    )
    Neighbouring_pipe_capacities_list[
        "length"
    ] = Neighbouring_pipe_capacities_list.apply(
        lambda row: row["topo"].length, axis=1
    )

    # Remove useless columns
    Neighbouring_pipe_capacities_list = Neighbouring_pipe_capacities_list.drop(
        columns=[
            "coordinates_bus0",
            "coordinates_bus1",
        ]
    )

    # Add missing columns
    c = {"scn_name": "eGon2035", "carrier": "CH4"}
    Neighbouring_pipe_capacities_list = (
        Neighbouring_pipe_capacities_list.assign(**c)
    )

    Neighbouring_pipe_capacities_list = (
        Neighbouring_pipe_capacities_list.set_geometry("geom", crs=4326)
    )

    return Neighbouring_pipe_capacities_list

def tyndp_gas_generation():
    """Insert data from TYNDP 2020 according to NEP 2021,
    scenario 'Distributed Energy', linearly interpolated between 2030 and 2040

    Returns
    -------
    None.
    """
    capacities = calc_capacities()
    insert_generators(capacities)

    ch4_storage_capacities = calc_ch4_storage_capacities()
    insert_storage(ch4_storage_capacities)


def tyndp_gas_demand():
    """Insert data from TYNDP 2020 according to NEP 2021,
    scenario 'Distributed Energy', linearly interpolated between 2030 and 2040

    Returns
    -------
    None.
    """
    Norway_global_demand_1y, normalized_ch4_demandTS = import_ch4_demandTS()
    global_ch4_demand = calc_global_ch4_demand(Norway_global_demand_1y)
    insert_ch4_demand(global_ch4_demand, normalized_ch4_demandTS)

    normalized_power_to_h2_demandTS = import_power_to_h2_demandTS()
    global_power_to_h2_demand = calc_global_power_to_h2_demand()
    insert_power_to_h2_demand(
        global_power_to_h2_demand, normalized_power_to_h2_demandTS
    )


def grid():
    """Insert data from TYNDP 2020 according to NEP 2021,
    scenario 'Distributed Energy', linearly interpolated between 2030 and 2040

    Returns
    -------
    None.
    """
    Neighbouring_pipe_capacities_list = calculate_ch4_grid_capacities()
    insert_gas_grid_capacities(
        Neighbouring_pipe_capacities_list, scn_name="eGon2035"
    )
1351