Passed
Pull Request — dev (#917)
created by unknown, 01:35

insert_generators()   Rating: A

Complexity
    Conditions: 1

Size
    Total Lines: 63
    Code Lines: 27

Duplication
    Lines: 0
    Ratio: 0 %

Importance
    Changes: 0

Metric    Value
eloc      27
dl        0
loc       63
rs        9.232
c         0
b         0
f         0
cc        1
nop       1

How to fix: Long Method

Small methods make your code easier to understand, especially when combined with a good name. Moreover, if a method is small, finding a good name for it is usually much easier.

For example, if you find yourself adding comments to a method's body, that is usually a sign that the commented part should be extracted into a new method, with the comment serving as a starting point for the new method's name (see the sketch after the list below).

Commonly applied refactorings include:

- Extract Method
- Replace Temp with Query
- Introduce Parameter Object
- Decompose Conditional
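As an illustration, here is a minimal sketch of the Extract Method refactoring in Python. The names prepare_example_demand and normalize_columns are hypothetical and are not part of the module reviewed below; the sketch only shows how a commented block can become a small, well-named helper.

import pandas as pd

# Before: a longer function whose commented block hides a separate concept.
def prepare_example_demand_before(df: pd.DataFrame) -> pd.DataFrame:
    # normalize each column so that it sums to one
    for col in df.columns:
        df[col] = df[col] / df[col].sum()
    return df

# After: the commented block is extracted into a helper whose name is taken
# from the comment, leaving a short, readable calling method.
def normalize_columns(df: pd.DataFrame) -> pd.DataFrame:
    """Normalize each column so that it sums to one."""
    for col in df.columns:
        df[col] = df[col] / df[col].sum()
    return df

def prepare_example_demand(df: pd.DataFrame) -> pd.DataFrame:
    return normalize_columns(df)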

"""Central module containing code dealing with gas neighbours for eGon2035
"""

from pathlib import Path
from urllib.request import urlretrieve
import ast
import zipfile

from geoalchemy2.types import Geometry
from shapely.geometry import LineString, MultiLineString
import geopandas as gpd
import numpy as np
import pandas as pd
import pypsa

from egon.data import config, db
from egon.data.datasets import Dataset
from egon.data.datasets.electrical_neighbours import (
    get_foreign_bus_id,
    get_map_buses,
)
from egon.data.datasets.gas_neighbours.gas_abroad import (
    insert_gas_grid_capacities,
)
from egon.data.datasets.scenario_parameters import get_sector_parameters

countries = [
    "AT",
    "BE",
    "CH",
    "CZ",
    "DK",
    "FR",
    "GB",
    "LU",
    "NL",
    "NO",
    "PL",
    "RU",
    "SE",
    "UK",
]


def get_foreign_gas_bus_id(carrier="CH4"):
    """Calculate the etrago bus id based on the geometry

    Map node_ids from TYNDP to etrago's bus_id

    Parameters
    ----------
    carrier : str
        Name of the carrier

    Returns
    -------
    pandas.Series
        Mapping of TYNDP node_ids to etrago bus_ids

    """
    sources = config.datasets()["gas_neighbours"]["sources"]
    scn_name = "eGon2035"

    bus_id = db.select_geodataframe(
        f"""
        SELECT bus_id, ST_Buffer(geom, 1) as geom, country
        FROM grid.egon_etrago_bus
        WHERE scn_name = '{scn_name}'
        AND carrier = '{carrier}'
        AND country != 'DE'
        """,
        epsg=3035,
    )

    # Open the TYNDP capacity file
    file = zipfile.ZipFile(f"tyndp/{sources['tyndp_capacities']}")

    # Select buses in neighbouring countries as geodataframe
    buses = pd.read_excel(
        file.open("TYNDP-2020-Scenario-Datafile.xlsx").read(),
        sheet_name="Nodes - Dict",
    ).query("longitude==longitude")
    buses = gpd.GeoDataFrame(
        buses,
        crs=4326,
        geometry=gpd.points_from_xy(buses.longitude, buses.latitude),
    ).to_crs(3035)

    buses["bus_id"] = 0

    # Select bus_id from etrago with shortest distance to TYNDP node
    for i, row in buses.iterrows():
        distance = bus_id.set_index("bus_id").geom.distance(row.geometry)
        buses.loc[i, "bus_id"] = distance[
            distance == distance.min()
        ].index.values[0]

    return buses.set_index("node_id").bus_id


def read_LNG_capacities():
    """Read LNG terminal capacities from the SciGRID_gas dataset (IGGIELGN)
    and aggregate them per TYNDP country node (in GWh/d).
    """
    lng_file = "datasets/gas_data/data/IGGIELGN_LNGs.csv"
    IGGIELGN_LNGs = gpd.read_file(lng_file)

    map_countries_scigrid = {
        "BE": "BE00",
        "EE": "EE00",
        "EL": "GR00",
        "ES": "ES00",
        "FI": "FI00",
        "FR": "FR00",
        "GB": "UK00",
        "IT": "ITCN",
        "LT": "LT00",
        "LV": "LV00",
        "MT": "MT00",
        "NL": "NL00",
        "PL": "PL00",
        "PT": "PT00",
        "SE": "SE01",
    }

    conversion_factor = 437.5  # MCM/day to MWh/h
    c2 = 24 / 1000  # MWh/h to GWh/d
    p_nom = []

    for index, row in IGGIELGN_LNGs.iterrows():
        param = ast.literal_eval(row["param"])
        p_nom.append(
            param["max_cap_store2pipe_M_m3_per_d"] * conversion_factor * c2
        )

    IGGIELGN_LNGs["LNG max_cap_store2pipe_M_m3_per_d (in GWh/d)"] = p_nom

    IGGIELGN_LNGs.drop(
        [
            "uncertainty",
            "method",
            "param",
            "comment",
            "tags",
            "source_id",
            "lat",
            "long",
            "geometry",
            "id",
            "name",
            "node_id",
        ],
        axis=1,
        inplace=True,
    )

    IGGIELGN_LNGs["Country"] = IGGIELGN_LNGs["country_code"].map(
        map_countries_scigrid
    )
    IGGIELGN_LNGs = (
        IGGIELGN_LNGs.groupby(["Country"])[
            "LNG max_cap_store2pipe_M_m3_per_d (in GWh/d)"
        ]
        .sum()
        .sort_index()
    )

    return IGGIELGN_LNGs


def calc_capacities():
    """Calculates gas production capacities from TYNDP data

    Returns
    -------
    pandas.DataFrame
        Gas production capacities per foreign node and energy carrier

    """

    sources = config.datasets()["gas_neighbours"]["sources"]

    # insert installed capacities
    file = zipfile.ZipFile(f"tyndp/{sources['tyndp_capacities']}")
    df = pd.read_excel(
        file.open("TYNDP-2020-Scenario-Datafile.xlsx").read(),
        sheet_name="Gas Data",
    )

    df = (
        df.query(
            'Scenario == "Distributed Energy" & '
            '(Case == "Peak" | Case == "Average") &'  # Case: 2 Week/Average/DF/Peak
            'Category == "Production"'
        )
        .drop(
            columns=[
                "Generator_ID",
                "Climate Year",
                "Simulation_ID",
                "Node 1",
                "Path",
                "Direct/Indirect",
                "Sector",
                "Note",
                "Category",
                "Scenario",
            ]
        )
        .set_index("Node/Line")
        .sort_index()
    )

    lng = read_LNG_capacities()
    df_2030 = calc_capacity_per_year(df, lng, 2030)
    df_2040 = calc_capacity_per_year(df, lng, 2040)

    # Conversion GWh/d to MWh/h
    conversion_factor = 1000 / 24

    df_2035 = pd.concat([df_2040, df_2030], axis=1)
    df_2035 = df_2035.drop(
        columns=[
            "Value_conv_2040",
            "Value_conv_2030",
            "Value_bio_2040",
            "Value_bio_2030",
        ]
    )
    df_2035["cap_2035"] = (df_2035["CH4_2030"] + df_2035["CH4_2040"]) / 2
    df_2035["e_nom_max"] = (
        ((df_2035["e_nom_max_2030"] + df_2035["e_nom_max_2040"]) / 2)
        * conversion_factor
        * 8760
    )
    df_2035["ratioConv_2035"] = (
        df_2035["ratioConv_2030"] + df_2035["ratioConv_2040"]
    ) / 2
    grouped_capacities = df_2035.drop(
        columns=[
            "ratioConv_2030",
            "ratioConv_2040",
            "CH4_2040",
            "CH4_2030",
            "e_nom_max_2030",
            "e_nom_max_2040",
        ]
    ).reset_index()

    grouped_capacities["cap_2035"] = (
        grouped_capacities["cap_2035"] * conversion_factor
    )

    # Add generator in Russia of infinite capacity
    grouped_capacities = grouped_capacities.append(
        {
            "cap_2035": 1e9,
            "e_nom_max": np.inf,
            "ratioConv_2035": 1,
            "index": "RU",
        },
        ignore_index=True,
    )

    # choose capacities for considered countries
    grouped_capacities = grouped_capacities[
        grouped_capacities["index"].str[:2].isin(countries)
    ]
    return grouped_capacities


def calc_capacity_per_year(df, lng, year):
    """Calculates gas production capacities from TYNDP data for a specified year

    Parameters
    ----------
    df : pandas.DataFrame
        DataFrame containing all TYNDP data.

    lng : geopandas.GeoDataFrame
        Georeferenced LNG terminal capacities.

    year : int
        Year to calculate gas production capacity for.

    Returns
    -------
    pandas.DataFrame
        Gas production capacities per foreign node and energy carrier
    """
    df_conv_peak = (
        df[
            (df["Parameter"] == "Conventional")
            & (df["Year"] == year)
            & (df["Case"] == "Peak")
        ]
        .rename(columns={"Value": f"Value_conv_{year}_peak"})
        .drop(columns=["Parameter", "Year", "Case"])
    )
    df_conv_average = (
        df[
            (df["Parameter"] == "Conventional")
            & (df["Year"] == year)
            & (df["Case"] == "Average")
        ]
        .rename(columns={"Value": f"Value_conv_{year}_average"})
        .drop(columns=["Parameter", "Year", "Case"])
    )
    df_bioch4 = (
        df[
            (df["Parameter"] == "Biomethane")
            & (df["Year"] == year)
            & (
                df["Case"] == "Peak"
            )  # Peak and Average have the same values for biogas production in 2030 and 2040
        ]
        .rename(columns={"Value": f"Value_bio_{year}"})
        .drop(columns=["Parameter", "Year", "Case"])
    )

    # Some values are duplicated (DE00 in 2030)
    df_conv_peak = df_conv_peak[~df_conv_peak.index.duplicated(keep="first")]
    df_conv_average = df_conv_average[
        ~df_conv_average.index.duplicated(keep="first")
    ]

    df_year = pd.concat(
        [df_conv_peak, df_conv_average, df_bioch4, lng], axis=1
    ).fillna(0)
    df_year = df_year[
        ~(
            (df_year[f"Value_conv_{year}_peak"] == 0)
            & (df_year[f"Value_bio_{year}"] == 0)
            & (df_year["LNG max_cap_store2pipe_M_m3_per_d (in GWh/d)"] == 0)
        )
    ]
    df_year[f"Value_conv_{year}"] = (
        df_year[f"Value_conv_{year}_peak"]
        + df_year["LNG max_cap_store2pipe_M_m3_per_d (in GWh/d)"]
    )
    df_year[f"CH4_{year}"] = (
        df_year[f"Value_conv_{year}"] + df_year[f"Value_bio_{year}"]
    )
    df_year[f"ratioConv_{year}"] = (
        df_year[f"Value_conv_{year}_peak"] / df_year[f"CH4_{year}"]
    )
    df_year[f"e_nom_max_{year}"] = (
        df_year[f"Value_conv_{year}_average"] + df_year[f"Value_bio_{year}"]
    )
    df_year = df_year.drop(
        columns=[
            "LNG max_cap_store2pipe_M_m3_per_d (in GWh/d)",
            f"Value_conv_{year}_average",
            f"Value_conv_{year}_peak",
        ]
    )

    return df_year


def insert_generators(gen):
    """Insert gas generators for foreign countries based on TYNDP-data

    Parameters
    ----------
    gen : pandas.DataFrame
        Gas production capacities per foreign node and energy carrier

    Returns
    -------
    None.

    """
    sources = config.datasets()["gas_neighbours"]["sources"]
    targets = config.datasets()["gas_neighbours"]["targets"]
    map_buses = get_map_buses()
    scn_params = get_sector_parameters("gas", "eGon2035")

    # Delete existing data
    db.execute_sql(
        f"""
        DELETE FROM
        {targets['generators']['schema']}.{targets['generators']['table']}
        WHERE bus IN (
            SELECT bus_id FROM
            {sources['buses']['schema']}.{sources['buses']['table']}
            WHERE country != 'DE'
            AND scn_name = 'eGon2035')
        AND scn_name = 'eGon2035'
        AND carrier = 'CH4';
        """
    )

    # Set bus_id
    gen.loc[gen[gen["index"].isin(map_buses.keys())].index, "index"] = gen.loc[
        gen[gen["index"].isin(map_buses.keys())].index, "index"
    ].map(map_buses)
    gen.loc[:, "bus"] = (
        get_foreign_gas_bus_id().loc[gen.loc[:, "index"]].values
    )

    # Add missing columns
    c = {"scn_name": "eGon2035", "carrier": "CH4"}
    gen = gen.assign(**c)

    new_id = db.next_etrago_id("generator")
    gen["generator_id"] = range(new_id, new_id + len(gen))
    gen["p_nom"] = gen["cap_2035"]
    gen["marginal_cost"] = (
        gen["ratioConv_2035"] * scn_params["marginal_cost"]["CH4"]
        + (1 - gen["ratioConv_2035"]) * scn_params["marginal_cost"]["biogas"]
    )

    # Remove useless columns
    gen = gen.drop(columns=["index", "ratioConv_2035", "cap_2035"])

    # Insert data to db
    gen.to_sql(
        targets["generators"]["table"],
        db.engine(),
        schema=targets["generators"]["schema"],
        index=False,
        if_exists="append",
    )


def calc_global_ch4_demand(Norway_global_demand_1y):
    """Calculates global gas demands from TYNDP data

    Returns
    -------
    pandas.DataFrame
        Global (yearly) CH4 final demand per foreign node

    """

    sources = config.datasets()["gas_neighbours"]["sources"]

    file = zipfile.ZipFile(f"tyndp/{sources['tyndp_capacities']}")
    df = pd.read_excel(
        file.open("TYNDP-2020-Scenario-Datafile.xlsx").read(),
        sheet_name="Gas Data",
    )

    df = (
        df.query(
            'Scenario == "Distributed Energy" & '
            'Case == "Average" &'  # Case: 2 Week/Average/DF/Peak
            'Category == "Demand"'
        )
        .drop(
            columns=[
                "Generator_ID",
                "Climate Year",
                "Simulation_ID",
                "Node 1",
                "Path",
                "Direct/Indirect",
                "Sector",
                "Note",
                "Category",
                "Case",
                "Scenario",
            ]
        )
        .set_index("Node/Line")
    )

    df_2030 = (
        df[(df["Parameter"] == "Final demand") & (df["Year"] == 2030)]
        .rename(columns={"Value": "Value_2030"})
        .drop(columns=["Parameter", "Year"])
    )

    df_2040 = (
        df[(df["Parameter"] == "Final demand") & (df["Year"] == 2040)]
        .rename(columns={"Value": "Value_2040"})
        .drop(columns=["Parameter", "Year"])
    )

    # Conversion GWh/d to MWh/h
    conversion_factor = 1000 / 24

    df_2035 = pd.concat([df_2040, df_2030], axis=1)
    df_2035["GlobD_2035"] = (
        (df_2035["Value_2030"] + df_2035["Value_2040"]) / 2
    ) * conversion_factor
    df_2035.loc["NOS0"] = [
        0,
        0,
        Norway_global_demand_1y / 8760,
    ]  # Manually add Norway demand
    grouped_demands = df_2035.drop(
        columns=["Value_2030", "Value_2040"]
    ).reset_index()

    # choose demands for considered countries
    return grouped_demands[
        grouped_demands["Node/Line"].str[:2].isin(countries)
    ]


def import_ch4_demandTS():
    """Import from the PyPSA-eur-sec run the time series of
    residential rural heat per neighbor country.
    This time series is used to calculate:
    - the global (yearly) heat demand of Norway (that will be supplied by CH4)
    - the normalized CH4 hourly resolved demand profile

    Parameters
    ----------
    None.

    Returns
    -------
    Norway_global_demand: float
        Yearly heat demand of Norway in MWh
    neighbor_loads_t: pandas.DataFrame
        Normalized CH4 hourly resolved demand profiles per neighbor country

    """

    cwd = Path(".")
    target_file = (
        cwd
        / "data_bundle_egon_data"
        / "pypsa_eur_sec"
        / "2022-07-26-egondata-integration"
        / "postnetworks"
        / "elec_s_37_lv2.0__Co2L0-1H-T-H-B-I-dist1_2050.nc"
    )

    network = pypsa.Network(str(target_file))

    # Set country tag for all buses
    network.buses.country = network.buses.index.str[:2]
    neighbors = network.buses[network.buses.country != "DE"]
    neighbors = neighbors[
        (neighbors["country"].isin(countries))
        & (neighbors["carrier"] == "residential rural heat")
    ].drop_duplicates(subset="country")

    neighbor_loads = network.loads[network.loads.bus.isin(neighbors.index)]
    neighbor_loads_t_index = neighbor_loads.index[
        neighbor_loads.index.isin(network.loads_t.p_set.columns)
    ]
    neighbor_loads_t = network.loads_t["p_set"][neighbor_loads_t_index]
    Norway_global_demand = neighbor_loads_t[
        "NO3 0 residential rural heat"
    ].sum()

    for i in neighbor_loads_t.columns:
        neighbor_loads_t[i] = neighbor_loads_t[i] / neighbor_loads_t[i].sum()

    return Norway_global_demand, neighbor_loads_t


def import_power_to_h2_demandTS():
    """Import from the PyPSA-eur-sec run the time series of
    industrial heat demand per neighbor country and normalize it
    in order to model the power-to-H2 hourly resolved demand profile.

    Parameters
    ----------
    None.

    Returns
    -------
    neighbor_loads_t: pandas.DataFrame
        Normalized power-to-H2 hourly resolved demand profiles per neighbor country

    """

    cwd = Path(".")
    target_file = (
        cwd
        / "data_bundle_egon_data"
        / "pypsa_eur_sec"
        / "2022-07-26-egondata-integration"
        / "postnetworks"
        / "elec_s_37_lv2.0__Co2L0-1H-T-H-B-I-dist1_2050.nc"
    )

    network = pypsa.Network(str(target_file))

    # Set country tag for all buses
    network.buses.country = network.buses.index.str[:2]
    neighbors = network.buses[network.buses.country != "DE"]
    neighbors = neighbors[
        (neighbors["country"].isin(countries))
        & (
            neighbors["carrier"] == "residential rural heat"
        )  # no industry profile available for now, using another time series
    ]  # .drop_duplicates(subset="country")

    neighbor_loads = network.loads[network.loads.bus.isin(neighbors.index)]
    neighbor_loads_t_index = neighbor_loads.index[
        neighbor_loads.index.isin(network.loads_t.p_set.columns)
    ]
    neighbor_loads_t = network.loads_t["p_set"][neighbor_loads_t_index]

    for i in neighbor_loads_t.columns:
        neighbor_loads_t[i] = neighbor_loads_t[i] / neighbor_loads_t[i].sum()

    return neighbor_loads_t


def insert_ch4_demand(global_demand, normalized_ch4_demandTS):
    """Insert gas final demands for foreign countries

    Parameters
    ----------
    global_demand : pandas.DataFrame
        Global gas demand per foreign node in 1 year
    normalized_ch4_demandTS : pandas.DataFrame
        Normalized time series of the demand per foreign country

    Returns
    -------
    None.

    """
    sources = config.datasets()["gas_neighbours"]["sources"]
    targets = config.datasets()["gas_neighbours"]["targets"]
    map_buses = get_map_buses()

    # Delete existing data

    db.execute_sql(
        f"""
        DELETE FROM
        {targets['load_timeseries']['schema']}.{targets['load_timeseries']['table']}
        WHERE "load_id" IN (
            SELECT load_id FROM
            {targets['loads']['schema']}.{targets['loads']['table']}
            WHERE bus IN (
                SELECT bus_id FROM
                {sources['buses']['schema']}.{sources['buses']['table']}
                WHERE country != 'DE'
                AND scn_name = 'eGon2035')
            AND scn_name = 'eGon2035'
            AND carrier = 'CH4'
        );
        """
    )

    db.execute_sql(
        f"""
        DELETE FROM
        {targets['loads']['schema']}.{targets['loads']['table']}
        WHERE bus IN (
            SELECT bus_id FROM
            {sources['buses']['schema']}.{sources['buses']['table']}
            WHERE country != 'DE'
            AND scn_name = 'eGon2035')
        AND scn_name = 'eGon2035'
        AND carrier = 'CH4'
        """
    )

    # Set bus_id
    global_demand.loc[
        global_demand[global_demand["Node/Line"].isin(map_buses.keys())].index,
        "Node/Line",
    ] = global_demand.loc[
        global_demand[global_demand["Node/Line"].isin(map_buses.keys())].index,
        "Node/Line",
    ].map(
        map_buses
    )
    global_demand.loc[:, "bus"] = (
        get_foreign_gas_bus_id().loc[global_demand.loc[:, "Node/Line"]].values
    )

    # Add missing columns
    c = {"scn_name": "eGon2035", "carrier": "CH4"}
    global_demand = global_demand.assign(**c)

    new_id = db.next_etrago_id("load")
    global_demand["load_id"] = range(new_id, new_id + len(global_demand))

    ch4_demand_TS = global_demand.copy()
    # Remove useless columns
    global_demand = global_demand.drop(columns=["Node/Line", "GlobD_2035"])

    # Insert data to db
    global_demand.to_sql(
        targets["loads"]["table"],
        db.engine(),
        schema=targets["loads"]["schema"],
        index=False,
        if_exists="append",
    )

    # Insert time series
    ch4_demand_TS["Node/Line"] = ch4_demand_TS["Node/Line"].replace(
        ["UK00"], "GB"
    )

    p_set = []
    for index, row in ch4_demand_TS.iterrows():
        normalized_TS_df = normalized_ch4_demandTS.loc[
            :,
            normalized_ch4_demandTS.columns.str.contains(row["Node/Line"][:2]),
        ]
        p_set.append(
            (
                normalized_TS_df[normalized_TS_df.columns[0]]
                * row["GlobD_2035"]
            ).tolist()
        )

    ch4_demand_TS["p_set"] = p_set
    ch4_demand_TS["temp_id"] = 1
    ch4_demand_TS = ch4_demand_TS.drop(
        columns=["Node/Line", "GlobD_2035", "bus", "carrier"]
    )

    # Insert data to DB
    ch4_demand_TS.to_sql(
        targets["load_timeseries"]["table"],
        db.engine(),
        schema=targets["load_timeseries"]["schema"],
        index=False,
        if_exists="append",
    )


def calc_ch4_storage_capacities():
    """Calculate CH4 storage capacities (e_nom in MWh) per neighbouring country
    from the SciGRID_gas dataset (IGGIELGN).
    """
    target_file = (
        Path(".") / "datasets" / "gas_data" / "data" / "IGGIELGN_Storages.csv"
    )

    ch4_storage_capacities = pd.read_csv(
        target_file,
        delimiter=";",
        decimal=".",
        usecols=["country_code", "param"],
    )

    ch4_storage_capacities = ch4_storage_capacities[
        ch4_storage_capacities["country_code"].isin(countries)
    ]

    map_countries_scigrid = {
        "AT": "AT00",
        "BE": "BE00",
        "CZ": "CZ00",
        "DK": "DKE1",
        "EE": "EE00",
        "EL": "GR00",
        "ES": "ES00",
        "FI": "FI00",
        "FR": "FR00",
        "GB": "UK00",
        "IT": "ITCN",
        "LT": "LT00",
        "LV": "LV00",
        "MT": "MT00",
        "NL": "NL00",
        "PL": "PL00",
        "PT": "PT00",
        "SE": "SE01",
    }

    # Define new columns
    max_workingGas_M_m3 = []
    end_year = []

    for index, row in ch4_storage_capacities.iterrows():
        param = ast.literal_eval(row["param"])
        end_year.append(param["end_year"])
        max_workingGas_M_m3.append(param["max_workingGas_M_m3"])

    end_year = [float("inf") if x is None else x for x in end_year]
    ch4_storage_capacities = ch4_storage_capacities.assign(end_year=end_year)
    ch4_storage_capacities = ch4_storage_capacities[
        ch4_storage_capacities["end_year"] >= 2035
    ]

    # Calculate e_nom
    conv_factor = (
        10830  # M_m3 to MWh - gross calorific value = 39 MJ/m3 (eurogas.org)
    )
    ch4_storage_capacities["e_nom"] = [
        conv_factor * i for i in max_workingGas_M_m3
    ]

    ch4_storage_capacities.drop(
        ["param", "end_year"],
        axis=1,
        inplace=True,
    )

    ch4_storage_capacities["Country"] = ch4_storage_capacities[
        "country_code"
    ].map(map_countries_scigrid)
    ch4_storage_capacities = ch4_storage_capacities.groupby(
        ["country_code"]
    ).agg(
        {
            "e_nom": "sum",
            "Country": "first",
        },
    )

    ch4_storage_capacities = ch4_storage_capacities.drop(["RU"])
    ch4_storage_capacities.loc[:, "bus"] = (
        get_foreign_gas_bus_id()
        .loc[ch4_storage_capacities.loc[:, "Country"]]
        .values
    )

    return ch4_storage_capacities


def insert_storage(ch4_storage_capacities):
    """Insert CH4 stores for foreign countries into the database

    Parameters
    ----------
    ch4_storage_capacities : pandas.DataFrame
        CH4 storage capacities per foreign node

    Returns
    -------
    None.

    """
    sources = config.datasets()["gas_neighbours"]["sources"]
    targets = config.datasets()["gas_neighbours"]["targets"]

    # Clean table
    db.execute_sql(
        f"""
        DELETE FROM {targets['stores']['schema']}.{targets['stores']['table']}
        WHERE "carrier" = 'CH4'
        AND scn_name = 'eGon2035'
        AND bus IN (
            SELECT bus_id FROM {sources['buses']['schema']}.{sources['buses']['table']}
            WHERE scn_name = 'eGon2035'
            AND country != 'DE'
            );
        """
    )
    # Add missing columns
    c = {"scn_name": "eGon2035", "carrier": "CH4"}
    ch4_storage_capacities = ch4_storage_capacities.assign(**c)

    new_id = db.next_etrago_id("store")
    ch4_storage_capacities["store_id"] = range(
        new_id, new_id + len(ch4_storage_capacities)
    )

    ch4_storage_capacities.drop(
        ["Country"],
        axis=1,
        inplace=True,
    )

    ch4_storage_capacities = ch4_storage_capacities.reset_index(drop=True)
    # Insert data to db
    ch4_storage_capacities.to_sql(
        targets["stores"]["table"],
        db.engine(),
        schema=targets["stores"]["schema"],
        index=False,
        if_exists="append",
    )


def calc_global_power_to_h2_demand():
    """Calculates global power demand linked to H2 production from TYNDP data

    Returns
    -------
    pandas.DataFrame
        Global power-to-H2 demand per foreign node

    """
    sources = config.datasets()["gas_neighbours"]["sources"]

    file = zipfile.ZipFile(f"tyndp/{sources['tyndp_capacities']}")
    df = pd.read_excel(
        file.open("TYNDP-2020-Scenario-Datafile.xlsx").read(),
        sheet_name="Gas Data",
    )

    df = (
        df.query(
            'Scenario == "Distributed Energy" & '
            'Case == "Average" &'  # Case: 2 Week/Average/DF/Peak
            'Parameter == "P2H2"'
        )
        .drop(
            columns=[
                "Generator_ID",
                "Climate Year",
                "Simulation_ID",
                "Node 1",
                "Path",
                "Direct/Indirect",
                "Sector",
                "Note",
                "Category",
                "Case",
                "Scenario",
                "Parameter",
            ]
        )
        .set_index("Node/Line")
    )

    df_2030 = (
        df[df["Year"] == 2030]
        .rename(columns={"Value": "Value_2030"})
        .drop(columns=["Year"])
    )
    df_2040 = (
        df[df["Year"] == 2040]
        .rename(columns={"Value": "Value_2040"})
        .drop(columns=["Year"])
    )

    # Conversion GWh/d to MWh/h
    conversion_factor = 1000 / 24

    df_2035 = pd.concat([df_2040, df_2030], axis=1)
    df_2035["GlobD_2035"] = (
        (df_2035["Value_2030"] + df_2035["Value_2040"]) / 2
    ) * conversion_factor

    global_power_to_h2_demand = df_2035.drop(
        columns=["Value_2030", "Value_2040"]
    )

    # choose demands for considered countries
    global_power_to_h2_demand = global_power_to_h2_demand[
        (global_power_to_h2_demand.index.str[:2].isin(countries))
        & (global_power_to_h2_demand["GlobD_2035"] != 0)
    ]

    # Split the demands for DK and UK in two
    global_power_to_h2_demand.loc["DKW1"] = (
        global_power_to_h2_demand.loc["DKE1"] / 2
    )
    global_power_to_h2_demand.loc["DKE1"] = (
        global_power_to_h2_demand.loc["DKE1"] / 2
    )
    global_power_to_h2_demand.loc["UKNI"] = (
        global_power_to_h2_demand.loc["UK00"] / 2
    )
    global_power_to_h2_demand.loc["UK00"] = (
        global_power_to_h2_demand.loc["UK00"] / 2
    )
    global_power_to_h2_demand = global_power_to_h2_demand.reset_index()

    return global_power_to_h2_demand


def insert_power_to_h2_demand(
    global_power_to_h2_demand, normalized_power_to_h2_demandTS
):
    """Insert power-to-H2 demands for foreign countries into the database

    Parameters
    ----------
    global_power_to_h2_demand : pandas.DataFrame
        Global power-to-H2 demand per foreign node
    normalized_power_to_h2_demandTS : pandas.DataFrame
        Normalized time series of the power-to-H2 demand per foreign country

    Returns
    -------
    None.

    """
    sources = config.datasets()["gas_neighbours"]["sources"]
    targets = config.datasets()["gas_neighbours"]["targets"]
    map_buses = get_map_buses()

    # Delete existing data

    db.execute_sql(
        f"""
        DELETE FROM
        {targets['load_timeseries']['schema']}.{targets['load_timeseries']['table']}
        WHERE "load_id" IN (
            SELECT load_id FROM
            {targets['loads']['schema']}.{targets['loads']['table']}
            WHERE bus IN (
                SELECT bus_id FROM
                {sources['buses']['schema']}.{sources['buses']['table']}
                WHERE country != 'DE'
                AND scn_name = 'eGon2035')
            AND scn_name = 'eGon2035'
            AND carrier = 'H2 for industry'
        );
        """
    )

    db.execute_sql(
        f"""
        DELETE FROM
        {targets['loads']['schema']}.{targets['loads']['table']}
        WHERE bus IN (
            SELECT bus_id FROM
            {sources['buses']['schema']}.{sources['buses']['table']}
            WHERE country != 'DE'
            AND scn_name = 'eGon2035')
        AND scn_name = 'eGon2035'
        AND carrier = 'H2 for industry'
        """
    )

    # Set bus_id
    global_power_to_h2_demand.loc[
        global_power_to_h2_demand[
            global_power_to_h2_demand["Node/Line"].isin(map_buses.keys())
        ].index,
        "Node/Line",
    ] = global_power_to_h2_demand.loc[
        global_power_to_h2_demand[
            global_power_to_h2_demand["Node/Line"].isin(map_buses.keys())
        ].index,
        "Node/Line",
    ].map(
        map_buses
    )
    global_power_to_h2_demand.loc[:, "bus"] = (
        get_foreign_bus_id()
        .loc[global_power_to_h2_demand.loc[:, "Node/Line"]]
        .values
    )

    # Add missing columns
    c = {"scn_name": "eGon2035", "carrier": "H2 for industry"}
    global_power_to_h2_demand = global_power_to_h2_demand.assign(**c)

    new_id = db.next_etrago_id("load")
    global_power_to_h2_demand["load_id"] = range(
        new_id, new_id + len(global_power_to_h2_demand)
    )

    power_to_h2_demand_TS = global_power_to_h2_demand.copy()
    # Remove useless columns
    global_power_to_h2_demand = global_power_to_h2_demand.drop(
        columns=["Node/Line", "GlobD_2035"]
    )

    # Insert data to db
    global_power_to_h2_demand.to_sql(
        targets["loads"]["table"],
        db.engine(),
        schema=targets["loads"]["schema"],
        index=False,
        if_exists="append",
    )

    # Insert time series
    normalized_power_to_h2_demandTS = normalized_power_to_h2_demandTS.drop(
        columns=[
            "NO3 0 residential rural heat",
            "CH0 0 residential rural heat",
            "LU0 0 residential rural heat",
        ]
    )

    power_to_h2_demand_TS = power_to_h2_demand_TS.replace(
        {
            "Node/Line": {
                "UK00": "GB4",
                "UKNI": "GB5",
                "DKW1": "DK3",
                "DKE1": "DK0",
                "SE02": "SE3",
            }
        }
    )

    p_set = []
    for index, row in power_to_h2_demand_TS.iterrows():
        normalized_TS_df = normalized_power_to_h2_demandTS.loc[
            :,
            normalized_power_to_h2_demandTS.columns.str.contains(
                row["Node/Line"][:3]
            ),
        ]
        p_set.append(
            (
                normalized_TS_df[normalized_TS_df.columns[0]]
                * row["GlobD_2035"]
            ).tolist()
        )

    power_to_h2_demand_TS["p_set"] = p_set
    power_to_h2_demand_TS["temp_id"] = 1
    power_to_h2_demand_TS = power_to_h2_demand_TS.drop(
        columns=["Node/Line", "GlobD_2035", "bus", "carrier"]
    )

    # Insert data to db
    power_to_h2_demand_TS.to_sql(
        targets["load_timeseries"]["table"],
        db.engine(),
        schema=targets["load_timeseries"]["schema"],
        index=False,
        if_exists="append",
    )


def calculate_ch4_grid_capacities():
    """Calculates CH4 grid capacities for foreign countries based on TYNDP-data

    Parameters
    ----------
    None.

    Returns
    -------
    Neighbouring_pipe_capacities_list : pandas.DataFrame

    """
    sources = config.datasets()["gas_neighbours"]["sources"]

    # Download file
    basename = "ENTSOG_TYNDP_2020_Annex_C2_Capacities_per_country.xlsx"
    url = "https://www.entsog.eu/sites/default/files/2021-07/" + basename
    target_file = Path(".") / "datasets" / "gas_data" / basename

    urlretrieve(url, target_file)
    map_pipelines = {
        "NORDSTREAM": "RU00",
        "NORDSTREAM 2": "RU00",
        "OPAL": "DE",
        "YAMAL (BY)": "RU00",
        "Denmark": "DKE1",
        "Belgium": "BE00",
        "Netherlands": "NL00",
        "Norway": "NOM1",
        "Switzerland": "CH00",
        "Poland": "PL00",
        "United Kingdom": "UK00",
        "Germany": "DE",
        "Austria": "AT00",
        "France": "FR00",
        "Czechia": "CZ00",
        "Russia": "RU00",
        "Luxemburg": "LUB1",
    }

    grid_countries = [
        "NORDSTREAM",
        "NORDSTREAM 2",
        "OPAL",
        "YAMAL (BY)",
        "Denmark",
        "Belgium",
        "Netherlands",
        "Norway",
        "Switzerland",
        "Poland",
        "United Kingdom",
        "Germany",
        "Austria",
        "France",
        "Czechia",
        "Russia",
        "Luxemburg",
    ]

    # Read in data from the downloaded Excel file
    pipe_capacities_list = pd.read_excel(
        target_file,
        sheet_name="Transmission Peak Capacity",
        skiprows=range(4),
    )
    pipe_capacities_list = pipe_capacities_list[
        ["To Country", "Unnamed: 3", "From Country", 2035]
    ].rename(
        columns={
            "Unnamed: 3": "Scenario",
            "To Country": "To_Country",
            "From Country": "From_Country",
        }
    )
    pipe_capacities_list["To_Country"] = pd.Series(
        pipe_capacities_list["To_Country"]
    ).fillna(method="ffill")
    pipe_capacities_list["From_Country"] = pd.Series(
        pipe_capacities_list["From_Country"]
    ).fillna(method="ffill")
    pipe_capacities_list = pipe_capacities_list[
        pipe_capacities_list["Scenario"] == "Advanced"
    ].drop(columns={"Scenario"})
    pipe_capacities_list = pipe_capacities_list[
        (
            (pipe_capacities_list["To_Country"].isin(grid_countries))
            & (pipe_capacities_list["From_Country"].isin(grid_countries))
        )
        & (pipe_capacities_list[2035] != 0)
    ]
    pipe_capacities_list["To_Country"] = pipe_capacities_list[
        "To_Country"
    ].map(map_pipelines)
    pipe_capacities_list["From_Country"] = pipe_capacities_list[
        "From_Country"
    ].map(map_pipelines)
    pipe_capacities_list["countrycombination"] = pipe_capacities_list[
        ["To_Country", "From_Country"]
    ].apply(
        lambda x: tuple(sorted([str(x.To_Country), str(x.From_Country)])),
        axis=1,
    )

    pipeline_strategies = {
        "To_Country": "first",
        "From_Country": "first",
        2035: sum,
    }

    pipe_capacities_list = pipe_capacities_list.groupby(
        ["countrycombination"]
    ).agg(pipeline_strategies)

    # Add manually DK-SE and AT-CH pipes (Scigrid gas data)
    pipe_capacities_list.loc["(DKE1, SE02)"] = ["DKE1", "SE02", 651]
    pipe_capacities_list.loc["(AT00, CH00)"] = ["AT00", "CH00", 651]

    # Conversion GWh/d to MWh/h
    pipe_capacities_list["p_nom"] = pipe_capacities_list[2035] * (1000 / 24)

    # Border crossing CH4 pipelines between foreign countries

    Neighbouring_pipe_capacities_list = pipe_capacities_list[
        (pipe_capacities_list["To_Country"] != "DE")
        & (pipe_capacities_list["From_Country"] != "DE")
    ].reset_index()

    Neighbouring_pipe_capacities_list.loc[:, "bus0"] = (
        get_foreign_gas_bus_id()
        .loc[Neighbouring_pipe_capacities_list.loc[:, "To_Country"]]
        .values
    )
    Neighbouring_pipe_capacities_list.loc[:, "bus1"] = (
        get_foreign_gas_bus_id()
        .loc[Neighbouring_pipe_capacities_list.loc[:, "From_Country"]]
        .values
    )

    # Adjust columns
    Neighbouring_pipe_capacities_list = Neighbouring_pipe_capacities_list.drop(
        columns=[
            "To_Country",
            "From_Country",
            "countrycombination",
            2035,
        ]
    )

    new_id = db.next_etrago_id("link")
    Neighbouring_pipe_capacities_list["link_id"] = range(
        new_id, new_id + len(Neighbouring_pipe_capacities_list)
    )

    # Border crossing CH4 pipelines between DE and neighbouring countries
    DE_pipe_capacities_list = pipe_capacities_list[
        (pipe_capacities_list["To_Country"] == "DE")
        | (pipe_capacities_list["From_Country"] == "DE")
    ].reset_index()

    dict_cross_pipes_DE = {
        ("AT00", "DE"): "AT",
        ("BE00", "DE"): "BE",
        ("CH00", "DE"): "CH",
        ("CZ00", "DE"): "CZ",
        ("DE", "DKE1"): "DK",
        ("DE", "FR00"): "FR",
        ("DE", "LUB1"): "LU",
        ("DE", "NL00"): "NL",
        ("DE", "NOM1"): "NO",
        ("DE", "PL00"): "PL",
        ("DE", "RU00"): "RU",
    }

    DE_pipe_capacities_list["country_code"] = DE_pipe_capacities_list[
        "countrycombination"
    ].map(dict_cross_pipes_DE)
    DE_pipe_capacities_list = DE_pipe_capacities_list.set_index("country_code")

    for country_code in [e for e in countries if e not in ("GB", "SE", "UK")]:

        # Select cross-border links
        cap_DE = db.select_dataframe(
            f"""SELECT link_id, bus0, bus1
                FROM {sources['links']['schema']}.{sources['links']['table']}
                    WHERE scn_name = 'eGon2035'
                    AND carrier = 'CH4'
                    AND (("bus0" IN (
                        SELECT bus_id FROM {sources['buses']['schema']}.{sources['buses']['table']}
                            WHERE country = 'DE'
                            AND carrier = 'CH4'
                            AND scn_name = 'eGon2035')
                        AND "bus1" IN (SELECT bus_id FROM {sources['buses']['schema']}.{sources['buses']['table']}
                            WHERE country = '{country_code}'
                            AND carrier = 'CH4'
                            AND scn_name = 'eGon2035')
                    )
                    OR ("bus0" IN (
                        SELECT bus_id FROM {sources['buses']['schema']}.{sources['buses']['table']}
                            WHERE country = '{country_code}'
                            AND carrier = 'CH4'
                            AND scn_name = 'eGon2035')
                        AND "bus1" IN (SELECT bus_id FROM {sources['buses']['schema']}.{sources['buses']['table']}
                            WHERE country = 'DE'
                            AND carrier = 'CH4'
                            AND scn_name = 'eGon2035'))
                    )
            ;"""
        )

        cap_DE["p_nom"] = DE_pipe_capacities_list.at[
            country_code, "p_nom"
        ] / len(cap_DE.index)
        Neighbouring_pipe_capacities_list = (
            Neighbouring_pipe_capacities_list.append(cap_DE)
        )

    # Add topo, geom and length
    bus_geom = db.select_geodataframe(
        """SELECT bus_id, geom
        FROM grid.egon_etrago_bus
        WHERE scn_name = 'eGon2035'
        AND carrier = 'CH4'
        """,
        epsg=4326,
    ).set_index("bus_id")

    coordinates_bus0 = []
    coordinates_bus1 = []

    for index, row in Neighbouring_pipe_capacities_list.iterrows():
        coordinates_bus0.append(bus_geom["geom"].loc[int(row["bus0"])])
        coordinates_bus1.append(bus_geom["geom"].loc[int(row["bus1"])])

    Neighbouring_pipe_capacities_list["coordinates_bus0"] = coordinates_bus0
    Neighbouring_pipe_capacities_list["coordinates_bus1"] = coordinates_bus1

    Neighbouring_pipe_capacities_list[
        "topo"
    ] = Neighbouring_pipe_capacities_list.apply(
        lambda row: LineString(
            [row["coordinates_bus0"], row["coordinates_bus1"]]
        ),
        axis=1,
    )
    Neighbouring_pipe_capacities_list[
        "geom"
    ] = Neighbouring_pipe_capacities_list.apply(
        lambda row: MultiLineString([row["topo"]]), axis=1
    )
    Neighbouring_pipe_capacities_list[
        "length"
    ] = Neighbouring_pipe_capacities_list.apply(
        lambda row: row["topo"].length, axis=1
    )

    # Remove useless columns
    Neighbouring_pipe_capacities_list = Neighbouring_pipe_capacities_list.drop(
        columns=[
            "coordinates_bus0",
            "coordinates_bus1",
        ]
    )

    # Add missing columns
    c = {"scn_name": "eGon2035", "carrier": "CH4"}
    Neighbouring_pipe_capacities_list = (
        Neighbouring_pipe_capacities_list.assign(**c)
    )

    Neighbouring_pipe_capacities_list = (
        Neighbouring_pipe_capacities_list.set_geometry("geom", crs=4326)
    )

    return Neighbouring_pipe_capacities_list


def tyndp_gas_generation():
    """Insert data from TYNDP 2020 according to NEP 2021
    Scenario 'Distributed Energy', linearly interpolated between 2030 and 2040

    Returns
    -------
    None.
    """
    capacities = calc_capacities()
    insert_generators(capacities)

    ch4_storage_capacities = calc_ch4_storage_capacities()
    insert_storage(ch4_storage_capacities)


def tyndp_gas_demand():
    """Insert data from TYNDP 2020 according to NEP 2021
    Scenario 'Distributed Energy', linearly interpolated between 2030 and 2040

    Returns
    -------
    None.
    """
    Norway_global_demand_1y, normalized_ch4_demandTS = import_ch4_demandTS()
    global_ch4_demand = calc_global_ch4_demand(Norway_global_demand_1y)
    insert_ch4_demand(global_ch4_demand, normalized_ch4_demandTS)

    normalized_power_to_h2_demandTS = import_power_to_h2_demandTS()
    global_power_to_h2_demand = calc_global_power_to_h2_demand()
    insert_power_to_h2_demand(
        global_power_to_h2_demand, normalized_power_to_h2_demandTS
    )


def grid():
    """Insert data from TYNDP 2020 according to NEP 2021
    Scenario 'Distributed Energy', linearly interpolated between 2030 and 2040

    Returns
    -------
    None.
    """
    Neighbouring_pipe_capacities_list = calculate_ch4_grid_capacities()
    insert_gas_grid_capacities(
        Neighbouring_pipe_capacities_list, scn_name="eGon2035"
    )