Passed
Pull Request — dev (#1052), by unknown, created 01:35

relate_to_schmidt_sites()   grade A

Complexity: Conditions 1
Size: Total Lines 25, Code Lines 11
Duplication: Lines 0, Ratio 0 %
Importance: Changes 0

Metric  Value
eloc    11
dl      0
loc     25
rs      9.85
c       0
b       0
f       0
cc      1
nop     1
1
from sqlalchemy import ARRAY, Column, Float, Integer, String
2
from sqlalchemy.ext.declarative import declarative_base
3
import geopandas as gpd
4
import numpy as np
5
import pandas as pd
6
7
from egon.data import config, db
8
from egon.data.datasets import Dataset
9
from egon.data.datasets.electricity_demand.temporal import calc_load_curve
10
from egon.data.datasets.industry.temporal import identify_bus
11
12
# CONSTANTS
13
# TODO: move to datasets.yml
14
CON = db.engine()
15
16
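# DSM parameters per sector, used by calculate_potentials() below
# (cf. its docstring and Heitkoetter et al.,
# https://doi.org/10.1016/j.adapen.2020.100001):
#   S_FLEX_*  - feasibility factor accounting for socio-technical restrictions
#   S_UTIL_*  - average annual utilisation rate
#   S_INC_*   - shiftable share of installed capacity for load increase
#   S_DEC_*   - shiftable share of installed capacity for load decrease
#   DELTA_T_* - maximum shift duration in hours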
# CTS
17
CTS_COOL_VENT_AC_SHARE = 0.22
18
19
S_FLEX_CTS = 0.5
20
S_UTIL_CTS = 0.67
21
S_INC_CTS = 1
22
S_DEC_CTS = 0
23
DELTA_T_CTS = 1
24
25
# industry
26
IND_VENT_COOL_SHARE = 0.039
27
IND_VENT_SHARE = 0.017
28
29
# OSM
30
S_FLEX_OSM = 0.5
31
S_UTIL_OSM = 0.73
32
S_INC_OSM = 0.9
33
S_DEC_OSM = 0.5
34
DELTA_T_OSM = 1
35
36
# paper
37
S_FLEX_PAPER = 0.15
38
S_UTIL_PAPER = 0.86
39
S_INC_PAPER = 0.95
40
S_DEC_PAPER = 0
41
DELTA_T_PAPER = 3
42
43
# recycled paper
44
S_FLEX_RECYCLED_PAPER = 0.7
45
S_UTIL_RECYCLED_PAPER = 0.85
46
S_INC_RECYCLED_PAPER = 0.95
47
S_DEC_RECYCLED_PAPER = 0
48
DELTA_T_RECYCLED_PAPER = 3
49
50
# pulp
51
S_FLEX_PULP = 0.7
52
S_UTIL_PULP = 0.83
53
S_INC_PULP = 0.95
54
S_DEC_PULP = 0
55
DELTA_T_PULP = 2
56
57
# cement
58
S_FLEX_CEMENT = 0.61
59
S_UTIL_CEMENT = 0.65
60
S_INC_CEMENT = 0.95
61
S_DEC_CEMENT = 0
62
DELTA_T_CEMENT = 4
63
64
# wz 23
65
WZ = 23
66
67
S_FLEX_WZ = 0.5
68
S_UTIL_WZ = 0.8
69
S_INC_WZ = 1
70
S_DEC_WZ = 0.5
71
DELTA_T_WZ = 1
72
73
Base = declarative_base()
74
75
76
class DsmPotential(Dataset):
77
    def __init__(self, dependencies):
78
        super().__init__(
79
            name="DsmPotential",
80
            version="0.0.4.dev",
81
            dependencies=dependencies,
82
            tasks=(dsm_cts_ind_processing,),
83
        )
84
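# Note: DsmPotential's single task, dsm_cts_ind_processing (defined at the end
# of this module), runs both dsm_cts_ind(), which builds DSM buses, links and
# stores per potential (its aggregation/export step is currently commented
# out), and dsm_cts_ind_individual(), which writes individual DSM time series
# into the ORM tables declared below.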
85
86
# Datasets
87
class EgonEtragoElectricityCtsDsmTimeseries(Base):
88
    target = config.datasets()["DSM_CTS_industry"]["targets"][
89
        "cts_loadcurves_dsm"
90
    ]
91
92
    __tablename__ = target["table"]
93
    __table_args__ = {"schema": target["schema"]}
94
95
    bus = Column(Integer, primary_key=True, index=True)
96
    scn_name = Column(String, primary_key=True, index=True)
97
    p_set = Column(ARRAY(Float))
98
    p_max_pu = Column(ARRAY(Float))
99
    p_min_pu = Column(ARRAY(Float))
100
    e_max_pu = Column(ARRAY(Float))
101
    e_min_pu = Column(ARRAY(Float))
102
103
104
class EgonOsmIndLoadCurvesIndividualDsmTimeseries(Base):
105
    target = config.datasets()["DSM_CTS_industry"]["targets"][
106
        "ind_osm_loadcurves_individual_dsm"
107
    ]
108
109
    __tablename__ = target["table"]
110
    __table_args__ = {"schema": target["schema"]}
111
112
    osm_id = Column(Integer, primary_key=True, index=True)
113
    scn_name = Column(String, primary_key=True, index=True)
114
    bus = Column(Integer)
115
    p_set = Column(ARRAY(Float))
116
    p_max_pu = Column(ARRAY(Float))
117
    p_min_pu = Column(ARRAY(Float))
118
    e_max_pu = Column(ARRAY(Float))
119
    e_min_pu = Column(ARRAY(Float))
120
121
122
class EgonDemandregioSitesIndElectricityDsmTimeseries(Base):
123
    target = config.datasets()["DSM_CTS_industry"]["targets"][
124
        "demandregio_ind_sites_dsm"
125
    ]
126
127
    __tablename__ = target["table"]
128
    __table_args__ = {"schema": target["schema"]}
129
130
    industrial_sites_id = Column(Integer, primary_key=True, index=True)
131
    scn_name = Column(String, primary_key=True, index=True)
132
    bus = Column(Integer)
133
    application = Column(String)
134
    p_set = Column(ARRAY(Float))
135
    p_max_pu = Column(ARRAY(Float))
136
    p_min_pu = Column(ARRAY(Float))
137
    e_max_pu = Column(ARRAY(Float))
138
    e_min_pu = Column(ARRAY(Float))
139
140
141
class EgonSitesIndLoadCurvesIndividualDsmTimeseries(Base):
142
    target = config.datasets()["DSM_CTS_industry"]["targets"][
143
        "ind_sites_loadcurves_individual"
144
    ]
145
146
    __tablename__ = target["table"]
147
    __table_args__ = {"schema": target["schema"]}
148
149
    site_id = Column(Integer, primary_key=True, index=True)
150
    scn_name = Column(String, primary_key=True, index=True)
151
    bus = Column(Integer)
152
    p_set = Column(ARRAY(Float))
153
    p_max_pu = Column(ARRAY(Float))
154
    p_min_pu = Column(ARRAY(Float))
155
    e_max_pu = Column(ARRAY(Float))
156
    e_min_pu = Column(ARRAY(Float))
157
158
159
# Code
160
def cts_data_import(cts_cool_vent_ac_share):
161
    """
162
    Import CTS data necessary to identify DSM-potential.
163
164
    Parameters
    ----------
165
    cts_cool_vent_ac_share: float
166
        Share of cooling, ventilation and AC in CTS demand
167
    """
168
169
    # import load data
170
171
    sources = config.datasets()["DSM_CTS_industry"]["sources"][
172
        "cts_loadcurves"
173
    ]
174
175
    ts = db.select_dataframe(
176
        f"""SELECT bus_id, scn_name, p_set FROM
177
        {sources['schema']}.{sources['table']}"""
178
    )
179
180
    # identify relevant columns and prepare df to be returned
181
182
    dsm = pd.DataFrame(index=ts.index)
183
184
    dsm["bus"] = ts["bus_id"].copy()
185
    dsm["scn_name"] = ts["scn_name"].copy()
186
    dsm["p_set"] = ts["p_set"].copy()
187
188
    # calculate share of timeseries for air conditioning, cooling and
189
    # ventilation out of CTS-data
190
191
    timeseries = dsm["p_set"].copy()
192
193
    for index, liste in timeseries.iteritems():
194
        share = [float(item) * cts_cool_vent_ac_share for item in liste]
195
        timeseries.loc[index] = share
196
197
    dsm["p_set"] = timeseries.copy()
198
199
    return dsm
200
201
202
def ind_osm_data_import(ind_vent_cool_share):
203
    """
204
    Import industry data per osm-area necessary to identify DSM-potential.
205
    Parameters
    ----------
206
    ind_vent_cool_share: float
207
        Share of considered application in industry demand
208
    """
209
210
    # import load data
211
212
    sources = config.datasets()["DSM_CTS_industry"]["sources"][
213
        "ind_osm_loadcurves"
214
    ]
215
216
    dsm = db.select_dataframe(
217
        f"""SELECT bus, scn_name, p_set FROM
218
        {sources['schema']}.{sources['table']}"""
219
    )
220
221
    # calculate share of timeseries for cooling and ventilation out of
222
    # industry-data
223
224
    timeseries = dsm["p_set"].copy()
225
226
    for index, liste in timeseries.iteritems():
227
        share = [float(item) * ind_vent_cool_share for item in liste]
228
229
        timeseries.loc[index] = share
230
231
    dsm["p_set"] = timeseries.copy()
232
233
    return dsm
234
235
236
def ind_osm_data_import_individual(ind_vent_cool_share):
237
    """
238
    Import industry data per osm-area necessary to identify DSM-potential.
239
    Parameters
    ----------
240
    ind_vent_cool_share: float
241
        Share of considered application in industry demand
242
    """
243
244
    # import load data
245
246
    sources = config.datasets()["DSM_CTS_industry"]["sources"][
247
        "ind_osm_loadcurves_individual"
248
    ]
249
250
    dsm = db.select_dataframe(
251
        f"""
252
        SELECT osm_id, bus_id as bus, scn_name, p_set FROM
253
        {sources['schema']}.{sources['table']}
254
        WHERE scn_name in ('eGon2035', 'eGon100RE')
255
        """
256
    )
257
258
    # calculate share of timeseries for cooling and ventilation out of
259
    # industry-data
260
261
    timeseries = dsm["p_set"].copy()
262
263
    for index, liste in timeseries.iteritems():
264
        share = [float(item) * ind_vent_cool_share for item in liste]
265
266
        timeseries.loc[index] = share
267
268
    dsm["p_set"] = timeseries.copy()
269
270
    return dsm
271
272
273
def ind_sites_vent_data_import(ind_vent_share, wz):
274
    """
275
    Import industry sites necessary to identify DSM-potential.
276
    Parameters
    ----------
277
    ind_vent_share: float
278
        Share of considered application in industry demand
279
    wz: int
280
        Wirtschaftszweig to be considered within industry sites
281
    """
282
283
    # import load data
284
285
    sources = config.datasets()["DSM_CTS_industry"]["sources"][
286
        "ind_sites_loadcurves"
287
    ]
288
289
    dsm = db.select_dataframe(
290
        f"""
291
        SELECT bus, scn_name, p_set FROM
292
        {sources['schema']}.{sources['table']}
293
        WHERE wz = '{wz}'
294
        """
295
    )
296
297
    # calculate share of timeseries for ventilation
298
299
    timeseries = dsm["p_set"].copy()
300
301
    for index, liste in timeseries.iteritems():
302
        share = [float(item) * ind_vent_share for item in liste]
303
        timeseries.loc[index] = share
304
305
    dsm["p_set"] = timeseries.copy()
306
307
    return dsm
308
309
310
def ind_sites_vent_data_import_individual(ind_vent_share, wz):
311
    """
312
    Import industry sites necessary to identify DSM-potential.
313
    Parameters
    ----------
314
    ind_vent_share: float
315
        Share of considered application in industry demand
316
    wz: int
317
        Wirtschaftszweig to be considered within industry sites
318
    """
319
320
    # import load data
321
322
    sources = config.datasets()["DSM_CTS_industry"]["sources"][
323
        "ind_sites_loadcurves_individual"
324
    ]
325
326
    dsm = db.select_dataframe(
327
        f"""
328
        SELECT site_id, bus_id as bus, scn_name, p_set FROM
329
        {sources['schema']}.{sources['table']}
330
        WHERE scn_name IN ('eGon2035', 'eGon100RE')
331
        AND wz = '{wz}'
332
        """
333
    )
334
335
    # calculate share of timeseries for ventilation
336
337
    timeseries = dsm["p_set"].copy()
338
339
    for index, liste in timeseries.iteritems():
340
        share = [float(item) * ind_vent_share for item in liste]
341
        timeseries.loc[index] = share
342
343
    dsm["p_set"] = timeseries.copy()
344
345
    return dsm
346
347
348
def calc_ind_site_timeseries(scenario):
349
    # calculate timeseries per site
350
    # -> using code from egon.data.datasets.industry.temporal:
351
    # calc_load_curves_ind_sites
352
353
    # select demands per industrial site including the subsector information
354
    source1 = config.datasets()["DSM_CTS_industry"]["sources"][
355
        "demandregio_ind_sites"
356
    ]
357
358
    demands_ind_sites = db.select_dataframe(
359
        f"""SELECT industrial_sites_id, wz, demand
360
            FROM {source1['schema']}.{source1['table']}
361
            WHERE scenario = '{scenario}'
362
            AND demand > 0
363
            """
364
    ).set_index(["industrial_sites_id"])
365
366
    # select industrial sites as demand_areas from database
367
    source2 = config.datasets()["DSM_CTS_industry"]["sources"]["ind_sites"]
368
369
    demand_area = db.select_geodataframe(
370
        f"""SELECT id, geom, subsector FROM
371
            {source2['schema']}.{source2['table']}""",
372
        index_col="id",
373
        geom_col="geom",
374
        epsg=3035,
375
    )
376
377
    # replace entries to bring it in line with demandregio's subsector
378
    # definitions
379
    demands_ind_sites.replace(1718, 17, inplace=True)
380
    share_wz_sites = demands_ind_sites.copy()
381
382
    # create additional df on wz_share per industrial site, which is always set
383
    # to one as the industrial demand per site is subsector specific
384
    share_wz_sites.demand = 1
385
    share_wz_sites.reset_index(inplace=True)
386
387
    share_transpose = pd.DataFrame(
388
        index=share_wz_sites.industrial_sites_id.unique(),
389
        columns=share_wz_sites.wz.unique(),
390
    )
391
    share_transpose.index.rename("industrial_sites_id", inplace=True)
392
    for wz in share_transpose.columns:
393
        share_transpose[wz] = (
394
            share_wz_sites[share_wz_sites.wz == wz]
395
            .set_index("industrial_sites_id")
396
            .demand
397
        )
398
399
    # calculate load curves
400
    load_curves = calc_load_curve(share_transpose, demands_ind_sites["demand"])
401
402
    # identify bus per industrial site
403
    curves_bus = identify_bus(load_curves, demand_area)
404
    curves_bus.index = curves_bus["id"].astype(int)
405
406
    # initialize dataframe to be returned
407
408
    ts = pd.DataFrame(
409
        data=curves_bus["bus_id"], index=curves_bus["id"].astype(int)
410
    )
411
    ts["scenario_name"] = scenario
412
    curves_bus.drop({"id", "bus_id", "geom"}, axis=1, inplace=True)
413
    ts["p_set"] = curves_bus.values.tolist()
414
415
    # add subsector to relate to Schmidt's tables afterwards
416
    ts["application"] = demand_area["subsector"]
417
418
    return ts
419
420
421
def relate_to_schmidt_sites(dsm):
422
    # import industrial sites by Schmidt
423
424
    source = config.datasets()["DSM_CTS_industry"]["sources"][
425
        "ind_sites_schmidt"
426
    ]
427
428
    schmidt = db.select_dataframe(
429
        f"""SELECT application, geom FROM
430
            {source['schema']}.{source['table']}"""
431
    )
432
433
    # relate calculated timeseries (dsm) to Schmidt's industrial sites
434
435
    applications = np.unique(schmidt["application"])
436
    dsm = pd.DataFrame(dsm[dsm["application"].isin(applications)])
437
438
    # initialize dataframe to be returned
439
440
    dsm.rename(
441
        columns={"scenario_name": "scn_name", "bus_id": "bus"},
442
        inplace=True,
443
    )
444
445
    return dsm
446
447
448
def ind_sites_data_import():
449
    """
450
    Import industry sites data necessary to identify DSM-potential.
451
    """
452
    # calculate timeseries per site
453
454
    # scenario eGon2035
455
    dsm_2035 = calc_ind_site_timeseries("eGon2035")
456
    dsm_2035.reset_index(inplace=True)
457
    # scenario eGon100RE
458
    dsm_100 = calc_ind_site_timeseries("eGon100RE")
459
    dsm_100.reset_index(inplace=True)
460
    # bring df for both scenarios together
461
    dsm_100.index = range(len(dsm_2035), len(dsm_2035) + len(dsm_100))
462
    dsm = dsm_2035.append(dsm_100)
463
464
    # relate calculated timeseries to Schmidt's industrial sites
465
466
    dsm = relate_to_schmidt_sites(dsm)
467
468
    return dsm[["application", "id", "bus", "scn_name", "p_set"]]
469
470
471
def calculate_potentials(s_flex, s_util, s_inc, s_dec, delta_t, dsm):
472
    """
473
    Calculate DSM-potential per bus using the methods by Heitkoetter et al.:
474
        https://doi.org/10.1016/j.adapen.2020.100001
475
    Parameters
476
    ----------
477
    s_flex: float
478
        Feasibility factor to account for socio-technical restrictions
479
    s_util: float
480
        Average annual utilisation rate
481
    s_inc: float
482
        Shiftable share of installed capacity up to which load can be
483
        increased considering technical limitations
484
    s_dec: float
485
        Shiftable share of installed capacity up to which load can be
486
        decreased considering technical limitations
487
    delta_t: int
488
        Maximum shift duration in hours
489
    dsm: DataFrame
490
        List of existing buses with DSM-potential including timeseries of
491
        loads
492
    """
493
494
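    # Summary of the formulas implemented below (as coded in this function):
    #   scheduled flexible load  L_s(t) = s_flex * p_set(t)
    #   annual demand            E_a    = sum_t p_set(t)
    #   maximum capacity         Lambda = (E_a * s_flex) / (8760 * s_util)
    #   upward potential         P_max(t) = max(0, Lambda * s_inc - L_s(t))
    #   downward potential       P_min(t) = min(0, -(L_s(t) - Lambda * s_dec))
    #   E_max(t) / E_min(t): energy shiftable within the next / previous
    #   delta_t hours of L_s, wrapping around the end of the year.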
    # copy relevant timeseries
495
    timeseries = dsm["p_set"].copy()
496
497
    # calculate scheduled load L(t)
498
499
    scheduled_load = timeseries.copy()
500
501
    for index, liste in scheduled_load.iteritems():
502
        share = []
503
        for item in liste:
504
            share.append(item * s_flex)
505
        scheduled_load.loc[index] = share
506
507
    # calculate maximum capacity Lambda
508
509
    # calculate energy annual requirement
510
    energy_annual = pd.Series(index=timeseries.index, dtype=float)
511
    for index, liste in timeseries.iteritems():
512
        energy_annual.loc[index] = sum(liste)
513
514
    # calculate Lambda
515
    lam = (energy_annual * s_flex) / (8760 * s_util)
516
517
    # calculation of P_max and P_min
518
519
    # P_max
520
    p_max = scheduled_load.copy()
521
    for index, liste in scheduled_load.iteritems():
522
        lamb = lam.loc[index]
523
        p = []
524
        for item in liste:
525
            value = lamb * s_inc - item
526
            if value < 0:
527
                value = 0
528
            p.append(value)
529
        p_max.loc[index] = p
530
531
    # P_min
532
    p_min = scheduled_load.copy()
533
    for index, liste in scheduled_load.iteritems():
534
        lamb = lam.loc[index]
535
        p = []
536
        for item in liste:
537
            value = -(item - lamb * s_dec)
538
            if value > 0:
539
                value = 0
540
            p.append(value)
541
        p_min.loc[index] = p
542
543
    # calculation of E_max and E_min
544
545
    e_max = scheduled_load.copy()
546
    e_min = scheduled_load.copy()
547
548
    for index, liste in scheduled_load.iteritems():
549
        emin = []
550
        emax = []
551
        for i in range(len(liste)):
552
            if i + delta_t > len(liste):
553
                emax.append(
554
                    (sum(liste[i:]) + sum(liste[: delta_t - (len(liste) - i)]))
555
                )
556
            else:
557
                emax.append(sum(liste[i : i + delta_t]))
558
            if i - delta_t < 0:
559
                emin.append(
560
                    (
561
                        -1
562
                        * (
563
                            (
564
                                sum(liste[:i])
565
                                + sum(liste[len(liste) - delta_t + i :])
566
                            )
567
                        )
568
                    )
569
                )
570
            else:
571
                emin.append(-1 * sum(liste[i - delta_t : i]))
572
        e_max.loc[index] = emax
573
        e_min.loc[index] = emin
574
575
    return p_max, p_min, e_max, e_min
576
577
578
def create_dsm_components(con, p_max, p_min, e_max, e_min, dsm):
579
    """
580
    Create components representing DSM.
581
    Parameters
582
    ----------
583
    con :
584
        Connection to database
585
    p_max: DataFrame
586
        Timeseries identifying maximum load increase
587
    p_min: DataFrame
588
        Timeseries identifying maximum load decrease
589
    e_max: DataFrame
590
        Timeseries identifying maximum energy amount to be preponed
591
    e_min: DataFrame
592
        Timeseries identifying maximum energy amount to be postponed
593
    dsm: DataFrame
594
        List of existing buses with DSM-potential including timeseries of loads
595
    """
596
597
    # calculate P_nom and P per unit
598
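    # p_nom per bus is the larger of the maximum upward shift and the absolute
    # maximum downward shift; p_max and p_min are then rescaled to per-unit
    # values of p_nom (the same is done for e_nom, e_max and e_min below).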
    p_nom = pd.Series(index=p_max.index, dtype=float)
599
    for index, row in p_max.iteritems():
600
        nom = max(max(row), abs(min(p_min.loc[index])))
601
        p_nom.loc[index] = nom
602
        new = [element / nom for element in row]
603
        p_max.loc[index] = new
604
        new = [element / nom for element in p_min.loc[index]]
605
        p_min.loc[index] = new
606
607
    # calculate E_nom and E per unit
608
    e_nom = pd.Series(index=p_min.index, dtype=float)
609
    for index, row in e_max.iteritems():
610
        nom = max(max(row), abs(min(e_min.loc[index])))
611
        e_nom.loc[index] = nom
612
        new = [element / nom for element in row]
613
        e_max.loc[index] = new
614
        new = [element / nom for element in e_min.loc[index]]
615
        e_min.loc[index] = new
616
617
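    # Each DSM potential is represented by a dedicated DSM bus that mirrors its
    # original grid bus, a link from the original bus to the DSM bus carrying
    # the shiftable power (p_nom, p_min, p_max), and a store at the DSM bus
    # holding the shiftable energy (e_nom, e_min, e_max); see data_export().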
    # add DSM-buses to "original" buses
618
    dsm_buses = gpd.GeoDataFrame(index=dsm.index)
619
    dsm_buses["original_bus"] = dsm["bus"].copy()
620
    dsm_buses["scn_name"] = dsm["scn_name"].copy()
621
622
    # get original buses and add copy of relevant information
623
    target1 = config.datasets()["DSM_CTS_industry"]["targets"]["bus"]
624
    original_buses = db.select_geodataframe(
625
        f"""SELECT bus_id, v_nom, scn_name, x, y, geom FROM
626
            {target1['schema']}.{target1['table']}""",
627
        geom_col="geom",
628
        epsg=4326,
629
    )
630
631
    # copy relevant information from original buses to DSM-buses
632
    dsm_buses["index"] = dsm_buses.index
633
    originals = original_buses[
634
        original_buses["bus_id"].isin(np.unique(dsm_buses["original_bus"]))
635
    ]
636
    dsm_buses = originals.merge(
637
        dsm_buses,
638
        left_on=["bus_id", "scn_name"],
639
        right_on=["original_bus", "scn_name"],
640
    )
641
    dsm_buses.index = dsm_buses["index"]
642
    dsm_buses.drop(["bus_id", "index"], axis=1, inplace=True)
643
644
    # new bus_ids for DSM-buses
645
    max_id = original_buses["bus_id"].max()
646
    if np.isnan(max_id):
647
        max_id = 0
648
    dsm_id = max_id + 1
649
    bus_id = pd.Series(index=dsm_buses.index, dtype=int)
650
651
    # Get number of DSM buses for both scenarios
652
    rows_per_scenario = (
653
        dsm_buses.groupby("scn_name").count().original_bus.to_dict()
654
    )
655
656
    # Assignment of DSM ids
657
    bus_id.iloc[: rows_per_scenario.get("eGon2035", 0)] = range(
658
        dsm_id, dsm_id + rows_per_scenario.get("eGon2035", 0)
659
    )
660
661
    bus_id.iloc[
662
        rows_per_scenario.get("eGon2035", 0) : rows_per_scenario.get(
663
            "eGon2035", 0
664
        )
665
        + rows_per_scenario.get("eGon100RE", 0)
666
    ] = range(dsm_id, dsm_id + rows_per_scenario.get("eGon100RE", 0))
667
668
    dsm_buses["bus_id"] = bus_id
669
670
    # add links from "original" buses to DSM-buses
671
672
    dsm_links = pd.DataFrame(index=dsm_buses.index)
673
    dsm_links["original_bus"] = dsm_buses["original_bus"].copy()
674
    dsm_links["dsm_bus"] = dsm_buses["bus_id"].copy()
675
    dsm_links["scn_name"] = dsm_buses["scn_name"].copy()
676
677
    # set link_id
678
    target2 = config.datasets()["DSM_CTS_industry"]["targets"]["link"]
679
    sql = f"""SELECT link_id FROM {target2['schema']}.{target2['table']}"""
680
    max_id = pd.read_sql_query(sql, con)
681
    max_id = max_id["link_id"].max()
682
    if np.isnan(max_id):
683
        max_id = 0
684
    dsm_id = max_id + 1
685
    link_id = pd.Series(index=dsm_buses.index, dtype=int)
686
687
    # Assignment of link ids
688
    link_id.iloc[: rows_per_scenario.get("eGon2035", 0)] = range(
689
        dsm_id, dsm_id + rows_per_scenario.get("eGon2035", 0)
690
    )
691
692
    link_id.iloc[
693
        rows_per_scenario.get("eGon2035", 0) : rows_per_scenario.get(
694
            "eGon2035", 0
695
        )
696
        + rows_per_scenario.get("eGon100RE", 0)
697
    ] = range(dsm_id, dsm_id + rows_per_scenario.get("eGon100RE", 0))
698
699
    dsm_links["link_id"] = link_id
700
701
    # add calculated timeseries to df to be returned
702
    dsm_links["p_nom"] = p_nom
703
    dsm_links["p_min"] = p_min
704
    dsm_links["p_max"] = p_max
705
706
    # add DSM-stores
707
708
    dsm_stores = pd.DataFrame(index=dsm_buses.index)
709
    dsm_stores["bus"] = dsm_buses["bus_id"].copy()
710
    dsm_stores["scn_name"] = dsm_buses["scn_name"].copy()
711
    dsm_stores["original_bus"] = dsm_buses["original_bus"].copy()
712
713
    # set store_id
714
    target3 = config.datasets()["DSM_CTS_industry"]["targets"]["store"]
715
    sql = f"""SELECT store_id FROM {target3['schema']}.{target3['table']}"""
716
    max_id = pd.read_sql_query(sql, con)
717
    max_id = max_id["store_id"].max()
718
    if np.isnan(max_id):
719
        max_id = 0
720
    dsm_id = max_id + 1
721
    store_id = pd.Series(index=dsm_buses.index, dtype=int)
722
723
    # Assignment of store ids
724
    store_id.iloc[: rows_per_scenario.get("eGon2035", 0)] = range(
725
        dsm_id, dsm_id + rows_per_scenario.get("eGon2035", 0)
726
    )
727
728
    store_id.iloc[
729
        rows_per_scenario.get("eGon2035", 0) : rows_per_scenario.get(
730
            "eGon2035", 0
731
        )
732
        + rows_per_scenario.get("eGon100RE", 0)
733
    ] = range(dsm_id, dsm_id + rows_per_scenario.get("eGon100RE", 0))
734
735
    dsm_stores["store_id"] = store_id
736
737
    # add calculated timeseries to df to be returned
738
    dsm_stores["e_nom"] = e_nom
739
    dsm_stores["e_min"] = e_min
740
    dsm_stores["e_max"] = e_max
741
742
    return dsm_buses, dsm_links, dsm_stores
743
744
745
def aggregate_components(df_dsm_buses, df_dsm_links, df_dsm_stores):
746
    # aggregate buses
747
748
    grouper = [df_dsm_buses.original_bus, df_dsm_buses.scn_name]
749
750
    df_dsm_buses = df_dsm_buses.groupby(grouper).first()
751
752
    df_dsm_buses.reset_index(inplace=True)
753
    df_dsm_buses.sort_values("scn_name", inplace=True)
754
755
    # aggregate links
756
757
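    # The per-bus profiles are stored as lists; converting them to numpy arrays
    # lets the groupby-apply of np.sum add them element-wise per time step,
    # yielding one aggregated profile per original bus and scenario.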
    df_dsm_links["p_max"] = df_dsm_links["p_max"].apply(lambda x: np.array(x))
758
    df_dsm_links["p_min"] = df_dsm_links["p_min"].apply(lambda x: np.array(x))
759
760
    grouper = [df_dsm_links.original_bus, df_dsm_links.scn_name]
761
    p_nom = df_dsm_links.groupby(grouper)["p_nom"].sum()
762
    p_max = df_dsm_links.groupby(grouper)["p_max"].apply(np.sum)
763
    p_min = df_dsm_links.groupby(grouper)["p_min"].apply(np.sum)
764
765
    df_dsm_links = df_dsm_links.groupby(grouper).first()
766
    df_dsm_links.p_nom = p_nom
767
    df_dsm_links.p_max = p_max
768
    df_dsm_links.p_min = p_min
769
770
    df_dsm_links["p_max"] = df_dsm_links["p_max"].apply(lambda x: list(x))
771
    df_dsm_links["p_min"] = df_dsm_links["p_min"].apply(lambda x: list(x))
772
773
    df_dsm_links.reset_index(inplace=True)
774
    df_dsm_links.sort_values("scn_name", inplace=True)
775
776
    # aggregate stores
777
778
    df_dsm_stores["e_max"] = df_dsm_stores["e_max"].apply(
779
        lambda x: np.array(x)
780
    )
781
    df_dsm_stores["e_min"] = df_dsm_stores["e_min"].apply(
782
        lambda x: np.array(x)
783
    )
784
785
    grouper = [df_dsm_stores.original_bus, df_dsm_stores.scn_name]
786
    e_nom = df_dsm_stores.groupby(grouper)["e_nom"].sum()
787
    e_max = df_dsm_stores.groupby(grouper)["e_max"].apply(np.sum)
788
    e_min = df_dsm_stores.groupby(grouper)["e_min"].apply(np.sum)
789
790
    df_dsm_stores = df_dsm_stores.groupby(grouper).first()
791
    df_dsm_stores.e_nom = e_nom
792
    df_dsm_stores.e_max = e_max
793
    df_dsm_stores.e_min = e_min
794
795
    df_dsm_stores["e_max"] = df_dsm_stores["e_max"].apply(lambda x: list(x))
796
    df_dsm_stores["e_min"] = df_dsm_stores["e_min"].apply(lambda x: list(x))
797
798
    df_dsm_stores.reset_index(inplace=True)
799
    df_dsm_stores.sort_values("scn_name", inplace=True)
800
801
    # select new bus_ids for aggregated buses and add to links and stores
802
    bus_id = db.next_etrago_id("Bus") + df_dsm_buses.index
803
804
    df_dsm_buses["bus_id"] = bus_id
805
    df_dsm_links["dsm_bus"] = bus_id
806
    df_dsm_stores["bus"] = bus_id
807
808
    # select new link_ids for aggregated links
809
    link_id = db.next_etrago_id("Link") + df_dsm_links.index
810
811
    df_dsm_links["link_id"] = link_id
812
813
    # select new store_ids to aggregated stores
814
815
    store_id = db.next_etrago_id("Store") + df_dsm_stores.index
816
817
    df_dsm_stores["store_id"] = store_id
818
819
    return df_dsm_buses, df_dsm_links, df_dsm_stores
820
821
822
def data_export(dsm_buses, dsm_links, dsm_stores, carrier):
823
    """
824
    Export new components to database.
825
826
    Parameters
827
    ----------
828
    dsm_buses: DataFrame
829
        Buses representing locations of DSM-potential
830
    dsm_links: DataFrame
831
        Links connecting DSM-buses and DSM-stores
832
    dsm_stores: DataFrame
833
        Stores representing DSM-potential
834
    carrier: String
835
        Remark to be filled in column 'carrier' identifying DSM-potential
836
    """
837
838
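    # Static attributes are written to the bus, link and store target tables;
    # the time-dependent profiles (p_min_pu/p_max_pu for links, e_min_pu/
    # e_max_pu for stores) go to the link_timeseries and store_timeseries
    # tables below.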
    targets = config.datasets()["DSM_CTS_industry"]["targets"]
839
840
    # dsm_buses
841
842
    insert_buses = gpd.GeoDataFrame(
843
        index=dsm_buses.index,
844
        data=dsm_buses["geom"],
845
        geometry="geom",
846
        crs=dsm_buses.crs,
847
    )
848
    insert_buses["scn_name"] = dsm_buses["scn_name"]
849
    insert_buses["bus_id"] = dsm_buses["bus_id"]
850
    insert_buses["v_nom"] = dsm_buses["v_nom"]
851
    insert_buses["carrier"] = carrier
852
    insert_buses["x"] = dsm_buses["x"]
853
    insert_buses["y"] = dsm_buses["y"]
854
855
    # insert into database
856
    insert_buses.to_postgis(
857
        targets["bus"]["table"],
858
        con=db.engine(),
859
        schema=targets["bus"]["schema"],
860
        if_exists="append",
861
        index=False,
862
        dtype={"geom": "geometry"},
863
    )
864
865
    # dsm_links
866
867
    insert_links = pd.DataFrame(index=dsm_links.index)
868
    insert_links["scn_name"] = dsm_links["scn_name"]
869
    insert_links["link_id"] = dsm_links["link_id"]
870
    insert_links["bus0"] = dsm_links["original_bus"]
871
    insert_links["bus1"] = dsm_links["dsm_bus"]
872
    insert_links["carrier"] = carrier
873
    insert_links["p_nom"] = dsm_links["p_nom"]
874
875
    # insert into database
876
    insert_links.to_sql(
877
        targets["link"]["table"],
878
        con=db.engine(),
879
        schema=targets["link"]["schema"],
880
        if_exists="append",
881
        index=False,
882
    )
883
884
    insert_links_timeseries = pd.DataFrame(index=dsm_links.index)
885
    insert_links_timeseries["scn_name"] = dsm_links["scn_name"]
886
    insert_links_timeseries["link_id"] = dsm_links["link_id"]
887
    insert_links_timeseries["p_min_pu"] = dsm_links["p_min"]
888
    insert_links_timeseries["p_max_pu"] = dsm_links["p_max"]
889
    insert_links_timeseries["temp_id"] = 1
890
891
    # insert into database
892
    insert_links_timeseries.to_sql(
893
        targets["link_timeseries"]["table"],
894
        con=db.engine(),
895
        schema=targets["link_timeseries"]["schema"],
896
        if_exists="append",
897
        index=False,
898
    )
899
900
    # dsm_stores
901
902
    insert_stores = pd.DataFrame(index=dsm_stores.index)
903
    insert_stores["scn_name"] = dsm_stores["scn_name"]
904
    insert_stores["store_id"] = dsm_stores["store_id"]
905
    insert_stores["bus"] = dsm_stores["bus"]
906
    insert_stores["carrier"] = carrier
907
    insert_stores["e_nom"] = dsm_stores["e_nom"]
908
909
    # insert into database
910
    insert_stores.to_sql(
911
        targets["store"]["table"],
912
        con=db.engine(),
913
        schema=targets["store"]["schema"],
914
        if_exists="append",
915
        index=False,
916
    )
917
918
    insert_stores_timeseries = pd.DataFrame(index=dsm_stores.index)
919
    insert_stores_timeseries["scn_name"] = dsm_stores["scn_name"]
920
    insert_stores_timeseries["store_id"] = dsm_stores["store_id"]
921
    insert_stores_timeseries["e_min_pu"] = dsm_stores["e_min"]
922
    insert_stores_timeseries["e_max_pu"] = dsm_stores["e_max"]
923
    insert_stores_timeseries["temp_id"] = 1
924
925
    # insert into database
926
    insert_stores_timeseries.to_sql(
927
        targets["store_timeseries"]["table"],
928
        con=db.engine(),
929
        schema=targets["store_timeseries"]["schema"],
930
        if_exists="append",
931
        index=False,
932
    )
933
934
935
def delete_dsm_entries(carrier):
936
    """
937
    Deletes DSM-components from database if they already exist before creating
938
    new ones.
939
940
    Parameters
941
    ----------
942
    carrier: String
943
        Remark in column 'carrier' identifying DSM-potential
944
    """
945
946
    targets = config.datasets()["DSM_CTS_industry"]["targets"]
947
948
    # buses
949
950
    sql = f"""DELETE FROM {targets["bus"]["schema"]}.{targets["bus"]["table"]} b
951
     WHERE (b.carrier LIKE '{carrier}');"""
952
    db.execute_sql(sql)
953
954
    # links
955
956
    sql = f"""
957
        DELETE FROM {targets["link_timeseries"]["schema"]}.
958
        {targets["link_timeseries"]["table"]} t
959
        WHERE t.link_id IN
960
        (
961
            SELECT l.link_id FROM {targets["link"]["schema"]}.
962
            {targets["link"]["table"]} l
963
            WHERE l.carrier LIKE '{carrier}'
964
        );
965
        """
966
967
    db.execute_sql(sql)
968
969
    sql = f"""
970
        DELETE FROM {targets["link"]["schema"]}.
971
        {targets["link"]["table"]} l
972
        WHERE (l.carrier LIKE '{carrier}');
973
        """
974
975
    db.execute_sql(sql)
976
977
    # stores
978
979
    sql = f"""
980
        DELETE FROM {targets["store_timeseries"]["schema"]}.
981
        {targets["store_timeseries"]["table"]} t
982
        WHERE t.store_id IN
983
        (
984
            SELECT s.store_id FROM {targets["store"]["schema"]}.
985
            {targets["store"]["table"]} s
986
            WHERE s.carrier LIKE '{carrier}'
987
        );
988
        """
989
990
    db.execute_sql(sql)
991
992
    sql = f"""
993
        DELETE FROM {targets["store"]["schema"]}.{targets["store"]["table"]} s
994
        WHERE (s.carrier LIKE '{carrier}');
995
        """
996
997
    db.execute_sql(sql)
998
999
1000
def dsm_cts_ind(
1001
    con=db.engine(),
1002
    cts_cool_vent_ac_share=0.22,
1003
    ind_vent_cool_share=0.039,
1004
    ind_vent_share=0.017,
1005
):
1006
    """
1007
    Execute methodology to create and implement components for DSM considering
1008
    a) CTS per osm-area: combined potentials of cooling, ventilation and air
1009
      conditioning
1010
    b) Industry per osm-area: combined potentials of cooling and ventilation
1011
    c) Industrial Sites: potentials of ventilation in sites of
1012
      "Wirtschaftszweig" (WZ) 23
1013
    d) Industrial Sites: potentials of sites specified by subsectors
1014
      identified by Schmidt (https://zenodo.org/record/3613767#.YTsGwVtCRhG):
1015
      Paper, Recycled Paper, Pulp, Cement
1016
1017
    Modelled using the methods by Heitkoetter et al.:
1018
    https://doi.org/10.1016/j.adapen.2020.100001
1019
1020
    Parameters
1021
    ----------
1022
    con :
1023
        Connection to database
1024
    cts_cool_vent_ac_share: float
1025
        Share of cooling, ventilation and AC in CTS demand
1026
    ind_vent_cool_share: float
1027
        Share of cooling and ventilation in industry demand
1028
    ind_vent_share: float
1029
        Share of ventilation in industry demand in sites of WZ 23
1030
1031
    """
1032
1033
    # CTS per osm-area: cooling, ventilation and air conditioning
1034
1035
    print(" ")
1036
    print("CTS per osm-area: cooling, ventilation and air conditioning")
1037
    print(" ")
1038
1039
    dsm = cts_data_import(cts_cool_vent_ac_share)
1040
1041
    # calculate combined potentials of cooling, ventilation and air
1042
    # conditioning in CTS using combined parameters by Heitkoetter et al.
1043
    p_max, p_min, e_max, e_min = calculate_potentials(
1044
        s_flex=S_FLEX_CTS,
1045
        s_util=S_UTIL_CTS,
1046
        s_inc=S_INC_CTS,
1047
        s_dec=S_DEC_CTS,
1048
        delta_t=DELTA_T_CTS,
1049
        dsm=dsm,
1050
    )
1051
1052
    dsm_buses, dsm_links, dsm_stores = create_dsm_components(
1053
        con, p_max, p_min, e_max, e_min, dsm
1054
    )
1055
1056
    df_dsm_buses = dsm_buses.copy()
1057
    df_dsm_links = dsm_links.copy()
1058
    df_dsm_stores = dsm_stores.copy()
1059
1060
    # industry per osm-area: cooling and ventilation
1061
1062
    print(" ")
1063
    print("industry per osm-area: cooling and ventilation")
1064
    print(" ")
1065
1066
    dsm = ind_osm_data_import(ind_vent_cool_share)
1067
1068
    # calculate combined potentials of cooling and ventilation in industrial
1069
    # sector using combined parameters by Heitkoetter et al.
1070
    p_max, p_min, e_max, e_min = calculate_potentials(
1071
        s_flex=S_FLEX_OSM,
1072
        s_util=S_UTIL_OSM,
1073
        s_inc=S_INC_OSM,
1074
        s_dec=S_DEC_OSM,
1075
        delta_t=DELTA_T_OSM,
1076
        dsm=dsm,
1077
    )
1078
1079
    dsm_buses, dsm_links, dsm_stores = create_dsm_components(
1080
        con, p_max, p_min, e_max, e_min, dsm
1081
    )
1082
1083
    df_dsm_buses = gpd.GeoDataFrame(
1084
        pd.concat([df_dsm_buses, dsm_buses], ignore_index=True),
1085
        crs="EPSG:4326",
1086
    )
1087
    df_dsm_links = pd.DataFrame(
1088
        pd.concat([df_dsm_links, dsm_links], ignore_index=True)
1089
    )
1090
    df_dsm_stores = pd.DataFrame(
1091
        pd.concat([df_dsm_stores, dsm_stores], ignore_index=True)
1092
    )
1093
1094
    # industry sites
1095
1096
    # industry sites: different applications
1097
1098
    dsm = ind_sites_data_import()
1099
1100
    print(" ")
1101
    print("industry sites: paper")
1102
    print(" ")
1103
1104
    dsm_paper = gpd.GeoDataFrame(
1105
        dsm[
1106
            dsm["application"].isin(
1107
                [
1108
                    "Graphic Paper",
1109
                    "Packing Paper and Board",
1110
                    "Hygiene Paper",
1111
                    "Technical/Special Paper and Board",
1112
                ]
1113
            )
1114
        ]
1115
    )
1116
1117
    # calculate potentials of industrial sites with paper-applications
1118
    # using parameters by Heitkoetter et al.
1119
    p_max, p_min, e_max, e_min = calculate_potentials(
1120
        s_flex=S_FLEX_PAPER,
1121
        s_util=S_UTIL_PAPER,
1122
        s_inc=S_INC_PAPER,
1123
        s_dec=S_DEC_PAPER,
1124
        delta_t=DELTA_T_PAPER,
1125
        dsm=dsm_paper,
1126
    )
1127
1128
    dsm_buses, dsm_links, dsm_stores = create_dsm_components(
1129
        con, p_max, p_min, e_max, e_min, dsm_paper
1130
    )
1131
1132
    df_dsm_buses = gpd.GeoDataFrame(
1133
        pd.concat([df_dsm_buses, dsm_buses], ignore_index=True),
1134
        crs="EPSG:4326",
1135
    )
1136
    df_dsm_links = pd.DataFrame(
1137
        pd.concat([df_dsm_links, dsm_links], ignore_index=True)
1138
    )
1139
    df_dsm_stores = pd.DataFrame(
1140
        pd.concat([df_dsm_stores, dsm_stores], ignore_index=True)
1141
    )
1142
1143
    print(" ")
1144
    print("industry sites: recycled paper")
1145
    print(" ")
1146
1147
    # calculate potentials of industrial sites with recycled paper-applications
1148
    # using parameters by Heitkoetter et al.
1149
    dsm_recycled_paper = gpd.GeoDataFrame(
1150
        dsm[dsm["application"] == "Recycled Paper"]
1151
    )
1152
1153
    p_max, p_min, e_max, e_min = calculate_potentials(
1154
        s_flex=S_FLEX_RECYCLED_PAPER,
1155
        s_util=S_UTIL_RECYCLED_PAPER,
1156
        s_inc=S_INC_RECYCLED_PAPER,
1157
        s_dec=S_DEC_RECYCLED_PAPER,
1158
        delta_t=DELTA_T_RECYCLED_PAPER,
1159
        dsm=dsm_recycled_paper,
1160
    )
1161
1162
    dsm_buses, dsm_links, dsm_stores = create_dsm_components(
1163
        con, p_max, p_min, e_max, e_min, dsm_recycled_paper
1164
    )
1165
1166
    df_dsm_buses = gpd.GeoDataFrame(
1167
        pd.concat([df_dsm_buses, dsm_buses], ignore_index=True),
1168
        crs="EPSG:4326",
1169
    )
1170
    df_dsm_links = pd.DataFrame(
1171
        pd.concat([df_dsm_links, dsm_links], ignore_index=True)
1172
    )
1173
    df_dsm_stores = pd.DataFrame(
1174
        pd.concat([df_dsm_stores, dsm_stores], ignore_index=True)
1175
    )
1176
1177
    print(" ")
1178
    print("industry sites: pulp")
1179
    print(" ")
1180
1181
    dsm_pulp = gpd.GeoDataFrame(dsm[dsm["application"] == "Mechanical Pulp"])
1182
1183
    # calculate potentials of industrial sites with pulp-applications
1184
    # using parameters by Heitkoetter et al.
1185
    p_max, p_min, e_max, e_min = calculate_potentials(
1186
        s_flex=S_FLEX_PULP,
1187
        s_util=S_UTIL_PULP,
1188
        s_inc=S_INC_PULP,
1189
        s_dec=S_DEC_PULP,
1190
        delta_t=DELTA_T_PULP,
1191
        dsm=dsm_pulp,
1192
    )
1193
1194
    dsm_buses, dsm_links, dsm_stores = create_dsm_components(
1195
        con, p_max, p_min, e_max, e_min, dsm_pulp
1196
    )
1197
1198
    df_dsm_buses = gpd.GeoDataFrame(
1199
        pd.concat([df_dsm_buses, dsm_buses], ignore_index=True),
1200
        crs="EPSG:4326",
1201
    )
1202
    df_dsm_links = pd.DataFrame(
1203
        pd.concat([df_dsm_links, dsm_links], ignore_index=True)
1204
    )
1205
    df_dsm_stores = pd.DataFrame(
1206
        pd.concat([df_dsm_stores, dsm_stores], ignore_index=True)
1207
    )
1208
1209
    # industry sites: cement
1210
1211
    print(" ")
1212
    print("industry sites: cement")
1213
    print(" ")
1214
1215
    dsm_cement = gpd.GeoDataFrame(dsm[dsm["application"] == "Cement Mill"])
1216
1217
    # calculate potentials of industrial sites with cement-applications
1218
    # using parameters by Heitkoetter et al.
1219
    p_max, p_min, e_max, e_min = calculate_potentials(
1220
        s_flex=S_FLEX_CEMENT,
1221
        s_util=S_UTIL_CEMENT,
1222
        s_inc=S_INC_CEMENT,
1223
        s_dec=S_DEC_CEMENT,
1224
        delta_t=DELTA_T_CEMENT,
1225
        dsm=dsm_cement,
1226
    )
1227
1228
    dsm_buses, dsm_links, dsm_stores = create_dsm_components(
1229
        con, p_max, p_min, e_max, e_min, dsm_cement
1230
    )
1231
1232
    df_dsm_buses = gpd.GeoDataFrame(
1233
        pd.concat([df_dsm_buses, dsm_buses], ignore_index=True),
1234
        crs="EPSG:4326",
1235
    )
1236
    df_dsm_links = pd.DataFrame(
1237
        pd.concat([df_dsm_links, dsm_links], ignore_index=True)
1238
    )
1239
    df_dsm_stores = pd.DataFrame(
1240
        pd.concat([df_dsm_stores, dsm_stores], ignore_index=True)
1241
    )
1242
1243
    # industry sites: ventilation in WZ23
1244
1245
    print(" ")
1246
    print("industry sites: ventilation in WZ23")
1247
    print(" ")
1248
1249
    dsm = ind_sites_vent_data_import(ind_vent_share, wz=WZ)
1250
1251
    # drop entries of Cement Mills whose DSM-potentials have already been
1252
    # modelled
1253
    cement = np.unique(dsm_cement["bus"].values)
1254
    index_names = np.array(dsm[dsm["bus"].isin(cement)].index)
1255
    dsm.drop(index_names, inplace=True)
1256
1257
    # calculate potentials of ventilation in industrial sites of WZ 23
1258
    # using parameters by Heitkoetter et al.
1259
    p_max, p_min, e_max, e_min = calculate_potentials(
1260
        s_flex=S_FLEX_WZ,
1261
        s_util=S_UTIL_WZ,
1262
        s_inc=S_INC_WZ,
1263
        s_dec=S_DEC_WZ,
1264
        delta_t=DELTA_T_WZ,
1265
        dsm=dsm,
1266
    )
1267
1268
    dsm_buses, dsm_links, dsm_stores = create_dsm_components(
1269
        con, p_max, p_min, e_max, e_min, dsm
1270
    )
1271
1272
    df_dsm_buses = gpd.GeoDataFrame(
1273
        pd.concat([df_dsm_buses, dsm_buses], ignore_index=True),
1274
        crs="EPSG:4326",
1275
    )
1276
    df_dsm_links = pd.DataFrame(
1277
        pd.concat([df_dsm_links, dsm_links], ignore_index=True)
1278
    )
1279
    df_dsm_stores = pd.DataFrame(
1280
        pd.concat([df_dsm_stores, dsm_stores], ignore_index=True)
1281
    )
1282
1283
    # TODO
1284
    # # aggregate DSM components per substation
1285
    # dsm_buses, dsm_links, dsm_stores = aggregate_components(
1286
    #     df_dsm_buses, df_dsm_links, df_dsm_stores
1287
    # )
1288
    #
1289
    # # export aggregated DSM components to database
1290
    #
1291
    # delete_dsm_entries("dsm-cts")
1292
    # delete_dsm_entries("dsm-ind-osm")
1293
    # delete_dsm_entries("dsm-ind-sites")
1294
    # delete_dsm_entries("dsm")
1295
    #
1296
    # data_export(dsm_buses, dsm_links, dsm_stores, carrier="dsm")
1297
1298
1299
def col_per_unit(lst):
1300
    max_val = max([abs(val) for val in lst])
1301
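    # Normalise to the largest absolute value while keeping signs, e.g.
    # col_per_unit([2.0, -4.0, 1.0]) -> [0.5, -1.0, 0.25]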
1302
    return [val / max_val for val in lst]
1303
1304
1305
def calc_per_unit(df):
1306
    for col in ["p_max_pu", "p_min_pu", "e_max_pu", "e_min_pu"]:
1307
        df[col] = df[col].apply(col_per_unit)
1308
1309
    return df
1310
1311
1312
def create_table(df, table, engine=CON):
1313
    """Create table"""
1314
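    # Drop and recreate the target table first, so reruns replace any existing
    # contents instead of appending to them.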
    table.__table__.drop(bind=engine, checkfirst=True)
1315
    table.__table__.create(bind=engine, checkfirst=True)
1316
1317
    df.to_sql(
1318
        name=table.__table__.name,
1319
        schema=table.__table__.schema,
1320
        con=engine,
1321
        if_exists="append",
1322
        index=False,
1323
    )
1324
1325
1326
def dsm_cts_ind_individual(
1327
    con=CON,
1328
    cts_cool_vent_ac_share=CTS_COOL_VENT_AC_SHARE,
1329
    ind_vent_cool_share=IND_VENT_COOL_SHARE,
1330
    ind_vent_share=IND_VENT_SHARE,
1331
):
1332
    """
1333
    Execute methodology to create and implement components for DSM considering
1334
    a) CTS per osm-area: combined potentials of cooling, ventilation and air
1335
      conditioning
1336
    b) Industry per osm-area: combined potentials of cooling and ventilation
1337
    c) Industrial Sites: potentials of ventilation in sites of
1338
      "Wirtschaftszweig" (WZ) 23
1339
    d) Industrial Sites: potentials of sites specified by subsectors
1340
      identified by Schmidt (https://zenodo.org/record/3613767#.YTsGwVtCRhG):
1341
      Paper, Recycled Paper, Pulp, Cement
1342
1343
    Modelled using the methods by Heitkoetter et al.:
1344
    https://doi.org/10.1016/j.adapen.2020.100001
1345
1346
    Parameters
1347
    ----------
1348
    con :
1349
        Connection to database
1350
    cts_cool_vent_ac_share: float
1351
        Share of cooling, ventilation and AC in CTS demand
1352
    ind_vent_cool_share: float
1353
        Share of cooling and ventilation in industry demand
1354
    ind_vent_share: float
1355
        Share of ventilation in industry demand in sites of WZ 23
1356
1357
    """
1358
1359
    # CTS per osm-area: cooling, ventilation and air conditioning
1360
1361
    print(" ")
1362
    print("CTS per osm-area: cooling, ventilation and air conditioning")
1363
    print(" ")
1364
1365
    dsm = cts_data_import(cts_cool_vent_ac_share)
1366
1367
    # calculate combined potentials of cooling, ventilation and air
1368
    # conditioning in CTS using combined parameters by Heitkoetter et al.
1369
    vals = calculate_potentials(
1370
        s_flex=S_FLEX_CTS,
1371
        s_util=S_UTIL_CTS,
1372
        s_inc=S_INC_CTS,
1373
        s_dec=S_DEC_CTS,
1374
        delta_t=DELTA_T_CTS,
1375
        dsm=dsm,
1376
    )
1377
1378
    base_columns = [
1379
        "bus",
1380
        "scn_name",
1381
        "p_set",
1382
        "p_max_pu",
1383
        "p_min_pu",
1384
        "e_max_pu",
1385
        "e_min_pu",
1386
    ]
1387
1388
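    # dsm holds bus, scn_name and p_set; vals is the (p_max, p_min, e_max,
    # e_min) tuple returned by calculate_potentials(), so the concatenated
    # frame lines up with base_columns.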
    cts_df = pd.concat([dsm, *vals], axis=1, ignore_index=True)
1389
    cts_df.columns = base_columns
1390
    cts_df = calc_per_unit(cts_df)
1391
1392
    print(" ")
1393
    print("industry per osm-area: cooling and ventilation")
1394
    print(" ")
1395
1396
    dsm = ind_osm_data_import_individual(ind_vent_cool_share)
1397
1398
    # calculate combined potentials of cooling and ventilation in industrial
1399
    # sector using combined parameters by Heitkoetter et al.
1400
    vals = calculate_potentials(
1401
        s_flex=S_FLEX_OSM,
1402
        s_util=S_UTIL_OSM,
1403
        s_inc=S_INC_OSM,
1404
        s_dec=S_DEC_OSM,
1405
        delta_t=DELTA_T_OSM,
1406
        dsm=dsm,
1407
    )
1408
1409
    columns = ["osm_id"] + base_columns
1410
1411
    osm_df = pd.concat([dsm, *vals], axis=1, ignore_index=True)
1412
    osm_df.columns = columns
1413
    osm_df = calc_per_unit(osm_df)
1414
1415
    # industry sites
1416
1417
    # industry sites: different applications
1418
1419
    dsm = ind_sites_data_import()
1420
1421
    print(" ")
1422
    print("industry sites: paper")
1423
    print(" ")
1424
1425
    dsm_paper = gpd.GeoDataFrame(
1426
        dsm[
1427
            dsm["application"].isin(
1428
                [
1429
                    "Graphic Paper",
1430
                    "Packing Paper and Board",
1431
                    "Hygiene Paper",
1432
                    "Technical/Special Paper and Board",
1433
                ]
1434
            )
1435
        ]
1436
    )
1437
1438
    # calculate potentials of industrial sites with paper-applications
1439
    # using parameters by Heitkoetter et al.
1440
    vals = calculate_potentials(
1441
        s_flex=S_FLEX_PAPER,
1442
        s_util=S_UTIL_PAPER,
1443
        s_inc=S_INC_PAPER,
1444
        s_dec=S_DEC_PAPER,
1445
        delta_t=DELTA_T_PAPER,
1446
        dsm=dsm_paper,
1447
    )
1448
1449
    columns = ["application", "industrial_sites_id"] + base_columns
1450
1451
    paper_df = pd.concat([dsm_paper, *vals], axis=1, ignore_index=True)
1452
    paper_df.columns = columns
1453
    paper_df = calc_per_unit(paper_df)
1454
1455
    print(" ")
1456
    print("industry sites: recycled paper")
1457
    print(" ")
1458
1459
    # calculate potentials of industrial sites with recycled paper-applications
1460
    # using parameters by Heitkoetter et al.
1461
    dsm_recycled_paper = gpd.GeoDataFrame(
1462
        dsm[dsm["application"] == "Recycled Paper"]
1463
    )
1464
1465
    vals = calculate_potentials(
1466
        s_flex=S_FLEX_RECYCLED_PAPER,
1467
        s_util=S_UTIL_RECYCLED_PAPER,
1468
        s_inc=S_INC_RECYCLED_PAPER,
1469
        s_dec=S_DEC_RECYCLED_PAPER,
1470
        delta_t=DELTA_T_RECYCLED_PAPER,
1471
        dsm=dsm_recycled_paper,
1472
    )
1473
1474
    recycled_paper_df = pd.concat(
1475
        [dsm_recycled_paper, *vals], axis=1, ignore_index=True
1476
    )
1477
    recycled_paper_df.columns = columns
1478
    recycled_paper_df = calc_per_unit(recycled_paper_df)
1479
1480
    print(" ")
1481
    print("industry sites: pulp")
1482
    print(" ")
1483
1484
    dsm_pulp = gpd.GeoDataFrame(dsm[dsm["application"] == "Mechanical Pulp"])
1485
1486
    # calculate potentials of industrial sites with pulp-applications
1487
    # using parameters by Heitkoetter et al.
1488
    vals = calculate_potentials(
1489
        s_flex=S_FLEX_PULP,
1490
        s_util=S_UTIL_PULP,
1491
        s_inc=S_INC_PULP,
1492
        s_dec=S_DEC_PULP,
1493
        delta_t=DELTA_T_PULP,
1494
        dsm=dsm_pulp,
1495
    )
1496
1497
    pulp_df = pd.concat([dsm_pulp, *vals], axis=1, ignore_index=True)
1498
    pulp_df.columns = columns
1499
    pulp_df = calc_per_unit(pulp_df)
1500
1501
    # industry sites: cement
1502
1503
    print(" ")
1504
    print("industry sites: cement")
1505
    print(" ")
1506
1507
    dsm_cement = gpd.GeoDataFrame(dsm[dsm["application"] == "Cement Mill"])
1508
1509
    # calculate potentials of industrial sites with cement-applications
1510
    # using parameters by Heitkoetter et al.
1511
    vals = calculate_potentials(
1512
        s_flex=S_FLEX_CEMENT,
1513
        s_util=S_UTIL_CEMENT,
1514
        s_inc=S_INC_CEMENT,
1515
        s_dec=S_DEC_CEMENT,
1516
        delta_t=DELTA_T_CEMENT,
1517
        dsm=dsm_cement,
1518
    )
1519
1520
    cement_df = pd.concat([dsm_cement, *vals], axis=1, ignore_index=True)
1521
    cement_df.columns = columns
1522
    cement_df = calc_per_unit(cement_df)
1523
1524
    ind_df = pd.concat(
1525
        [paper_df, recycled_paper_df, pulp_df, cement_df], ignore_index=True
1526
    )
1527
1528
    # industry sites: ventilation in WZ23
1529
1530
    print(" ")
1531
    print("industry sites: ventilation in WZ23")
1532
    print(" ")
1533
1534
    dsm = ind_sites_vent_data_import_individual(ind_vent_share, wz=WZ)
1535
1536
    # drop entries of Cement Mills whose DSM-potentials have already been
1537
    # modelled
1538
    cement = np.unique(dsm_cement["bus"].values)
1539
    index_names = np.array(dsm[dsm["bus"].isin(cement)].index)
1540
    dsm.drop(index_names, inplace=True)
1541
1542
    # calculate potentials of ventilation in industrial sites of WZ 23
1543
    # using parameters by Heitkoetter et al.
1544
    vals = calculate_potentials(
1545
        s_flex=S_FLEX_WZ,
1546
        s_util=S_UTIL_WZ,
1547
        s_inc=S_INC_WZ,
1548
        s_dec=S_DEC_WZ,
1549
        delta_t=DELTA_T_WZ,
1550
        dsm=dsm,
1551
    )
1552
1553
    columns = ["site_id"] + base_columns
1554
1555
    ind_sites_df = pd.concat([dsm, *vals], axis=1, ignore_index=True)
1556
    ind_sites_df.columns = columns
1557
    ind_sites_df = calc_per_unit(ind_sites_df)
1558
1559
    # create tables
1560
    create_table(
1561
        df=cts_df, table=EgonEtragoElectricityCtsDsmTimeseries, engine=CON
1562
    )
1563
    create_table(
1564
        df=osm_df,
1565
        table=EgonOsmIndLoadCurvesIndividualDsmTimeseries,
1566
        engine=CON,
1567
    )
1568
    create_table(
1569
        df=ind_df,
1570
        table=EgonDemandregioSitesIndElectricityDsmTimeseries,
1571
        engine=CON,
1572
    )
1573
    create_table(
1574
        df=ind_sites_df,
1575
        table=EgonSitesIndLoadCurvesIndividualDsmTimeseries,
1576
        engine=CON,
1577
    )
1578
1579
1580
def dsm_cts_ind_processing():
1581
    dsm_cts_ind()
1582
1583
    dsm_cts_ind_individual()
1584