Passed · Push — dev ( c7392f...970ff3 ) · by unknown · 01:45 (queued 12s)

src/egon/data/datasets/DSM_cts_ind.py (2 issues)

import geopandas as gpd
import numpy as np
import pandas as pd

from egon.data import db
from egon.data.datasets import Dataset
from egon.data.datasets.electricity_demand.temporal import calc_load_curve
from egon.data.datasets.industry.temporal import identify_bus
import egon.data.config


class dsm_Potential(Dataset):
    def __init__(self, dependencies):
        super().__init__(
            name="DSM_potentials",
            version="0.0.4",
            dependencies=dependencies,
            tasks=(dsm_cts_ind_processing,),
        )

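
# A minimal wiring sketch (an assumption, not taken from this file): in the
# surrounding eGon-data pipeline the dataset would be instantiated with the
# upstream datasets it depends on, e.g.
#
#     dsm = dsm_Potential(dependencies=[cts_demand_dataset, industry_dataset])
#
# where ``cts_demand_dataset`` and ``industry_dataset`` are placeholder names
# for the tasks that fill the load-curve tables read below. Instantiation
# registers ``dsm_cts_ind_processing`` as the task to be executed.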
def dsm_cts_ind_processing():
    def cts_data_import(con, cts_cool_vent_ac_share):

        """
        Import CTS data necessary to identify DSM-potential.

        Parameters
        ----------
        con :
            Connection to database
        cts_cool_vent_ac_share: float
            Share of cooling, ventilation and AC in CTS demand
        """

        # import load data

        sources = egon.data.config.datasets()["DSM_CTS_industry"]["sources"][
            "cts_loadcurves"
        ]

        ts = db.select_dataframe(
            f"""SELECT bus_id, scn_name, p_set FROM
            {sources['schema']}.{sources['table']}"""
        )

        # identify relevant columns and prepare df to be returned

        dsm = pd.DataFrame(index=ts.index)

        dsm["bus"] = ts["bus_id"].copy()
        dsm["scn_name"] = ts["scn_name"].copy()
        dsm["p_set"] = ts["p_set"].copy()

        # calculate share of timeseries for air conditioning, cooling and
        # ventilation out of CTS-data

        timeseries = dsm["p_set"].copy()
        for index, liste in timeseries.items():
            share = []
            for item in liste:
                share.append(float(item) * cts_cool_vent_ac_share)
            timeseries.loc[index] = share
        dsm["p_set"] = timeseries.copy()

        return dsm

    def ind_osm_data_import(con, ind_vent_cool_share):

        """
        Import industry data per osm-area necessary to identify DSM-potential.

        Parameters
        ----------
        con :
            Connection to database
        ind_vent_cool_share: float
            Share of considered application in industry demand
        """

        # import load data

        sources = egon.data.config.datasets()["DSM_CTS_industry"]["sources"][
            "ind_osm_loadcurves"
        ]

        dsm = db.select_dataframe(
            f"""SELECT bus, scn_name, p_set FROM
            {sources['schema']}.{sources['table']}"""
        )

        # calculate share of timeseries for cooling and ventilation out of
        # industry-data

        timeseries = dsm["p_set"].copy()
        for index, liste in timeseries.items():
            share = []
            for item in liste:
                share.append(float(item) * ind_vent_cool_share)
            timeseries.loc[index] = share
        dsm["p_set"] = timeseries.copy()

        return dsm

    def ind_sites_vent_data_import(con, ind_vent_share, wz):

        """
        Import industry sites necessary to identify DSM-potential.

        Parameters
        ----------
        con :
            Connection to database
        ind_vent_share: float
            Share of considered application in industry demand
        wz: int
            Wirtschaftszweig to be considered within industry sites
        """

        # import load data

        sources = egon.data.config.datasets()["DSM_CTS_industry"]["sources"][
            "ind_sites_loadcurves"
        ]

        dsm = db.select_dataframe(
            f"""SELECT bus, scn_name, p_set, wz FROM
            {sources['schema']}.{sources['table']}"""
        )

        # select load for considered applications

        dsm = dsm[dsm["wz"] == wz]

        # calculate share of timeseries for ventilation

        timeseries = dsm["p_set"].copy()
        for index, liste in timeseries.items():
            share = []
            for item in liste:
                share.append(float(item) * ind_vent_share)
            timeseries.loc[index] = share
        dsm["p_set"] = timeseries.copy()

        return dsm

    def ind_sites_data_import(con):

        """
        Import industry sites data necessary to identify DSM-potential.

        Parameters
        ----------
        con :
            Connection to database
        """

        def calc_ind_site_timeseries(scenario):

            # calculate timeseries per site
            # -> using code from egon.data.datasets.industry.temporal:
            #    calc_load_curves_ind_sites

            # select demands per industrial site including the subsector
            # information
            source1 = egon.data.config.datasets()["DSM_CTS_industry"][
                "sources"
            ]["demandregio_ind_sites"]

            demands_ind_sites = db.select_dataframe(
                f"""SELECT industrial_sites_id, wz, demand
                    FROM {source1['schema']}.{source1['table']}
                    WHERE scenario = '{scenario}'
                    AND demand > 0
                    """
            ).set_index(["industrial_sites_id"])

            # select industrial sites as demand_areas from database
            source2 = egon.data.config.datasets()["DSM_CTS_industry"][
                "sources"
            ]["ind_sites"]

            demand_area = db.select_geodataframe(
                f"""SELECT id, geom, subsector FROM
                    {source2['schema']}.{source2['table']}""",
                index_col="id",
                geom_col="geom",
                epsg=3035,
            )

            # replace entries to bring them in line with demandregio's
            # subsector definitions
            demands_ind_sites.replace(1718, 17, inplace=True)
            share_wz_sites = demands_ind_sites.copy()

            # create additional df on wz_share per industrial site, which is
            # always set to one as the industrial demand per site is
            # subsector-specific
            share_wz_sites.demand = 1
            share_wz_sites.reset_index(inplace=True)

            share_transpose = pd.DataFrame(
                index=share_wz_sites.industrial_sites_id.unique(),
                columns=share_wz_sites.wz.unique(),
            )
            share_transpose.index.rename("industrial_sites_id", inplace=True)
            for wz in share_transpose.columns:
                share_transpose[wz] = (
                    share_wz_sites[share_wz_sites.wz == wz]
                    .set_index("industrial_sites_id")
                    .demand
                )
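
            # Illustrative shape (hypothetical values, not taken from the
            # database): for two sites 10 and 11 in subsectors 17 and 20,
            # ``share_transpose`` would look like
            #
            #                          17    20
            #     industrial_sites_id
            #     10                  1.0   NaN
            #     11                  NaN   1.0
            #
            # i.e. one column per subsector, with a share of 1 where the site
            # belongs to that subsector.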

            # calculate load curves
            load_curves = calc_load_curve(
                share_transpose, demands_ind_sites["demand"]
            )

            # identify bus per industrial site
            curves_bus = identify_bus(load_curves, demand_area)
            curves_bus.index = curves_bus["id"].astype(int)

            # initialize dataframe to be returned

            ts = pd.DataFrame(
                data=curves_bus["bus_id"], index=curves_bus["id"].astype(int)
            )
            ts["scenario_name"] = scenario
            curves_bus.drop({"id", "bus_id", "geom"}, axis=1, inplace=True)
            ts["p_set"] = curves_bus.values.tolist()

            # add subsector to relate to Schmidt's tables afterwards
            ts["application"] = demand_area["subsector"]

            return ts

        def relate_to_Schmidt_sites(dsm):

            # import industrial sites by Schmidt

            source = egon.data.config.datasets()["DSM_CTS_industry"][
                "sources"
            ]["ind_sites_schmidt"]

            schmidt = db.select_dataframe(
                f"""SELECT application, geom FROM
                    {source['schema']}.{source['table']}"""
            )

            # relate calculated timeseries (dsm) to Schmidt's industrial sites

            applications = np.unique(schmidt["application"])
            dsm = pd.DataFrame(dsm[dsm["application"].isin(applications)])

            # rename columns of the dataframe to be returned

            dsm.rename(
                columns={"scenario_name": "scn_name", "bus_id": "bus"},
                inplace=True,
            )

            return dsm

        # calculate timeseries per site

        # scenario eGon2035
        dsm_2035 = calc_ind_site_timeseries("eGon2035")
        dsm_2035.reset_index(inplace=True)
        # scenario eGon100RE
        dsm_100 = calc_ind_site_timeseries("eGon100RE")
        dsm_100.reset_index(inplace=True)
        # bring dfs for both scenarios together
        dsm_100.index = range(len(dsm_2035), len(dsm_2035) + len(dsm_100))
        dsm = pd.concat([dsm_2035, dsm_100])

        # relate calculated timeseries to Schmidt's industrial sites

        dsm = relate_to_Schmidt_sites(dsm)

        return dsm

    def calculate_potentials(s_flex, s_util, s_inc, s_dec, delta_t, dsm):

        """
        Calculate DSM-potential per bus using the methods of Heitkoetter et al.:
        https://doi.org/10.1016/j.adapen.2020.100001

        Parameters
        ----------
        s_flex: float
            Feasibility factor to account for socio-technical restrictions
        s_util: float
            Average annual utilisation rate
        s_inc: float
            Shiftable share of installed capacity up to which load can be
            increased considering technical limitations
        s_dec: float
            Shiftable share of installed capacity up to which load can be
            decreased considering technical limitations
        delta_t: int
            Maximum shift duration in hours
        dsm: DataFrame
            List of existing buses with DSM-potential including timeseries of
            loads
        """

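        # Worked example (hypothetical numbers, using the CTS parameter set
        # applied further below: s_flex=0.5, s_util=0.67, s_inc=1, s_dec=0,
        # delta_t=1): for a bus with a constant load of 10 MW,
        #
        #   scheduled load L(t)  = 10 * 0.5                     = 5 MW
        #   annual energy        = 10 * 8760                    = 87600 MWh
        #   Lambda               = 87600 * 0.5 / (8760 * 0.67)  ≈ 7.46 MW
        #   P_max(t)             = max(7.46 * 1 - 5, 0)         ≈ 2.46 MW
        #   P_min(t)             = min(7.46 * 0 - 5, 0)         = -5 MW
        #   E_max(t) / E_min(t)  = +5 MWh / -5 MWh (shift window of one hour)
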
        # copy relevant timeseries
        timeseries = dsm["p_set"].copy()

        # calculate scheduled load L(t)

        scheduled_load = timeseries.copy()

        for index, liste in scheduled_load.items():
            share = []
            for item in liste:
                share.append(item * s_flex)
            scheduled_load.loc[index] = share

        # calculate maximum capacity Lambda

        # calculate annual energy requirement
        energy_annual = pd.Series(index=timeseries.index, dtype=float)
        for index, liste in timeseries.items():
            energy_annual.loc[index] = sum(liste)

        # calculate Lambda
        lam = (energy_annual * s_flex) / (8760 * s_util)

        # calculation of P_max and P_min

        # P_max
        p_max = scheduled_load.copy()
        for index, liste in scheduled_load.items():
            lamb = lam.loc[index]
            p = []
            for item in liste:
                value = lamb * s_inc - item
                if value < 0:
                    value = 0
                p.append(value)
            p_max.loc[index] = p

        # P_min
        p_min = scheduled_load.copy()
        for index, liste in scheduled_load.items():
            lamb = lam.loc[index]
            p = []
            for item in liste:
                value = -(item - lamb * s_dec)
                if value > 0:
                    value = 0
                p.append(value)
            p_min.loc[index] = p

        # calculation of E_max and E_min

        e_max = scheduled_load.copy()
        e_min = scheduled_load.copy()

        for index, liste in scheduled_load.items():
            emin = []
            emax = []
            for i in range(0, len(liste)):
                if i + delta_t > len(liste):
                    emax.append(
                        sum(liste[i : len(liste)])
                        + sum(liste[0 : delta_t - (len(liste) - i)])
                    )
                else:
                    emax.append(sum(liste[i : i + delta_t]))
                if i - delta_t < 0:
                    emin.append(
                        -1
                        * (
                            sum(liste[0:i])
                            + sum(liste[len(liste) - delta_t + i : len(liste)])
                        )
                    )
                else:
                    emin.append(-1 * sum(liste[i - delta_t : i]))
            e_max.loc[index] = emax
            e_min.loc[index] = emin

        return p_max, p_min, e_max, e_min

    def create_dsm_components(
        con, p_max, p_min, e_max, e_min, dsm, export_aggregated=True
    ):

        """
        Create components representing DSM.

        Parameters
        ----------
        con :
            Connection to database
        p_max: DataFrame
            Timeseries identifying maximum load increase
        p_min: DataFrame
            Timeseries identifying maximum load decrease
        e_max: DataFrame
            Timeseries identifying maximum energy amount to be preponed
        e_min: DataFrame
            Timeseries identifying maximum energy amount to be postponed
        dsm: DataFrame
            List of existing buses with DSM-potential including timeseries of
            loads
        export_aggregated: bool
            If True (default), nominal capacities and per-unit timeseries are
            determined later when the components are aggregated; if False,
            p_nom and e_nom are calculated per component here
        """

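        # Normalisation sketch (hypothetical numbers): for a component with
        # p_max = [2, 4] and p_min = [-3, -1], the nominal power is
        # p_nom = max(4, |-3|) = 4, and the per-unit series become
        # p_max = [0.5, 1.0] and p_min = [-0.75, -0.25]. The same scheme is
        # applied to e_max / e_min with e_nom.
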
        # if components should be exported separately
        # and not as aggregated DSM-components:

        if not export_aggregated:

            # calculate P_nom and P per unit
            p_nom = pd.Series(index=p_max.index, dtype=float)
            for index, row in p_max.items():
                nom = max(max(row), abs(min(p_min.loc[index])))
                p_nom.loc[index] = nom
                new = [element / nom for element in row]
                p_max.loc[index] = new
                new = [element / nom for element in p_min.loc[index]]
                p_min.loc[index] = new

            # calculate E_nom and E per unit
            e_nom = pd.Series(index=p_min.index, dtype=float)
            for index, row in e_max.items():
                nom = max(max(row), abs(min(e_min.loc[index])))
                e_nom.loc[index] = nom
                new = [element / nom for element in row]
                e_max.loc[index] = new
                new = [element / nom for element in e_min.loc[index]]
                e_min.loc[index] = new

        # add DSM-buses to "original" buses
        dsm_buses = gpd.GeoDataFrame(index=dsm.index)
        dsm_buses["original_bus"] = dsm["bus"].copy()
        dsm_buses["scn_name"] = dsm["scn_name"].copy()

        # get original buses and add copy of relevant information
        target1 = egon.data.config.datasets()["DSM_CTS_industry"]["targets"][
            "bus"
        ]
        original_buses = db.select_geodataframe(
            f"""SELECT bus_id, v_nom, scn_name, x, y, geom FROM
                {target1['schema']}.{target1['table']}""",
            geom_col="geom",
            epsg=4326,
        )

        # copy relevant information from original buses to DSM-buses
        dsm_buses["index"] = dsm_buses.index
        originals = original_buses[
            original_buses["bus_id"].isin(np.unique(dsm_buses["original_bus"]))
        ]
        dsm_buses = originals.merge(
            dsm_buses,
            left_on=["bus_id", "scn_name"],
            right_on=["original_bus", "scn_name"],
        )
        dsm_buses.index = dsm_buses["index"]
        dsm_buses.drop(["bus_id", "index"], axis=1, inplace=True)

        # new bus_ids for DSM-buses
        max_id = original_buses["bus_id"].max()
        if np.isnan(max_id):
            max_id = 0
        dsm_id = max_id + 1
        bus_id = pd.Series(index=dsm_buses.index, dtype=int)

        # Get number of DSM buses for both scenarios
        rows_per_scenario = (
            dsm_buses.groupby("scn_name").count().original_bus.to_dict()
        )

        # Assignment of DSM ids
        bus_id.iloc[0 : rows_per_scenario.get("eGon2035", 0)] = range(
            dsm_id, dsm_id + rows_per_scenario.get("eGon2035", 0)
        )
        bus_id.iloc[
            rows_per_scenario.get("eGon2035", 0) : rows_per_scenario.get(
                "eGon2035", 0
            )
            + rows_per_scenario.get("eGon100RE", 0)
        ] = range(dsm_id, dsm_id + rows_per_scenario.get("eGon100RE", 0))

        dsm_buses["bus_id"] = bus_id

        # add links from "original" buses to DSM-buses

        dsm_links = pd.DataFrame(index=dsm_buses.index)
        dsm_links["original_bus"] = dsm_buses["original_bus"].copy()
        dsm_links["dsm_bus"] = dsm_buses["bus_id"].copy()
        dsm_links["scn_name"] = dsm_buses["scn_name"].copy()

        # set link_id
        target2 = egon.data.config.datasets()["DSM_CTS_industry"]["targets"][
            "link"
        ]
        sql = f"""SELECT link_id FROM {target2['schema']}.{target2['table']}"""
        max_id = pd.read_sql_query(sql, con)
        max_id = max_id["link_id"].max()
        if np.isnan(max_id):
            max_id = 0
        dsm_id = max_id + 1
        link_id = pd.Series(index=dsm_buses.index, dtype=int)

        # Assignment of link ids
        link_id.iloc[0 : rows_per_scenario.get("eGon2035", 0)] = range(
            dsm_id, dsm_id + rows_per_scenario.get("eGon2035", 0)
        )
        link_id.iloc[
            rows_per_scenario.get("eGon2035", 0) : rows_per_scenario.get(
                "eGon2035", 0
            )
            + rows_per_scenario.get("eGon100RE", 0)
        ] = range(dsm_id, dsm_id + rows_per_scenario.get("eGon100RE", 0))

        dsm_links["link_id"] = link_id

        # add calculated timeseries to df to be returned
        if not export_aggregated:
            # Review note: p_nom is only defined inside the
            # ``if not export_aggregated`` block above, so this assignment is
            # guarded by the same condition and cannot reference an undefined
            # name.
            dsm_links["p_nom"] = p_nom
        dsm_links["p_min"] = p_min
        dsm_links["p_max"] = p_max

        # add DSM-stores

        dsm_stores = pd.DataFrame(index=dsm_buses.index)
        dsm_stores["bus"] = dsm_buses["bus_id"].copy()
        dsm_stores["scn_name"] = dsm_buses["scn_name"].copy()
        dsm_stores["original_bus"] = dsm_buses["original_bus"].copy()

        # set store_id
        target3 = egon.data.config.datasets()["DSM_CTS_industry"]["targets"][
            "store"
        ]
        sql = (
            f"""SELECT store_id FROM {target3['schema']}.{target3['table']}"""
        )
        max_id = pd.read_sql_query(sql, con)
        max_id = max_id["store_id"].max()
        if np.isnan(max_id):
            max_id = 0
        dsm_id = max_id + 1
        store_id = pd.Series(index=dsm_buses.index, dtype=int)

        # Assignment of store ids
        store_id.iloc[0 : rows_per_scenario.get("eGon2035", 0)] = range(
            dsm_id, dsm_id + rows_per_scenario.get("eGon2035", 0)
        )
        store_id.iloc[
            rows_per_scenario.get("eGon2035", 0) : rows_per_scenario.get(
                "eGon2035", 0
            )
            + rows_per_scenario.get("eGon100RE", 0)
        ] = range(dsm_id, dsm_id + rows_per_scenario.get("eGon100RE", 0))

        dsm_stores["store_id"] = store_id

        # add calculated timeseries to df to be returned
        if not export_aggregated:
            # Review note: as with p_nom above, e_nom is only defined when
            # export_aggregated is False, so this assignment is guarded by the
            # same condition.
            dsm_stores["e_nom"] = e_nom
        dsm_stores["e_min"] = e_min
        dsm_stores["e_max"] = e_max

        return dsm_buses, dsm_links, dsm_stores

    def aggregate_components(con, df_dsm_buses, df_dsm_links, df_dsm_stores):

        """
        Aggregate DSM components (buses, links, stores) per original bus and
        scenario, normalise their timeseries to per-unit values and assign new
        component ids.
        """

        # aggregate buses

        grouper = [df_dsm_buses.original_bus, df_dsm_buses.scn_name]
        df_dsm_buses = df_dsm_buses.groupby(grouper).first()

        df_dsm_buses.reset_index(inplace=True)
        df_dsm_buses.sort_values("scn_name", inplace=True)

        # aggregate links

        df_dsm_links["p_max"] = df_dsm_links["p_max"].apply(
            lambda x: np.array(x)
        )
        df_dsm_links["p_min"] = df_dsm_links["p_min"].apply(
            lambda x: np.array(x)
        )

        grouper = [df_dsm_links.original_bus, df_dsm_links.scn_name]

        p_max = df_dsm_links.groupby(grouper)["p_max"].apply(np.sum)
        p_min = df_dsm_links.groupby(grouper)["p_min"].apply(np.sum)
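
        # Because the list-valued timeseries were converted to numpy arrays
        # above, the groupby sum adds them element-wise, e.g. (hypothetical
        # values) two links on the same original bus with p_max [1, 2] and
        # [3, 4] are aggregated to [4, 6]; plain lists would be concatenated
        # instead.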

        df_dsm_links = df_dsm_links.groupby(grouper).first()
        df_dsm_links.p_max = p_max
        df_dsm_links.p_min = p_min

        df_dsm_links.reset_index(inplace=True)
        df_dsm_links.sort_values("scn_name", inplace=True)

        # calculate P_nom and P per unit
        for index, row in df_dsm_links.iterrows():
            nom = max(max(row.p_max), abs(min(row.p_min)))
            df_dsm_links.at[index, "p_nom"] = nom
        df_dsm_links["p_max"] = df_dsm_links["p_max"] / df_dsm_links["p_nom"]
        df_dsm_links["p_min"] = df_dsm_links["p_min"] / df_dsm_links["p_nom"]

        df_dsm_links["p_max"] = df_dsm_links["p_max"].apply(lambda x: list(x))
        df_dsm_links["p_min"] = df_dsm_links["p_min"].apply(lambda x: list(x))

        # aggregate stores

        df_dsm_stores["e_max"] = df_dsm_stores["e_max"].apply(
            lambda x: np.array(x)
        )
        df_dsm_stores["e_min"] = df_dsm_stores["e_min"].apply(
            lambda x: np.array(x)
        )

        grouper = [df_dsm_stores.original_bus, df_dsm_stores.scn_name]

        e_max = df_dsm_stores.groupby(grouper)["e_max"].apply(np.sum)
        e_min = df_dsm_stores.groupby(grouper)["e_min"].apply(np.sum)

        df_dsm_stores = df_dsm_stores.groupby(grouper).first()
        df_dsm_stores.e_max = e_max
        df_dsm_stores.e_min = e_min

        df_dsm_stores.reset_index(inplace=True)
        df_dsm_stores.sort_values("scn_name", inplace=True)

        # calculate E_nom and E per unit
        for index, row in df_dsm_stores.iterrows():
            nom = max(max(row.e_max), abs(min(row.e_min)))
            df_dsm_stores.at[index, "e_nom"] = nom

        df_dsm_stores["e_max"] = (
            df_dsm_stores["e_max"] / df_dsm_stores["e_nom"]
        )
        df_dsm_stores["e_min"] = (
            df_dsm_stores["e_min"] / df_dsm_stores["e_nom"]
        )

        df_dsm_stores["e_max"] = df_dsm_stores["e_max"].apply(
            lambda x: list(x)
        )
        df_dsm_stores["e_min"] = df_dsm_stores["e_min"].apply(
            lambda x: list(x)
        )

        # select new bus_ids for aggregated buses and add to links and stores
        bus_id = db.next_etrago_id("Bus") + df_dsm_buses.index

        df_dsm_buses["bus_id"] = bus_id
        df_dsm_links["dsm_bus"] = bus_id
        df_dsm_stores["bus"] = bus_id

        # select new link_ids for aggregated links
        link_id = db.next_etrago_id("Link") + df_dsm_links.index

        df_dsm_links["link_id"] = link_id

        # select new store_ids for aggregated stores

        store_id = db.next_etrago_id("Store") + df_dsm_stores.index

        df_dsm_stores["store_id"] = store_id

        return df_dsm_buses, df_dsm_links, df_dsm_stores

    def data_export(con, dsm_buses, dsm_links, dsm_stores, carrier):

        """
        Export new components to database.

        Parameters
        ----------
        con :
            Connection to database
        dsm_buses: DataFrame
            Buses representing locations of DSM-potential
        dsm_links: DataFrame
            Links connecting DSM-buses and DSM-stores
        dsm_stores: DataFrame
            Stores representing DSM-potential
        carrier: str
            Remark to be filled in column 'carrier' identifying DSM-potential
        """

        targets = egon.data.config.datasets()["DSM_CTS_industry"]["targets"]

        # dsm_buses

        insert_buses = gpd.GeoDataFrame(
            index=dsm_buses.index,
            data=dsm_buses["geom"],
            geometry="geom",
            crs=dsm_buses.crs,
        )
        insert_buses["scn_name"] = dsm_buses["scn_name"]
        insert_buses["bus_id"] = dsm_buses["bus_id"]
        insert_buses["v_nom"] = dsm_buses["v_nom"]
        insert_buses["carrier"] = carrier
        insert_buses["x"] = dsm_buses["x"]
        insert_buses["y"] = dsm_buses["y"]

        # insert into database
        insert_buses.to_postgis(
            targets["bus"]["table"],
            con=db.engine(),
            schema=targets["bus"]["schema"],
            if_exists="append",
            index=False,
            dtype={"geom": "geometry"},
        )

        # dsm_links

        insert_links = pd.DataFrame(index=dsm_links.index)
        insert_links["scn_name"] = dsm_links["scn_name"]
        insert_links["link_id"] = dsm_links["link_id"]
        insert_links["bus0"] = dsm_links["original_bus"]
        insert_links["bus1"] = dsm_links["dsm_bus"]
        insert_links["carrier"] = carrier
        insert_links["p_nom"] = dsm_links["p_nom"]

        # insert into database
        insert_links.to_sql(
            targets["link"]["table"],
            con=db.engine(),
            schema=targets["link"]["schema"],
            if_exists="append",
            index=False,
        )

        insert_links_timeseries = pd.DataFrame(index=dsm_links.index)
        insert_links_timeseries["scn_name"] = dsm_links["scn_name"]
        insert_links_timeseries["link_id"] = dsm_links["link_id"]
        insert_links_timeseries["p_min_pu"] = dsm_links["p_min"]
        insert_links_timeseries["p_max_pu"] = dsm_links["p_max"]
        insert_links_timeseries["temp_id"] = 1

        # insert into database
        insert_links_timeseries.to_sql(
            targets["link_timeseries"]["table"],
            con=db.engine(),
            schema=targets["link_timeseries"]["schema"],
            if_exists="append",
            index=False,
        )

        # dsm_stores

        insert_stores = pd.DataFrame(index=dsm_stores.index)
        insert_stores["scn_name"] = dsm_stores["scn_name"]
        insert_stores["store_id"] = dsm_stores["store_id"]
        insert_stores["bus"] = dsm_stores["bus"]
        insert_stores["carrier"] = carrier
        insert_stores["e_nom"] = dsm_stores["e_nom"]

        # insert into database
        insert_stores.to_sql(
            targets["store"]["table"],
            con=db.engine(),
            schema=targets["store"]["schema"],
            if_exists="append",
            index=False,
        )

        insert_stores_timeseries = pd.DataFrame(index=dsm_stores.index)
        insert_stores_timeseries["scn_name"] = dsm_stores["scn_name"]
        insert_stores_timeseries["store_id"] = dsm_stores["store_id"]
        insert_stores_timeseries["e_min_pu"] = dsm_stores["e_min"]
        insert_stores_timeseries["e_max_pu"] = dsm_stores["e_max"]
        insert_stores_timeseries["temp_id"] = 1

        # insert into database
        insert_stores_timeseries.to_sql(
            targets["store_timeseries"]["table"],
            con=db.engine(),
            schema=targets["store_timeseries"]["schema"],
            if_exists="append",
            index=False,
        )

    def delete_dsm_entries(carrier):

        """
        Delete DSM-components from the database if they already exist before
        creating new ones.

        Parameters
        ----------
        carrier: str
            Remark in column 'carrier' identifying DSM-potential
        """

        targets = egon.data.config.datasets()["DSM_CTS_industry"]["targets"]

        # buses

        sql = f"""DELETE FROM {targets["bus"]["schema"]}.{targets["bus"]["table"]} b
        WHERE (b.carrier LIKE '{carrier}');"""
        db.execute_sql(sql)

        # links

        sql = f"""DELETE FROM {targets["link_timeseries"]["schema"]}.{targets["link_timeseries"]["table"]} t
        WHERE t.link_id IN
            (SELECT l.link_id FROM {targets["link"]["schema"]}.{targets["link"]["table"]} l
            WHERE l.carrier LIKE '{carrier}');"""
        db.execute_sql(sql)
        sql = f"""DELETE FROM {targets["link"]["schema"]}.{targets["link"]["table"]} l
        WHERE (l.carrier LIKE '{carrier}');"""
        db.execute_sql(sql)

        # stores

        sql = f"""DELETE FROM {targets["store_timeseries"]["schema"]}.{targets["store_timeseries"]["table"]} t
        WHERE t.store_id IN
            (SELECT s.store_id FROM {targets["store"]["schema"]}.{targets["store"]["table"]} s
            WHERE s.carrier LIKE '{carrier}');"""
        db.execute_sql(sql)
        sql = f"""DELETE FROM {targets["store"]["schema"]}.{targets["store"]["table"]} s
        WHERE (s.carrier LIKE '{carrier}');"""
        db.execute_sql(sql)
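
        # For example (schema and table names here are placeholders taken from
        # a typical eGon-data target configuration, not from this file), a
        # call with carrier='dsm' would issue statements along the lines of
        #
        #     DELETE FROM grid.egon_etrago_bus b WHERE (b.carrier LIKE 'dsm');
        #
        # for the buses, and analogous statements for links, stores and their
        # timeseries tables.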

    def dsm_cts_ind(
        con=db.engine(),
        cts_cool_vent_ac_share=0.22,
        ind_cool_vent_share=0.039,
        ind_vent_share=0.017,
    ):

        """
        Execute methodology to create and implement components for DSM considering

        a) CTS per osm-area: combined potentials of cooling, ventilation and
           air conditioning
        b) Industry per osm-area: combined potentials of cooling and ventilation
        c) Industrial sites: potentials of ventilation in sites of
           "Wirtschaftszweig" (WZ) 23
        d) Industrial sites: potentials of sites specified by subsectors
           identified by Schmidt (https://zenodo.org/record/3613767#.YTsGwVtCRhG):
           Paper, Recycled Paper, Pulp, Cement

        Modelled using the methods of Heitkoetter et al.:
        https://doi.org/10.1016/j.adapen.2020.100001

        Parameters
        ----------
        con :
            Connection to database
        cts_cool_vent_ac_share: float
            Share of cooling, ventilation and AC in CTS demand
        ind_cool_vent_share: float
            Share of cooling and ventilation in industry demand
        ind_vent_share: float
            Share of ventilation in industry demand in sites of WZ 23
        """

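        # Overview of the Heitkoetter et al. parameter sets applied in the
        # calls below (s_flex, s_util, s_inc, s_dec, delta_t):
        #   CTS                      0.50, 0.67, 1.00, 0.0, 1 h
        #   Industry (osm-areas)     0.50, 0.73, 0.90, 0.5, 1 h
        #   Paper                    0.15, 0.86, 0.95, 0.0, 3 h
        #   Recycled paper           0.70, 0.85, 0.95, 0.0, 3 h
        #   Pulp                     0.70, 0.83, 0.95, 0.0, 2 h
        #   Cement                   0.61, 0.65, 0.95, 0.0, 4 h
        #   Ventilation in WZ 23     0.50, 0.80, 1.00, 0.5, 1 h
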
        # CTS per osm-area: cooling, ventilation and air conditioning

        print(" ")
        print("CTS per osm-area: cooling, ventilation and air conditioning")
        print(" ")

        dsm = cts_data_import(con, cts_cool_vent_ac_share)

        # calculate combined potentials of cooling, ventilation and air
        # conditioning in CTS using combined parameters by Heitkoetter et al.
        p_max, p_min, e_max, e_min = calculate_potentials(
            s_flex=0.5, s_util=0.67, s_inc=1, s_dec=0, delta_t=1, dsm=dsm
        )

        dsm_buses, dsm_links, dsm_stores = create_dsm_components(
            con, p_max, p_min, e_max, e_min, dsm
        )

        df_dsm_buses = dsm_buses.copy()
        df_dsm_links = dsm_links.copy()
        df_dsm_stores = dsm_stores.copy()

        # industry per osm-area: cooling and ventilation

        print(" ")
        print("industry per osm-area: cooling and ventilation")
        print(" ")

        dsm = ind_osm_data_import(con, ind_cool_vent_share)

        # calculate combined potentials of cooling and ventilation in the
        # industrial sector using combined parameters by Heitkoetter et al.
        p_max, p_min, e_max, e_min = calculate_potentials(
            s_flex=0.5, s_util=0.73, s_inc=0.9, s_dec=0.5, delta_t=1, dsm=dsm
        )

        dsm_buses, dsm_links, dsm_stores = create_dsm_components(
            con, p_max, p_min, e_max, e_min, dsm
        )

        df_dsm_buses = gpd.GeoDataFrame(
            pd.concat([df_dsm_buses, dsm_buses], ignore_index=True),
            crs="EPSG:4326",
        )
        df_dsm_links = pd.DataFrame(
            pd.concat([df_dsm_links, dsm_links], ignore_index=True)
        )
        df_dsm_stores = pd.DataFrame(
            pd.concat([df_dsm_stores, dsm_stores], ignore_index=True)
        )

        # industry sites

        # industry sites: different applications

        dsm = ind_sites_data_import(con)

        print(" ")
        print("industry sites: paper")
        print(" ")

        dsm_paper = gpd.GeoDataFrame(
            dsm[
                dsm["application"].isin(
                    [
                        "Graphic Paper",
                        "Packing Paper and Board",
                        "Hygiene Paper",
                        "Technical/Special Paper and Board",
                    ]
                )
            ]
        )

        # calculate potentials of industrial sites with paper applications
        # using parameters by Heitkoetter et al.
        p_max, p_min, e_max, e_min = calculate_potentials(
            s_flex=0.15,
            s_util=0.86,
            s_inc=0.95,
            s_dec=0,
            delta_t=3,
            dsm=dsm_paper,
        )

        dsm_buses, dsm_links, dsm_stores = create_dsm_components(
            con, p_max, p_min, e_max, e_min, dsm_paper
        )

        df_dsm_buses = gpd.GeoDataFrame(
            pd.concat([df_dsm_buses, dsm_buses], ignore_index=True),
            crs="EPSG:4326",
        )
        df_dsm_links = pd.DataFrame(
            pd.concat([df_dsm_links, dsm_links], ignore_index=True)
        )
        df_dsm_stores = pd.DataFrame(
            pd.concat([df_dsm_stores, dsm_stores], ignore_index=True)
        )

        print(" ")
        print("industry sites: recycled paper")
        print(" ")

        # calculate potentials of industrial sites with recycled-paper
        # applications using parameters by Heitkoetter et al.
        dsm_recycled_paper = gpd.GeoDataFrame(
            dsm[dsm["application"] == "Recycled Paper"]
        )

        p_max, p_min, e_max, e_min = calculate_potentials(
            s_flex=0.7,
            s_util=0.85,
            s_inc=0.95,
            s_dec=0,
            delta_t=3,
            dsm=dsm_recycled_paper,
        )

        dsm_buses, dsm_links, dsm_stores = create_dsm_components(
            con, p_max, p_min, e_max, e_min, dsm_recycled_paper
        )

        df_dsm_buses = gpd.GeoDataFrame(
            pd.concat([df_dsm_buses, dsm_buses], ignore_index=True),
            crs="EPSG:4326",
        )
        df_dsm_links = pd.DataFrame(
            pd.concat([df_dsm_links, dsm_links], ignore_index=True)
        )
        df_dsm_stores = pd.DataFrame(
            pd.concat([df_dsm_stores, dsm_stores], ignore_index=True)
        )

        print(" ")
        print("industry sites: pulp")
        print(" ")

        dsm_pulp = gpd.GeoDataFrame(
            dsm[dsm["application"] == "Mechanical Pulp"]
        )

        # calculate potentials of industrial sites with pulp applications
        # using parameters by Heitkoetter et al.
        p_max, p_min, e_max, e_min = calculate_potentials(
            s_flex=0.7,
            s_util=0.83,
            s_inc=0.95,
            s_dec=0,
            delta_t=2,
            dsm=dsm_pulp,
        )

        dsm_buses, dsm_links, dsm_stores = create_dsm_components(
            con, p_max, p_min, e_max, e_min, dsm_pulp
        )

        df_dsm_buses = gpd.GeoDataFrame(
            pd.concat([df_dsm_buses, dsm_buses], ignore_index=True),
            crs="EPSG:4326",
        )
        df_dsm_links = pd.DataFrame(
            pd.concat([df_dsm_links, dsm_links], ignore_index=True)
        )
        df_dsm_stores = pd.DataFrame(
            pd.concat([df_dsm_stores, dsm_stores], ignore_index=True)
        )

        # industry sites: cement

        print(" ")
        print("industry sites: cement")
        print(" ")

        dsm_cement = gpd.GeoDataFrame(dsm[dsm["application"] == "Cement Mill"])

        # calculate potentials of industrial sites with cement applications
        # using parameters by Heitkoetter et al.
        p_max, p_min, e_max, e_min = calculate_potentials(
            s_flex=0.61,
            s_util=0.65,
            s_inc=0.95,
            s_dec=0,
            delta_t=4,
            dsm=dsm_cement,
        )

        dsm_buses, dsm_links, dsm_stores = create_dsm_components(
            con, p_max, p_min, e_max, e_min, dsm_cement
        )

        df_dsm_buses = gpd.GeoDataFrame(
            pd.concat([df_dsm_buses, dsm_buses], ignore_index=True),
            crs="EPSG:4326",
        )
        df_dsm_links = pd.DataFrame(
            pd.concat([df_dsm_links, dsm_links], ignore_index=True)
        )
        df_dsm_stores = pd.DataFrame(
            pd.concat([df_dsm_stores, dsm_stores], ignore_index=True)
        )

        # industry sites: ventilation in WZ23

        print(" ")
        print("industry sites: ventilation in WZ23")
        print(" ")

        dsm = ind_sites_vent_data_import(con, ind_vent_share, wz=23)

        # drop entries of cement mills whose DSM-potentials have already been
        # modelled
        cement = np.unique(dsm_cement["bus"].values)
        index_names = np.array(dsm[dsm["bus"].isin(cement)].index)
        dsm.drop(index_names, inplace=True)

        # calculate potentials of ventilation in industrial sites of WZ 23
        # using parameters by Heitkoetter et al.
        p_max, p_min, e_max, e_min = calculate_potentials(
            s_flex=0.5, s_util=0.8, s_inc=1, s_dec=0.5, delta_t=1, dsm=dsm
        )

        dsm_buses, dsm_links, dsm_stores = create_dsm_components(
            con, p_max, p_min, e_max, e_min, dsm
        )

        df_dsm_buses = gpd.GeoDataFrame(
            pd.concat([df_dsm_buses, dsm_buses], ignore_index=True),
            crs="EPSG:4326",
        )
        df_dsm_links = pd.DataFrame(
            pd.concat([df_dsm_links, dsm_links], ignore_index=True)
        )
        df_dsm_stores = pd.DataFrame(
            pd.concat([df_dsm_stores, dsm_stores], ignore_index=True)
        )

        # aggregate DSM components per substation

        dsm_buses, dsm_links, dsm_stores = aggregate_components(
            con, df_dsm_buses, df_dsm_links, df_dsm_stores
        )

        # export aggregated DSM components to database

        delete_dsm_entries("dsm-cts")
        delete_dsm_entries("dsm-ind-osm")
        delete_dsm_entries("dsm-ind-sites")
        delete_dsm_entries("dsm")

        data_export(con, dsm_buses, dsm_links, dsm_stores, carrier="dsm")

    dsm_cts_ind()