Total Complexity | 49
Total Lines | 1462
Duplicated Lines | 4.51 %
Changes | 0
Duplicate code is one of the most pungent code smells. A commonly used rule of thumb is to restructure code once it is duplicated in three or more places. Common duplication problems each have a corresponding, well-known refactoring solution.
Complex classes like data.datasets.DSM_cts_ind often do a lot of different things. To break such a class down, we need to identify a cohesive component within the class. A common approach to finding such a component is to look for fields and methods that share the same prefixes or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a subclass, Extract Subclass is also a candidate, and is often faster. A sketch of such an extraction is given below.
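As an illustration, the per-subsector DSM parameters defined at the top of the module (the S_FLEX_*, S_UTIL_*, S_INC_*, S_DEC_* and DELTA_T_* constants) show exactly this prefix/suffix pattern. A minimal sketch of what an Extract Class could look like here, using a hypothetical DsmParameters dataclass that is not part of the analysed module:

from dataclasses import dataclass


@dataclass(frozen=True)
class DsmParameters:
    """Hypothetical container bundling the per-subsector DSM parameters."""

    s_flex: float  # feasibility factor for socio-technical restrictions
    s_util: float  # average annual utilisation rate
    s_inc: float   # shiftable share of capacity for load increase
    s_dec: float   # shiftable share of capacity for load decrease
    delta_t: int   # maximum shift duration in hours


# the flat constants could then be bundled per subsector, e.g.:
CTS_PARAMS = DsmParameters(s_flex=0.5, s_util=0.67, s_inc=1, s_dec=0, delta_t=1)
PAPER_PARAMS = DsmParameters(
    s_flex=0.15, s_util=0.86, s_inc=0.95, s_dec=0, delta_t=3
)

A signature such as calculate_potentials(params, dsm) could then replace the five separate keyword arguments that are currently passed around in dsm_cts_ind and dsm_cts_ind_individual. The analysed source of data.datasets.DSM_cts_ind follows.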
import geopandas as gpd
import numpy as np
import pandas as pd

from egon.data import config, db
from egon.data.datasets import Dataset
from egon.data.datasets.electricity_demand.temporal import calc_load_curve
from egon.data.datasets.industry.temporal import identify_bus

# CONSTANTS
# TODO: move to datasets.yml
CON = db.engine()

# CTS
CTS_COOL_VENT_AC_SHARE = 0.22

S_FLEX_CTS = 0.5
S_UTIL_CTS = 0.67
S_INC_CTS = 1
S_DEC_CTS = 0
DELTA_T_CTS = 1

# industry
IND_VENT_COOL_SHARE = 0.039
IND_VENT_SHARE = 0.017

# OSM
S_FLEX_OSM = 0.5
S_UTIL_OSM = 0.73
S_INC_OSM = 0.9
S_DEC_OSM = 0.5
DELTA_T_OSM = 1

# paper
S_FLEX_PAPER = 0.15
S_UTIL_PAPER = 0.86
S_INC_PAPER = 0.95
S_DEC_PAPER = 0
DELTA_T_PAPER = 3

# recycled paper
S_FLEX_RECYCLED_PAPER = 0.7
S_UTIL_RECYCLED_PAPER = 0.85
S_INC_RECYCLED_PAPER = 0.95
S_DEC_RECYCLED_PAPER = 0
DELTA_T_RECYCLED_PAPER = 3

# pulp
S_FLEX_PULP = 0.7
S_UTIL_PULP = 0.83
S_INC_PULP = 0.95
S_DEC_PULP = 0
DELTA_T_PULP = 2

# cement
S_FLEX_CEMENT = 0.61
S_UTIL_CEMENT = 0.65
S_INC_CEMENT = 0.95
S_DEC_CEMENT = 0
DELTA_T_CEMENT = 4

# wz 23
WZ = 23

S_FLEX_WZ = 0.5
S_UTIL_WZ = 0.8
S_INC_WZ = 1
S_DEC_WZ = 0.5
DELTA_T_WZ = 1

class dsm_Potential(Dataset):
    def __init__(self, dependencies):
        super().__init__(
            name="DSM_potentials",
            version="0.0.4.dev",
            dependencies=dependencies,
            tasks=(dsm_cts_ind_processing,),
        )

def cts_data_import(cts_cool_vent_ac_share):
    """
    Import CTS data necessary to identify DSM-potential.

    Parameters
    ----------
    cts_cool_vent_ac_share: float
        Share of cooling, ventilation and AC in CTS demand
    """

    # import load data
    sources = config.datasets()["DSM_CTS_industry"]["sources"][
        "cts_loadcurves"
    ]

    ts = db.select_dataframe(
        f"""SELECT bus_id, scn_name, p_set FROM
        {sources['schema']}.{sources['table']}"""
    )

    # identify relevant columns and prepare df to be returned
    dsm = pd.DataFrame(index=ts.index)

    dsm["bus"] = ts["bus_id"].copy()
    dsm["scn_name"] = ts["scn_name"].copy()
    dsm["p_set"] = ts["p_set"].copy()

    # calculate share of timeseries for air conditioning, cooling and
    # ventilation out of CTS-data
    timeseries = dsm["p_set"].copy()

    for index, liste in timeseries.iteritems():
        share = [float(item) * cts_cool_vent_ac_share for item in liste]
        timeseries.loc[index] = share

    dsm["p_set"] = timeseries.copy()

    return dsm

def ind_osm_data_import(ind_vent_cool_share):
    """
    Import industry data per osm-area necessary to identify DSM-potential.

    Parameters
    ----------
    ind_vent_cool_share: float
        Share of considered application in industry demand
    """

    # import load data
    sources = config.datasets()["DSM_CTS_industry"]["sources"][
        "ind_osm_loadcurves"
    ]

    dsm = db.select_dataframe(
        f"""SELECT bus, scn_name, p_set FROM
        {sources['schema']}.{sources['table']}"""
    )

    # calculate share of timeseries for cooling and ventilation out of
    # industry-data
    timeseries = dsm["p_set"].copy()

    for index, liste in timeseries.iteritems():
        share = [float(item) * ind_vent_cool_share for item in liste]
        timeseries.loc[index] = share

    dsm["p_set"] = timeseries.copy()

    return dsm

def ind_osm_data_import_individual(ind_vent_cool_share):
    """
    Import industry data per osm-area necessary to identify DSM-potential.

    Parameters
    ----------
    ind_vent_cool_share: float
        Share of considered application in industry demand
    """

    # import load data
    sources = config.datasets()["DSM_CTS_industry"]["sources"][
        "ind_osm_loadcurves_individual"
    ]

    dsm = db.select_dataframe(
        f"""SELECT osm_id, bus_id as bus, scn_name, p_set FROM
        {sources['schema']}.{sources['table']}"""
    )

    # calculate share of timeseries for cooling and ventilation out of
    # industry-data
    timeseries = dsm["p_set"].copy()

    for index, liste in timeseries.iteritems():
        share = [float(item) * ind_vent_cool_share for item in liste]
        timeseries.loc[index] = share

    dsm["p_set"] = timeseries.copy()

    return dsm

def ind_sites_vent_data_import(ind_vent_share, wz):
    """
    Import industry sites necessary to identify DSM-potential.

    Parameters
    ----------
    ind_vent_share: float
        Share of considered application in industry demand
    wz: int
        Wirtschaftszweig to be considered within industry sites
    """

    # import load data
    sources = config.datasets()["DSM_CTS_industry"]["sources"][
        "ind_sites_loadcurves"
    ]

    dsm = db.select_dataframe(
        f"""SELECT bus, scn_name, wz, p_set FROM
        {sources['schema']}.{sources['table']}"""
    )

    # select load for considered applications
    dsm = dsm[dsm["wz"] == wz]

    # calculate share of timeseries for ventilation
    timeseries = dsm["p_set"].copy()

    for index, liste in timeseries.iteritems():
        share = [float(item) * ind_vent_share for item in liste]
        timeseries.loc[index] = share

    dsm["p_set"] = timeseries.copy()

    return dsm

def ind_sites_vent_data_import_individual(ind_vent_share, wz):
    """
    Import industry sites necessary to identify DSM-potential.

    Parameters
    ----------
    ind_vent_share: float
        Share of considered application in industry demand
    wz: int
        Wirtschaftszweig to be considered within industry sites
    """

    # import load data
    sources = config.datasets()["DSM_CTS_industry"]["sources"][
        "ind_sites_loadcurves_individual"
    ]

    # TODO: at the moment `wz` is missing within the table
    # egon_sites_ind_load_curves_individual. Edit this part as soon as it is
    # available
    dsm = db.select_dataframe(
        f"""SELECT site_id, bus_id as bus, scn_name, p_set FROM
        {sources['schema']}.{sources['table']}"""
    )

    # select load for considered applications
    # dsm = dsm[dsm["wz"] == wz]

    # calculate share of timeseries for ventilation
    timeseries = dsm["p_set"].copy()

    for index, liste in timeseries.iteritems():
        share = [float(item) * ind_vent_share for item in liste]
        timeseries.loc[index] = share

    dsm["p_set"] = timeseries.copy()

    return dsm

def calc_ind_site_timeseries(scenario):
    # calculate timeseries per site
    # -> using code from egon.data.datasets.industry.temporal:
    # calc_load_curves_ind_sites

    # select demands per industrial site including the subsector information
    source1 = config.datasets()["DSM_CTS_industry"]["sources"][
        "demandregio_ind_sites"
    ]

    demands_ind_sites = db.select_dataframe(
        f"""SELECT industrial_sites_id, wz, demand
        FROM {source1['schema']}.{source1['table']}
        WHERE scenario = '{scenario}'
        AND demand > 0
        """
    ).set_index(["industrial_sites_id"])

    # select industrial sites as demand_areas from database
    source2 = config.datasets()["DSM_CTS_industry"]["sources"]["ind_sites"]

    demand_area = db.select_geodataframe(
        f"""SELECT id, geom, subsector FROM
        {source2['schema']}.{source2['table']}""",
        index_col="id",
        geom_col="geom",
        epsg=3035,
    )

    # replace entries to bring it in line with demandregio's subsector
    # definitions
    demands_ind_sites.replace(1718, 17, inplace=True)
    share_wz_sites = demands_ind_sites.copy()

    # create additional df on wz_share per industrial site, which is always
    # set to one as the industrial demand per site is subsector specific
    share_wz_sites.demand = 1
    share_wz_sites.reset_index(inplace=True)

    share_transpose = pd.DataFrame(
        index=share_wz_sites.industrial_sites_id.unique(),
        columns=share_wz_sites.wz.unique(),
    )
    share_transpose.index.rename("industrial_sites_id", inplace=True)
    for wz in share_transpose.columns:
        share_transpose[wz] = (
            share_wz_sites[share_wz_sites.wz == wz]
            .set_index("industrial_sites_id")
            .demand
        )

    # calculate load curves
    load_curves = calc_load_curve(share_transpose, demands_ind_sites["demand"])

    # identify bus per industrial site
    curves_bus = identify_bus(load_curves, demand_area)
    curves_bus.index = curves_bus["id"].astype(int)

    # initialize dataframe to be returned
    ts = pd.DataFrame(
        data=curves_bus["bus_id"], index=curves_bus["id"].astype(int)
    )
    ts["scenario_name"] = scenario
    curves_bus.drop({"id", "bus_id", "geom"}, axis=1, inplace=True)
    ts["p_set"] = curves_bus.values.tolist()

    # add subsector to relate to Schmidt's tables afterwards
    ts["application"] = demand_area["subsector"]

    return ts

def relate_to_schmidt_sites(dsm):
    # import industrial sites by Schmidt
    source = config.datasets()["DSM_CTS_industry"]["sources"][
        "ind_sites_schmidt"
    ]

    schmidt = db.select_dataframe(
        f"""SELECT application, geom FROM
        {source['schema']}.{source['table']}"""
    )

    # relate calculated timeseries (dsm) to Schmidt's industrial sites
    applications = np.unique(schmidt["application"])
    dsm = pd.DataFrame(dsm[dsm["application"].isin(applications)])

    # initialize dataframe to be returned
    dsm.rename(
        columns={"scenario_name": "scn_name", "bus_id": "bus"},
        inplace=True,
    )

    return dsm

def ind_sites_data_import():
    """
    Import industry sites data necessary to identify DSM-potential.
    """
    # calculate timeseries per site

    # scenario eGon2035
    dsm_2035 = calc_ind_site_timeseries("eGon2035")
    dsm_2035.reset_index(inplace=True)
    # scenario eGon100RE
    dsm_100 = calc_ind_site_timeseries("eGon100RE")
    dsm_100.reset_index(inplace=True)
    # bring df for both scenarios together
    dsm_100.index = range(len(dsm_2035), len(dsm_2035) + len(dsm_100))
    dsm = dsm_2035.append(dsm_100)

    # relate calculated timeseries to Schmidt's industrial sites
    dsm = relate_to_schmidt_sites(dsm)

    return dsm[["application", "id", "bus", "scn_name", "p_set"]]

def calculate_potentials(s_flex, s_util, s_inc, s_dec, delta_t, dsm):
    """
    Calculate DSM-potential per bus using the methods by Heitkoetter et al.:
    https://doi.org/10.1016/j.adapen.2020.100001

    Parameters
    ----------
    s_flex: float
        Feasibility factor to account for socio-technical restrictions
    s_util: float
        Average annual utilisation rate
    s_inc: float
        Shiftable share of installed capacity up to which load can be
        increased considering technical limitations
    s_dec: float
        Shiftable share of installed capacity up to which load can be
        decreased considering technical limitations
    delta_t: int
        Maximum shift duration in hours
    dsm: DataFrame
        List of existing buses with DSM-potential including timeseries of
        loads
    """

    # copy relevant timeseries
    timeseries = dsm["p_set"].copy()

    # calculate scheduled load L(t)
    scheduled_load = timeseries.copy()

    for index, liste in scheduled_load.iteritems():
        share = []
        for item in liste:
            share.append(item * s_flex)
        scheduled_load.loc[index] = share

    # calculate maximum capacity Lambda

    # calculate annual energy requirement
    energy_annual = pd.Series(index=timeseries.index, dtype=float)
    for index, liste in timeseries.iteritems():
        energy_annual.loc[index] = sum(liste)

    # calculate Lambda
    lam = (energy_annual * s_flex) / (8760 * s_util)

    # calculation of P_max and P_min

    # P_max
    p_max = scheduled_load.copy()
    for index, liste in scheduled_load.iteritems():
        lamb = lam.loc[index]
        p = []
        for item in liste:
            value = lamb * s_inc - item
            if value < 0:
                value = 0
            p.append(value)
        p_max.loc[index] = p

    # P_min
    p_min = scheduled_load.copy()
    for index, liste in scheduled_load.iteritems():
        lamb = lam.loc[index]
        p = []
        for item in liste:
            value = -(item - lamb * s_dec)
            if value > 0:
                value = 0
            p.append(value)
        p_min.loc[index] = p

    # calculation of E_max and E_min

    e_max = scheduled_load.copy()
    e_min = scheduled_load.copy()

    for index, liste in scheduled_load.iteritems():
        emin = []
        emax = []
        for i in range(len(liste)):
            if i + delta_t > len(liste):
                emax.append(
                    sum(liste[i:]) + sum(liste[: delta_t - (len(liste) - i)])
                )
            else:
                emax.append(sum(liste[i : i + delta_t]))
            if i - delta_t < 0:
                emin.append(
                    -1
                    * (
                        sum(liste[:i])
                        + sum(liste[len(liste) - delta_t + i :])
                    )
                )
            else:
                emin.append(-1 * sum(liste[i - delta_t : i]))
        e_max.loc[index] = emax
        e_min.loc[index] = emin

    return p_max, p_min, e_max, e_min

def create_dsm_components(con, p_max, p_min, e_max, e_min, dsm):
    """
    Create components representing DSM.

    Parameters
    ----------
    con :
        Connection to database
    p_max: DataFrame
        Timeseries identifying maximum load increase
    p_min: DataFrame
        Timeseries identifying maximum load decrease
    e_max: DataFrame
        Timeseries identifying maximum energy amount to be preponed
    e_min: DataFrame
        Timeseries identifying maximum energy amount to be postponed
    dsm: DataFrame
        List of existing buses with DSM-potential including timeseries of
        loads
    """

    # calculate P_nom and P per unit
    p_nom = pd.Series(index=p_max.index, dtype=float)
    for index, row in p_max.iteritems():
        nom = max(max(row), abs(min(p_min.loc[index])))
        p_nom.loc[index] = nom
        new = [element / nom for element in row]
        p_max.loc[index] = new
        new = [element / nom for element in p_min.loc[index]]
        p_min.loc[index] = new

    # calculate E_nom and E per unit
    e_nom = pd.Series(index=p_min.index, dtype=float)
    for index, row in e_max.iteritems():
        nom = max(max(row), abs(min(e_min.loc[index])))
        e_nom.loc[index] = nom
        new = [element / nom for element in row]
        e_max.loc[index] = new
        new = [element / nom for element in e_min.loc[index]]
        e_min.loc[index] = new

    # add DSM-buses to "original" buses
    dsm_buses = gpd.GeoDataFrame(index=dsm.index)
    dsm_buses["original_bus"] = dsm["bus"].copy()
    dsm_buses["scn_name"] = dsm["scn_name"].copy()

    # get original buses and add copy of relevant information
    target1 = config.datasets()["DSM_CTS_industry"]["targets"]["bus"]
    original_buses = db.select_geodataframe(
        f"""SELECT bus_id, v_nom, scn_name, x, y, geom FROM
        {target1['schema']}.{target1['table']}""",
        geom_col="geom",
        epsg=4326,
    )

    # copy relevant information from original buses to DSM-buses
    dsm_buses["index"] = dsm_buses.index
    originals = original_buses[
        original_buses["bus_id"].isin(np.unique(dsm_buses["original_bus"]))
    ]
    dsm_buses = originals.merge(
        dsm_buses,
        left_on=["bus_id", "scn_name"],
        right_on=["original_bus", "scn_name"],
    )
    dsm_buses.index = dsm_buses["index"]
    dsm_buses.drop(["bus_id", "index"], axis=1, inplace=True)

    # new bus_ids for DSM-buses
    max_id = original_buses["bus_id"].max()
    if np.isnan(max_id):
        max_id = 0
    dsm_id = max_id + 1
    bus_id = pd.Series(index=dsm_buses.index, dtype=int)

    # Get number of DSM buses for both scenarios
    rows_per_scenario = (
        dsm_buses.groupby("scn_name").count().original_bus.to_dict()
    )

    # Assignment of DSM ids
    bus_id.iloc[: rows_per_scenario.get("eGon2035", 0)] = range(
        dsm_id, dsm_id + rows_per_scenario.get("eGon2035", 0)
    )

    bus_id.iloc[
        rows_per_scenario.get("eGon2035", 0) : rows_per_scenario.get(
            "eGon2035", 0
        )
        + rows_per_scenario.get("eGon100RE", 0)
    ] = range(dsm_id, dsm_id + rows_per_scenario.get("eGon100RE", 0))

    dsm_buses["bus_id"] = bus_id

    # add links from "original" buses to DSM-buses
    dsm_links = pd.DataFrame(index=dsm_buses.index)
    dsm_links["original_bus"] = dsm_buses["original_bus"].copy()
    dsm_links["dsm_bus"] = dsm_buses["bus_id"].copy()
    dsm_links["scn_name"] = dsm_buses["scn_name"].copy()

    # set link_id
    target2 = config.datasets()["DSM_CTS_industry"]["targets"]["link"]
    sql = f"""SELECT link_id FROM {target2['schema']}.{target2['table']}"""
    max_id = pd.read_sql_query(sql, con)
    max_id = max_id["link_id"].max()
    if np.isnan(max_id):
        max_id = 0
    dsm_id = max_id + 1
    link_id = pd.Series(index=dsm_buses.index, dtype=int)

    # Assignment of link ids
    link_id.iloc[: rows_per_scenario.get("eGon2035", 0)] = range(
        dsm_id, dsm_id + rows_per_scenario.get("eGon2035", 0)
    )

    link_id.iloc[
        rows_per_scenario.get("eGon2035", 0) : rows_per_scenario.get(
            "eGon2035", 0
        )
        + rows_per_scenario.get("eGon100RE", 0)
    ] = range(dsm_id, dsm_id + rows_per_scenario.get("eGon100RE", 0))

    dsm_links["link_id"] = link_id

    # add calculated timeseries to df to be returned
    dsm_links["p_nom"] = p_nom
    dsm_links["p_min"] = p_min
    dsm_links["p_max"] = p_max

    # add DSM-stores
    dsm_stores = pd.DataFrame(index=dsm_buses.index)
    dsm_stores["bus"] = dsm_buses["bus_id"].copy()
    dsm_stores["scn_name"] = dsm_buses["scn_name"].copy()
    dsm_stores["original_bus"] = dsm_buses["original_bus"].copy()

    # set store_id
    target3 = config.datasets()["DSM_CTS_industry"]["targets"]["store"]
    sql = f"""SELECT store_id FROM {target3['schema']}.{target3['table']}"""
    max_id = pd.read_sql_query(sql, con)
    max_id = max_id["store_id"].max()
    if np.isnan(max_id):
        max_id = 0
    dsm_id = max_id + 1
    store_id = pd.Series(index=dsm_buses.index, dtype=int)

    # Assignment of store ids
    store_id.iloc[: rows_per_scenario.get("eGon2035", 0)] = range(
        dsm_id, dsm_id + rows_per_scenario.get("eGon2035", 0)
    )

    store_id.iloc[
        rows_per_scenario.get("eGon2035", 0) : rows_per_scenario.get(
            "eGon2035", 0
        )
        + rows_per_scenario.get("eGon100RE", 0)
    ] = range(dsm_id, dsm_id + rows_per_scenario.get("eGon100RE", 0))

    dsm_stores["store_id"] = store_id

    # add calculated timeseries to df to be returned
    dsm_stores["e_nom"] = e_nom
    dsm_stores["e_min"] = e_min
    dsm_stores["e_max"] = e_max

    return dsm_buses, dsm_links, dsm_stores

def aggregate_components(df_dsm_buses, df_dsm_links, df_dsm_stores):
    # aggregate buses
    grouper = [df_dsm_buses.original_bus, df_dsm_buses.scn_name]

    df_dsm_buses = df_dsm_buses.groupby(grouper).first()

    df_dsm_buses.reset_index(inplace=True)
    df_dsm_buses.sort_values("scn_name", inplace=True)

    # aggregate links
    df_dsm_links["p_max"] = df_dsm_links["p_max"].apply(lambda x: np.array(x))
    df_dsm_links["p_min"] = df_dsm_links["p_min"].apply(lambda x: np.array(x))

    grouper = [df_dsm_links.original_bus, df_dsm_links.scn_name]
    p_nom = df_dsm_links.groupby(grouper)["p_nom"].sum()
    p_max = df_dsm_links.groupby(grouper)["p_max"].apply(np.sum)
    p_min = df_dsm_links.groupby(grouper)["p_min"].apply(np.sum)

    df_dsm_links = df_dsm_links.groupby(grouper).first()
    df_dsm_links.p_nom = p_nom
    df_dsm_links.p_max = p_max
    df_dsm_links.p_min = p_min

    df_dsm_links["p_max"] = df_dsm_links["p_max"].apply(lambda x: list(x))
    df_dsm_links["p_min"] = df_dsm_links["p_min"].apply(lambda x: list(x))

    df_dsm_links.reset_index(inplace=True)
    df_dsm_links.sort_values("scn_name", inplace=True)

    # aggregate stores
    df_dsm_stores["e_max"] = df_dsm_stores["e_max"].apply(
        lambda x: np.array(x)
    )
    df_dsm_stores["e_min"] = df_dsm_stores["e_min"].apply(
        lambda x: np.array(x)
    )

    grouper = [df_dsm_stores.original_bus, df_dsm_stores.scn_name]
    e_nom = df_dsm_stores.groupby(grouper)["e_nom"].sum()
    e_max = df_dsm_stores.groupby(grouper)["e_max"].apply(np.sum)
    e_min = df_dsm_stores.groupby(grouper)["e_min"].apply(np.sum)

    df_dsm_stores = df_dsm_stores.groupby(grouper).first()
    df_dsm_stores.e_nom = e_nom
    df_dsm_stores.e_max = e_max
    df_dsm_stores.e_min = e_min

    df_dsm_stores["e_max"] = df_dsm_stores["e_max"].apply(lambda x: list(x))
    df_dsm_stores["e_min"] = df_dsm_stores["e_min"].apply(lambda x: list(x))

    df_dsm_stores.reset_index(inplace=True)
    df_dsm_stores.sort_values("scn_name", inplace=True)

    # select new bus_ids for aggregated buses and add to links and stores
    bus_id = db.next_etrago_id("Bus") + df_dsm_buses.index

    df_dsm_buses["bus_id"] = bus_id
    df_dsm_links["dsm_bus"] = bus_id
    df_dsm_stores["bus"] = bus_id

    # select new link_ids for aggregated links
    link_id = db.next_etrago_id("Link") + df_dsm_links.index

    df_dsm_links["link_id"] = link_id

    # select new store_ids for aggregated stores
    store_id = db.next_etrago_id("Store") + df_dsm_stores.index

    df_dsm_stores["store_id"] = store_id

    return df_dsm_buses, df_dsm_links, df_dsm_stores

def data_export(dsm_buses, dsm_links, dsm_stores, carrier):
    """
    Export new components to database.

    Parameters
    ----------
    dsm_buses: DataFrame
        Buses representing locations of DSM-potential
    dsm_links: DataFrame
        Links connecting DSM-buses and DSM-stores
    dsm_stores: DataFrame
        Stores representing DSM-potential
    carrier: String
        Remark to be filled in column 'carrier' identifying DSM-potential
    """

    targets = config.datasets()["DSM_CTS_industry"]["targets"]

    # dsm_buses
    insert_buses = gpd.GeoDataFrame(
        index=dsm_buses.index,
        data=dsm_buses["geom"],
        geometry="geom",
        crs=dsm_buses.crs,
    )
    insert_buses["scn_name"] = dsm_buses["scn_name"]
    insert_buses["bus_id"] = dsm_buses["bus_id"]
    insert_buses["v_nom"] = dsm_buses["v_nom"]
    insert_buses["carrier"] = carrier
    insert_buses["x"] = dsm_buses["x"]
    insert_buses["y"] = dsm_buses["y"]

    # insert into database
    insert_buses.to_postgis(
        targets["bus"]["table"],
        con=db.engine(),
        schema=targets["bus"]["schema"],
        if_exists="append",
        index=False,
        dtype={"geom": "geometry"},
    )

    # dsm_links
    insert_links = pd.DataFrame(index=dsm_links.index)
    insert_links["scn_name"] = dsm_links["scn_name"]
    insert_links["link_id"] = dsm_links["link_id"]
    insert_links["bus0"] = dsm_links["original_bus"]
    insert_links["bus1"] = dsm_links["dsm_bus"]
    insert_links["carrier"] = carrier
    insert_links["p_nom"] = dsm_links["p_nom"]

    # insert into database
    insert_links.to_sql(
        targets["link"]["table"],
        con=db.engine(),
        schema=targets["link"]["schema"],
        if_exists="append",
        index=False,
    )

    insert_links_timeseries = pd.DataFrame(index=dsm_links.index)
    insert_links_timeseries["scn_name"] = dsm_links["scn_name"]
    insert_links_timeseries["link_id"] = dsm_links["link_id"]
    insert_links_timeseries["p_min_pu"] = dsm_links["p_min"]
    insert_links_timeseries["p_max_pu"] = dsm_links["p_max"]
    insert_links_timeseries["temp_id"] = 1

    # insert into database
    insert_links_timeseries.to_sql(
        targets["link_timeseries"]["table"],
        con=db.engine(),
        schema=targets["link_timeseries"]["schema"],
        if_exists="append",
        index=False,
    )

    # dsm_stores
    insert_stores = pd.DataFrame(index=dsm_stores.index)
    insert_stores["scn_name"] = dsm_stores["scn_name"]
    insert_stores["store_id"] = dsm_stores["store_id"]
    insert_stores["bus"] = dsm_stores["bus"]
    insert_stores["carrier"] = carrier
    insert_stores["e_nom"] = dsm_stores["e_nom"]

    # insert into database
    insert_stores.to_sql(
        targets["store"]["table"],
        con=db.engine(),
        schema=targets["store"]["schema"],
        if_exists="append",
        index=False,
    )

    insert_stores_timeseries = pd.DataFrame(index=dsm_stores.index)
    insert_stores_timeseries["scn_name"] = dsm_stores["scn_name"]
    insert_stores_timeseries["store_id"] = dsm_stores["store_id"]
    insert_stores_timeseries["e_min_pu"] = dsm_stores["e_min"]
    insert_stores_timeseries["e_max_pu"] = dsm_stores["e_max"]
    insert_stores_timeseries["temp_id"] = 1

    # insert into database
    insert_stores_timeseries.to_sql(
        targets["store_timeseries"]["table"],
        con=db.engine(),
        schema=targets["store_timeseries"]["schema"],
        if_exists="append",
        index=False,
    )

def delete_dsm_entries(carrier):
    """
    Delete DSM-components from database if they already exist before creating
    new ones.

    Parameters
    ----------
    carrier: String
        Remark in column 'carrier' identifying DSM-potential
    """

    targets = config.datasets()["DSM_CTS_industry"]["targets"]

    # buses
    sql = f"""DELETE FROM {targets["bus"]["schema"]}.{targets["bus"]["table"]} b
    WHERE (b.carrier LIKE '{carrier}');"""
    db.execute_sql(sql)

    # links
    sql = f"""
    DELETE FROM {targets["link_timeseries"]["schema"]}.
    {targets["link_timeseries"]["table"]} t
    WHERE t.link_id IN
    (
        SELECT l.link_id FROM {targets["link"]["schema"]}.
        {targets["link"]["table"]} l
        WHERE l.carrier LIKE '{carrier}'
    );
    """

    db.execute_sql(sql)

    sql = f"""
    DELETE FROM {targets["link"]["schema"]}.
    {targets["link"]["table"]} l
    WHERE (l.carrier LIKE '{carrier}');
    """

    db.execute_sql(sql)

    # stores
    sql = f"""
    DELETE FROM {targets["store_timeseries"]["schema"]}.
    {targets["store_timeseries"]["table"]} t
    WHERE t.store_id IN
    (
        SELECT s.store_id FROM {targets["store"]["schema"]}.
        {targets["store"]["table"]} s
        WHERE s.carrier LIKE '{carrier}'
    );
    """

    db.execute_sql(sql)

    sql = f"""
    DELETE FROM {targets["store"]["schema"]}.{targets["store"]["table"]} s
    WHERE (s.carrier LIKE '{carrier}');
    """

    db.execute_sql(sql)

def dsm_cts_ind(
    con=db.engine(),
    cts_cool_vent_ac_share=0.22,
    ind_vent_cool_share=0.039,
    ind_vent_share=0.017,
):
    """
    Execute methodology to create and implement components for DSM considering
    a) CTS per osm-area: combined potentials of cooling, ventilation and air
       conditioning
    b) Industry per osm-area: combined potentials of cooling and ventilation
    c) Industrial Sites: potentials of ventilation in sites of
       "Wirtschaftszweig" (WZ) 23
    d) Industrial Sites: potentials of sites specified by subsectors
       identified by Schmidt (https://zenodo.org/record/3613767#.YTsGwVtCRhG):
       Paper, Recycled Paper, Pulp, Cement

    Modelled using the methods by Heitkoetter et al.:
    https://doi.org/10.1016/j.adapen.2020.100001

    Parameters
    ----------
    con :
        Connection to database
    cts_cool_vent_ac_share: float
        Share of cooling, ventilation and AC in CTS demand
    ind_vent_cool_share: float
        Share of cooling and ventilation in industry demand
    ind_vent_share: float
        Share of ventilation in industry demand in sites of WZ 23
    """

    # CTS per osm-area: cooling, ventilation and air conditioning
    print(" ")
    print("CTS per osm-area: cooling, ventilation and air conditioning")
    print(" ")

    dsm = cts_data_import(cts_cool_vent_ac_share)

    # calculate combined potentials of cooling, ventilation and air
    # conditioning in CTS using combined parameters by Heitkoetter et al.
    p_max, p_min, e_max, e_min = calculate_potentials(
        s_flex=S_FLEX_CTS,
        s_util=S_UTIL_CTS,
        s_inc=S_INC_CTS,
        s_dec=S_DEC_CTS,
        delta_t=DELTA_T_CTS,
        dsm=dsm,
    )

    dsm_buses, dsm_links, dsm_stores = create_dsm_components(
        con, p_max, p_min, e_max, e_min, dsm
    )

    df_dsm_buses = dsm_buses.copy()
    df_dsm_links = dsm_links.copy()
    df_dsm_stores = dsm_stores.copy()

    # industry per osm-area: cooling and ventilation
    print(" ")
    print("industry per osm-area: cooling and ventilation")
    print(" ")

    dsm = ind_osm_data_import(ind_vent_cool_share)

    # calculate combined potentials of cooling and ventilation in industrial
    # sector using combined parameters by Heitkoetter et al.
    p_max, p_min, e_max, e_min = calculate_potentials(
        s_flex=S_FLEX_OSM,
        s_util=S_UTIL_OSM,
        s_inc=S_INC_OSM,
        s_dec=S_DEC_OSM,
        delta_t=DELTA_T_OSM,
        dsm=dsm,
    )

    dsm_buses, dsm_links, dsm_stores = create_dsm_components(
        con, p_max, p_min, e_max, e_min, dsm
    )

    df_dsm_buses = gpd.GeoDataFrame(
        pd.concat([df_dsm_buses, dsm_buses], ignore_index=True),
        crs="EPSG:4326",
    )
    df_dsm_links = pd.DataFrame(
        pd.concat([df_dsm_links, dsm_links], ignore_index=True)
    )
    df_dsm_stores = pd.DataFrame(
        pd.concat([df_dsm_stores, dsm_stores], ignore_index=True)
    )

    # industry sites

    # industry sites: different applications
    dsm = ind_sites_data_import()

    print(" ")
    print("industry sites: paper")
    print(" ")

    dsm_paper = gpd.GeoDataFrame(
        dsm[
            dsm["application"].isin(
                [
                    "Graphic Paper",
                    "Packing Paper and Board",
                    "Hygiene Paper",
                    "Technical/Special Paper and Board",
                ]
            )
        ]
    )

    # calculate potentials of industrial sites with paper-applications
    # using parameters by Heitkoetter et al.
    p_max, p_min, e_max, e_min = calculate_potentials(
        s_flex=S_FLEX_PAPER,
        s_util=S_UTIL_PAPER,
        s_inc=S_INC_PAPER,
        s_dec=S_DEC_PAPER,
        delta_t=DELTA_T_PAPER,
        dsm=dsm_paper,
    )

    dsm_buses, dsm_links, dsm_stores = create_dsm_components(
        con, p_max, p_min, e_max, e_min, dsm_paper
    )

    df_dsm_buses = gpd.GeoDataFrame(
        pd.concat([df_dsm_buses, dsm_buses], ignore_index=True),
        crs="EPSG:4326",
    )
    df_dsm_links = pd.DataFrame(
        pd.concat([df_dsm_links, dsm_links], ignore_index=True)
    )
    df_dsm_stores = pd.DataFrame(
        pd.concat([df_dsm_stores, dsm_stores], ignore_index=True)
    )

    print(" ")
    print("industry sites: recycled paper")
    print(" ")

    # calculate potentials of industrial sites with recycled paper-applications
    # using parameters by Heitkoetter et al.
    dsm_recycled_paper = gpd.GeoDataFrame(
        dsm[dsm["application"] == "Recycled Paper"]
    )

    p_max, p_min, e_max, e_min = calculate_potentials(
        s_flex=S_FLEX_RECYCLED_PAPER,
        s_util=S_UTIL_RECYCLED_PAPER,
        s_inc=S_INC_RECYCLED_PAPER,
        s_dec=S_DEC_RECYCLED_PAPER,
        delta_t=DELTA_T_RECYCLED_PAPER,
        dsm=dsm_recycled_paper,
    )

    dsm_buses, dsm_links, dsm_stores = create_dsm_components(
        con, p_max, p_min, e_max, e_min, dsm_recycled_paper
    )

    df_dsm_buses = gpd.GeoDataFrame(
        pd.concat([df_dsm_buses, dsm_buses], ignore_index=True),
        crs="EPSG:4326",
    )
    df_dsm_links = pd.DataFrame(
        pd.concat([df_dsm_links, dsm_links], ignore_index=True)
    )
    df_dsm_stores = pd.DataFrame(
        pd.concat([df_dsm_stores, dsm_stores], ignore_index=True)
    )

    print(" ")
    print("industry sites: pulp")
    print(" ")

    dsm_pulp = gpd.GeoDataFrame(dsm[dsm["application"] == "Mechanical Pulp"])

    # calculate potentials of industrial sites with pulp-applications
    # using parameters by Heitkoetter et al.
    p_max, p_min, e_max, e_min = calculate_potentials(
        s_flex=S_FLEX_PULP,
        s_util=S_UTIL_PULP,
        s_inc=S_INC_PULP,
        s_dec=S_DEC_PULP,
        delta_t=DELTA_T_PULP,
        dsm=dsm_pulp,
    )

    dsm_buses, dsm_links, dsm_stores = create_dsm_components(
        con, p_max, p_min, e_max, e_min, dsm_pulp
    )

    df_dsm_buses = gpd.GeoDataFrame(
        pd.concat([df_dsm_buses, dsm_buses], ignore_index=True),
        crs="EPSG:4326",
    )
    df_dsm_links = pd.DataFrame(
        pd.concat([df_dsm_links, dsm_links], ignore_index=True)
    )
    df_dsm_stores = pd.DataFrame(
        pd.concat([df_dsm_stores, dsm_stores], ignore_index=True)
    )

    # industry sites: cement
    print(" ")
    print("industry sites: cement")
    print(" ")

    dsm_cement = gpd.GeoDataFrame(dsm[dsm["application"] == "Cement Mill"])

    # calculate potentials of industrial sites with cement-applications
    # using parameters by Heitkoetter et al.
    p_max, p_min, e_max, e_min = calculate_potentials(
        s_flex=S_FLEX_CEMENT,
        s_util=S_UTIL_CEMENT,
        s_inc=S_INC_CEMENT,
        s_dec=S_DEC_CEMENT,
        delta_t=DELTA_T_CEMENT,
        dsm=dsm_cement,
    )

    dsm_buses, dsm_links, dsm_stores = create_dsm_components(
        con, p_max, p_min, e_max, e_min, dsm_cement
    )

    df_dsm_buses = gpd.GeoDataFrame(
        pd.concat([df_dsm_buses, dsm_buses], ignore_index=True),
        crs="EPSG:4326",
    )
    df_dsm_links = pd.DataFrame(
        pd.concat([df_dsm_links, dsm_links], ignore_index=True)
    )
    df_dsm_stores = pd.DataFrame(
        pd.concat([df_dsm_stores, dsm_stores], ignore_index=True)
    )

    # industry sites: ventilation in WZ23
    print(" ")
    print("industry sites: ventilation in WZ23")
    print(" ")

    dsm = ind_sites_vent_data_import(ind_vent_share, wz=23)

    # drop entries of Cement Mills whose DSM-potentials have already been
    # modelled
    cement = np.unique(dsm_cement["bus"].values)
    index_names = np.array(dsm[dsm["bus"].isin(cement)].index)
    dsm.drop(index_names, inplace=True)

    # calculate potentials of ventilation in industrial sites of WZ 23
    # using parameters by Heitkoetter et al.
    p_max, p_min, e_max, e_min = calculate_potentials(
        s_flex=S_FLEX_WZ,
        s_util=S_UTIL_WZ,
        s_inc=S_INC_WZ,
        s_dec=S_DEC_WZ,
        delta_t=DELTA_T_WZ,
        dsm=dsm,
    )

    dsm_buses, dsm_links, dsm_stores = create_dsm_components(
        con, p_max, p_min, e_max, e_min, dsm
    )

    df_dsm_buses = gpd.GeoDataFrame(
        pd.concat([df_dsm_buses, dsm_buses], ignore_index=True),
        crs="EPSG:4326",
    )
    df_dsm_links = pd.DataFrame(
        pd.concat([df_dsm_links, dsm_links], ignore_index=True)
    )
    df_dsm_stores = pd.DataFrame(
        pd.concat([df_dsm_stores, dsm_stores], ignore_index=True)
    )

    # aggregate DSM components per substation
    dsm_buses, dsm_links, dsm_stores = aggregate_components(
        df_dsm_buses, df_dsm_links, df_dsm_stores
    )

    # export aggregated DSM components to database
    delete_dsm_entries("dsm-cts")
    delete_dsm_entries("dsm-ind-osm")
    delete_dsm_entries("dsm-ind-sites")
    delete_dsm_entries("dsm")

    data_export(dsm_buses, dsm_links, dsm_stores, carrier="dsm")

def dsm_cts_ind_individual(
    con=CON,
    cts_cool_vent_ac_share=CTS_COOL_VENT_AC_SHARE,
    ind_vent_cool_share=IND_VENT_COOL_SHARE,
    ind_vent_share=IND_VENT_SHARE,
):
    """
    Execute methodology to create and implement components for DSM considering
    a) CTS per osm-area: combined potentials of cooling, ventilation and air
       conditioning
    b) Industry per osm-area: combined potentials of cooling and ventilation
    c) Industrial Sites: potentials of ventilation in sites of
       "Wirtschaftszweig" (WZ) 23
    d) Industrial Sites: potentials of sites specified by subsectors
       identified by Schmidt (https://zenodo.org/record/3613767#.YTsGwVtCRhG):
       Paper, Recycled Paper, Pulp, Cement

    Modelled using the methods by Heitkoetter et al.:
    https://doi.org/10.1016/j.adapen.2020.100001

    Parameters
    ----------
    con :
        Connection to database
    cts_cool_vent_ac_share: float
        Share of cooling, ventilation and AC in CTS demand
    ind_vent_cool_share: float
        Share of cooling and ventilation in industry demand
    ind_vent_share: float
        Share of ventilation in industry demand in sites of WZ 23
    """

    # CTS per osm-area: cooling, ventilation and air conditioning
    print(" ")
    print("CTS per osm-area: cooling, ventilation and air conditioning")
    print(" ")

    dsm = cts_data_import(cts_cool_vent_ac_share)

    # calculate combined potentials of cooling, ventilation and air
    # conditioning in CTS using combined parameters by Heitkoetter et al.
    vals = calculate_potentials(
        s_flex=S_FLEX_CTS,
        s_util=S_UTIL_CTS,
        s_inc=S_INC_CTS,
        s_dec=S_DEC_CTS,
        delta_t=DELTA_T_CTS,
        dsm=dsm,
    )

    # TODO: values are not yet given per unit (p.u.)

    base_columns = [
        "bus",
        "scn_name",
        "p_set",
        "p_max_pu",
        "p_min_pu",
        "e_max_pu",
        "e_min_pu",
    ]

    cts_df = pd.concat([dsm, *vals], axis=1, ignore_index=True)
    cts_df.columns = base_columns

    print(" ")
    print("industry per osm-area: cooling and ventilation")
    print(" ")

    dsm = ind_osm_data_import_individual(ind_vent_cool_share)

    # calculate combined potentials of cooling and ventilation in industrial
    # sector using combined parameters by Heitkoetter et al.
    vals = calculate_potentials(
        s_flex=S_FLEX_OSM,
        s_util=S_UTIL_OSM,
        s_inc=S_INC_OSM,
        s_dec=S_DEC_OSM,
        delta_t=DELTA_T_OSM,
        dsm=dsm,
    )

    columns = ["osm_id"] + base_columns

    osm_df = pd.concat([dsm, *vals], axis=1, ignore_index=True)
    osm_df.columns = columns

    # industry sites

    # industry sites: different applications
    dsm = ind_sites_data_import()

    print(" ")
    print("industry sites: paper")
    print(" ")

    dsm_paper = gpd.GeoDataFrame(
        dsm[
            dsm["application"].isin(
                [
                    "Graphic Paper",
                    "Packing Paper and Board",
                    "Hygiene Paper",
                    "Technical/Special Paper and Board",
                ]
            )
        ]
    )

    # calculate potentials of industrial sites with paper-applications
    # using parameters by Heitkoetter et al.
    vals = calculate_potentials(
        s_flex=S_FLEX_PAPER,
        s_util=S_UTIL_PAPER,
        s_inc=S_INC_PAPER,
        s_dec=S_DEC_PAPER,
        delta_t=DELTA_T_PAPER,
        dsm=dsm_paper,
    )

    columns = ["application", "id"] + base_columns

    paper_df = pd.concat([dsm_paper, *vals], axis=1, ignore_index=True)
    paper_df.columns = columns

    print(" ")
    print("industry sites: recycled paper")
    print(" ")

    # calculate potentials of industrial sites with recycled paper-applications
    # using parameters by Heitkoetter et al.
    dsm_recycled_paper = gpd.GeoDataFrame(
        dsm[dsm["application"] == "Recycled Paper"]
    )

    vals = calculate_potentials(
        s_flex=S_FLEX_RECYCLED_PAPER,
        s_util=S_UTIL_RECYCLED_PAPER,
        s_inc=S_INC_RECYCLED_PAPER,
        s_dec=S_DEC_RECYCLED_PAPER,
        delta_t=DELTA_T_RECYCLED_PAPER,
        dsm=dsm_recycled_paper,
    )

    df_recycled_paper = pd.concat(
        [dsm_recycled_paper, *vals], axis=1, ignore_index=True
    )
    df_recycled_paper.columns = columns

    print(" ")
    print("industry sites: pulp")
    print(" ")

    dsm_pulp = gpd.GeoDataFrame(dsm[dsm["application"] == "Mechanical Pulp"])

    # calculate potentials of industrial sites with pulp-applications
    # using parameters by Heitkoetter et al.
    vals = calculate_potentials(
        s_flex=S_FLEX_PULP,
        s_util=S_UTIL_PULP,
        s_inc=S_INC_PULP,
        s_dec=S_DEC_PULP,
        delta_t=DELTA_T_PULP,
        dsm=dsm_pulp,
    )

    df_pulp = pd.concat([dsm_pulp, *vals], axis=1, ignore_index=True)
    df_pulp.columns = columns

    # industry sites: cement
    print(" ")
    print("industry sites: cement")
    print(" ")

    dsm_cement = gpd.GeoDataFrame(dsm[dsm["application"] == "Cement Mill"])

    # calculate potentials of industrial sites with cement-applications
    # using parameters by Heitkoetter et al.
    vals = calculate_potentials(
        s_flex=S_FLEX_CEMENT,
        s_util=S_UTIL_CEMENT,
        s_inc=S_INC_CEMENT,
        s_dec=S_DEC_CEMENT,
        delta_t=DELTA_T_CEMENT,
        dsm=dsm_cement,
    )

    df_cement = pd.concat([dsm_cement, *vals], axis=1, ignore_index=True)
    df_cement.columns = columns

    # industry sites: ventilation in WZ23
    print(" ")
    print("industry sites: ventilation in WZ23")
    print(" ")

    dsm = ind_sites_vent_data_import_individual(ind_vent_share, wz=23)

    # drop entries of Cement Mills whose DSM-potentials have already been
    # modelled
    cement = np.unique(dsm_cement["bus"].values)
    index_names = np.array(dsm[dsm["bus"].isin(cement)].index)
    dsm.drop(index_names, inplace=True)

    # calculate potentials of ventilation in industrial sites of WZ 23
    # using parameters by Heitkoetter et al.
    vals = calculate_potentials(
        s_flex=S_FLEX_WZ,
        s_util=S_UTIL_WZ,
        s_inc=S_INC_WZ,
        s_dec=S_DEC_WZ,
        delta_t=DELTA_T_WZ,
        dsm=dsm,
    )

    columns = ["site_id"] + base_columns

    df_ind_sites = pd.concat([dsm, *vals], axis=1, ignore_index=True)
    df_ind_sites.columns = columns

    # TODO

def dsm_cts_ind_processing():
    dsm_cts_ind()

    dsm_cts_ind_individual()