Total Complexity: 55
Total Lines: 1856
Duplicated Lines: 10.88 %
Changes: 0
Duplicate code is one of the most pungent code smells. A commonly used rule of thumb is to restructure code once it is duplicated in three or more places.
Common duplication problems and corresponding solutions are:
Complex classes like data.datasets.DSM_cts_ind often do a lot of different things. To break such a class down, we need to identify a cohesive component within that class. A common approach to finding such a component is to look for fields/methods that share the same prefixes or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring, as illustrated in the sketch below. If the component makes sense as a subclass, Extract Subclass is also a candidate and is often faster.
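For illustration, here is a minimal sketch of the Extract Class refactoring; the class and field names are invented for this example and do not come from the module below:

```python
# Before: one class carries two loosely related responsibilities; the
# shared "meta_" prefix hints at a hidden component.
class SiteTimeseries:
    def __init__(self, site_id, bus_id, p_set, meta_title, meta_license):
        self.site_id = site_id
        self.bus_id = bus_id
        self.p_set = p_set
        self.meta_title = meta_title
        self.meta_license = meta_license


# After: the "meta_" fields are extracted into their own cohesive class.
class Metadata:
    def __init__(self, title, license_name):
        self.title = title
        self.license_name = license_name


class SiteTimeseriesRefactored:
    def __init__(self, site_id, bus_id, p_set, metadata: Metadata):
        self.site_id = site_id
        self.bus_id = bus_id
        self.p_set = p_set
        self.metadata = metadata
```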
1 | """ |
||
2 | Currently, there are differences in the aggregated and individual DSM time |
||
3 | series. These are caused by the truncation of the values at zero. |
||
4 | |||
5 | The sum of the individual time series is a more accurate value than the |
||
6 | aggregated time series used so far and should replace it in the future. Since |
||
7 | the deviations are relatively small, a tolerance is currently accepted in the |
||
8 | sanity checks. See `#1120 <https://github.com/openego/eGon-data/issues/1120>`_ |
||
9 | for updates. |
||
10 | """ |
||
11 | |||
12 | import datetime |
||
13 | import json |
||
14 | |||
15 | from omi.dialects import get_dialect |
||
16 | from sqlalchemy import ARRAY, Column, Float, Integer, String |
||
17 | from sqlalchemy.ext.declarative import declarative_base |
||
18 | import geopandas as gpd |
||
19 | import numpy as np |
||
20 | import pandas as pd |
||
21 | |||
22 | from egon.data import config, db |
||
23 | from egon.data.datasets import Dataset |
||
24 | from egon.data.datasets.electricity_demand.temporal import calc_load_curve |
||
25 | from egon.data.datasets.industry.temporal import identify_bus |
||
26 | from egon.data.metadata import ( |
||
27 | context, |
||
28 | contributors, |
||
29 | generate_resource_fields_from_db_table, |
||
30 | license_odbl, |
||
31 | meta_metadata, |
||
32 | sources, |
||
33 | ) |
||
34 | |||
35 | # CONSTANTS |
||
36 | # TODO: move to datasets.yml |
||
37 | CON = db.engine() |
||
38 | |||
39 | # CTS |
||
40 | CTS_COOL_VENT_AC_SHARE = 0.22 |
||
41 | |||
42 | S_FLEX_CTS = 0.5 |
||
43 | S_UTIL_CTS = 0.67 |
||
44 | S_INC_CTS = 1 |
||
45 | S_DEC_CTS = 0 |
||
46 | DELTA_T_CTS = 1 |
||
47 | |||
48 | # industry |
||
49 | IND_VENT_COOL_SHARE = 0.039 |
||
50 | IND_VENT_SHARE = 0.017 |
||
51 | |||
52 | # OSM |
||
53 | S_FLEX_OSM = 0.5 |
||
54 | S_UTIL_OSM = 0.73 |
||
55 | S_INC_OSM = 0.9 |
||
56 | S_DEC_OSM = 0.5 |
||
57 | DELTA_T_OSM = 1 |
||
58 | |||
59 | # paper |
||
60 | S_FLEX_PAPER = 0.15 |
||
61 | S_UTIL_PAPER = 0.86 |
||
62 | S_INC_PAPER = 0.95 |
||
63 | S_DEC_PAPER = 0 |
||
64 | DELTA_T_PAPER = 3 |
||
65 | |||
66 | # recycled paper |
||
67 | S_FLEX_RECYCLED_PAPER = 0.7 |
||
68 | S_UTIL_RECYCLED_PAPER = 0.85 |
||
69 | S_INC_RECYCLED_PAPER = 0.95 |
||
70 | S_DEC_RECYCLED_PAPER = 0 |
||
71 | DELTA_T_RECYCLED_PAPER = 3 |
||
72 | |||
73 | # pulp |
||
74 | S_FLEX_PULP = 0.7 |
||
75 | S_UTIL_PULP = 0.83 |
||
76 | S_INC_PULP = 0.95 |
||
77 | S_DEC_PULP = 0 |
||
78 | DELTA_T_PULP = 2 |
||
79 | |||
80 | # cement |
||
81 | S_FLEX_CEMENT = 0.61 |
||
82 | S_UTIL_CEMENT = 0.65 |
||
83 | S_INC_CEMENT = 0.95 |
||
84 | S_DEC_CEMENT = 0 |
||
85 | DELTA_T_CEMENT = 4 |
||
86 | |||
87 | # wz 23 |
||
88 | WZ = 23 |
||
89 | |||
90 | S_FLEX_WZ = 0.5 |
||
91 | S_UTIL_WZ = 0.8 |
||
92 | S_INC_WZ = 1 |
||
93 | S_DEC_WZ = 0.5 |
||
94 | DELTA_T_WZ = 1 |
||
95 | |||
96 | Base = declarative_base() |
||
97 | |||
98 | |||
99 | class DsmPotential(Dataset): |
||
100 | """ |
||
101 | Calculate Demand-Side Management potentials and transfer them to characteristics of DSM components |
||
102 | |||
103 | DSM within this work includes the shifting of loads within the sectors of |
||
104 | industry and CTS. Therefore, the corresponding previously prepared demand |
||
105 | time series are used. Shiftable potentials are calculated using the |
||
106 | parametrization elaborated in Heitkoetter et al. (doi:https://doi.org/10.1016/j.adapen.2020.100001). |
||
107 | DSM is modelled as storage-equivalent operation using the methods by Kleinhans (doi:10.48550/ARXIV.1401.4121). |
||
108 | The potentials are transferred to characteristics of DSM links (minimum and |
||
109 | maximum shiftable power per time step) and DSM stores (minimum and maximum |
||
110 | capacity per time step). DSM buses are created to connect DSM components with |
||
111 | the electrical network. All DSM components are added to the corresponding |
||
112 | tables for the transmission grid level. For the distribution grids, the |
||
113 | respective time series are exported to the corresponding tables (for the |
||
114 | required higher spatial resolution). |
||
115 | |||
116 | *Dependencies* |
||
117 | * :py:class:`CtsElectricityDemand <egon.data.datasets.electricity_demand>` |
||
118 | * :py:class:`IndustrialDemandCurves <egon.data.datasets.industry>` |
||
119 | * :py:class:`Osmtgmod <egon.data.datasets.osmtgmod>` |
||
120 | |||
121 | *Resulting tables* |
||
122 | * :py:class:`grid.egon_etrago_bus <egon.data.datasets.etrago_setup.EgonPfHvBus>` is extended |
||
123 | * :py:class:`grid.egon_etrago_link <egon.data.datasets.etrago_setup.EgonPfHvLink>` is extended |
||
124 | * :py:class:`grid.egon_etrago_link_timeseries <egon.data.datasets.etrago_setup.EgonPfHvLinkTimeseries>` is extended |
||
125 | * :py:class:`grid.egon_etrago_store <egon.data.datasets.etrago_setup.EgonPfHvStore>` is extended |
||
126 | * :py:class:`grid.egon_etrago_store_timeseries <egon.data.datasets.etrago_setup.EgonPfHvStoreTimeseries>` is extended |
||
127 | * :py:class:`demand.egon_etrago_electricity_cts_dsm_timeseries <egon.data.datasets.DsmPotential.EgonEtragoElectricityCtsDsmTimeseries>` is created and filled # noqa: E501 |
||
128 | * :py:class:`demand.egon_osm_ind_load_curves_individual_dsm_timeseries <egon.data.datasets.DsmPotential.EgonOsmIndLoadCurvesIndividualDsmTimeseries>` is created and filled # noqa: E501 |
||
129 | * :py:class:`demand.egon_demandregio_sites_ind_electricity_dsm_timeseries <egon.data.datasets.DsmPotential.EgonDemandregioSitesIndElectricityDsmTimeseries>` is created and filled # noqa: E501 |
||
130 | * :py:class:`demand.egon_sites_ind_load_curves_individual_dsm_timeseries <egon.data.datasets.DsmPotential.EgonSitesIndLoadCurvesIndividualDsmTimeseries>` is created and filled # noqa: E501 |
||
131 | |||
132 | """ |
||
133 | |||
134 | #: |
||
135 | name: str = "DsmPotential" |
||
136 | #: |
||
137 | version: str = "0.0.7" |
||
138 | |||
139 | def __init__(self, dependencies): |
||
140 | super().__init__( |
||
141 | name=self.name, |
||
142 | version=self.version, |
||
143 | dependencies=dependencies, |
||
144 | tasks=(dsm_cts_ind_processing,), |
||
145 | ) |
||
146 | |||
147 | |||
148 | # Datasets |
||
149 | class EgonEtragoElectricityCtsDsmTimeseries(Base): |
||
150 | target = config.datasets()["DSM_CTS_industry"]["targets"][ |
||
151 | "cts_loadcurves_dsm" |
||
152 | ] |
||
153 | |||
154 | __tablename__ = target["table"] |
||
155 | __table_args__ = {"schema": target["schema"]} |
||
156 | |||
157 | bus = Column(Integer, primary_key=True, index=True) |
||
158 | scn_name = Column(String, primary_key=True, index=True) |
||
159 | p_set = Column(ARRAY(Float)) |
||
160 | p_max = Column(ARRAY(Float)) |
||
161 | p_min = Column(ARRAY(Float)) |
||
162 | e_max = Column(ARRAY(Float)) |
||
163 | e_min = Column(ARRAY(Float)) |
||
164 | |||
165 | |||
166 | class EgonOsmIndLoadCurvesIndividualDsmTimeseries(Base): |
||
167 | target = config.datasets()["DSM_CTS_industry"]["targets"][ |
||
168 | "ind_osm_loadcurves_individual_dsm" |
||
169 | ] |
||
170 | |||
171 | __tablename__ = target["table"] |
||
172 | __table_args__ = {"schema": target["schema"]} |
||
173 | |||
174 | osm_id = Column(Integer, primary_key=True, index=True) |
||
175 | scn_name = Column(String, primary_key=True, index=True) |
||
176 | bus = Column(Integer) |
||
177 | p_set = Column(ARRAY(Float)) |
||
178 | p_max = Column(ARRAY(Float)) |
||
179 | p_min = Column(ARRAY(Float)) |
||
180 | e_max = Column(ARRAY(Float)) |
||
181 | e_min = Column(ARRAY(Float)) |
||
182 | |||
183 | |||
184 | class EgonDemandregioSitesIndElectricityDsmTimeseries(Base): |
||
185 | target = config.datasets()["DSM_CTS_industry"]["targets"][ |
||
186 | "demandregio_ind_sites_dsm" |
||
187 | ] |
||
188 | |||
189 | __tablename__ = target["table"] |
||
190 | __table_args__ = {"schema": target["schema"]} |
||
191 | |||
192 | industrial_sites_id = Column(Integer, primary_key=True, index=True) |
||
193 | scn_name = Column(String, primary_key=True, index=True) |
||
194 | bus = Column(Integer) |
||
195 | application = Column(String) |
||
196 | p_set = Column(ARRAY(Float)) |
||
197 | p_max = Column(ARRAY(Float)) |
||
198 | p_min = Column(ARRAY(Float)) |
||
199 | e_max = Column(ARRAY(Float)) |
||
200 | e_min = Column(ARRAY(Float)) |
||
201 | |||
202 | |||
203 | class EgonSitesIndLoadCurvesIndividualDsmTimeseries(Base): |
||
204 | target = config.datasets()["DSM_CTS_industry"]["targets"][ |
||
205 | "ind_sites_loadcurves_individual" |
||
206 | ] |
||
207 | |||
208 | __tablename__ = target["table"] |
||
209 | __table_args__ = {"schema": target["schema"]} |
||
210 | |||
211 | site_id = Column(Integer, primary_key=True, index=True) |
||
212 | scn_name = Column(String, primary_key=True, index=True) |
||
213 | bus = Column(Integer) |
||
214 | p_set = Column(ARRAY(Float)) |
||
215 | p_max = Column(ARRAY(Float)) |
||
216 | p_min = Column(ARRAY(Float)) |
||
217 | e_max = Column(ARRAY(Float)) |
||
218 | e_min = Column(ARRAY(Float)) |
||
219 | |||
220 | |||
221 | def add_metadata_individual(): |
||
222 | targets = config.datasets()["DSM_CTS_industry"]["targets"] |
||
223 | |||
224 | targets = { |
||
225 | k: v for k, v in targets.items() if "dsm_timeseries" in v["table"] |
||
226 | } |
||
227 | |||
228 | title_dict = { |
||
229 | "egon_etrago_electricity_cts_dsm_timeseries": ( |
||
230 | "DSM flexibility band time series for CTS" |
||
231 | ), |
||
232 | "egon_osm_ind_load_curves_individual_dsm_timeseries": ( |
||
233 | "DSM flexibility band time series for OSM industry sites" |
||
234 | ), |
||
235 | "egon_demandregio_sites_ind_electricity_dsm_timeseries": ( |
||
236 | "DSM flexibility band time series for demandregio industry sites" |
||
237 | ), |
||
238 | "egon_sites_ind_load_curves_individual_dsm_timeseries": ( |
||
239 | "DSM flexibility band time series for other industry sites" |
||
240 | ), |
||
241 | } |
||
242 | |||
243 | description_dict = { |
||
244 | "egon_etrago_electricity_cts_dsm_timeseries": ( |
||
245 | "DSM flexibility band time series for CTS in 1 h resolution " |
||
246 | "including available store capacity and power potential" |
||
247 | ), |
||
248 | "egon_osm_ind_load_curves_individual_dsm_timeseries": ( |
||
249 | "DSM flexibility band time series for OSM industry sites in 1 h " |
||
250 | "resolution including available store capacity and power potential" |
||
251 | ), |
||
252 | "egon_demandregio_sites_ind_electricity_dsm_timeseries": ( |
||
253 | "DSM flexibility band time series for demandregio industry sites " |
||
254 | "in 1 h resolution including available store capacity and power " |
||
255 | "potential" |
||
256 | ), |
||
257 | "egon_sites_ind_load_curves_individual_dsm_timeseries": ( |
||
258 | "DSM flexibility band time series for other industry sites in 1 h " |
||
259 | "resolution including available store capacity and power potential" |
||
260 | ), |
||
261 | } |
||
262 | |||
263 | keywords_dict = { |
||
264 | "egon_etrago_electricity_cts_dsm_timeseries": ["cts"], |
||
265 | "egon_osm_ind_load_curves_individual_dsm_timeseries": [ |
||
266 | "osm", |
||
267 | "industry", |
||
268 | ], |
||
269 | "egon_demandregio_sites_ind_electricity_dsm_timeseries": [ |
||
270 | "demandregio", |
||
271 | "industry", |
||
272 | ], |
||
273 | "egon_sites_ind_load_curves_individual_dsm_timeseries": ["industry"], |
||
274 | } |
||
275 | |||
276 | primaryKey_dict = { |
||
277 | "egon_etrago_electricity_cts_dsm_timeseries": ["bus"], |
||
278 | "egon_osm_ind_load_curves_individual_dsm_timeseries": ["osm_id"], |
||
279 | "egon_demandregio_sites_ind_electricity_dsm_timeseries": [ |
||
280 | "industrial_sites_id", |
||
281 | ], |
||
282 | "egon_sites_ind_load_curves_individual_dsm_timeseries": ["site_id"], |
||
283 | } |
||
284 | |||
285 | sources_dict = { |
||
286 | "egon_etrago_electricity_cts_dsm_timeseries": [ |
||
287 | sources()["nep2021"], |
||
288 | sources()["zensus"], |
||
289 | ], |
||
290 | "egon_osm_ind_load_curves_individual_dsm_timeseries": [ |
||
291 | sources()["hotmaps_industrial_sites"], |
||
292 | sources()["schmidt"], |
||
293 | sources()["seenergies"], |
||
294 | ], |
||
295 | "egon_demandregio_sites_ind_electricity_dsm_timeseries": [ |
||
296 | sources()["openstreetmap"], |
||
297 | ], |
||
298 | "egon_sites_ind_load_curves_individual_dsm_timeseries": [ |
||
299 | sources()["hotmaps_industrial_sites"], |
||
300 | sources()["openstreetmap"], |
||
301 | sources()["schmidt"], |
||
302 | sources()["seenergies"], |
||
303 | ], |
||
304 | } |
||
305 | |||
306 | contris = contributors(["kh", "kh"]) |
||
307 | |||
308 | contris[0]["date"] = "2023-03-17" |
||
309 | |||
310 | contris[0]["object"] = "metadata" |
||
311 | contris[1]["object"] = "dataset" |
||
312 | |||
313 | contris[0]["comment"] = "Add metadata to dataset." |
||
314 | contris[1]["comment"] = "Add workflow to generate dataset." |
||
315 | |||
316 | for t_dict in targets.values(): |
||
317 | schema = t_dict["schema"] |
||
318 | table = t_dict["table"] |
||
319 | name = f"{schema}.{table}" |
||
320 | |||
321 | meta = { |
||
322 | "name": name, |
||
323 | "title": title_dict[table], |
||
324 | "id": "WILL_BE_SET_AT_PUBLICATION", |
||
325 | "description": description_dict[table], |
||
326 | "language": "en-US", |
||
327 | "keywords": ["dsm", "timeseries"] + keywords_dict[table], |
||
328 | "publicationDate": datetime.date.today().isoformat(), |
||
329 | "context": context(), |
||
330 | "spatial": { |
||
331 | "location": "none", |
||
332 | "extent": "Germany", |
||
333 | "resolution": "none", |
||
334 | }, |
||
335 | "temporal": { |
||
336 | "referenceDate": "2011-01-01", |
||
337 | "timeseries": { |
||
338 | "start": "2011-01-01", |
||
339 | "end": "2011-12-31", |
||
340 | "resolution": "1 h", |
||
341 | "alignment": "left", |
||
342 | "aggregationType": "average", |
||
343 | }, |
||
344 | }, |
||
345 | "sources": [ |
||
346 | sources()["egon-data"], |
||
347 | sources()["vg250"], |
||
348 | sources()["demandregio"], |
||
349 | ] |
||
350 | + sources_dict[table], |
||
351 | "licenses": [license_odbl("© eGon development team")], |
||
352 | "contributors": contris, |
||
353 | "resources": [ |
||
354 | { |
||
355 | "profile": "tabular-data-resource", |
||
356 | "name": name, |
||
357 | "path": "None", |
||
358 | "format": "PostgreSQL", |
||
359 | "encoding": "UTF-8", |
||
360 | "schema": { |
||
361 | "fields": generate_resource_fields_from_db_table( |
||
362 | schema, |
||
363 | table, |
||
364 | ), |
||
365 | "primaryKey": ["scn_name"] + primaryKey_dict[table], |
||
366 | }, |
||
367 | "dialect": {"delimiter": "", "decimalSeparator": ""}, |
||
368 | } |
||
369 | ], |
||
370 | "review": {"path": "", "badge": ""}, |
||
371 | "metaMetadata": meta_metadata(), |
||
372 | "_comment": { |
||
373 | "metadata": ( |
||
374 | "Metadata documentation and explanation (https://" |
||
375 | "github.com/OpenEnergyPlatform/oemetadata/blob/master/" |
||
376 | "metadata/v141/metadata_key_description.md)" |
||
377 | ), |
||
378 | "dates": ( |
||
379 | "Dates and time must follow the ISO8601 including time " |
||
380 | "zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)" |
||
381 | ), |
||
382 | "units": "Use a space between numbers and units (100 m)", |
||
383 | "languages": ( |
||
384 | "Languages must follow the IETF (BCP47) format (en-GB, " |
||
385 | "en-US, de-DE)" |
||
386 | ), |
||
387 | "licenses": ( |
||
388 | "License name must follow the SPDX License List " |
||
389 | "(https://spdx.org/licenses/)" |
||
390 | ), |
||
391 | "review": ( |
||
392 | "Following the OEP Data Review (https://github.com/" |
||
393 | "OpenEnergyPlatform/data-preprocessing/wiki)" |
||
394 | ), |
||
395 | "none": "If not applicable use (none)", |
||
396 | }, |
||
397 | } |
||
398 | |||
399 | dialect = get_dialect( |
||
400 | f"oep-v{meta_metadata()['metadataVersion'][4:7]}" |
||
401 | )() |
||
402 | |||
403 | meta = dialect.compile_and_render(dialect.parse(json.dumps(meta))) |
||
404 | |||
405 | db.submit_comment( |
||
406 | f"'{json.dumps(meta)}'", |
||
407 | schema, |
||
408 | table, |
||
409 | ) |
||
410 | |||
411 | |||
412 | # Code |
||
413 | def cts_data_import(cts_cool_vent_ac_share): |
||
414 | """ |
||
415 | Import CTS data necessary to identify DSM-potential. |
||
416 | |||
417 | Parameters |
||
418 | ---------- |
||
419 | cts_cool_vent_ac_share: float |
||
420 | Share of cooling, ventilation and AC in CTS demand |
||
421 | """ |
||
422 | |||
423 | # import load data |
||
424 | |||
425 | sources = config.datasets()["DSM_CTS_industry"]["sources"][ |
||
426 | "cts_loadcurves" |
||
427 | ] |
||
428 | |||
429 | ts = db.select_dataframe( |
||
430 | f"""SELECT bus_id, scn_name, p_set FROM |
||
431 | {sources['schema']}.{sources['table']}""" |
||
432 | ) |
||
433 | |||
434 | # identify relevant columns and prepare df to be returned |
||
435 | |||
436 | dsm = pd.DataFrame(index=ts.index) |
||
437 | |||
438 | dsm["bus"] = ts["bus_id"].copy() |
||
439 | dsm["scn_name"] = ts["scn_name"].copy() |
||
440 | dsm["p_set"] = ts["p_set"].copy() |
||
441 | |||
442 | # calculate share of timeseries for air conditioning, cooling and |
||
443 | # ventilation out of CTS-data |
||
444 | |||
445 | timeseries = dsm["p_set"].copy() |
||
446 | |||
447 | for index, liste in timeseries.items(): |
||
448 | share = [float(item) * cts_cool_vent_ac_share for item in liste] |
||
449 | timeseries.loc[index] = share |
||
450 | |||
451 | dsm["p_set"] = timeseries.copy() |
||
452 | |||
453 | return dsm |
||
454 | |||
455 | |||
456 | def ind_osm_data_import(ind_vent_cool_share): |
||
457 | """ |
||
458 | Import industry data per osm-area necessary to identify DSM-potential. |
||
459 | |||
460 | Parameters |
||
461 | ---------- |
||
462 | ind_vent_cool_share: float |
||
463 | Share of considered application in industry demand |
||
464 | """ |
||
465 | |||
466 | # import load data |
||
467 | |||
468 | sources = config.datasets()["DSM_CTS_industry"]["sources"][ |
||
469 | "ind_osm_loadcurves" |
||
470 | ] |
||
471 | |||
472 | dsm = db.select_dataframe( |
||
473 | f""" |
||
474 | SELECT bus, scn_name, p_set FROM |
||
475 | {sources['schema']}.{sources['table']} |
||
476 | """ |
||
477 | ) |
||
478 | |||
479 | # calculate share of timeseries for cooling and ventilation out of |
||
480 | # industry-data |
||
481 | |||
482 | timeseries = dsm["p_set"].copy() |
||
483 | |||
484 | for index, liste in timeseries.items(): |
||
485 | share = [float(item) * ind_vent_cool_share for item in liste] |
||
486 | |||
487 | timeseries.loc[index] = share |
||
488 | |||
489 | dsm["p_set"] = timeseries.copy() |
||
490 | |||
491 | return dsm |
||
492 | |||
493 | |||
494 | def ind_osm_data_import_individual(ind_vent_cool_share): |
||
495 | """ |
||
496 | Import industry data per osm-area necessary to identify DSM-potential. |
||
497 | |||
498 | Parameters |
||
499 | ---------- |
||
500 | ind_vent_cool_share: float |
||
501 | Share of considered application in industry demand |
||
502 | """ |
||
503 | |||
504 | # import load data |
||
505 | |||
506 | sources = config.datasets()["DSM_CTS_industry"]["sources"][ |
||
507 | "ind_osm_loadcurves_individual" |
||
508 | ] |
||
509 | |||
510 | dsm = db.select_dataframe( |
||
511 | f""" |
||
512 | SELECT osm_id, bus_id as bus, scn_name, p_set FROM |
||
513 | {sources['schema']}.{sources['table']} |
||
514 | """ |
||
515 | ) |
||
516 | |||
517 | # calculate share of timeseries for cooling and ventilation out of |
||
518 | # industry-data |
||
519 | |||
520 | timeseries = dsm["p_set"].copy() |
||
521 | |||
522 | for index, liste in timeseries.items(): |
||
523 | share = [float(item) * ind_vent_cool_share for item in liste] |
||
524 | |||
525 | timeseries.loc[index] = share |
||
526 | |||
527 | dsm["p_set"] = timeseries.copy() |
||
528 | |||
529 | return dsm |
||
530 | |||
531 | |||
532 | def ind_sites_vent_data_import(ind_vent_share, wz): |
||
533 | """ |
||
534 | Import industry sites necessary to identify DSM-potential. |
||
535 | |||
536 | Parameters |
||
537 | ---------- |
||
538 | ind_vent_share: float |
||
539 | Share of considered application in industry demand |
||
540 | wz: int |
||
541 | Wirtschaftszweig to be considered within industry sites |
||
542 | """ |
||
543 | |||
544 | # import load data |
||
545 | |||
546 | sources = config.datasets()["DSM_CTS_industry"]["sources"][ |
||
547 | "ind_sites_loadcurves" |
||
548 | ] |
||
549 | |||
550 | dsm = db.select_dataframe( |
||
551 | f""" |
||
552 | SELECT bus, scn_name, p_set FROM |
||
553 | {sources['schema']}.{sources['table']} |
||
554 | WHERE wz = {wz} |
||
555 | """ |
||
556 | ) |
||
557 | |||
558 | # calculate share of timeseries for ventilation |
||
559 | |||
560 | timeseries = dsm["p_set"].copy() |
||
561 | |||
562 | for index, liste in timeseries.items(): |
||
563 | share = [float(item) * ind_vent_share for item in liste] |
||
564 | timeseries.loc[index] = share |
||
565 | |||
566 | dsm["p_set"] = timeseries.copy() |
||
567 | |||
568 | return dsm |
||
569 | |||
570 | |||
571 | def ind_sites_vent_data_import_individual(ind_vent_share, wz): |
||
572 | """ |
||
573 | Import industry sites necessary to identify DSM-potential. |
||
574 | |||
575 | Parameters |
||
576 | ---------- |
||
577 | ind_vent_share: float |
||
578 | Share of considered application in industry demand |
||
579 | wz: int |
||
580 | Wirtschaftszweig to be considered within industry sites |
||
581 | """ |
||
582 | |||
583 | # import load data |
||
584 | |||
585 | sources = config.datasets()["DSM_CTS_industry"]["sources"][ |
||
586 | "ind_sites_loadcurves_individual" |
||
587 | ] |
||
588 | |||
589 | dsm = db.select_dataframe( |
||
590 | f""" |
||
591 | SELECT site_id, bus_id as bus, scn_name, p_set FROM |
||
592 | {sources['schema']}.{sources['table']} |
||
593 | WHERE wz = {wz} |
||
594 | """ |
||
595 | ) |
||
596 | |||
597 | # calculate share of timeseries for ventilation |
||
598 | |||
599 | timeseries = dsm["p_set"].copy() |
||
600 | |||
601 | for index, liste in timeseries.items(): |
||
602 | share = [float(item) * ind_vent_share for item in liste] |
||
603 | timeseries.loc[index] = share |
||
604 | |||
605 | dsm["p_set"] = timeseries.copy() |
||
606 | |||
607 | return dsm |
||
608 | |||
609 | |||
610 | def calc_ind_site_timeseries(scenario): |
||
611 | # calculate timeseries per site |
||
612 | # -> using code from egon.data.datasets.industry.temporal: |
||
613 | # calc_load_curves_ind_sites |
||
614 | |||
615 | # select demands per industrial site including the subsector information |
||
616 | source1 = config.datasets()["DSM_CTS_industry"]["sources"][ |
||
617 | "demandregio_ind_sites" |
||
618 | ] |
||
619 | |||
620 | demands_ind_sites = db.select_dataframe( |
||
621 | f"""SELECT industrial_sites_id, wz, demand |
||
622 | FROM {source1['schema']}.{source1['table']} |
||
623 | WHERE scenario = '{scenario}' |
||
624 | AND demand > 0 |
||
625 | """ |
||
626 | ).set_index(["industrial_sites_id"]) |
||
627 | |||
628 | # select industrial sites as demand_areas from database |
||
629 | source2 = config.datasets()["DSM_CTS_industry"]["sources"]["ind_sites"] |
||
630 | |||
631 | demand_area = db.select_geodataframe( |
||
632 | f"""SELECT id, geom, subsector FROM |
||
633 | {source2['schema']}.{source2['table']}""", |
||
634 | index_col="id", |
||
635 | geom_col="geom", |
||
636 | epsg=3035, |
||
637 | ) |
||
638 | |||
639 | # replace entries to bring it in line with demandregio's subsector |
||
640 | # definitions |
||
641 | demands_ind_sites.replace(1718, 17, inplace=True) |
||
642 | share_wz_sites = demands_ind_sites.copy() |
||
643 | |||
644 | # create additional df on wz_share per industrial site, which is always set |
||
645 | # to one as the industrial demand per site is subsector specific |
||
646 | share_wz_sites.demand = 1 |
||
647 | share_wz_sites.reset_index(inplace=True) |
||
648 | |||
649 | share_transpose = pd.DataFrame( |
||
650 | index=share_wz_sites.industrial_sites_id.unique(), |
||
651 | columns=share_wz_sites.wz.unique(), |
||
652 | ) |
||
653 | share_transpose.index.rename("industrial_sites_id", inplace=True) |
||
654 | for wz in share_transpose.columns: |
||
655 | share_transpose[wz] = ( |
||
656 | share_wz_sites[share_wz_sites.wz == wz] |
||
657 | .set_index("industrial_sites_id") |
||
658 | .demand |
||
659 | ) |
||
660 | |||
661 | # calculate load curves |
||
662 | load_curves = calc_load_curve( |
||
663 | share_transpose, scenario, demands_ind_sites["demand"] |
||
664 | ) |
||
665 | |||
666 | # identify bus per industrial site |
||
667 | curves_bus = identify_bus(load_curves, demand_area) |
||
668 | curves_bus.index = curves_bus["id"].astype(int) |
||
669 | |||
670 | # initialize dataframe to be returned |
||
671 | |||
672 | ts = pd.DataFrame( |
||
673 | data=curves_bus["bus_id"], index=curves_bus["id"].astype(int) |
||
674 | ) |
||
675 | ts["scenario_name"] = scenario |
||
676 | curves_bus.drop({"id", "bus_id", "geom"}, axis=1, inplace=True) |
||
677 | ts["p_set"] = curves_bus.values.tolist() |
||
678 | |||
679 | # add subsector to relate to Schmidt's tables afterwards |
||
680 | ts["application"] = demand_area["subsector"] |
||
681 | |||
682 | return ts |
||
683 | |||
684 | |||
685 | def relate_to_schmidt_sites(dsm): |
||
686 | # import industrial sites by Schmidt |
||
687 | |||
688 | source = config.datasets()["DSM_CTS_industry"]["sources"][ |
||
689 | "ind_sites_schmidt" |
||
690 | ] |
||
691 | |||
692 | schmidt = db.select_dataframe( |
||
693 | f"""SELECT application, geom FROM |
||
694 | {source['schema']}.{source['table']}""" |
||
695 | ) |
||
696 | |||
697 | # relate calculated timeseries (dsm) to Schmidt's industrial sites |
||
698 | |||
699 | applications = np.unique(schmidt["application"]) |
||
700 | dsm = pd.DataFrame(dsm[dsm["application"].isin(applications)]) |
||
701 | |||
702 | # initialize dataframe to be returned |
||
703 | |||
704 | dsm.rename( |
||
705 | columns={"scenario_name": "scn_name", "bus_id": "bus"}, |
||
706 | inplace=True, |
||
707 | ) |
||
708 | |||
709 | return dsm |
||
710 | |||
711 | |||
712 | def ind_sites_data_import(): |
||
713 | """ |
||
714 | Import industry sites data necessary to identify DSM-potential. |
||
715 | """ |
||
716 | # calculate timeseries per site |
||
717 | scenarios = config.settings()["egon-data"]["--scenarios"] |
||
718 | |||
719 | dsm = pd.DataFrame( |
||
720 | columns=["bus_id", "scenario_name", "p_set", "application", "id"] |
||
721 | ) |
||
722 | |||
723 | # scenario eGon2035 |
||
724 | if "eGon2035" in scenarios: |
||
725 | dsm_2035 = calc_ind_site_timeseries("eGon2035").reset_index() |
||
726 | dsm = pd.concat([dsm, dsm_2035], ignore_index=True) |
||
727 | # scenario eGon100RE |
||
728 | if "eGon100RE" in scenarios: |
||
729 | dsm_100 = calc_ind_site_timeseries("eGon100RE").reset_index() |
||
730 | dsm = pd.concat([dsm, dsm_100], ignore_index=True) |
||
731 | |||
732 | dsm.index = range(len(dsm)) |
||
733 | # relate calculated timeseries to Schmidt's industrial sites |
||
734 | |||
735 | dsm = relate_to_schmidt_sites(dsm) |
||
736 | |||
737 | return dsm[["application", "id", "bus", "scn_name", "p_set"]] |
||
738 | |||
739 | |||
740 | def calculate_potentials(s_flex, s_util, s_inc, s_dec, delta_t, dsm): |
||
741 | """ |
||
742 | Calculate DSM-potential per bus using the methods by Heitkoetter et al.: |
||
743 | https://doi.org/10.1016/j.adapen.2020.100001 |
||
744 | |||
745 | Parameters |
||
746 | ---------- |
||
747 | s_flex: float |
||
748 | Feasability factor to account for socio-technical restrictions |
||
749 | s_util: float |
||
750 | Average annual utilisation rate |
||
751 | s_inc: float |
||
752 | Shiftable share of installed capacity up to which load can be |
||
753 | increased considering technical limitations |
||
754 | s_dec: float |
||
755 | Shiftable share of installed capacity up to which load can be |
||
756 | decreased considering technical limitations |
||
757 | delta_t: int |
||
758 | Maximum shift duration in hours |
||
759 | dsm: DataFrame |
||
760 | List of existing buses with DSM-potential including timeseries of |
||
761 | loads |
||
762 | """ |
||
763 | |||
764 | # copy relevant timeseries |
||
765 | timeseries = dsm["p_set"].copy() |
||
766 | |||
767 | # calculate scheduled load L(t) |
||
768 | |||
769 | scheduled_load = timeseries.copy() |
||
770 | |||
771 | for index, liste in scheduled_load.items(): |
||
772 | share = [item * s_flex for item in liste] |
||
773 | scheduled_load.loc[index] = share |
||
774 | |||
775 | # calculate maximum capacity Lambda |
||
776 | |||
777 | # calculate energy annual requirement |
||
778 | energy_annual = pd.Series(index=timeseries.index, dtype=float) |
||
779 | for index, liste in timeseries.items(): |
||
780 | energy_annual.loc[index] = sum(liste) |
||
781 | |||
782 | # calculate Lambda |
||
783 | lam = (energy_annual * s_flex) / (8760 * s_util) |
||
784 | |||
785 | # calculation of P_max and P_min |
||
786 | |||
787 | # P_max |
||
788 | p_max = scheduled_load.copy() |
||
789 | for index, liste in scheduled_load.items(): |
||
790 | lamb = lam.loc[index] |
||
791 | p_max.loc[index] = [max(0, lamb * s_inc - item) for item in liste] |
||
792 | |||
793 | # P_min |
||
794 | p_min = scheduled_load.copy() |
||
795 | for index, liste in scheduled_load.items(): |
||
796 | lamb = lam.loc[index] |
||
797 | p_min.loc[index] = [min(0, -(item - lamb * s_dec)) for item in liste] |
||
798 | |||
799 | # calculation of E_max and E_min |
||
800 | |||
801 | e_max = scheduled_load.copy() |
||
802 | e_min = scheduled_load.copy() |
||
803 | |||
804 | for index, liste in scheduled_load.items(): |
||
805 | emin = [] |
||
806 | emax = [] |
||
807 | for i in range(len(liste)): |
||
808 | if i + delta_t > len(liste): |
||
809 | emax.append( |
||
810 | (sum(liste[i:]) + sum(liste[: delta_t - (len(liste) - i)])) |
||
811 | ) |
||
812 | else: |
||
813 | emax.append(sum(liste[i : i + delta_t])) |
||
814 | if i - delta_t < 0: |
||
815 | emin.append( |
||
816 | ( |
||
817 | -1 |
||
818 | * ( |
||
819 | ( |
||
820 | sum(liste[:i]) |
||
821 | + sum(liste[len(liste) - delta_t + i :]) |
||
822 | ) |
||
823 | ) |
||
824 | ) |
||
825 | ) |
||
826 | else: |
||
827 | emin.append(-1 * sum(liste[i - delta_t : i])) |
||
828 | e_max.loc[index] = emax |
||
829 | e_min.loc[index] = emin |
||
830 | |||
831 | return p_max, p_min, e_max, e_min |
||
832 | |||
833 | |||
834 | def create_dsm_components( |
||
835 | con, p_max, p_min, e_max, e_min, dsm, export_aggregated=True |
||
836 | ): |
||
837 | """ |
||
838 | Create components representing DSM. |
||
839 | |||
840 | Parameters |
||
841 | ---------- |
||
842 | con : |
||
843 | Connection to database |
||
844 | p_max: DataFrame |
||
845 | Timeseries identifying maximum load increase |
||
846 | p_min: DataFrame |
||
847 | Timeseries identifying maximum load decrease |
||
848 | e_max: DataFrame |
||
849 | Timeseries identifying maximum energy amount to be preponed |
||
850 | e_min: DataFrame |
||
851 | Timeseries identifying maximum energy amount to be postponed |
||
852 | dsm: DataFrame |
||
853 | List of existing buses with DSM-potential including timeseries of loads |
||
854 | """ |
||
855 | if not export_aggregated: |
||
856 | # calculate P_nom and P per unit |
||
857 | p_nom = pd.Series(index=p_max.index, dtype=float) |
||
858 | for index, row in p_max.items(): |
||
859 | nom = max(max(row), abs(min(p_min.loc[index]))) |
||
860 | p_nom.loc[index] = nom |
||
861 | new = [element / nom for element in row] |
||
862 | p_max.loc[index] = new |
||
863 | new = [element / nom for element in p_min.loc[index]] |
||
864 | p_min.loc[index] = new |
||
865 | |||
866 | # calculate E_nom and E per unit |
||
867 | e_nom = pd.Series(index=p_min.index, dtype=float) |
||
868 | for index, row in e_max.items(): |
||
869 | nom = max(max(row), abs(min(e_min.loc[index]))) |
||
870 | e_nom.loc[index] = nom |
||
871 | new = [element / nom for element in row] |
||
872 | e_max.loc[index] = new |
||
873 | new = [element / nom for element in e_min.loc[index]] |
||
874 | e_min.loc[index] = new |
||
875 | |||
876 | # add DSM-buses to "original" buses |
||
877 | dsm_buses = gpd.GeoDataFrame(index=dsm.index) |
||
878 | dsm_buses["original_bus"] = dsm["bus"].copy() |
||
879 | dsm_buses["scn_name"] = dsm["scn_name"].copy() |
||
880 | |||
881 | # get original buses and add copy of relevant information |
||
882 | target1 = config.datasets()["DSM_CTS_industry"]["targets"]["bus"] |
||
883 | original_buses = db.select_geodataframe( |
||
884 | f"""SELECT bus_id, v_nom, scn_name, x, y, geom FROM |
||
885 | {target1['schema']}.{target1['table']}""", |
||
886 | geom_col="geom", |
||
887 | epsg=4326, |
||
888 | ) |
||
889 | |||
890 | # copy relevant information from original buses to DSM-buses |
||
891 | dsm_buses["index"] = dsm_buses.index |
||
892 | originals = original_buses[ |
||
893 | original_buses["bus_id"].isin(np.unique(dsm_buses["original_bus"])) |
||
894 | ] |
||
895 | dsm_buses = originals.merge( |
||
896 | dsm_buses, |
||
897 | left_on=["bus_id", "scn_name"], |
||
898 | right_on=["original_bus", "scn_name"], |
||
899 | ) |
||
900 | dsm_buses.index = dsm_buses["index"] |
||
901 | dsm_buses.drop(["bus_id", "index"], axis=1, inplace=True) |
||
902 | |||
903 | # new bus_ids for DSM-buses |
||
904 | max_id = original_buses["bus_id"].max() |
||
905 | if np.isnan(max_id): |
||
906 | max_id = 0 |
||
907 | dsm_id = max_id + 1 |
||
908 | bus_id = pd.Series(index=dsm_buses.index, dtype=int) |
||
909 | |||
910 | # Get number of DSM buses for both scenarios |
||
911 | rows_per_scenario = ( |
||
912 | dsm_buses.groupby("scn_name").count().original_bus.to_dict() |
||
913 | ) |
||
914 | |||
915 | # Assignment of DSM ids |
||
916 | bus_id.iloc[: rows_per_scenario.get("eGon2035", 0)] = range( |
||
917 | dsm_id, dsm_id + rows_per_scenario.get("eGon2035", 0) |
||
918 | ) |
||
919 | |||
920 | bus_id.iloc[ |
||
921 | rows_per_scenario.get("eGon2035", 0) : rows_per_scenario.get( |
||
922 | "eGon2035", 0 |
||
923 | ) |
||
924 | + rows_per_scenario.get("eGon100RE", 0) |
||
925 | ] = range(dsm_id, dsm_id + rows_per_scenario.get("eGon100RE", 0)) |
||
926 | |||
927 | dsm_buses["bus_id"] = bus_id |
||
928 | |||
929 | # add links from "orignal" buses to DSM-buses |
||
930 | |||
931 | dsm_links = pd.DataFrame(index=dsm_buses.index) |
||
932 | dsm_links["original_bus"] = dsm_buses["original_bus"].copy() |
||
933 | dsm_links["dsm_bus"] = dsm_buses["bus_id"].copy() |
||
934 | dsm_links["scn_name"] = dsm_buses["scn_name"].copy() |
||
935 | |||
936 | # set link_id |
||
937 | target2 = config.datasets()["DSM_CTS_industry"]["targets"]["link"] |
||
938 | sql = f"""SELECT link_id FROM {target2['schema']}.{target2['table']}""" |
||
939 | max_id = pd.read_sql_query(sql, con) |
||
940 | max_id = max_id["link_id"].max() |
||
941 | if np.isnan(max_id): |
||
942 | max_id = 0 |
||
943 | dsm_id = max_id + 1 |
||
944 | link_id = pd.Series(index=dsm_buses.index, dtype=int) |
||
945 | |||
946 | # Assignment of link ids |
||
947 | link_id.iloc[: rows_per_scenario.get("eGon2035", 0)] = range( |
||
948 | dsm_id, dsm_id + rows_per_scenario.get("eGon2035", 0) |
||
949 | ) |
||
950 | |||
951 | link_id.iloc[ |
||
952 | rows_per_scenario.get("eGon2035", 0) : rows_per_scenario.get( |
||
953 | "eGon2035", 0 |
||
954 | ) |
||
955 | + rows_per_scenario.get("eGon100RE", 0) |
||
956 | ] = range(dsm_id, dsm_id + rows_per_scenario.get("eGon100RE", 0)) |
||
957 | |||
958 | dsm_links["link_id"] = link_id |
||
959 | |||
960 | # add calculated timeseries to df to be returned |
||
961 | if not export_aggregated: |
||
962 | dsm_links["p_nom"] = p_nom |
||
963 | dsm_links["p_min"] = p_min |
||
964 | dsm_links["p_max"] = p_max |
||
965 | |||
966 | # add DSM-stores |
||
967 | |||
968 | dsm_stores = pd.DataFrame(index=dsm_buses.index) |
||
969 | dsm_stores["bus"] = dsm_buses["bus_id"].copy() |
||
970 | dsm_stores["scn_name"] = dsm_buses["scn_name"].copy() |
||
971 | dsm_stores["original_bus"] = dsm_buses["original_bus"].copy() |
||
972 | |||
973 | # set store_id |
||
974 | target3 = config.datasets()["DSM_CTS_industry"]["targets"]["store"] |
||
975 | sql = f"""SELECT store_id FROM {target3['schema']}.{target3['table']}""" |
||
976 | max_id = pd.read_sql_query(sql, con) |
||
977 | max_id = max_id["store_id"].max() |
||
978 | if np.isnan(max_id): |
||
979 | max_id = 0 |
||
980 | dsm_id = max_id + 1 |
||
981 | store_id = pd.Series(index=dsm_buses.index, dtype=int) |
||
982 | |||
983 | # Assignment of store ids |
||
984 | store_id.iloc[: rows_per_scenario.get("eGon2035", 0)] = range( |
||
985 | dsm_id, dsm_id + rows_per_scenario.get("eGon2035", 0) |
||
986 | ) |
||
987 | |||
988 | store_id.iloc[ |
||
989 | rows_per_scenario.get("eGon2035", 0) : rows_per_scenario.get( |
||
990 | "eGon2035", 0 |
||
991 | ) |
||
992 | + rows_per_scenario.get("eGon100RE", 0) |
||
993 | ] = range(dsm_id, dsm_id + rows_per_scenario.get("eGon100RE", 0)) |
||
994 | |||
995 | dsm_stores["store_id"] = store_id |
||
996 | |||
997 | # add calculated timeseries to df to be returned |
||
998 | if not export_aggregated: |
||
999 | dsm_stores["e_nom"] = e_nom |
||
1000 | dsm_stores["e_min"] = e_min |
||
1001 | dsm_stores["e_max"] = e_max |
||
1002 | |||
1003 | return dsm_buses, dsm_links, dsm_stores |
||
1004 | |||
1005 | |||
1006 | def aggregate_components(df_dsm_buses, df_dsm_links, df_dsm_stores): |
||
1007 | # aggregate buses |
||
1008 | |||
1009 | grouper = [df_dsm_buses.original_bus, df_dsm_buses.scn_name] |
||
1010 | |||
1011 | df_dsm_buses = df_dsm_buses.groupby(grouper).first() |
||
1012 | |||
1013 | df_dsm_buses.reset_index(inplace=True) |
||
1014 | df_dsm_buses.sort_values("scn_name", inplace=True) |
||
1015 | |||
1016 | # aggregate links |
||
1017 | |||
1018 | df_dsm_links["p_max"] = df_dsm_links["p_max"].apply(lambda x: np.array(x)) |
||
1019 | df_dsm_links["p_min"] = df_dsm_links["p_min"].apply(lambda x: np.array(x)) |
||
1020 | |||
1021 | grouper = [df_dsm_links.original_bus, df_dsm_links.scn_name] |
||
1022 | |||
1023 | p_max = df_dsm_links.groupby(grouper)["p_max"].apply(np.sum) |
||
1024 | p_min = df_dsm_links.groupby(grouper)["p_min"].apply(np.sum) |
||
1025 | |||
1026 | df_dsm_links = df_dsm_links.groupby(grouper).first() |
||
1027 | df_dsm_links.p_max = p_max |
||
1028 | df_dsm_links.p_min = p_min |
||
1029 | |||
1030 | df_dsm_links.reset_index(inplace=True) |
||
1031 | df_dsm_links.sort_values("scn_name", inplace=True) |
||
1032 | |||
1033 | # calculate P_nom and P per unit |
||
1034 | for index, row in df_dsm_links.iterrows(): |
||
1035 | nom = max(max(row.p_max), abs(min(row.p_min))) |
||
1036 | df_dsm_links.at[index, "p_nom"] = nom |
||
1037 | |||
1038 | df_dsm_links["p_max"] = df_dsm_links["p_max"] / df_dsm_links["p_nom"] |
||
1039 | df_dsm_links["p_min"] = df_dsm_links["p_min"] / df_dsm_links["p_nom"] |
||
1040 | |||
1041 | df_dsm_links["p_max"] = df_dsm_links["p_max"].apply(lambda x: list(x)) |
||
1042 | df_dsm_links["p_min"] = df_dsm_links["p_min"].apply(lambda x: list(x)) |
||
1043 | |||
1044 | # aggregate stores |
||
1045 | df_dsm_stores["e_max"] = df_dsm_stores["e_max"].apply( |
||
1046 | lambda x: np.array(x) |
||
1047 | ) |
||
1048 | df_dsm_stores["e_min"] = df_dsm_stores["e_min"].apply( |
||
1049 | lambda x: np.array(x) |
||
1050 | ) |
||
1051 | |||
1052 | grouper = [df_dsm_stores.original_bus, df_dsm_stores.scn_name] |
||
1053 | |||
1054 | e_max = df_dsm_stores.groupby(grouper)["e_max"].apply(np.sum) |
||
1055 | e_min = df_dsm_stores.groupby(grouper)["e_min"].apply(np.sum) |
||
1056 | |||
1057 | df_dsm_stores = df_dsm_stores.groupby(grouper).first() |
||
1058 | df_dsm_stores.e_max = e_max |
||
1059 | df_dsm_stores.e_min = e_min |
||
1060 | |||
1061 | df_dsm_stores.reset_index(inplace=True) |
||
1062 | df_dsm_stores.sort_values("scn_name", inplace=True) |
||
1063 | |||
1064 | # calculate E_nom and E per unit |
||
1065 | for index, row in df_dsm_stores.iterrows(): |
||
1066 | nom = max(max(row.e_max), abs(min(row.e_min))) |
||
1067 | df_dsm_stores.at[index, "e_nom"] = nom |
||
1068 | |||
1069 | df_dsm_stores["e_max"] = df_dsm_stores["e_max"] / df_dsm_stores["e_nom"] |
||
1070 | df_dsm_stores["e_min"] = df_dsm_stores["e_min"] / df_dsm_stores["e_nom"] |
||
1071 | |||
1072 | df_dsm_stores["e_max"] = df_dsm_stores["e_max"].apply(lambda x: list(x)) |
||
1073 | df_dsm_stores["e_min"] = df_dsm_stores["e_min"].apply(lambda x: list(x)) |
||
1074 | |||
1075 | # select new bus_ids for aggregated buses and add to links and stores |
||
1076 | bus_id = db.next_etrago_id("bus", len(df_dsm_buses.index)) |
||
1077 | |||
1078 | df_dsm_buses["bus_id"] = bus_id |
||
1079 | df_dsm_links["dsm_bus"] = bus_id |
||
1080 | df_dsm_stores["bus"] = bus_id |
||
1081 | |||
1082 | # select new link_ids for aggregated links |
||
1083 | link_id = db.next_etrago_id("Link", len(df_dsm_links.index)) |
||
1084 | |||
1085 | df_dsm_links["link_id"] = link_id |
||
1086 | |||
1087 | # select new store_ids to aggregated stores |
||
1088 | |||
1089 | store_id = db.next_etrago_id("Store", len(df_dsm_stores.index)) |
||
1090 | |||
1091 | df_dsm_stores["store_id"] = store_id |
||
1092 | |||
1093 | return df_dsm_buses, df_dsm_links, df_dsm_stores |
||
1094 | |||
1095 | |||
1096 | def data_export(dsm_buses, dsm_links, dsm_stores, carrier): |
||
1097 | """ |
||
1098 | Export new components to database. |
||
1099 | |||
1100 | Parameters |
||
1101 | ---------- |
||
1102 | dsm_buses: DataFrame |
||
1103 | Buses representing locations of DSM-potential |
||
1104 | dsm_links: DataFrame |
||
1105 | Links connecting DSM-buses and DSM-stores |
||
1106 | dsm_stores: DataFrame |
||
1107 | Stores representing DSM-potential |
||
1108 | carrier: str |
||
1109 | Remark to be filled in column 'carrier' identifying DSM-potential |
||
1110 | """ |
||
1111 | |||
1112 | targets = config.datasets()["DSM_CTS_industry"]["targets"] |
||
1113 | |||
1114 | # dsm_buses |
||
1115 | |||
1116 | insert_buses = gpd.GeoDataFrame( |
||
1117 | index=dsm_buses.index, |
||
1118 | data=dsm_buses["geom"], |
||
1119 | geometry="geom", |
||
1120 | crs="EPSG:4326", |
||
1121 | ) |
||
1122 | insert_buses["scn_name"] = dsm_buses["scn_name"] |
||
1123 | insert_buses["bus_id"] = dsm_buses["bus_id"] |
||
1124 | insert_buses["v_nom"] = dsm_buses["v_nom"] |
||
1125 | insert_buses["carrier"] = carrier |
||
1126 | insert_buses["x"] = dsm_buses["x"] |
||
1127 | insert_buses["y"] = dsm_buses["y"] |
||
1128 | |||
1129 | # insert into database |
||
1130 | insert_buses.to_postgis( |
||
1131 | targets["bus"]["table"], |
||
1132 | con=db.engine(), |
||
1133 | schema=targets["bus"]["schema"], |
||
1134 | if_exists="append", |
||
1135 | index=False, |
||
1136 | dtype={"geom": "geometry"}, |
||
1137 | ) |
||
1138 | |||
1139 | # dsm_links |
||
1140 | |||
1141 | insert_links = pd.DataFrame(index=dsm_links.index) |
||
1142 | insert_links["scn_name"] = dsm_links["scn_name"] |
||
1143 | insert_links["link_id"] = dsm_links["link_id"] |
||
1144 | insert_links["bus0"] = dsm_links["original_bus"] |
||
1145 | insert_links["bus1"] = dsm_links["dsm_bus"] |
||
1146 | insert_links["carrier"] = carrier |
||
1147 | insert_links["p_nom"] = dsm_links["p_nom"] |
||
1148 | |||
1149 | # insert into database |
||
1150 | insert_links.to_sql( |
||
1151 | targets["link"]["table"], |
||
1152 | con=db.engine(), |
||
1153 | schema=targets["link"]["schema"], |
||
1154 | if_exists="append", |
||
1155 | index=False, |
||
1156 | ) |
||
1157 | |||
1158 | insert_links_timeseries = pd.DataFrame(index=dsm_links.index) |
||
1159 | insert_links_timeseries["scn_name"] = dsm_links["scn_name"] |
||
1160 | insert_links_timeseries["link_id"] = dsm_links["link_id"] |
||
1161 | insert_links_timeseries["p_min_pu"] = dsm_links["p_min"] |
||
1162 | insert_links_timeseries["p_max_pu"] = dsm_links["p_max"] |
||
1163 | insert_links_timeseries["temp_id"] = 1 |
||
1164 | |||
1165 | # insert into database |
||
1166 | insert_links_timeseries.to_sql( |
||
1167 | targets["link_timeseries"]["table"], |
||
1168 | con=db.engine(), |
||
1169 | schema=targets["link_timeseries"]["schema"], |
||
1170 | if_exists="append", |
||
1171 | index=False, |
||
1172 | ) |
||
1173 | |||
1174 | # dsm_stores |
||
1175 | |||
1176 | insert_stores = pd.DataFrame(index=dsm_stores.index) |
||
1177 | insert_stores["scn_name"] = dsm_stores["scn_name"] |
||
1178 | insert_stores["store_id"] = dsm_stores["store_id"] |
||
1179 | insert_stores["bus"] = dsm_stores["bus"] |
||
1180 | insert_stores["carrier"] = carrier |
||
1181 | insert_stores["e_nom"] = dsm_stores["e_nom"] |
||
1182 | |||
1183 | # insert into database |
||
1184 | insert_stores.to_sql( |
||
1185 | targets["store"]["table"], |
||
1186 | con=db.engine(), |
||
1187 | schema=targets["store"]["schema"], |
||
1188 | if_exists="append", |
||
1189 | index=False, |
||
1190 | ) |
||
1191 | |||
1192 | insert_stores_timeseries = pd.DataFrame(index=dsm_stores.index) |
||
1193 | insert_stores_timeseries["scn_name"] = dsm_stores["scn_name"] |
||
1194 | insert_stores_timeseries["store_id"] = dsm_stores["store_id"] |
||
1195 | insert_stores_timeseries["e_min_pu"] = dsm_stores["e_min"] |
||
1196 | insert_stores_timeseries["e_max_pu"] = dsm_stores["e_max"] |
||
1197 | insert_stores_timeseries["temp_id"] = 1 |
||
1198 | |||
1199 | # insert into database |
||
1200 | insert_stores_timeseries.to_sql( |
||
1201 | targets["store_timeseries"]["table"], |
||
1202 | con=db.engine(), |
||
1203 | schema=targets["store_timeseries"]["schema"], |
||
1204 | if_exists="append", |
||
1205 | index=False, |
||
1206 | ) |
||
1207 | |||
1208 | |||
1209 | def delete_dsm_entries(carrier): |
||
1210 | """ |
||
1211 | Deletes DSM-components from database if they already exist before creating |
||
1212 | new ones. |
||
1213 | |||
1214 | Parameters |
||
1215 | ---------- |
||
1216 | carrier: str |
||
1217 | Remark in column 'carrier' identifying DSM-potential |
||
1218 | """ |
||
1219 | |||
1220 | targets = config.datasets()["DSM_CTS_industry"]["targets"] |
||
1221 | |||
1222 | # buses |
||
1223 | |||
1224 | sql = ( |
||
1225 | f"DELETE FROM {targets['bus']['schema']}.{targets['bus']['table']} b " |
||
1226 | f"WHERE (b.carrier LIKE '{carrier}');" |
||
1227 | ) |
||
1228 | db.execute_sql(sql) |
||
1229 | |||
1230 | # links |
||
1231 | |||
1232 | sql = f""" |
||
1233 | DELETE FROM {targets["link_timeseries"]["schema"]}. |
||
1234 | {targets["link_timeseries"]["table"]} t |
||
1235 | WHERE t.link_id IN |
||
1236 | ( |
||
1237 | SELECT l.link_id FROM {targets["link"]["schema"]}. |
||
1238 | {targets["link"]["table"]} l |
||
1239 | WHERE l.carrier LIKE '{carrier}' |
||
1240 | ); |
||
1241 | """ |
||
1242 | |||
1243 | db.execute_sql(sql) |
||
1244 | |||
1245 | sql = f""" |
||
1246 | DELETE FROM {targets["link"]["schema"]}. |
||
1247 | {targets["link"]["table"]} l |
||
1248 | WHERE (l.carrier LIKE '{carrier}'); |
||
1249 | """ |
||
1250 | |||
1251 | db.execute_sql(sql) |
||
1252 | |||
1253 | # stores |
||
1254 | |||
1255 | sql = f""" |
||
1256 | DELETE FROM {targets["store_timeseries"]["schema"]}. |
||
1257 | {targets["store_timeseries"]["table"]} t |
||
1258 | WHERE t.store_id IN |
||
1259 | ( |
||
1260 | SELECT s.store_id FROM {targets["store"]["schema"]}. |
||
1261 | {targets["store"]["table"]} s |
||
1262 | WHERE s.carrier LIKE '{carrier}' |
||
1263 | ); |
||
1264 | """ |
||
1265 | |||
1266 | db.execute_sql(sql) |
||
1267 | |||
1268 | sql = f""" |
||
1269 | DELETE FROM {targets["store"]["schema"]}.{targets["store"]["table"]} s |
||
1270 | WHERE (s.carrier LIKE '{carrier}'); |
||
1271 | """ |
||
1272 | |||
1273 | db.execute_sql(sql) |
||
1274 | |||
1275 | |||
1276 | def dsm_cts_ind( |
||
1277 | con=db.engine(), |
||
1278 | cts_cool_vent_ac_share=0.22, |
||
1279 | ind_vent_cool_share=0.039, |
||
1280 | ind_vent_share=0.017, |
||
1281 | ): |
||
1282 | """ |
||
1283 | Execute methodology to create and implement components for DSM considering |
||
1284 | |||
1285 | a) CTS per osm-area: combined potentials of cooling, ventilation and air |
||
1286 | conditioning |
||
1287 | b) Industry per osm-area: combined potentials of cooling and ventilation |
||
1288 | c) Industrial Sites: potentials of ventilation in sites of |
||
1289 | "Wirtschaftszweig" (WZ) 23 |
||
1290 | d) Industrial Sites: potentials of sites specified by subsectors |
||
1291 | identified by Schmidt (https://zenodo.org/record/3613767#.YTsGwVtCRhG): |
||
1292 | Paper, Recycled Paper, Pulp, Cement |
||
1293 | |||
1294 | Modelled using the methods by Heitkoetter et al.: |
||
1295 | https://doi.org/10.1016/j.adapen.2020.100001 |
||
1296 | |||
1297 | Parameters |
||
1298 | ---------- |
||
1299 | con : |
||
1300 | Connection to database |
||
1301 | cts_cool_vent_ac_share: float |
||
1302 | Share of cooling, ventilation and AC in CTS demand |
||
1303 | ind_vent_cool_share: float |
||
1304 | Share of cooling and ventilation in industry demand |
||
1305 | ind_vent_share: float |
||
1306 | Share of ventilation in industry demand in sites of WZ 23 |
||
1307 | |||
1308 | """ |
||
1309 | |||
1310 | # CTS per osm-area: cooling, ventilation and air conditioning |
||
1311 | |||
1312 | print(" ") |
||
1313 | print("CTS per osm-area: cooling, ventilation and air conditioning") |
||
1314 | print(" ") |
||
1315 | |||
1316 | dsm = cts_data_import(cts_cool_vent_ac_share) |
||
1317 | |||
1318 | # calculate combined potentials of cooling, ventilation and air |
||
1319 | # conditioning in CTS using combined parameters by Heitkoetter et al. |
||
1320 | p_max, p_min, e_max, e_min = calculate_potentials( |
||
1321 | s_flex=S_FLEX_CTS, |
||
1322 | s_util=S_UTIL_CTS, |
||
1323 | s_inc=S_INC_CTS, |
||
1324 | s_dec=S_DEC_CTS, |
||
1325 | delta_t=DELTA_T_CTS, |
||
1326 | dsm=dsm, |
||
1327 | ) |
||
1328 | |||
1329 | dsm_buses, dsm_links, dsm_stores = create_dsm_components( |
||
1330 | con, p_max, p_min, e_max, e_min, dsm |
||
1331 | ) |
||
1332 | |||
1333 | df_dsm_buses = dsm_buses.copy() |
||
1334 | df_dsm_links = dsm_links.copy() |
||
1335 | df_dsm_stores = dsm_stores.copy() |
||
1336 | |||
1337 | # industry per osm-area: cooling and ventilation |
||
1338 | |||
1339 | print(" ") |
||
1340 | print("industry per osm-area: cooling and ventilation") |
||
1341 | print(" ") |
||
1342 | |||
1343 | dsm = ind_osm_data_import(ind_vent_cool_share) |
||
1344 | |||
1345 | # calculate combined potentials of cooling and ventilation in industrial |
||
1346 | # sector using combined parameters by Heitkoetter et al. |
||
1347 | p_max, p_min, e_max, e_min = calculate_potentials( |
||
1348 | s_flex=S_FLEX_OSM, |
||
1349 | s_util=S_UTIL_OSM, |
||
1350 | s_inc=S_INC_OSM, |
||
1351 | s_dec=S_DEC_OSM, |
||
1352 | delta_t=DELTA_T_OSM, |
||
1353 | dsm=dsm, |
||
1354 | ) |
||
1355 | |||
1356 | dsm_buses, dsm_links, dsm_stores = create_dsm_components( |
||
1357 | con, p_max, p_min, e_max, e_min, dsm |
||
1358 | ) |
||
1359 | |||
1360 | df_dsm_buses = gpd.GeoDataFrame( |
||
1361 | pd.concat([df_dsm_buses, dsm_buses], ignore_index=True), |
||
1362 | crs="EPSG:4326", |
||
1363 | geometry="geom", |
||
1364 | ) |
||
1365 | df_dsm_links = pd.DataFrame( |
||
1366 | pd.concat([df_dsm_links, dsm_links], ignore_index=True) |
||
1367 | ) |
||
1368 | df_dsm_stores = pd.DataFrame( |
||
1369 | pd.concat([df_dsm_stores, dsm_stores], ignore_index=True) |
||
1370 | ) |
||
1371 | |||
1372 | # industry sites |
||
1373 | |||
1374 | # industry sites: different applications |
||
1375 | |||
1376 | dsm = ind_sites_data_import() |
||
1377 | |||
1378 | print(" ") |
||
1379 | print("industry sites: paper") |
||
1380 | print(" ") |
||
1381 | |||
1382 | dsm_paper = gpd.GeoDataFrame( |
||
1383 | dsm[ |
||
1384 | dsm["application"].isin( |
||
1385 | [ |
||
1386 | "Graphic Paper", |
||
1387 | "Packing Paper and Board", |
||
1388 | "Hygiene Paper", |
||
1389 | "Technical/Special Paper and Board", |
||
1390 | ] |
||
1391 | ) |
||
1392 | ] |
||
1393 | ) |
||
1394 | |||
1395 | # calculate potentials of industrial sites with paper-applications |
||
1396 | # using parameters by Heitkoetter et al. |
||
1397 | p_max, p_min, e_max, e_min = calculate_potentials( |
||
1398 | s_flex=S_FLEX_PAPER, |
||
1399 | s_util=S_UTIL_PAPER, |
||
1400 | s_inc=S_INC_PAPER, |
||
1401 | s_dec=S_DEC_PAPER, |
||
1402 | delta_t=DELTA_T_PAPER, |
||
1403 | dsm=dsm_paper, |
||
1404 | ) |
||
1405 | |||
1406 | dsm_buses, dsm_links, dsm_stores = create_dsm_components( |
||
1407 | con, p_max, p_min, e_max, e_min, dsm_paper |
||
1408 | ) |
||
1409 | |||
1410 | df_dsm_buses = gpd.GeoDataFrame( |
||
1411 | pd.concat([df_dsm_buses, dsm_buses], ignore_index=True), |
||
1412 | crs="EPSG:4326", |
||
1413 | geometry="geom", |
||
1414 | ) |
||
1415 | df_dsm_links = pd.DataFrame( |
||
1416 | pd.concat([df_dsm_links, dsm_links], ignore_index=True) |
||
1417 | ) |
||
1418 | df_dsm_stores = pd.DataFrame( |
||
1419 | pd.concat([df_dsm_stores, dsm_stores], ignore_index=True) |
||
1420 | ) |
||
1421 | |||
1422 | print(" ") |
||
1423 | print("industry sites: recycled paper") |
||
1424 | print(" ") |
||
1425 | |||
1426 | # calculate potentials of industrial sites with recycled paper-applications |
||
1427 | # using parameters by Heitkoetter et al. |
||
1428 | dsm_recycled_paper = gpd.GeoDataFrame( |
||
1429 | dsm[dsm["application"] == "Recycled Paper"] |
||
1430 | ) |
||
1431 | |||
1432 | p_max, p_min, e_max, e_min = calculate_potentials( |
||
1433 | s_flex=S_FLEX_RECYCLED_PAPER, |
||
1434 | s_util=S_UTIL_RECYCLED_PAPER, |
||
1435 | s_inc=S_INC_RECYCLED_PAPER, |
||
1436 | s_dec=S_DEC_RECYCLED_PAPER, |
||
1437 | delta_t=DELTA_T_RECYCLED_PAPER, |
||
1438 | dsm=dsm_recycled_paper, |
||
1439 | ) |
||
1440 | |||
1441 | dsm_buses, dsm_links, dsm_stores = create_dsm_components( |
||
1442 | con, p_max, p_min, e_max, e_min, dsm_recycled_paper |
||
1443 | ) |
||
1444 | |||
1445 | df_dsm_buses = gpd.GeoDataFrame( |
||
1446 | pd.concat([df_dsm_buses, dsm_buses], ignore_index=True), |
||
1447 | crs="EPSG:4326", |
||
1448 | geometry="geom", |
||
1449 | ) |
||
1450 | df_dsm_links = pd.DataFrame( |
||
1451 | pd.concat([df_dsm_links, dsm_links], ignore_index=True) |
||
1452 | ) |
||
1453 | df_dsm_stores = pd.DataFrame( |
||
1454 | pd.concat([df_dsm_stores, dsm_stores], ignore_index=True) |
||
1455 | ) |
||
1456 | |||
1457 | print(" ") |
||
1458 | print("industry sites: pulp") |
||
1459 | print(" ") |
||
1460 | |||
1461 | dsm_pulp = gpd.GeoDataFrame(dsm[dsm["application"] == "Mechanical Pulp"]) |
||
1462 | |||
1463 | # calculate potentials of industrial sites with pulp-applications |
||
1464 | # using parameters by Heitkoetter et al. |
||
1465 | p_max, p_min, e_max, e_min = calculate_potentials( |
||
1466 | s_flex=S_FLEX_PULP, |
||
1467 | s_util=S_UTIL_PULP, |
||
1468 | s_inc=S_INC_PULP, |
||
1469 | s_dec=S_DEC_PULP, |
||
1470 | delta_t=DELTA_T_PULP, |
||
1471 | dsm=dsm_pulp, |
||
1472 | ) |
||
1473 | |||
1474 | dsm_buses, dsm_links, dsm_stores = create_dsm_components( |
||
1475 | con, p_max, p_min, e_max, e_min, dsm_pulp |
||
1476 | ) |
||
1477 | |||
1478 | df_dsm_buses = gpd.GeoDataFrame( |
||
1479 | pd.concat([df_dsm_buses, dsm_buses], ignore_index=True), |
||
1480 | crs="EPSG:4326", |
||
1481 | geometry="geom", |
||
1482 | ) |
||
1483 | df_dsm_links = pd.DataFrame( |
||
1484 | pd.concat([df_dsm_links, dsm_links], ignore_index=True) |
||
1485 | ) |
||
1486 | df_dsm_stores = pd.DataFrame( |
||
1487 | pd.concat([df_dsm_stores, dsm_stores], ignore_index=True) |
||
1488 | ) |
||
1489 | |||
1490 | # industry sites: cement |
||
1491 | |||
1492 | print(" ") |
||
1493 | print("industry sites: cement") |
||
1494 | print(" ") |
||
1495 | |||
1496 | dsm_cement = gpd.GeoDataFrame(dsm[dsm["application"] == "Cement Mill"]) |
||
1497 | |||
1498 | # calculate potentials of industrial sites with cement-applications |
||
1499 | # using parameters by Heitkoetter et al. |
||
1500 | p_max, p_min, e_max, e_min = calculate_potentials( |
||
1501 | s_flex=S_FLEX_CEMENT, |
||
1502 | s_util=S_UTIL_CEMENT, |
||
1503 | s_inc=S_INC_CEMENT, |
||
1504 | s_dec=S_DEC_CEMENT, |
||
1505 | delta_t=DELTA_T_CEMENT, |
||
1506 | dsm=dsm_cement, |
||
1507 | ) |
||
1508 | |||
1509 | dsm_buses, dsm_links, dsm_stores = create_dsm_components( |
||
1510 | con, p_max, p_min, e_max, e_min, dsm_cement |
||
1511 | ) |
||
1512 | |||
1513 | df_dsm_buses = gpd.GeoDataFrame( |
||
1514 | pd.concat([df_dsm_buses, dsm_buses], ignore_index=True), |
||
1515 | crs="EPSG:4326", |
||
1516 | geometry="geom", |
||
1517 | ) |
||
1518 | df_dsm_links = pd.DataFrame( |
||
1519 | pd.concat([df_dsm_links, dsm_links], ignore_index=True) |
||
1520 | ) |
||
1521 | df_dsm_stores = pd.DataFrame( |
||
1522 | pd.concat([df_dsm_stores, dsm_stores], ignore_index=True) |
||
1523 | ) |
||
1524 | |||
1525 | # industry sites: ventilation in WZ23 |
||
1526 | |||
1527 | print(" ") |
||
1528 | print("industry sites: ventilation in WZ23") |
||
1529 | print(" ") |
||
1530 | |||
1531 | dsm = ind_sites_vent_data_import(ind_vent_share, wz=WZ) |
||
1532 | |||
1533 | # drop entries of Cement Mills whose DSM-potentials have already been |
||
1534 | # modelled |
||
1535 | cement = np.unique(dsm_cement["bus"].values) |
||
1536 | index_names = np.array(dsm[dsm["bus"].isin(cement)].index) |
||
1537 | dsm.drop(index_names, inplace=True) |
||
1538 | |||
1539 | # calculate potentials of ventilation in industrial sites of WZ 23 |
||
1540 | # using parameters by Heitkoetter et al. |
||
1541 | p_max, p_min, e_max, e_min = calculate_potentials( |
||
1542 | s_flex=S_FLEX_WZ, |
||
1543 | s_util=S_UTIL_WZ, |
||
1544 | s_inc=S_INC_WZ, |
||
1545 | s_dec=S_DEC_WZ, |
||
1546 | delta_t=DELTA_T_WZ, |
||
1547 | dsm=dsm, |
||
1548 | ) |
||
1549 | |||
1550 | dsm_buses, dsm_links, dsm_stores = create_dsm_components( |
||
1551 | con, p_max, p_min, e_max, e_min, dsm |
||
1552 | ) |
||
1553 | |||
1554 | df_dsm_buses = gpd.GeoDataFrame( |
||
1555 | pd.concat([df_dsm_buses, dsm_buses], ignore_index=True), |
||
1556 | crs="EPSG:4326", |
||
1557 | geometry="geom", |
||
1558 | ) |
||
1559 | df_dsm_links = pd.DataFrame( |
||
1560 | pd.concat([df_dsm_links, dsm_links], ignore_index=True) |
||
1561 | ) |
||
1562 | df_dsm_stores = pd.DataFrame( |
||
1563 | pd.concat([df_dsm_stores, dsm_stores], ignore_index=True) |
||
1564 | ) |
||
1565 | |||
1566 | # aggregate DSM components per substation |
||
1567 | dsm_buses, dsm_links, dsm_stores = aggregate_components( |
||
1568 | df_dsm_buses, df_dsm_links, df_dsm_stores |
||
1569 | ) |
||
1570 | |||
1571 | # export aggregated DSM components to database |
||
1572 | |||
1573 | delete_dsm_entries("dsm-cts") |
||
1574 | delete_dsm_entries("dsm-ind-osm") |
||
1575 | delete_dsm_entries("dsm-ind-sites") |
||
1576 | delete_dsm_entries("dsm") |
||
1577 | |||
1578 | data_export(dsm_buses, dsm_links, dsm_stores, carrier="dsm") |
||
1579 | |||
1580 | |||
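The recycled paper, pulp and cement blocks above repeat the same sequence — filter the application, call calculate_potentials, call create_dsm_components, then concat the results into df_dsm_buses, df_dsm_links and df_dsm_stores — and differ only in their parameter constants. A minimal refactoring sketch for collapsing these duplicated blocks into one data-driven loop is shown below; APPLICATION_PARAMS and the loop body are illustrative names only and assume the existing helper signatures stay unchanged:

    # Illustrative sketch, not part of the module: one loop instead of four
    # near-identical blocks inside dsm_cts_ind().
    APPLICATION_PARAMS = [
        # (application labels, s_flex, s_util, s_inc, s_dec, delta_t)
        (
            [
                "Graphic Paper",
                "Packing Paper and Board",
                "Hygiene Paper",
                "Technical/Special Paper and Board",
            ],
            S_FLEX_PAPER, S_UTIL_PAPER, S_INC_PAPER, S_DEC_PAPER, DELTA_T_PAPER,
        ),
        (
            ["Recycled Paper"],
            S_FLEX_RECYCLED_PAPER, S_UTIL_RECYCLED_PAPER,
            S_INC_RECYCLED_PAPER, S_DEC_RECYCLED_PAPER, DELTA_T_RECYCLED_PAPER,
        ),
        (
            ["Mechanical Pulp"],
            S_FLEX_PULP, S_UTIL_PULP, S_INC_PULP, S_DEC_PULP, DELTA_T_PULP,
        ),
        (
            ["Cement Mill"],
            S_FLEX_CEMENT, S_UTIL_CEMENT, S_INC_CEMENT, S_DEC_CEMENT, DELTA_T_CEMENT,
        ),
    ]

    for labels, s_flex, s_util, s_inc, s_dec, delta_t in APPLICATION_PARAMS:
        # select the sites of this application and derive their DSM potentials
        dsm_app = gpd.GeoDataFrame(dsm[dsm["application"].isin(labels)])
        p_max, p_min, e_max, e_min = calculate_potentials(
            s_flex=s_flex, s_util=s_util, s_inc=s_inc, s_dec=s_dec,
            delta_t=delta_t, dsm=dsm_app,
        )
        dsm_buses, dsm_links, dsm_stores = create_dsm_components(
            con, p_max, p_min, e_max, e_min, dsm_app
        )
        # collect the components exactly as the individual blocks do today
        df_dsm_buses = gpd.GeoDataFrame(
            pd.concat([df_dsm_buses, dsm_buses], ignore_index=True),
            crs="EPSG:4326",
            geometry="geom",
        )
        df_dsm_links = pd.DataFrame(
            pd.concat([df_dsm_links, dsm_links], ignore_index=True)
        )
        df_dsm_stores = pd.DataFrame(
            pd.concat([df_dsm_stores, dsm_stores], ignore_index=True)
        )

The WZ23 ventilation step would keep its separate data import and the cement-bus filtering, so it stays outside such a loop.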
1581 | def create_table(df, table, engine=CON): |
||
1582 | """Create table""" |
||
1583 | table.__table__.drop(bind=engine, checkfirst=True) |
||
1584 | table.__table__.create(bind=engine, checkfirst=True) |
||
1585 | |||
1586 | df.to_sql( |
||
1587 | name=table.__table__.name, |
||
1588 | schema=table.__table__.schema, |
||
1589 | con=engine, |
||
1590 | if_exists="append", |
||
1591 | index=False, |
||
1592 | ) |
||
1593 | |||
1594 | |||
1595 | def div_list(lst: list, div: float): |
||
1596 | return [v / div for v in lst] |
||
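div_list divides every element of a list by a scalar. In dsm_cts_ind_individual below it is applied to the p_set profiles with the respective demand share as divisor, apparently to rescale the share-weighted profile back to the full demand. A small worked example with illustrative values:

    # The real calls pass cts_cool_vent_ac_share or ind_vent_cool_share as div.
    div_list([2.0, 3.5, 1.0], div=0.5)  # -> [4.0, 7.0, 2.0]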
1597 | |||
1598 | |||
1599 | def dsm_cts_ind_individual( |
||
1600 | cts_cool_vent_ac_share=CTS_COOL_VENT_AC_SHARE, |
||
1601 | ind_vent_cool_share=IND_VENT_COOL_SHARE, |
||
1602 | ind_vent_share=IND_VENT_SHARE, |
||
1603 | ): |
||
1604 | """ |
||
1605 | Calculate and store individual (non-aggregated) DSM time series, considering |
||
1606 | |||
1607 | a) CTS per osm-area: combined potentials of cooling, ventilation and air |
||
1608 | conditioning |
||
1609 | b) Industry per osm-area: combined potentials of cooling and ventilation |
||
1610 | c) Industrial Sites: potentials of ventilation in sites of |
||
1611 | "Wirtschaftszweig" (WZ) 23 |
||
1612 | d) Industrial Sites: potentials of sites specified by subsectors |
||
1613 | identified by Schmidt (https://zenodo.org/record/3613767#.YTsGwVtCRhG): |
||
1614 | Paper, Recycled Paper, Pulp, Cement |
||
1615 | |||
1616 | Modelled using the methods by Heitkoetter et al.: |
||
1617 | https://doi.org/10.1016/j.adapen.2020.100001 |
||
1618 | |||
1619 | Parameters |
||
1620 | ---------- |
||
1621 | cts_cool_vent_ac_share: float |
||
1622 | Share of cooling, ventilation and AC in CTS demand |
||
1623 | ind_vent_cool_share: float |
||
1624 | Share of cooling and ventilation in industry demand |
||
1625 | ind_vent_share: float |
||
1626 | Share of ventilation in industry demand in sites of WZ 23 |
||
1627 | |||
1628 | """ |
||
1629 | |||
1630 | # CTS per osm-area: cooling, ventilation and air conditioning |
||
1631 | |||
1632 | print(" ") |
||
1633 | print("CTS per osm-area: cooling, ventilation and air conditioning") |
||
1634 | print(" ") |
||
1635 | |||
1636 | dsm = cts_data_import(cts_cool_vent_ac_share) |
||
1637 | |||
1638 | # calculate combined potentials of cooling, ventilation and air |
||
1639 | # conditioning in CTS using combined parameters by Heitkoetter et al. |
||
1640 | vals = calculate_potentials( |
||
1641 | s_flex=S_FLEX_CTS, |
||
1642 | s_util=S_UTIL_CTS, |
||
1643 | s_inc=S_INC_CTS, |
||
1644 | s_dec=S_DEC_CTS, |
||
1645 | delta_t=DELTA_T_CTS, |
||
1646 | dsm=dsm, |
||
1647 | ) |
||
1648 | |||
1649 | dsm = dsm.assign( |
||
1650 | p_set=dsm.p_set.apply(div_list, div=cts_cool_vent_ac_share) |
||
1651 | ) |
||
1652 | |||
1653 | base_columns = [ |
||
1654 | "bus", |
||
1655 | "scn_name", |
||
1656 | "p_set", |
||
1657 | "p_max", |
||
1658 | "p_min", |
||
1659 | "e_max", |
||
1660 | "e_min", |
||
1661 | ] |
||
1662 | |||
1663 | cts_df = pd.concat([dsm, *vals], axis=1, ignore_index=True) |
||
1664 | cts_df.columns = base_columns |
||
1665 | |||
1666 | print(" ") |
||
1667 | print("industry per osm-area: cooling and ventilation") |
||
1668 | print(" ") |
||
1669 | |||
1670 | dsm = ind_osm_data_import_individual(ind_vent_cool_share) |
||
1671 | |||
1672 | # calculate combined potentials of cooling and ventilation in industrial |
||
1673 | # sector using combined parameters by Heitkoetter et al. |
||
1674 | vals = calculate_potentials( |
||
1675 | s_flex=S_FLEX_OSM, |
||
1676 | s_util=S_UTIL_OSM, |
||
1677 | s_inc=S_INC_OSM, |
||
1678 | s_dec=S_DEC_OSM, |
||
1679 | delta_t=DELTA_T_OSM, |
||
1680 | dsm=dsm, |
||
1681 | ) |
||
1682 | |||
1683 | dsm = dsm.assign(p_set=dsm.p_set.apply(div_list, div=ind_vent_cool_share)) |
||
1684 | |||
1685 | columns = ["osm_id"] + base_columns |
||
1686 | |||
1687 | osm_df = pd.concat([dsm, *vals], axis=1, ignore_index=True) |
||
1688 | osm_df.columns = columns |
||
1689 | |||
1690 | # industry sites |
||
1691 | |||
1692 | # industry sites: different applications |
||
1693 | |||
1694 | dsm = ind_sites_data_import() |
||
1695 | |||
1696 | print(" ") |
||
1697 | print("industry sites: paper") |
||
1698 | print(" ") |
||
1699 | |||
1700 | dsm_paper = gpd.GeoDataFrame( |
||
1701 | dsm[ |
||
1702 | dsm["application"].isin( |
||
1703 | [ |
||
1704 | "Graphic Paper", |
||
1705 | "Packing Paper and Board", |
||
1706 | "Hygiene Paper", |
||
1707 | "Technical/Special Paper and Board", |
||
1708 | ] |
||
1709 | ) |
||
1710 | ] |
||
1711 | ) |
||
1712 | |||
1713 | # calculate potentials of industrial sites with paper-applications |
||
1714 | # using parameters by Heitkoetter et al. |
||
1715 | vals = calculate_potentials( |
||
1716 | s_flex=S_FLEX_PAPER, |
||
1717 | s_util=S_UTIL_PAPER, |
||
1718 | s_inc=S_INC_PAPER, |
||
1719 | s_dec=S_DEC_PAPER, |
||
1720 | delta_t=DELTA_T_PAPER, |
||
1721 | dsm=dsm_paper, |
||
1722 | ) |
||
1723 | |||
1724 | columns = ["application", "industrial_sites_id"] + base_columns |
||
1725 | |||
1726 | paper_df = pd.concat([dsm_paper, *vals], axis=1, ignore_index=True) |
||
1727 | paper_df.columns = columns |
||
1728 | |||
1729 | print(" ") |
||
1730 | print("industry sites: recycled paper") |
||
1731 | print(" ") |
||
1732 | |||
1733 | # calculate potentials of industrial sites with recycled paper-applications |
||
1734 | # using parameters by Heitkoetter et al. |
||
1735 | dsm_recycled_paper = gpd.GeoDataFrame( |
||
1736 | dsm[dsm["application"] == "Recycled Paper"] |
||
1737 | ) |
||
1738 | |||
1739 | vals = calculate_potentials( |
||
1740 | s_flex=S_FLEX_RECYCLED_PAPER, |
||
1741 | s_util=S_UTIL_RECYCLED_PAPER, |
||
1742 | s_inc=S_INC_RECYCLED_PAPER, |
||
1743 | s_dec=S_DEC_RECYCLED_PAPER, |
||
1744 | delta_t=DELTA_T_RECYCLED_PAPER, |
||
1745 | dsm=dsm_recycled_paper, |
||
1746 | ) |
||
1747 | |||
1748 | recycled_paper_df = pd.concat( |
||
1749 | [dsm_recycled_paper, *vals], axis=1, ignore_index=True |
||
1750 | ) |
||
1751 | recycled_paper_df.columns = columns |
||
1752 | |||
1753 | print(" ") |
||
1754 | print("industry sites: pulp") |
||
1755 | print(" ") |
||
1756 | |||
1757 | dsm_pulp = gpd.GeoDataFrame(dsm[dsm["application"] == "Mechanical Pulp"]) |
||
1758 | |||
1759 | # calculate potentials of industrial sites with pulp-applications |
||
1760 | # using parameters by Heitkoetter et al. |
||
1761 | vals = calculate_potentials( |
||
1762 | s_flex=S_FLEX_PULP, |
||
1763 | s_util=S_UTIL_PULP, |
||
1764 | s_inc=S_INC_PULP, |
||
1765 | s_dec=S_DEC_PULP, |
||
1766 | delta_t=DELTA_T_PULP, |
||
1767 | dsm=dsm_pulp, |
||
1768 | ) |
||
1769 | |||
1770 | pulp_df = pd.concat([dsm_pulp, *vals], axis=1, ignore_index=True) |
||
1771 | pulp_df.columns = columns |
||
1772 | |||
1773 | # industry sites: cement |
||
1774 | |||
1775 | print(" ") |
||
1776 | print("industry sites: cement") |
||
1777 | print(" ") |
||
1778 | |||
1779 | dsm_cement = gpd.GeoDataFrame(dsm[dsm["application"] == "Cement Mill"]) |
||
1780 | |||
1781 | # calculate potentials of industrial sites with cement-applications |
||
1782 | # using parameters by Heitkoetter et al. |
||
1783 | vals = calculate_potentials( |
||
1784 | s_flex=S_FLEX_CEMENT, |
||
1785 | s_util=S_UTIL_CEMENT, |
||
1786 | s_inc=S_INC_CEMENT, |
||
1787 | s_dec=S_DEC_CEMENT, |
||
1788 | delta_t=DELTA_T_CEMENT, |
||
1789 | dsm=dsm_cement, |
||
1790 | ) |
||
1791 | |||
1792 | cement_df = pd.concat([dsm_cement, *vals], axis=1, ignore_index=True) |
||
1793 | cement_df.columns = columns |
||
1794 | |||
1795 | ind_df = pd.concat( |
||
1796 | [paper_df, recycled_paper_df, pulp_df, cement_df], ignore_index=True |
||
1797 | ) |
||
1798 | |||
1799 | # industry sites: ventilation in WZ23 |
||
1800 | |||
1801 | print(" ") |
||
1802 | print("industry sites: ventilation in WZ23") |
||
1803 | print(" ") |
||
1804 | |||
1805 | dsm = ind_sites_vent_data_import_individual(ind_vent_share, wz=WZ) |
||
1806 | |||
1807 | # drop entries of Cement Mills whose DSM-potentials have already been |
||
1808 | # modelled |
||
1809 | cement = np.unique(dsm_cement["bus"].values) |
||
1810 | index_names = np.array(dsm[dsm["bus"].isin(cement)].index) |
||
1811 | dsm.drop(index_names, inplace=True) |
||
1812 | |||
1813 | # calculate potentials of ventilation in industrial sites of WZ 23 |
||
1814 | # using parameters by Heitkoetter et al. |
||
1815 | vals = calculate_potentials( |
||
1816 | s_flex=S_FLEX_WZ, |
||
1817 | s_util=S_UTIL_WZ, |
||
1818 | s_inc=S_INC_WZ, |
||
1819 | s_dec=S_DEC_WZ, |
||
1820 | delta_t=DELTA_T_WZ, |
||
1821 | dsm=dsm, |
||
1822 | ) |
||
1823 | |||
1824 | columns = ["site_id"] + base_columns |
||
1825 | |||
1826 | ind_sites_df = pd.concat([dsm, *vals], axis=1, ignore_index=True) |
||
1827 | ind_sites_df.columns = columns |
||
1828 | |||
1829 | # create tables |
||
1830 | create_table( |
||
1831 | df=cts_df, table=EgonEtragoElectricityCtsDsmTimeseries, engine=CON |
||
1832 | ) |
||
1833 | create_table( |
||
1834 | df=osm_df, |
||
1835 | table=EgonOsmIndLoadCurvesIndividualDsmTimeseries, |
||
1836 | engine=CON, |
||
1837 | ) |
||
1838 | create_table( |
||
1839 | df=ind_df, |
||
1840 | table=EgonDemandregioSitesIndElectricityDsmTimeseries, |
||
1841 | engine=CON, |
||
1842 | ) |
||
1843 | create_table( |
||
1844 | df=ind_sites_df, |
||
1845 | table=EgonSitesIndLoadCurvesIndividualDsmTimeseries, |
||
1846 | engine=CON, |
||
1847 | ) |
||
1848 | |||
1849 | |||
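For a quick inspection of what dsm_cts_ind_individual has written, the tables can be read back through the declarative classes' metadata rather than hard-coded names; a minimal sketch (the variable names are illustrative):

    # Read one of the individual DSM time-series tables back into a DataFrame,
    # reusing the table name and schema from the declarative class.
    table = EgonEtragoElectricityCtsDsmTimeseries.__table__
    cts_individual = pd.read_sql_table(table.name, con=CON, schema=table.schema)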
1850 | def dsm_cts_ind_processing(): |
||
1851 | dsm_cts_ind() |
||
1852 | |||
1853 | dsm_cts_ind_individual() |
||
1854 | |||
1855 | add_metadata_individual() |
||
1856 |