| Total Complexity | 55 |
| Total Lines | 1841 |
| Duplicated Lines | 10.97 % |
| Changes | 0 |
Duplicate code is one of the most pungent code smells. A common rule of thumb is to restructure code once it is duplicated in three or more places.
Common duplication problems and corresponding solutions are:
Complex classes like data.datasets.DSM_cts_ind often do a lot of different things. To break such a class down, we need to identify a cohesive component within the class. A common approach to finding such a component is to look for fields and methods that share the same prefixes or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.
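In this module, the duplication flagged below is concentrated in the four `*DsmTimeseries` declarative classes and the five `*_data_import` functions, which repeat the same column definitions and the same `p_set` scaling loop. As a minimal sketch of how Extract Class could be applied to the table classes (the mixin name and the hard-coded schema/table strings are assumptions for illustration; the real module reads them from datasets.yml via `config.datasets()`):

```python
from sqlalchemy import ARRAY, Column, Float, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()


class DsmTimeseriesColumnsMixin:
    """Hypothetical mixin holding the columns shared by all DSM time series tables."""

    scn_name = Column(String, primary_key=True, index=True)
    p_set = Column(ARRAY(Float))
    p_max = Column(ARRAY(Float))
    p_min = Column(ARRAY(Float))
    e_max = Column(ARRAY(Float))
    e_min = Column(ARRAY(Float))


class CtsDsmTimeseriesExample(DsmTimeseriesColumnsMixin, Base):
    # Each concrete class then only declares what differs: table name, schema
    # and its own key column (bus, osm_id, site_id, ...).
    __tablename__ = "egon_etrago_electricity_cts_dsm_timeseries"
    __table_args__ = {"schema": "demand"}

    bus = Column(Integer, primary_key=True, index=True)
```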
| 1 | """ |
||
| 2 | Currently, there are differences in the aggregated and individual DSM time |
||
| 3 | series. These are caused by the truncation of the values at zero. |
||
| 4 | |||
| 5 | The sum of the individual time series is a more accurate value than the |
||
| 6 | aggregated time series used so far and should replace it in the future. Since |
||
| 7 | the deviations are relatively small, a tolerance is currently accepted in the |
||
| 8 | sanity checks. See [#1120](https://github.com/openego/eGon-data/issues/1120) |
||
| 9 | for updates. |
||
| 10 | """ |
||
| 11 | import datetime |
||
| 12 | import json |
||
| 13 | |||
| 14 | from omi.dialects import get_dialect |
||
| 15 | from sqlalchemy import ARRAY, Column, Float, Integer, String |
||
| 16 | from sqlalchemy.ext.declarative import declarative_base |
||
| 17 | import geopandas as gpd |
||
| 18 | import numpy as np |
||
| 19 | import pandas as pd |
||
| 20 | |||
| 21 | from egon.data import config, db |
||
| 22 | from egon.data.datasets import Dataset |
||
| 23 | from egon.data.datasets.electricity_demand.temporal import calc_load_curve |
||
| 24 | from egon.data.datasets.industry.temporal import identify_bus |
||
| 25 | from egon.data.metadata import ( |
||
| 26 | context, |
||
| 27 | contributors, |
||
| 28 | generate_resource_fields_from_db_table, |
||
| 29 | license_odbl, |
||
| 30 | meta_metadata, |
||
| 32 | sources, |
||
| 33 | ) |
||
| 34 | |||
| 35 | # CONSTANTS |
||
| 36 | # TODO: move to datasets.yml |
||
| 37 | CON = db.engine() |
||
| 38 | |||
| 39 | # CTS |
||
| 40 | CTS_COOL_VENT_AC_SHARE = 0.22 |
||
| 41 | |||
| 42 | S_FLEX_CTS = 0.5 |
||
| 43 | S_UTIL_CTS = 0.67 |
||
| 44 | S_INC_CTS = 1 |
||
| 45 | S_DEC_CTS = 0 |
||
| 46 | DELTA_T_CTS = 1 |
||
| 47 | |||
| 48 | # industry |
||
| 49 | IND_VENT_COOL_SHARE = 0.039 |
||
| 50 | IND_VENT_SHARE = 0.017 |
||
| 51 | |||
| 52 | # OSM |
||
| 53 | S_FLEX_OSM = 0.5 |
||
| 54 | S_UTIL_OSM = 0.73 |
||
| 55 | S_INC_OSM = 0.9 |
||
| 56 | S_DEC_OSM = 0.5 |
||
| 57 | DELTA_T_OSM = 1 |
||
| 58 | |||
| 59 | # paper |
||
| 60 | S_FLEX_PAPER = 0.15 |
||
| 61 | S_UTIL_PAPER = 0.86 |
||
| 62 | S_INC_PAPER = 0.95 |
||
| 63 | S_DEC_PAPER = 0 |
||
| 64 | DELTA_T_PAPER = 3 |
||
| 65 | |||
| 66 | # recycled paper |
||
| 67 | S_FLEX_RECYCLED_PAPER = 0.7 |
||
| 68 | S_UTIL_RECYCLED_PAPER = 0.85 |
||
| 69 | S_INC_RECYCLED_PAPER = 0.95 |
||
| 70 | S_DEC_RECYCLED_PAPER = 0 |
||
| 71 | DELTA_T_RECYCLED_PAPER = 3 |
||
| 72 | |||
| 73 | # pulp |
||
| 74 | S_FLEX_PULP = 0.7 |
||
| 75 | S_UTIL_PULP = 0.83 |
||
| 76 | S_INC_PULP = 0.95 |
||
| 77 | S_DEC_PULP = 0 |
||
| 78 | DELTA_T_PULP = 2 |
||
| 79 | |||
| 80 | # cement |
||
| 81 | S_FLEX_CEMENT = 0.61 |
||
| 82 | S_UTIL_CEMENT = 0.65 |
||
| 83 | S_INC_CEMENT = 0.95 |
||
| 84 | S_DEC_CEMENT = 0 |
||
| 85 | DELTA_T_CEMENT = 4 |
||
| 86 | |||
| 87 | # wz 23 |
||
| 88 | WZ = 23 |
||
| 89 | |||
| 90 | S_FLEX_WZ = 0.5 |
||
| 91 | S_UTIL_WZ = 0.8 |
||
| 92 | S_INC_WZ = 1 |
||
| 93 | S_DEC_WZ = 0.5 |
||
| 94 | DELTA_T_WZ = 1 |
||
| 95 | |||
| 96 | Base = declarative_base() |
||
| 97 | |||
| 98 | |||
| 99 | class DsmPotential(Dataset): |
||
| 100 | """ |
||
| 101 | Calculate Demand-Side Management potentials and transfer them to characteristics of DSM components |
||
| 102 | |||
| 103 | DSM within this work includes the shifting of loads within the sectors of |
||
| 104 | industry and CTS. Therefore, the previously prepared demand |
||
| 105 | time series are used. Shiftable potentials are calculated using the |
||
| 106 | parametrization elaborated in Heitkoetter et al. (doi:https://doi.org/10.1016/j.adapen.2020.100001). |
||
| 107 | DSM is modelled as storage-equivalent operation using the methods by Kleinhans (doi:10.48550/ARXIV.1401.4121). |
||
| 108 | The potentials are transferred to characteristics of DSM links (minimal and |
||
| 109 | maximal shiftable power per time step) and DSM stores (minimum and maximum |
||
| 110 | capacity per time step). DSM buses are created to connect DSM components with |
||
| 111 | the electrical network. All DSM components are added to the corresponding |
||
| 112 | tables for the transmission grid level. For the distribution grids, the |
||
| 113 | respective time series are exported to the corresponding tables (for the |
||
| 114 | required higher spatial resolution). |
||
| 115 | |||
| 116 | *Dependencies* |
||
| 117 | * :py:class:`CtsElectricityDemand <egon.data.datasets.electricity_demand>` |
||
| 118 | * :py:class:`IndustrialDemandCurves <egon.data.datasets.industry>` |
||
| 119 | * :py:class:`Osmtgmod <egon.data.datasets.osmtgmod>` |
||
| 120 | |||
| 121 | *Resulting tables* |
||
| 122 | * :py:class:`grid.egon_etrago_bus <egon.data.datasets.etrago_setup.EgonPfHvBus>` is extended |
||
| 123 | * :py:class:`grid.egon_etrago_link <egon.data.datasets.etrago_setup.EgonPfHvLink>` is extended |
||
| 124 | * :py:class:`grid.egon_etrago_link_timeseries <egon.data.datasets.etrago_setup.EgonPfHvLinkTimeseries>` is extended |
||
| 125 | * :py:class:`grid.egon_etrago_store <egon.data.datasets.etrago_setup.EgonPfHvStore>` is extended |
||
| 126 | * :py:class:`grid.egon_etrago_store_timeseries <egon.data.datasets.etrago_setup.EgonPfHvStoreTimeseries>` is extended |
||
| 127 | * :py:class:`demand.egon_etrago_electricity_cts_dsm_timeseries <egon.data.datasets.DsmPotential.EgonEtragoElectricityCtsDsmTimeseries>` is created and filled # noqa: E501 |
||
| 128 | * :py:class:`demand.egon_osm_ind_load_curves_individual_dsm_timeseries <egon.data.datasets.DsmPotential.EgonOsmIndLoadCurvesIndividualDsmTimeseries>` is created and filled # noqa: E501 |
||
| 129 | * :py:class:`demand.egon_demandregio_sites_ind_electricity_dsm_timeseries <egon.data.datasets.DsmPotential.EgonDemandregioSitesIndElectricityDsmTimeseries>` is created and filled # noqa: E501 |
||
| 130 | * :py:class:`demand.egon_sites_ind_load_curves_individual_dsm_timeseries <egon.data.datasets.DsmPotential.EgonSitesIndLoadCurvesIndividualDsmTimeseries>` is created and filled # noqa: E501 |
||
| 131 | |||
| 132 | """ |
||
| 133 | |||
| 134 | #: |
||
| 135 | name: str = "DsmPotential" |
||
| 136 | #: |
||
| 137 | version: str = "0.0.6" |
||
| 138 | |||
| 139 | def __init__(self, dependencies): |
||
| 140 | super().__init__( |
||
| 141 | name=self.name, |
||
| 142 | version=self.version, |
||
| 143 | dependencies=dependencies, |
||
| 144 | tasks=(dsm_cts_ind_processing,), |
||
| 145 | ) |
||
| 146 | |||
| 147 | |||
| 148 | # Datasets |
||
| 149 | class EgonEtragoElectricityCtsDsmTimeseries(Base): |
||
| 150 | target = config.datasets()["DSM_CTS_industry"]["targets"][ |
||
| 151 | "cts_loadcurves_dsm" |
||
| 152 | ] |
||
| 153 | |||
| 154 | __tablename__ = target["table"] |
||
| 155 | __table_args__ = {"schema": target["schema"]} |
||
| 156 | |||
| 157 | bus = Column(Integer, primary_key=True, index=True) |
||
| 158 | scn_name = Column(String, primary_key=True, index=True) |
||
| 159 | p_set = Column(ARRAY(Float)) |
||
| 160 | p_max = Column(ARRAY(Float)) |
||
| 161 | p_min = Column(ARRAY(Float)) |
||
| 162 | e_max = Column(ARRAY(Float)) |
||
| 163 | e_min = Column(ARRAY(Float)) |
||
| 164 | |||
| 165 | |||
| 166 | class EgonOsmIndLoadCurvesIndividualDsmTimeseries(Base): |
||
| 167 | target = config.datasets()["DSM_CTS_industry"]["targets"][ |
||
| 168 | "ind_osm_loadcurves_individual_dsm" |
||
| 169 | ] |
||
| 170 | |||
| 171 | __tablename__ = target["table"] |
||
| 172 | __table_args__ = {"schema": target["schema"]} |
||
| 173 | |||
| 174 | osm_id = Column(Integer, primary_key=True, index=True) |
||
| 175 | scn_name = Column(String, primary_key=True, index=True) |
||
| 176 | bus = Column(Integer) |
||
| 177 | p_set = Column(ARRAY(Float)) |
||
| 178 | p_max = Column(ARRAY(Float)) |
||
| 179 | p_min = Column(ARRAY(Float)) |
||
| 180 | e_max = Column(ARRAY(Float)) |
||
| 181 | e_min = Column(ARRAY(Float)) |
||
| 182 | |||
| 183 | |||
| 184 | class EgonDemandregioSitesIndElectricityDsmTimeseries(Base): |
||
| 185 | target = config.datasets()["DSM_CTS_industry"]["targets"][ |
||
| 186 | "demandregio_ind_sites_dsm" |
||
| 187 | ] |
||
| 188 | |||
| 189 | __tablename__ = target["table"] |
||
| 190 | __table_args__ = {"schema": target["schema"]} |
||
| 191 | |||
| 192 | industrial_sites_id = Column(Integer, primary_key=True, index=True) |
||
| 193 | scn_name = Column(String, primary_key=True, index=True) |
||
| 194 | bus = Column(Integer) |
||
| 195 | application = Column(String) |
||
| 196 | p_set = Column(ARRAY(Float)) |
||
| 197 | p_max = Column(ARRAY(Float)) |
||
| 198 | p_min = Column(ARRAY(Float)) |
||
| 199 | e_max = Column(ARRAY(Float)) |
||
| 200 | e_min = Column(ARRAY(Float)) |
||
| 201 | |||
| 202 | |||
| 203 | class EgonSitesIndLoadCurvesIndividualDsmTimeseries(Base): |
||
| 204 | target = config.datasets()["DSM_CTS_industry"]["targets"][ |
||
| 205 | "ind_sites_loadcurves_individual" |
||
| 206 | ] |
||
| 207 | |||
| 208 | __tablename__ = target["table"] |
||
| 209 | __table_args__ = {"schema": target["schema"]} |
||
| 210 | |||
| 211 | site_id = Column(Integer, primary_key=True, index=True) |
||
| 212 | scn_name = Column(String, primary_key=True, index=True) |
||
| 213 | bus = Column(Integer) |
||
| 214 | p_set = Column(ARRAY(Float)) |
||
| 215 | p_max = Column(ARRAY(Float)) |
||
| 216 | p_min = Column(ARRAY(Float)) |
||
| 217 | e_max = Column(ARRAY(Float)) |
||
| 218 | e_min = Column(ARRAY(Float)) |
||
| 219 | |||
| 220 | |||
| 221 | def add_metadata_individual(): |
||
| 222 | targets = config.datasets()["DSM_CTS_industry"]["targets"] |
||
| 223 | |||
| 224 | targets = { |
||
| 225 | k: v for k, v in targets.items() if "dsm_timeseries" in v["table"] |
||
| 226 | } |
||
| 227 | |||
| 228 | title_dict = { |
||
| 229 | "egon_etrago_electricity_cts_dsm_timeseries": ( |
||
| 230 | "DSM flexibility band time series for CTS" |
||
| 231 | ), |
||
| 232 | "egon_osm_ind_load_curves_individual_dsm_timeseries": ( |
||
| 233 | "DSM flexibility band time series for OSM industry sites" |
||
| 234 | ), |
||
| 235 | "egon_demandregio_sites_ind_electricity_dsm_timeseries": ( |
||
| 236 | "DSM flexibility band time series for demandregio industry sites" |
||
| 237 | ), |
||
| 238 | "egon_sites_ind_load_curves_individual_dsm_timeseries": ( |
||
| 239 | "DSM flexibility band time series for other industry sites" |
||
| 240 | ), |
||
| 241 | } |
||
| 242 | |||
| 243 | description_dict = { |
||
| 244 | "egon_etrago_electricity_cts_dsm_timeseries": ( |
||
| 245 | "DSM flexibility band time series for CTS in 1 h resolution " |
||
| 246 | "including available store capacity and power potential" |
||
| 247 | ), |
||
| 248 | "egon_osm_ind_load_curves_individual_dsm_timeseries": ( |
||
| 249 | "DSM flexibility band time series for OSM industry sites in 1 h " |
||
| 250 | "resolution including available store capacity and power potential" |
||
| 251 | ), |
||
| 252 | "egon_demandregio_sites_ind_electricity_dsm_timeseries": ( |
||
| 253 | "DSM flexibility band time series for demandregio industry sites " |
||
| 254 | "in 1 h resolution including available store capacity and power " |
||
| 255 | "potential" |
||
| 256 | ), |
||
| 257 | "egon_sites_ind_load_curves_individual_dsm_timeseries": ( |
||
| 258 | "DSM flexibility band time series for other industry sites in 1 h " |
||
| 259 | "resolution including available store capacity and power potential" |
||
| 260 | ), |
||
| 261 | } |
||
| 262 | |||
| 263 | keywords_dict = { |
||
| 264 | "egon_etrago_electricity_cts_dsm_timeseries": ["cts"], |
||
| 265 | "egon_osm_ind_load_curves_individual_dsm_timeseries": [ |
||
| 266 | "osm", |
||
| 267 | "industry", |
||
| 268 | ], |
||
| 269 | "egon_demandregio_sites_ind_electricity_dsm_timeseries": [ |
||
| 270 | "demandregio", |
||
| 271 | "industry", |
||
| 272 | ], |
||
| 273 | "egon_sites_ind_load_curves_individual_dsm_timeseries": ["industry"], |
||
| 274 | } |
||
| 275 | |||
| 276 | primaryKey_dict = { |
||
| 277 | "egon_etrago_electricity_cts_dsm_timeseries": ["bus"], |
||
| 278 | "egon_osm_ind_load_curves_individual_dsm_timeseries": ["osm_id"], |
||
| 279 | "egon_demandregio_sites_ind_electricity_dsm_timeseries": [ |
||
| 280 | "industrial_sites_id", |
||
| 281 | ], |
||
| 282 | "egon_sites_ind_load_curves_individual_dsm_timeseries": ["site_id"], |
||
| 283 | } |
||
| 284 | |||
| 285 | sources_dict = { |
||
| 286 | "egon_etrago_electricity_cts_dsm_timeseries": [ |
||
| 287 | sources()["nep2021"], |
||
| 288 | sources()["zensus"], |
||
| 289 | ], |
||
| 290 | "egon_osm_ind_load_curves_individual_dsm_timeseries": [ |
||
| 291 | sources()["hotmaps_industrial_sites"], |
||
| 292 | sources()["schmidt"], |
||
| 293 | sources()["seenergies"], |
||
| 294 | ], |
||
| 295 | "egon_demandregio_sites_ind_electricity_dsm_timeseries": [ |
||
| 296 | sources()["openstreetmap"], |
||
| 297 | ], |
||
| 298 | "egon_sites_ind_load_curves_individual_dsm_timeseries": [ |
||
| 299 | sources()["hotmaps_industrial_sites"], |
||
| 300 | sources()["openstreetmap"], |
||
| 301 | sources()["schmidt"], |
||
| 302 | sources()["seenergies"], |
||
| 303 | ], |
||
| 304 | } |
||
| 305 | |||
| 306 | contris = contributors(["kh", "kh"]) |
||
| 307 | |||
| 308 | contris[0]["date"] = "2023-03-17" |
||
| 309 | |||
| 310 | contris[0]["object"] = "metadata" |
||
| 311 | contris[1]["object"] = "dataset" |
||
| 312 | |||
| 313 | contris[0]["comment"] = "Add metadata to dataset." |
||
| 314 | contris[1]["comment"] = "Add workflow to generate dataset." |
||
| 315 | |||
| 316 | for t_dict in targets.values(): |
||
| 317 | schema = t_dict["schema"] |
||
| 318 | table = t_dict["table"] |
||
| 319 | name = f"{schema}.{table}" |
||
| 320 | |||
| 321 | meta = { |
||
| 322 | "name": name, |
||
| 323 | "title": title_dict[table], |
||
| 324 | "id": "WILL_BE_SET_AT_PUBLICATION", |
||
| 325 | "description": description_dict[table], |
||
| 326 | "language": "en-US", |
||
| 327 | "keywords": ["dsm", "timeseries"] + keywords_dict[table], |
||
| 328 | "publicationDate": datetime.date.today().isoformat(), |
||
| 329 | "context": context(), |
||
| 330 | "spatial": { |
||
| 331 | "location": "none", |
||
| 332 | "extent": "Germany", |
||
| 333 | "resolution": "none", |
||
| 334 | }, |
||
| 335 | "temporal": { |
||
| 336 | "referenceDate": "2011-01-01", |
||
| 337 | "timeseries": { |
||
| 338 | "start": "2011-01-01", |
||
| 339 | "end": "2011-12-31", |
||
| 340 | "resolution": "1 h", |
||
| 341 | "alignment": "left", |
||
| 342 | "aggregationType": "average", |
||
| 343 | }, |
||
| 344 | }, |
||
| 345 | "sources": [ |
||
| 346 | sources()["egon-data"], |
||
| 347 | sources()["vg250"], |
||
| 348 | sources()["demandregio"], |
||
| 349 | ] |
||
| 350 | + sources_dict[table], |
||
| 351 | "licenses": [license_odbl("© eGon development team")], |
||
| 352 | "contributors": contris, |
||
| 353 | "resources": [ |
||
| 354 | { |
||
| 355 | "profile": "tabular-data-resource", |
||
| 356 | "name": name, |
||
| 357 | "path": "None", |
||
| 358 | "format": "PostgreSQL", |
||
| 359 | "encoding": "UTF-8", |
||
| 360 | "schema": { |
||
| 361 | "fields": generate_resource_fields_from_db_table( |
||
| 362 | schema, |
||
| 363 | table, |
||
| 364 | ), |
||
| 365 | "primaryKey": ["scn_name"] + primaryKey_dict[table], |
||
| 366 | }, |
||
| 367 | "dialect": {"delimiter": "", "decimalSeparator": ""}, |
||
| 368 | } |
||
| 369 | ], |
||
| 370 | "review": {"path": "", "badge": ""}, |
||
| 371 | "metaMetadata": meta_metadata(), |
||
| 372 | "_comment": { |
||
| 373 | "metadata": ( |
||
| 374 | "Metadata documentation and explanation (https://" |
||
| 375 | "github.com/OpenEnergyPlatform/oemetadata/blob/master/" |
||
| 376 | "metadata/v141/metadata_key_description.md)" |
||
| 377 | ), |
||
| 378 | "dates": ( |
||
| 379 | "Dates and time must follow the ISO8601 including time " |
||
| 380 | "zone (YYYY-MM-DD or YYYY-MM-DDThh:mm:ss±hh)" |
||
| 381 | ), |
||
| 382 | "units": "Use a space between numbers and units (100 m)", |
||
| 383 | "languages": ( |
||
| 384 | "Languages must follow the IETF (BCP47) format (en-GB, " |
||
| 385 | "en-US, de-DE)" |
||
| 386 | ), |
||
| 387 | "licenses": ( |
||
| 388 | "License name must follow the SPDX License List " |
||
| 389 | "(https://spdx.org/licenses/)" |
||
| 390 | ), |
||
| 391 | "review": ( |
||
| 392 | "Following the OEP Data Review (https://github.com/" |
||
| 393 | "OpenEnergyPlatform/data-preprocessing/wiki)" |
||
| 394 | ), |
||
| 395 | "none": "If not applicable use (none)", |
||
| 396 | }, |
||
| 397 | } |
||
| 398 | |||
| 399 | dialect = get_dialect(f"oep-v{meta_metadata()['metadataVersion'][4:7]}")() |
||
| 400 | |||
| 401 | meta = dialect.compile_and_render(dialect.parse(json.dumps(meta))) |
||
| 402 | |||
| 403 | db.submit_comment( |
||
| 404 | f"'{json.dumps(meta)}'", |
||
| 405 | schema, |
||
| 406 | table, |
||
| 407 | ) |
||
| 408 | |||
| 409 | |||
| 410 | # Code |
||
| 411 | def cts_data_import(cts_cool_vent_ac_share): |
||
| 412 | """ |
||
| 413 | Import CTS data necessary to identify DSM-potential. |
||
| 414 | Parameters |
||
| 415 | ---------- |
||
| 416 | cts_cool_vent_ac_share: float |
||
| 417 | Share of cooling, ventilation and AC in CTS demand |
||
| 418 | """ |
||
| 419 | |||
| 420 | # import load data |
||
| 421 | |||
| 422 | sources = config.datasets()["DSM_CTS_industry"]["sources"][ |
||
| 423 | "cts_loadcurves" |
||
| 424 | ] |
||
| 425 | |||
| 426 | ts = db.select_dataframe( |
||
| 427 | f"""SELECT bus_id, scn_name, p_set FROM |
||
| 428 | {sources['schema']}.{sources['table']}""" |
||
| 429 | ) |
||
| 430 | |||
| 431 | # identify relevant columns and prepare df to be returned |
||
| 432 | |||
| 433 | dsm = pd.DataFrame(index=ts.index) |
||
| 434 | |||
| 435 | dsm["bus"] = ts["bus_id"].copy() |
||
| 436 | dsm["scn_name"] = ts["scn_name"].copy() |
||
| 437 | dsm["p_set"] = ts["p_set"].copy() |
||
| 438 | |||
| 439 | # calculate share of timeseries for air conditioning, cooling and |
||
| 440 | # ventilation out of CTS-data |
||
| 441 | |||
| 442 | timeseries = dsm["p_set"].copy() |
||
| 443 | |||
| 444 | for index, liste in timeseries.items(): |
||
| 445 | share = [float(item) * cts_cool_vent_ac_share for item in liste] |
||
| 446 | timeseries.loc[index] = share |
||
| 447 | |||
| 448 | dsm["p_set"] = timeseries.copy() |
||
| 449 | |||
| 450 | return dsm |
||
| 451 | |||
| 452 | |||
| 453 | def ind_osm_data_import(ind_vent_cool_share): |
||
| 454 | """ |
||
| 455 | Import industry data per osm-area necessary to identify DSM-potential. |
||
| 456 | ---------- |
||
| 457 | ind_vent_cool_share: float |
||
| 458 | Share of considered application in industry demand |
||
| 459 | """ |
||
| 460 | |||
| 461 | # import load data |
||
| 462 | |||
| 463 | sources = config.datasets()["DSM_CTS_industry"]["sources"][ |
||
| 464 | "ind_osm_loadcurves" |
||
| 465 | ] |
||
| 466 | |||
| 467 | dsm = db.select_dataframe( |
||
| 468 | f""" |
||
| 469 | SELECT bus, scn_name, p_set FROM |
||
| 470 | {sources['schema']}.{sources['table']} |
||
| 471 | """ |
||
| 472 | ) |
||
| 473 | |||
| 474 | # calculate share of timeseries for cooling and ventilation out of |
||
| 475 | # industry-data |
||
| 476 | |||
| 477 | timeseries = dsm["p_set"].copy() |
||
| 478 | |||
| 479 | for index, liste in timeseries.items(): |
||
| 480 | share = [float(item) * ind_vent_cool_share for item in liste] |
||
| 481 | |||
| 482 | timeseries.loc[index] = share |
||
| 483 | |||
| 484 | dsm["p_set"] = timeseries.copy() |
||
| 485 | |||
| 486 | return dsm |
||
| 487 | |||
| 488 | |||
| 489 | def ind_osm_data_import_individual(ind_vent_cool_share): |
||
| 490 | """ |
||
| 491 | Import industry data per osm-area necessary to identify DSM-potential. |
||
| 492 | ---------- |
||
| 493 | ind_vent_cool_share: float |
||
| 494 | Share of considered application in industry demand |
||
| 495 | """ |
||
| 496 | |||
| 497 | # import load data |
||
| 498 | |||
| 499 | sources = config.datasets()["DSM_CTS_industry"]["sources"][ |
||
| 500 | "ind_osm_loadcurves_individual" |
||
| 501 | ] |
||
| 502 | |||
| 503 | dsm = db.select_dataframe( |
||
| 504 | f""" |
||
| 505 | SELECT osm_id, bus_id as bus, scn_name, p_set FROM |
||
| 506 | {sources['schema']}.{sources['table']} |
||
| 507 | """ |
||
| 508 | ) |
||
| 509 | |||
| 510 | # calculate share of timeseries for cooling and ventilation out of |
||
| 511 | # industry-data |
||
| 512 | |||
| 513 | timeseries = dsm["p_set"].copy() |
||
| 514 | |||
| 515 | for index, liste in timeseries.items(): |
||
| 516 | share = [float(item) * ind_vent_cool_share for item in liste] |
||
| 517 | |||
| 518 | timeseries.loc[index] = share |
||
| 519 | |||
| 520 | dsm["p_set"] = timeseries.copy() |
||
| 521 | |||
| 522 | return dsm |
||
| 523 | |||
| 524 | |||
| 525 | def ind_sites_vent_data_import(ind_vent_share, wz): |
||
| 526 | """ |
||
| 527 | Import industry sites necessary to identify DSM-potential. |
||
| 528 | ---------- |
||
| 529 | ind_vent_share: float |
||
| 530 | Share of considered application in industry demand |
||
| 531 | wz: int |
||
| 532 | Wirtschaftszweig to be considered within industry sites |
||
| 533 | """ |
||
| 534 | |||
| 535 | # import load data |
||
| 536 | |||
| 537 | sources = config.datasets()["DSM_CTS_industry"]["sources"][ |
||
| 538 | "ind_sites_loadcurves" |
||
| 539 | ] |
||
| 540 | |||
| 541 | dsm = db.select_dataframe( |
||
| 542 | f""" |
||
| 543 | SELECT bus, scn_name, p_set FROM |
||
| 544 | {sources['schema']}.{sources['table']} |
||
| 545 | WHERE wz = {wz} |
||
| 546 | """ |
||
| 547 | ) |
||
| 548 | |||
| 549 | # calculate share of timeseries for ventilation |
||
| 550 | |||
| 551 | timeseries = dsm["p_set"].copy() |
||
| 552 | |||
| 553 | for index, liste in timeseries.items(): |
||
| 554 | share = [float(item) * ind_vent_share for item in liste] |
||
| 555 | timeseries.loc[index] = share |
||
| 556 | |||
| 557 | dsm["p_set"] = timeseries.copy() |
||
| 558 | |||
| 559 | return dsm |
||
| 560 | |||
| 561 | |||
| 562 | def ind_sites_vent_data_import_individual(ind_vent_share, wz): |
||
| 563 | """ |
||
| 564 | Import industry sites necessary to identify DSM-potential. |
||
| 565 | ---------- |
||
| 566 | ind_vent_share: float |
||
| 567 | Share of considered application in industry demand |
||
| 568 | wz: int |
||
| 569 | Wirtschaftszweig to be considered within industry sites |
||
| 570 | """ |
||
| 571 | |||
| 572 | # import load data |
||
| 573 | |||
| 574 | sources = config.datasets()["DSM_CTS_industry"]["sources"][ |
||
| 575 | "ind_sites_loadcurves_individual" |
||
| 576 | ] |
||
| 577 | |||
| 578 | dsm = db.select_dataframe( |
||
| 579 | f""" |
||
| 580 | SELECT site_id, bus_id as bus, scn_name, p_set FROM |
||
| 581 | {sources['schema']}.{sources['table']} |
||
| 582 | WHERE wz = {wz} |
||
| 583 | """ |
||
| 584 | ) |
||
| 585 | |||
| 586 | # calculate share of timeseries for ventilation |
||
| 587 | |||
| 588 | timeseries = dsm["p_set"].copy() |
||
| 589 | |||
| 590 | for index, liste in timeseries.items(): |
||
| 591 | share = [float(item) * ind_vent_share for item in liste] |
||
| 592 | timeseries.loc[index] = share |
||
| 593 | |||
| 594 | dsm["p_set"] = timeseries.copy() |
||
| 595 | |||
| 596 | return dsm |
||
| 597 | |||
| 598 | |||
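The five import functions above (for CTS, OSM industry areas and industrial sites) all repeat the same loop that scales each `p_set` load curve by an application share, which is exactly the kind of duplication this report flags. A hedged sketch of a shared helper they could call instead (the function name `scale_p_set` is an assumption, not part of the module):

```python
import pandas as pd


def scale_p_set(dsm: pd.DataFrame, share: float) -> pd.DataFrame:
    """Scale every p_set load curve by the given application share."""
    dsm = dsm.copy()
    dsm["p_set"] = dsm["p_set"].apply(
        lambda curve: [float(value) * share for value in curve]
    )
    return dsm


# e.g. inside ind_osm_data_import: dsm = scale_p_set(dsm, ind_vent_cool_share)
```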
| 599 | def calc_ind_site_timeseries(scenario): |
||
| 600 | # calculate timeseries per site |
||
| 601 | # -> using code from egon.data.datasets.industry.temporal: |
||
| 602 | # calc_load_curves_ind_sites |
||
| 603 | |||
| 604 | # select demands per industrial site including the subsector information |
||
| 605 | source1 = config.datasets()["DSM_CTS_industry"]["sources"][ |
||
| 606 | "demandregio_ind_sites" |
||
| 607 | ] |
||
| 608 | |||
| 609 | demands_ind_sites = db.select_dataframe( |
||
| 610 | f"""SELECT industrial_sites_id, wz, demand |
||
| 611 | FROM {source1['schema']}.{source1['table']} |
||
| 612 | WHERE scenario = '{scenario}' |
||
| 613 | AND demand > 0 |
||
| 614 | """ |
||
| 615 | ).set_index(["industrial_sites_id"]) |
||
| 616 | |||
| 617 | # select industrial sites as demand_areas from database |
||
| 618 | source2 = config.datasets()["DSM_CTS_industry"]["sources"]["ind_sites"] |
||
| 619 | |||
| 620 | demand_area = db.select_geodataframe( |
||
| 621 | f"""SELECT id, geom, subsector FROM |
||
| 622 | {source2['schema']}.{source2['table']}""", |
||
| 623 | index_col="id", |
||
| 624 | geom_col="geom", |
||
| 625 | epsg=3035, |
||
| 626 | ) |
||
| 627 | |||
| 628 | # replace entries to bring it in line with demandregio's subsector |
||
| 629 | # definitions |
||
| 630 | demands_ind_sites.replace(1718, 17, inplace=True) |
||
| 631 | share_wz_sites = demands_ind_sites.copy() |
||
| 632 | |||
| 633 | # create additional df on wz_share per industrial site, which is always set |
||
| 634 | # to one as the industrial demand per site is subsector specific |
||
| 635 | share_wz_sites.demand = 1 |
||
| 636 | share_wz_sites.reset_index(inplace=True) |
||
| 637 | |||
| 638 | share_transpose = pd.DataFrame( |
||
| 639 | index=share_wz_sites.industrial_sites_id.unique(), |
||
| 640 | columns=share_wz_sites.wz.unique(), |
||
| 641 | ) |
||
| 642 | share_transpose.index.rename("industrial_sites_id", inplace=True) |
||
| 643 | for wz in share_transpose.columns: |
||
| 644 | share_transpose[wz] = ( |
||
| 645 | share_wz_sites[share_wz_sites.wz == wz] |
||
| 646 | .set_index("industrial_sites_id") |
||
| 647 | .demand |
||
| 648 | ) |
||
| 649 | |||
| 650 | # calculate load curves |
||
| 651 | load_curves = calc_load_curve( |
||
| 652 | share_transpose, scenario, demands_ind_sites["demand"] |
||
| 653 | ) |
||
| 654 | |||
| 655 | # identify bus per industrial site |
||
| 656 | curves_bus = identify_bus(load_curves, demand_area) |
||
| 657 | curves_bus.index = curves_bus["id"].astype(int) |
||
| 658 | |||
| 659 | # initialize dataframe to be returned |
||
| 660 | |||
| 661 | ts = pd.DataFrame( |
||
| 662 | data=curves_bus["bus_id"], index=curves_bus["id"].astype(int) |
||
| 663 | ) |
||
| 664 | ts["scenario_name"] = scenario |
||
| 665 | curves_bus.drop({"id", "bus_id", "geom"}, axis=1, inplace=True) |
||
| 666 | ts["p_set"] = curves_bus.values.tolist() |
||
| 667 | |||
| 668 | # add subsector to relate to Schmidt's tables afterwards |
||
| 669 | ts["application"] = demand_area["subsector"] |
||
| 670 | |||
| 671 | return ts |
||
| 672 | |||
| 673 | |||
| 674 | def relate_to_schmidt_sites(dsm): |
||
| 675 | # import industrial sites by Schmidt |
||
| 676 | |||
| 677 | source = config.datasets()["DSM_CTS_industry"]["sources"][ |
||
| 678 | "ind_sites_schmidt" |
||
| 679 | ] |
||
| 680 | |||
| 681 | schmidt = db.select_dataframe( |
||
| 682 | f"""SELECT application, geom FROM |
||
| 683 | {source['schema']}.{source['table']}""" |
||
| 684 | ) |
||
| 685 | |||
| 686 | # relate calculated timeseries (dsm) to Schmidt's industrial sites |
||
| 687 | |||
| 688 | applications = np.unique(schmidt["application"]) |
||
| 689 | dsm = pd.DataFrame(dsm[dsm["application"].isin(applications)]) |
||
| 690 | |||
| 691 | # initialize dataframe to be returned |
||
| 692 | |||
| 693 | dsm.rename( |
||
| 694 | columns={"scenario_name": "scn_name", "bus_id": "bus"}, |
||
| 695 | inplace=True, |
||
| 696 | ) |
||
| 697 | |||
| 698 | return dsm |
||
| 699 | |||
| 700 | |||
| 701 | def ind_sites_data_import(): |
||
| 702 | """ |
||
| 703 | Import industry sites data necessary to identify DSM-potential. |
||
| 704 | """ |
||
| 705 | # calculate timeseries per site |
||
| 706 | scenarios = config.settings()["egon-data"]["--scenarios"] |
||
| 707 | |||
| 708 | dsm = pd.DataFrame( |
||
| 709 | columns=["bus_id", "scenario_name", "p_set", "application", "id"] |
||
| 710 | ) |
||
| 711 | |||
| 712 | # scenario eGon2035 |
||
| 713 | if "eGon2035" in scenarios: |
||
| 714 | dsm_2035 = calc_ind_site_timeseries("eGon2035").reset_index() |
||
| 715 | dsm = pd.concat([dsm, dsm_2035], ignore_index=True) |
||
| 716 | # scenario eGon100RE |
||
| 717 | if "eGon100RE" in scenarios: |
||
| 718 | dsm_100 = calc_ind_site_timeseries("eGon100RE").reset_index() |
||
| 719 | dsm = pd.concat([dsm, dsm_100], ignore_index=True) |
||
| 720 | |||
| 721 | dsm.index = range(len(dsm)) |
||
| 722 | # relate calculated timeseries to Schmidt's industrial sites |
||
| 723 | |||
| 724 | dsm = relate_to_schmidt_sites(dsm) |
||
| 725 | |||
| 726 | return dsm[["application", "id", "bus", "scn_name", "p_set"]] |
||
| 727 | |||
| 728 | |||
| 729 | def calculate_potentials(s_flex, s_util, s_inc, s_dec, delta_t, dsm): |
||
| 730 | """ |
||
| 731 | Calculate DSM-potential per bus using the methods by Heitkoetter et al.: |
||
| 732 | https://doi.org/10.1016/j.adapen.2020.100001 |
||
| 733 | Parameters |
||
| 734 | ---------- |
||
| 735 | s_flex: float |
||
| 736 | Feasibility factor to account for socio-technical restrictions |
||
| 737 | s_util: float |
||
| 738 | Average annual utilisation rate |
||
| 739 | s_inc: float |
||
| 740 | Shiftable share of installed capacity up to which load can be |
||
| 741 | increased considering technical limitations |
||
| 742 | s_dec: float |
||
| 743 | Shiftable share of installed capacity up to which load can be |
||
| 744 | decreased considering technical limitations |
||
| 745 | delta_t: int |
||
| 746 | Maximum shift duration in hours |
||
| 747 | dsm: DataFrame |
||
| 748 | List of existing buses with DSM-potential including timeseries of |
||
| 749 | loads |
||
| 750 | """ |
||
| 751 | |||
| 752 | # copy relevant timeseries |
||
| 753 | timeseries = dsm["p_set"].copy() |
||
| 754 | |||
| 755 | # calculate scheduled load L(t) |
||
| 756 | |||
| 757 | scheduled_load = timeseries.copy() |
||
| 758 | |||
| 759 | for index, liste in scheduled_load.items(): |
||
| 760 | share = [item * s_flex for item in liste] |
||
| 761 | scheduled_load.loc[index] = share |
||
| 762 | |||
| 763 | # calculate maximum capacity Lambda |
||
| 764 | |||
| 765 | # calculate annual energy requirement |
||
| 766 | energy_annual = pd.Series(index=timeseries.index, dtype=float) |
||
| 767 | for index, liste in timeseries.items(): |
||
| 768 | energy_annual.loc[index] = sum(liste) |
||
| 769 | |||
| 770 | # calculate Lambda |
||
| 771 | lam = (energy_annual * s_flex) / (8760 * s_util) |
||
| 772 | |||
| 773 | # calculation of P_max and P_min |
||
| 774 | |||
| 775 | # P_max |
||
| 776 | p_max = scheduled_load.copy() |
||
| 777 | for index, liste in scheduled_load.items(): |
||
| 778 | lamb = lam.loc[index] |
||
| 779 | p_max.loc[index] = [max(0, lamb * s_inc - item) for item in liste] |
||
| 780 | |||
| 781 | # P_min |
||
| 782 | p_min = scheduled_load.copy() |
||
| 783 | for index, liste in scheduled_load.items(): |
||
| 784 | lamb = lam.loc[index] |
||
| 785 | p_min.loc[index] = [min(0, -(item - lamb * s_dec)) for item in liste] |
||
| 786 | |||
| 787 | # calculation of E_max and E_min |
||
| 788 | |||
| 789 | e_max = scheduled_load.copy() |
||
| 790 | e_min = scheduled_load.copy() |
||
| 791 | |||
| 792 | for index, liste in scheduled_load.items(): |
||
| 793 | emin = [] |
||
| 794 | emax = [] |
||
| 795 | for i in range(len(liste)): |
||
| 796 | if i + delta_t > len(liste): |
||
| 797 | emax.append( |
||
| 798 | (sum(liste[i:]) + sum(liste[: delta_t - (len(liste) - i)])) |
||
| 799 | ) |
||
| 800 | else: |
||
| 801 | emax.append(sum(liste[i : i + delta_t])) |
||
| 802 | if i - delta_t < 0: |
||
| 803 | emin.append( |
||
| 804 | ( |
||
| 805 | -1 |
||
| 806 | * ( |
||
| 807 | ( |
||
| 808 | sum(liste[:i]) |
||
| 809 | + sum(liste[len(liste) - delta_t + i :]) |
||
| 810 | ) |
||
| 811 | ) |
||
| 812 | ) |
||
| 813 | ) |
||
| 814 | else: |
||
| 815 | emin.append(-1 * sum(liste[i - delta_t : i])) |
||
| 816 | e_max.loc[index] = emax |
||
| 817 | e_min.loc[index] = emin |
||
| 818 | |||
| 819 | return p_max, p_min, e_max, e_min |
||
| 820 | |||
| 821 | |||
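Read together with the parameters documented in its docstring, `calculate_potentials` computes the storage-equivalent flexibility bands roughly as follows (a sketch of the relations implemented in the loops above, using the notation of Heitkoetter et al. and Kleinhans; time steps are hours and the wrap-around at the year boundary is omitted for brevity):

```latex
L(t) = s_{\mathrm{flex}} \, P_{\mathrm{set}}(t), \qquad
\Lambda = \frac{s_{\mathrm{flex}} \sum_{t=1}^{8760} P_{\mathrm{set}}(t)}{8760 \, s_{\mathrm{util}}}

P_{\max}(t) = \max\!\bigl(0,\; \Lambda s_{\mathrm{inc}} - L(t)\bigr), \qquad
P_{\min}(t) = \min\!\bigl(0,\; -(L(t) - \Lambda s_{\mathrm{dec}})\bigr)

E_{\max}(t) = \sum_{\tau=t}^{t+\Delta t-1} L(\tau), \qquad
E_{\min}(t) = -\sum_{\tau=t-\Delta t}^{t-1} L(\tau)
```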
| 822 | def create_dsm_components( |
||
| 823 | con, p_max, p_min, e_max, e_min, dsm, export_aggregated=True |
||
| 824 | ): |
||
| 825 | """ |
||
| 826 | Create components representing DSM. |
||
| 827 | Parameters |
||
| 828 | ---------- |
||
| 829 | con : |
||
| 830 | Connection to database |
||
| 831 | p_max: DataFrame |
||
| 832 | Timeseries identifying maximum load increase |
||
| 833 | p_min: DataFrame |
||
| 834 | Timeseries identifying maximum load decrease |
||
| 835 | e_max: DataFrame |
||
| 836 | Timeseries identifying maximum energy amount to be preponed |
||
| 837 | e_min: DataFrame |
||
| 838 | Timeseries identifying maximum energy amount to be postponed |
||
| 839 | dsm: DataFrame |
||
| 840 | List of existing buses with DSM-potential including timeseries of loads |
||
| 841 | """ |
||
| 842 | if not export_aggregated: |
||
| 843 | # calculate P_nom and P per unit |
||
| 844 | p_nom = pd.Series(index=p_max.index, dtype=float) |
||
| 845 | for index, row in p_max.items(): |
||
| 846 | nom = max(max(row), abs(min(p_min.loc[index]))) |
||
| 847 | p_nom.loc[index] = nom |
||
| 848 | new = [element / nom for element in row] |
||
| 849 | p_max.loc[index] = new |
||
| 850 | new = [element / nom for element in p_min.loc[index]] |
||
| 851 | p_min.loc[index] = new |
||
| 852 | |||
| 853 | # calculate E_nom and E per unit |
||
| 854 | e_nom = pd.Series(index=p_min.index, dtype=float) |
||
| 855 | for index, row in e_max.items(): |
||
| 856 | nom = max(max(row), abs(min(e_min.loc[index]))) |
||
| 857 | e_nom.loc[index] = nom |
||
| 858 | new = [element / nom for element in row] |
||
| 859 | e_max.loc[index] = new |
||
| 860 | new = [element / nom for element in e_min.loc[index]] |
||
| 861 | e_min.loc[index] = new |
||
| 862 | |||
| 863 | # add DSM-buses to "original" buses |
||
| 864 | dsm_buses = gpd.GeoDataFrame(index=dsm.index) |
||
| 865 | dsm_buses["original_bus"] = dsm["bus"].copy() |
||
| 866 | dsm_buses["scn_name"] = dsm["scn_name"].copy() |
||
| 867 | |||
| 868 | # get original buses and add copy of relevant information |
||
| 869 | target1 = config.datasets()["DSM_CTS_industry"]["targets"]["bus"] |
||
| 870 | original_buses = db.select_geodataframe( |
||
| 871 | f"""SELECT bus_id, v_nom, scn_name, x, y, geom FROM |
||
| 872 | {target1['schema']}.{target1['table']}""", |
||
| 873 | geom_col="geom", |
||
| 874 | epsg=4326, |
||
| 875 | ) |
||
| 876 | |||
| 877 | # copy relevant information from original buses to DSM-buses |
||
| 878 | dsm_buses["index"] = dsm_buses.index |
||
| 879 | originals = original_buses[ |
||
| 880 | original_buses["bus_id"].isin(np.unique(dsm_buses["original_bus"])) |
||
| 881 | ] |
||
| 882 | dsm_buses = originals.merge( |
||
| 883 | dsm_buses, |
||
| 884 | left_on=["bus_id", "scn_name"], |
||
| 885 | right_on=["original_bus", "scn_name"], |
||
| 886 | ) |
||
| 887 | dsm_buses.index = dsm_buses["index"] |
||
| 888 | dsm_buses.drop(["bus_id", "index"], axis=1, inplace=True) |
||
| 889 | |||
| 890 | # new bus_ids for DSM-buses |
||
| 891 | max_id = original_buses["bus_id"].max() |
||
| 892 | if np.isnan(max_id): |
||
| 893 | max_id = 0 |
||
| 894 | dsm_id = max_id + 1 |
||
| 895 | bus_id = pd.Series(index=dsm_buses.index, dtype=int) |
||
| 896 | |||
| 897 | # Get number of DSM buses for both scenarios |
||
| 898 | rows_per_scenario = ( |
||
| 899 | dsm_buses.groupby("scn_name").count().original_bus.to_dict() |
||
| 900 | ) |
||
| 901 | |||
| 902 | # Assignment of DSM ids |
||
| 903 | bus_id.iloc[: rows_per_scenario.get("eGon2035", 0)] = range( |
||
| 904 | dsm_id, dsm_id + rows_per_scenario.get("eGon2035", 0) |
||
| 905 | ) |
||
| 906 | |||
| 907 | bus_id.iloc[ |
||
| 908 | rows_per_scenario.get("eGon2035", 0) : rows_per_scenario.get( |
||
| 909 | "eGon2035", 0 |
||
| 910 | ) |
||
| 911 | + rows_per_scenario.get("eGon100RE", 0) |
||
| 912 | ] = range(dsm_id, dsm_id + rows_per_scenario.get("eGon100RE", 0)) |
||
| 913 | |||
| 914 | dsm_buses["bus_id"] = bus_id |
||
| 915 | |||
| 916 | # add links from "original" buses to DSM-buses |
||
| 917 | |||
| 918 | dsm_links = pd.DataFrame(index=dsm_buses.index) |
||
| 919 | dsm_links["original_bus"] = dsm_buses["original_bus"].copy() |
||
| 920 | dsm_links["dsm_bus"] = dsm_buses["bus_id"].copy() |
||
| 921 | dsm_links["scn_name"] = dsm_buses["scn_name"].copy() |
||
| 922 | |||
| 923 | # set link_id |
||
| 924 | target2 = config.datasets()["DSM_CTS_industry"]["targets"]["link"] |
||
| 925 | sql = f"""SELECT link_id FROM {target2['schema']}.{target2['table']}""" |
||
| 926 | max_id = pd.read_sql_query(sql, con) |
||
| 927 | max_id = max_id["link_id"].max() |
||
| 928 | if np.isnan(max_id): |
||
| 929 | max_id = 0 |
||
| 930 | dsm_id = max_id + 1 |
||
| 931 | link_id = pd.Series(index=dsm_buses.index, dtype=int) |
||
| 932 | |||
| 933 | # Assignment of link ids |
||
| 934 | link_id.iloc[: rows_per_scenario.get("eGon2035", 0)] = range( |
||
| 935 | dsm_id, dsm_id + rows_per_scenario.get("eGon2035", 0) |
||
| 936 | ) |
||
| 937 | |||
| 938 | link_id.iloc[ |
||
| 939 | rows_per_scenario.get("eGon2035", 0) : rows_per_scenario.get( |
||
| 940 | "eGon2035", 0 |
||
| 941 | ) |
||
| 942 | + rows_per_scenario.get("eGon100RE", 0) |
||
| 943 | ] = range(dsm_id, dsm_id + rows_per_scenario.get("eGon100RE", 0)) |
||
| 944 | |||
| 945 | dsm_links["link_id"] = link_id |
||
| 946 | |||
| 947 | # add calculated timeseries to df to be returned |
||
| 948 | if not export_aggregated: |
||
| 949 | dsm_links["p_nom"] = p_nom |
||
| 950 | dsm_links["p_min"] = p_min |
||
| 951 | dsm_links["p_max"] = p_max |
||
| 952 | |||
| 953 | # add DSM-stores |
||
| 954 | |||
| 955 | dsm_stores = pd.DataFrame(index=dsm_buses.index) |
||
| 956 | dsm_stores["bus"] = dsm_buses["bus_id"].copy() |
||
| 957 | dsm_stores["scn_name"] = dsm_buses["scn_name"].copy() |
||
| 958 | dsm_stores["original_bus"] = dsm_buses["original_bus"].copy() |
||
| 959 | |||
| 960 | # set store_id |
||
| 961 | target3 = config.datasets()["DSM_CTS_industry"]["targets"]["store"] |
||
| 962 | sql = f"""SELECT store_id FROM {target3['schema']}.{target3['table']}""" |
||
| 963 | max_id = pd.read_sql_query(sql, con) |
||
| 964 | max_id = max_id["store_id"].max() |
||
| 965 | if np.isnan(max_id): |
||
| 966 | max_id = 0 |
||
| 967 | dsm_id = max_id + 1 |
||
| 968 | store_id = pd.Series(index=dsm_buses.index, dtype=int) |
||
| 969 | |||
| 970 | # Assignment of store ids |
||
| 971 | store_id.iloc[: rows_per_scenario.get("eGon2035", 0)] = range( |
||
| 972 | dsm_id, dsm_id + rows_per_scenario.get("eGon2035", 0) |
||
| 973 | ) |
||
| 974 | |||
| 975 | store_id.iloc[ |
||
| 976 | rows_per_scenario.get("eGon2035", 0) : rows_per_scenario.get( |
||
| 977 | "eGon2035", 0 |
||
| 978 | ) |
||
| 979 | + rows_per_scenario.get("eGon100RE", 0) |
||
| 980 | ] = range(dsm_id, dsm_id + rows_per_scenario.get("eGon100RE", 0)) |
||
| 981 | |||
| 982 | dsm_stores["store_id"] = store_id |
||
| 983 | |||
| 984 | # add calculated timeseries to df to be returned |
||
| 985 | if not export_aggregated: |
||
| 986 | dsm_stores["e_nom"] = e_nom |
||
| 987 | dsm_stores["e_min"] = e_min |
||
| 988 | dsm_stores["e_max"] = e_max |
||
| 989 | |||
| 990 | return dsm_buses, dsm_links, dsm_stores |
||
| 991 | |||
| 992 | |||
| 993 | def aggregate_components(df_dsm_buses, df_dsm_links, df_dsm_stores): |
||
| 994 | # aggregate buses |
||
| 995 | |||
| 996 | grouper = [df_dsm_buses.original_bus, df_dsm_buses.scn_name] |
||
| 997 | |||
| 998 | df_dsm_buses = df_dsm_buses.groupby(grouper).first() |
||
| 999 | |||
| 1000 | df_dsm_buses.reset_index(inplace=True) |
||
| 1001 | df_dsm_buses.sort_values("scn_name", inplace=True) |
||
| 1002 | |||
| 1003 | # aggregate links |
||
| 1004 | |||
| 1005 | df_dsm_links["p_max"] = df_dsm_links["p_max"].apply(lambda x: np.array(x)) |
||
| 1006 | df_dsm_links["p_min"] = df_dsm_links["p_min"].apply(lambda x: np.array(x)) |
||
| 1007 | |||
| 1008 | grouper = [df_dsm_links.original_bus, df_dsm_links.scn_name] |
||
| 1009 | |||
| 1010 | p_max = df_dsm_links.groupby(grouper)["p_max"].apply(np.sum) |
||
| 1011 | p_min = df_dsm_links.groupby(grouper)["p_min"].apply(np.sum) |
||
| 1012 | |||
| 1013 | df_dsm_links = df_dsm_links.groupby(grouper).first() |
||
| 1014 | df_dsm_links.p_max = p_max |
||
| 1015 | df_dsm_links.p_min = p_min |
||
| 1016 | |||
| 1017 | df_dsm_links.reset_index(inplace=True) |
||
| 1018 | df_dsm_links.sort_values("scn_name", inplace=True) |
||
| 1019 | |||
| 1020 | # calculate P_nom and P per unit |
||
| 1021 | for index, row in df_dsm_links.iterrows(): |
||
| 1022 | nom = max(max(row.p_max), abs(min(row.p_min))) |
||
| 1023 | df_dsm_links.at[index, "p_nom"] = nom |
||
| 1024 | |||
| 1025 | df_dsm_links["p_max"] = df_dsm_links["p_max"] / df_dsm_links["p_nom"] |
||
| 1026 | df_dsm_links["p_min"] = df_dsm_links["p_min"] / df_dsm_links["p_nom"] |
||
| 1027 | |||
| 1028 | df_dsm_links["p_max"] = df_dsm_links["p_max"].apply(lambda x: list(x)) |
||
| 1029 | df_dsm_links["p_min"] = df_dsm_links["p_min"].apply(lambda x: list(x)) |
||
| 1030 | |||
| 1031 | # aggregate stores |
||
| 1032 | df_dsm_stores["e_max"] = df_dsm_stores["e_max"].apply( |
||
| 1033 | lambda x: np.array(x) |
||
| 1034 | ) |
||
| 1035 | df_dsm_stores["e_min"] = df_dsm_stores["e_min"].apply( |
||
| 1036 | lambda x: np.array(x) |
||
| 1037 | ) |
||
| 1038 | |||
| 1039 | grouper = [df_dsm_stores.original_bus, df_dsm_stores.scn_name] |
||
| 1040 | |||
| 1041 | e_max = df_dsm_stores.groupby(grouper)["e_max"].apply(np.sum) |
||
| 1042 | e_min = df_dsm_stores.groupby(grouper)["e_min"].apply(np.sum) |
||
| 1043 | |||
| 1044 | df_dsm_stores = df_dsm_stores.groupby(grouper).first() |
||
| 1045 | df_dsm_stores.e_max = e_max |
||
| 1046 | df_dsm_stores.e_min = e_min |
||
| 1047 | |||
| 1048 | df_dsm_stores.reset_index(inplace=True) |
||
| 1049 | df_dsm_stores.sort_values("scn_name", inplace=True) |
||
| 1050 | |||
| 1051 | # calculate E_nom and E per unit |
||
| 1052 | for index, row in df_dsm_stores.iterrows(): |
||
| 1053 | nom = max(max(row.e_max), abs(min(row.e_min))) |
||
| 1054 | df_dsm_stores.at[index, "e_nom"] = nom |
||
| 1055 | |||
| 1056 | df_dsm_stores["e_max"] = df_dsm_stores["e_max"] / df_dsm_stores["e_nom"] |
||
| 1057 | df_dsm_stores["e_min"] = df_dsm_stores["e_min"] / df_dsm_stores["e_nom"] |
||
| 1058 | |||
| 1059 | df_dsm_stores["e_max"] = df_dsm_stores["e_max"].apply(lambda x: list(x)) |
||
| 1060 | df_dsm_stores["e_min"] = df_dsm_stores["e_min"].apply(lambda x: list(x)) |
||
| 1061 | |||
| 1062 | # select new bus_ids for aggregated buses and add to links and stores |
||
| 1063 | bus_id = db.next_etrago_id("Bus") + df_dsm_buses.index |
||
| 1064 | |||
| 1065 | df_dsm_buses["bus_id"] = bus_id |
||
| 1066 | df_dsm_links["dsm_bus"] = bus_id |
||
| 1067 | df_dsm_stores["bus"] = bus_id |
||
| 1068 | |||
| 1069 | # select new link_ids for aggregated links |
||
| 1070 | link_id = db.next_etrago_id("Link") + df_dsm_links.index |
||
| 1071 | |||
| 1072 | df_dsm_links["link_id"] = link_id |
||
| 1073 | |||
| 1074 | # select new store_ids for aggregated stores |
||
| 1075 | |||
| 1076 | store_id = db.next_etrago_id("Store") + df_dsm_stores.index |
||
| 1077 | |||
| 1078 | df_dsm_stores["store_id"] = store_id |
||
| 1079 | |||
| 1080 | return df_dsm_buses, df_dsm_links, df_dsm_stores |
||
| 1081 | |||
| 1082 | |||
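Both `create_dsm_components` (for the non-aggregated export) and `aggregate_components` above normalise the flexibility bands to per-unit values by dividing through the largest absolute excursion. A tiny numeric sketch of that normalisation, with made-up values:

```python
# Per-unit normalisation as applied to p_max/p_min (e_max/e_min analogously)
p_max = [0.4, 1.2, 0.0]
p_min = [-0.8, -0.2, -1.0]

p_nom = max(max(p_max), abs(min(p_min)))  # 1.2 -> nominal rating of the DSM link
p_max_pu = [v / p_nom for v in p_max]     # [0.333..., 1.0, 0.0]
p_min_pu = [v / p_nom for v in p_min]     # [-0.666..., -0.166..., -0.833...]
```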
| 1083 | def data_export(dsm_buses, dsm_links, dsm_stores, carrier): |
||
| 1084 | """ |
||
| 1085 | Export new components to database. |
||
| 1086 | |||
| 1087 | Parameters |
||
| 1088 | ---------- |
||
| 1089 | dsm_buses: DataFrame |
||
| 1090 | Buses representing locations of DSM-potential |
||
| 1091 | dsm_links: DataFrame |
||
| 1092 | Links connecting DSM-buses and DSM-stores |
||
| 1093 | dsm_stores: DataFrame |
||
| 1094 | Stores representing DSM-potential |
||
| 1095 | carrier: str |
||
| 1096 | Remark to be filled in column 'carrier' identifying DSM-potential |
||
| 1097 | """ |
||
| 1098 | |||
| 1099 | targets = config.datasets()["DSM_CTS_industry"]["targets"] |
||
| 1100 | |||
| 1101 | # dsm_buses |
||
| 1102 | |||
| 1103 | insert_buses = gpd.GeoDataFrame( |
||
| 1104 | index=dsm_buses.index, |
||
| 1105 | data=dsm_buses["geom"], |
||
| 1106 | geometry="geom", |
||
| 1107 | crs="EPSG:4326", |
||
| 1108 | ) |
||
| 1109 | insert_buses["scn_name"] = dsm_buses["scn_name"] |
||
| 1110 | insert_buses["bus_id"] = dsm_buses["bus_id"] |
||
| 1111 | insert_buses["v_nom"] = dsm_buses["v_nom"] |
||
| 1112 | insert_buses["carrier"] = carrier |
||
| 1113 | insert_buses["x"] = dsm_buses["x"] |
||
| 1114 | insert_buses["y"] = dsm_buses["y"] |
||
| 1115 | |||
| 1116 | # insert into database |
||
| 1117 | insert_buses.to_postgis( |
||
| 1118 | targets["bus"]["table"], |
||
| 1119 | con=db.engine(), |
||
| 1120 | schema=targets["bus"]["schema"], |
||
| 1121 | if_exists="append", |
||
| 1122 | index=False, |
||
| 1123 | dtype={"geom": "geometry"}, |
||
| 1124 | ) |
||
| 1125 | |||
| 1126 | # dsm_links |
||
| 1127 | |||
| 1128 | insert_links = pd.DataFrame(index=dsm_links.index) |
||
| 1129 | insert_links["scn_name"] = dsm_links["scn_name"] |
||
| 1130 | insert_links["link_id"] = dsm_links["link_id"] |
||
| 1131 | insert_links["bus0"] = dsm_links["original_bus"] |
||
| 1132 | insert_links["bus1"] = dsm_links["dsm_bus"] |
||
| 1133 | insert_links["carrier"] = carrier |
||
| 1134 | insert_links["p_nom"] = dsm_links["p_nom"] |
||
| 1135 | |||
| 1136 | # insert into database |
||
| 1137 | insert_links.to_sql( |
||
| 1138 | targets["link"]["table"], |
||
| 1139 | con=db.engine(), |
||
| 1140 | schema=targets["link"]["schema"], |
||
| 1141 | if_exists="append", |
||
| 1142 | index=False, |
||
| 1143 | ) |
||
| 1144 | |||
| 1145 | insert_links_timeseries = pd.DataFrame(index=dsm_links.index) |
||
| 1146 | insert_links_timeseries["scn_name"] = dsm_links["scn_name"] |
||
| 1147 | insert_links_timeseries["link_id"] = dsm_links["link_id"] |
||
| 1148 | insert_links_timeseries["p_min_pu"] = dsm_links["p_min"] |
||
| 1149 | insert_links_timeseries["p_max_pu"] = dsm_links["p_max"] |
||
| 1150 | insert_links_timeseries["temp_id"] = 1 |
||
| 1151 | |||
| 1152 | # insert into database |
||
| 1153 | insert_links_timeseries.to_sql( |
||
| 1154 | targets["link_timeseries"]["table"], |
||
| 1155 | con=db.engine(), |
||
| 1156 | schema=targets["link_timeseries"]["schema"], |
||
| 1157 | if_exists="append", |
||
| 1158 | index=False, |
||
| 1159 | ) |
||
| 1160 | |||
| 1161 | # dsm_stores |
||
| 1162 | |||
| 1163 | insert_stores = pd.DataFrame(index=dsm_stores.index) |
||
| 1164 | insert_stores["scn_name"] = dsm_stores["scn_name"] |
||
| 1165 | insert_stores["store_id"] = dsm_stores["store_id"] |
||
| 1166 | insert_stores["bus"] = dsm_stores["bus"] |
||
| 1167 | insert_stores["carrier"] = carrier |
||
| 1168 | insert_stores["e_nom"] = dsm_stores["e_nom"] |
||
| 1169 | |||
| 1170 | # insert into database |
||
| 1171 | insert_stores.to_sql( |
||
| 1172 | targets["store"]["table"], |
||
| 1173 | con=db.engine(), |
||
| 1174 | schema=targets["store"]["schema"], |
||
| 1175 | if_exists="append", |
||
| 1176 | index=False, |
||
| 1177 | ) |
||
| 1178 | |||
| 1179 | insert_stores_timeseries = pd.DataFrame(index=dsm_stores.index) |
||
| 1180 | insert_stores_timeseries["scn_name"] = dsm_stores["scn_name"] |
||
| 1181 | insert_stores_timeseries["store_id"] = dsm_stores["store_id"] |
||
| 1182 | insert_stores_timeseries["e_min_pu"] = dsm_stores["e_min"] |
||
| 1183 | insert_stores_timeseries["e_max_pu"] = dsm_stores["e_max"] |
||
| 1184 | insert_stores_timeseries["temp_id"] = 1 |
||
| 1185 | |||
| 1186 | # insert into database |
||
| 1187 | insert_stores_timeseries.to_sql( |
||
| 1188 | targets["store_timeseries"]["table"], |
||
| 1189 | con=db.engine(), |
||
| 1190 | schema=targets["store_timeseries"]["schema"], |
||
| 1191 | if_exists="append", |
||
| 1192 | index=False, |
||
| 1193 | ) |
||
| 1194 | |||
| 1195 | |||
| 1196 | def delete_dsm_entries(carrier): |
||
| 1197 | """ |
||
| 1198 | Deletes DSM-components from database if they already exist before creating |
||
| 1199 | new ones. |
||
| 1200 | |||
| 1201 | Parameters |
||
| 1202 | ---------- |
||
| 1203 | carrier: str |
||
| 1204 | Remark in column 'carrier' identifying DSM-potential |
||
| 1205 | """ |
||
| 1206 | |||
| 1207 | targets = config.datasets()["DSM_CTS_industry"]["targets"] |
||
| 1208 | |||
| 1209 | # buses |
||
| 1210 | |||
| 1211 | sql = ( |
||
| 1212 | f"DELETE FROM {targets['bus']['schema']}.{targets['bus']['table']} b " |
||
| 1213 | f"WHERE (b.carrier LIKE '{carrier}');" |
||
| 1214 | ) |
||
| 1215 | db.execute_sql(sql) |
||
| 1216 | |||
| 1217 | # links |
||
| 1218 | |||
| 1219 | sql = f""" |
||
| 1220 | DELETE FROM {targets["link_timeseries"]["schema"]}. |
||
| 1221 | {targets["link_timeseries"]["table"]} t |
||
| 1222 | WHERE t.link_id IN |
||
| 1223 | ( |
||
| 1224 | SELECT l.link_id FROM {targets["link"]["schema"]}. |
||
| 1225 | {targets["link"]["table"]} l |
||
| 1226 | WHERE l.carrier LIKE '{carrier}' |
||
| 1227 | ); |
||
| 1228 | """ |
||
| 1229 | |||
| 1230 | db.execute_sql(sql) |
||
| 1231 | |||
| 1232 | sql = f""" |
||
| 1233 | DELETE FROM {targets["link"]["schema"]}. |
||
| 1234 | {targets["link"]["table"]} l |
||
| 1235 | WHERE (l.carrier LIKE '{carrier}'); |
||
| 1236 | """ |
||
| 1237 | |||
| 1238 | db.execute_sql(sql) |
||
| 1239 | |||
| 1240 | # stores |
||
| 1241 | |||
| 1242 | sql = f""" |
||
| 1243 | DELETE FROM {targets["store_timeseries"]["schema"]}. |
||
| 1244 | {targets["store_timeseries"]["table"]} t |
||
| 1245 | WHERE t.store_id IN |
||
| 1246 | ( |
||
| 1247 | SELECT s.store_id FROM {targets["store"]["schema"]}. |
||
| 1248 | {targets["store"]["table"]} s |
||
| 1249 | WHERE s.carrier LIKE '{carrier}' |
||
| 1250 | ); |
||
| 1251 | """ |
||
| 1252 | |||
| 1253 | db.execute_sql(sql) |
||
| 1254 | |||
| 1255 | sql = f""" |
||
| 1256 | DELETE FROM {targets["store"]["schema"]}.{targets["store"]["table"]} s |
||
| 1257 | WHERE (s.carrier LIKE '{carrier}'); |
||
| 1258 | """ |
||
| 1259 | |||
| 1260 | db.execute_sql(sql) |
||
| 1261 | |||
| 1262 | |||
| 1263 | def dsm_cts_ind( |
||
| 1264 | con=db.engine(), |
||
| 1265 | cts_cool_vent_ac_share=0.22, |
||
| 1266 | ind_vent_cool_share=0.039, |
||
| 1267 | ind_vent_share=0.017, |
||
| 1268 | ): |
||
| 1269 | """ |
||
| 1270 | Execute methodology to create and implement components for DSM considering |
||
| 1271 | a) CTS per osm-area: combined potentials of cooling, ventilation and air |
||
| 1272 | conditioning |
||
| 1273 | b) Industry per osm-area: combined potentials of cooling and ventilation |
||
| 1274 | c) Industrial Sites: potentials of ventilation in sites of |
||
| 1275 | "Wirtschaftszweig" (WZ) 23 |
||
| 1276 | d) Industrial Sites: potentials of sites specified by subsectors |
||
| 1277 | identified by Schmidt (https://zenodo.org/record/3613767#.YTsGwVtCRhG): |
||
| 1278 | Paper, Recycled Paper, Pulp, Cement |
||
| 1279 | |||
| 1280 | Modelled using the methods by Heitkoetter et al.: |
||
| 1281 | https://doi.org/10.1016/j.adapen.2020.100001 |
||
| 1282 | |||
| 1283 | Parameters |
||
| 1284 | ---------- |
||
| 1285 | con : |
||
| 1286 | Connection to database |
||
| 1287 | cts_cool_vent_ac_share: float |
||
| 1288 | Share of cooling, ventilation and AC in CTS demand |
||
| 1289 | ind_vent_cool_share: float |
||
| 1290 | Share of cooling and ventilation in industry demand |
||
| 1291 | ind_vent_share: float |
||
| 1292 | Share of ventilation in industry demand in sites of WZ 23 |
||
| 1293 | |||
| 1294 | """ |
||
| 1295 | |||
| 1296 | # CTS per osm-area: cooling, ventilation and air conditioning |
||
| 1297 | |||
| 1298 | print(" ") |
||
| 1299 | print("CTS per osm-area: cooling, ventilation and air conditioning") |
||
| 1300 | print(" ") |
||
| 1301 | |||
| 1302 | dsm = cts_data_import(cts_cool_vent_ac_share) |
||
| 1303 | |||
| 1304 | # calculate combined potentials of cooling, ventilation and air |
||
| 1305 | # conditioning in CTS using combined parameters by Heitkoetter et al. |
||
| 1306 | p_max, p_min, e_max, e_min = calculate_potentials( |
||
| 1307 | s_flex=S_FLEX_CTS, |
||
| 1308 | s_util=S_UTIL_CTS, |
||
| 1309 | s_inc=S_INC_CTS, |
||
| 1310 | s_dec=S_DEC_CTS, |
||
| 1311 | delta_t=DELTA_T_CTS, |
||
| 1312 | dsm=dsm, |
||
| 1313 | ) |
||
| 1314 | |||
| 1315 | dsm_buses, dsm_links, dsm_stores = create_dsm_components( |
||
| 1316 | con, p_max, p_min, e_max, e_min, dsm |
||
| 1317 | ) |
||
| 1318 | |||
| 1319 | df_dsm_buses = dsm_buses.copy() |
||
| 1320 | df_dsm_links = dsm_links.copy() |
||
| 1321 | df_dsm_stores = dsm_stores.copy() |
||
| 1322 | |||
| 1323 | # industry per osm-area: cooling and ventilation |
||
| 1324 | |||
| 1325 | print(" ") |
||
| 1326 | print("industry per osm-area: cooling and ventilation") |
||
| 1327 | print(" ") |
||
| 1328 | |||
| 1329 | dsm = ind_osm_data_import(ind_vent_cool_share) |
||
| 1330 | |||
| 1331 | # calculate combined potentials of cooling and ventilation in industrial |
||
| 1332 | # sector using combined parameters by Heitkoetter et al. |
||
| 1333 | p_max, p_min, e_max, e_min = calculate_potentials( |
||
| 1334 | s_flex=S_FLEX_OSM, |
||
| 1335 | s_util=S_UTIL_OSM, |
||
| 1336 | s_inc=S_INC_OSM, |
||
| 1337 | s_dec=S_DEC_OSM, |
||
| 1338 | delta_t=DELTA_T_OSM, |
||
| 1339 | dsm=dsm, |
||
| 1340 | ) |
||
| 1341 | |||
| 1342 | dsm_buses, dsm_links, dsm_stores = create_dsm_components( |
||
| 1343 | con, p_max, p_min, e_max, e_min, dsm |
||
| 1344 | ) |
||
| 1345 | |||
| 1346 | df_dsm_buses = gpd.GeoDataFrame( |
||
| 1347 | pd.concat([df_dsm_buses, dsm_buses], ignore_index=True), |
||
| 1348 | crs="EPSG:4326", |
||
| 1349 | geometry="geom", |
||
| 1350 | ) |
||
| 1351 | df_dsm_links = pd.DataFrame( |
||
| 1352 | pd.concat([df_dsm_links, dsm_links], ignore_index=True) |
||
| 1353 | ) |
||
| 1354 | df_dsm_stores = pd.DataFrame( |
||
| 1355 | pd.concat([df_dsm_stores, dsm_stores], ignore_index=True) |
||
| 1356 | ) |
||
| 1357 | |||
| 1358 | # industry sites |
||
| 1359 | |||
| 1360 | # industry sites: different applications |
||
| 1361 | |||
| 1362 | dsm = ind_sites_data_import() |
||
| 1363 | |||
| 1364 | print(" ") |
||
| 1365 | print("industry sites: paper") |
||
| 1366 | print(" ") |
||
| 1367 | |||
| 1368 | dsm_paper = gpd.GeoDataFrame( |
||
| 1369 | dsm[ |
||
| 1370 | dsm["application"].isin( |
||
| 1371 | [ |
||
| 1372 | "Graphic Paper", |
||
| 1373 | "Packing Paper and Board", |
||
| 1374 | "Hygiene Paper", |
||
| 1375 | "Technical/Special Paper and Board", |
||
| 1376 | ] |
||
| 1377 | ) |
||
| 1378 | ] |
||
| 1379 | ) |
||
| 1380 | |||
| 1381 | # calculate potentials of industrial sites with paper-applications |
||
| 1382 | # using parameters by Heitkoetter et al. |
||
| 1383 | p_max, p_min, e_max, e_min = calculate_potentials( |
||
| 1384 | s_flex=S_FLEX_PAPER, |
||
| 1385 | s_util=S_UTIL_PAPER, |
||
| 1386 | s_inc=S_INC_PAPER, |
||
| 1387 | s_dec=S_DEC_PAPER, |
||
| 1388 | delta_t=DELTA_T_PAPER, |
||
| 1389 | dsm=dsm_paper, |
||
| 1390 | ) |
||
| 1391 | |||
| 1392 | dsm_buses, dsm_links, dsm_stores = create_dsm_components( |
||
| 1393 | con, p_max, p_min, e_max, e_min, dsm_paper |
||
| 1394 | ) |
||
| 1395 | |||
| 1396 | df_dsm_buses = gpd.GeoDataFrame( |
||
| 1397 | pd.concat([df_dsm_buses, dsm_buses], ignore_index=True), |
||
| 1398 | crs="EPSG:4326", |
||
| 1399 | geometry="geom", |
||
| 1400 | ) |
||
| 1401 | df_dsm_links = pd.DataFrame( |
||
| 1402 | pd.concat([df_dsm_links, dsm_links], ignore_index=True) |
||
| 1403 | ) |
||
| 1404 | df_dsm_stores = pd.DataFrame( |
||
| 1405 | pd.concat([df_dsm_stores, dsm_stores], ignore_index=True) |
||
| 1406 | ) |
||
| 1407 | |||
| 1408 | print(" ") |
||
| 1409 | print("industry sites: recycled paper") |
||
| 1410 | print(" ") |
||
| 1411 | |||
| 1412 | # calculate potentials of industrial sites with recycled paper-applications |
||
| 1413 | # using parameters by Heitkoetter et al. |
||
| 1414 | dsm_recycled_paper = gpd.GeoDataFrame( |
||
| 1415 | dsm[dsm["application"] == "Recycled Paper"] |
||
| 1416 | ) |
||
| 1417 | |||
| 1418 | p_max, p_min, e_max, e_min = calculate_potentials( |
||
| 1419 | s_flex=S_FLEX_RECYCLED_PAPER, |
||
| 1420 | s_util=S_UTIL_RECYCLED_PAPER, |
||
| 1421 | s_inc=S_INC_RECYCLED_PAPER, |
||
| 1422 | s_dec=S_DEC_RECYCLED_PAPER, |
||
| 1423 | delta_t=DELTA_T_RECYCLED_PAPER, |
||
| 1424 | dsm=dsm_recycled_paper, |
||
| 1425 | ) |
||
| 1426 | |||
| 1427 | dsm_buses, dsm_links, dsm_stores = create_dsm_components( |
||
| 1428 | con, p_max, p_min, e_max, e_min, dsm_recycled_paper |
||
| 1429 | ) |
||
| 1430 | |||
| 1431 | df_dsm_buses = gpd.GeoDataFrame( |
||
| 1432 | pd.concat([df_dsm_buses, dsm_buses], ignore_index=True), |
||
| 1433 | crs="EPSG:4326", |
||
| 1434 | geometry="geom", |
||
| 1435 | ) |
||
| 1436 | df_dsm_links = pd.DataFrame( |
||
| 1437 | pd.concat([df_dsm_links, dsm_links], ignore_index=True) |
||
| 1438 | ) |
||
| 1439 | df_dsm_stores = pd.DataFrame( |
||
| 1440 | pd.concat([df_dsm_stores, dsm_stores], ignore_index=True) |
||
| 1441 | ) |
||
| 1442 | |||
| 1443 | print(" ") |
||
| 1444 | print("industry sites: pulp") |
||
| 1445 | print(" ") |
||
| 1446 | |||
| 1447 | dsm_pulp = gpd.GeoDataFrame(dsm[dsm["application"] == "Mechanical Pulp"]) |
||
| 1448 | |||
| 1449 | # calculate potentials of industrial sites with pulp-applications |
||
| 1450 | # using parameters by Heitkoetter et al. |
||
| 1451 | p_max, p_min, e_max, e_min = calculate_potentials( |
||
| 1452 | s_flex=S_FLEX_PULP, |
||
| 1453 | s_util=S_UTIL_PULP, |
||
| 1454 | s_inc=S_INC_PULP, |
||
| 1455 | s_dec=S_DEC_PULP, |
||
| 1456 | delta_t=DELTA_T_PULP, |
||
| 1457 | dsm=dsm_pulp, |
||
| 1458 | ) |
||
| 1459 | |||
| 1460 | dsm_buses, dsm_links, dsm_stores = create_dsm_components( |
||
| 1461 | con, p_max, p_min, e_max, e_min, dsm_pulp |
||
| 1462 | ) |
||
| 1463 | |||
| 1464 | df_dsm_buses = gpd.GeoDataFrame( |
||
| 1465 | pd.concat([df_dsm_buses, dsm_buses], ignore_index=True), |
||
| 1466 | crs="EPSG:4326", |
||
| 1467 | geometry="geom", |
||
| 1468 | ) |
||
| 1469 | df_dsm_links = pd.DataFrame( |
||
| 1470 | pd.concat([df_dsm_links, dsm_links], ignore_index=True) |
||
| 1471 | ) |
||
| 1472 | df_dsm_stores = pd.DataFrame( |
||
| 1473 | pd.concat([df_dsm_stores, dsm_stores], ignore_index=True) |
||
| 1474 | ) |
||
| 1475 | |||
| 1476 | # industry sites: cement |
||
| 1477 | |||
| 1478 | print(" ") |
||
| 1479 | print("industry sites: cement") |
||
| 1480 | print(" ") |
||
| 1481 | |||
| 1482 | dsm_cement = gpd.GeoDataFrame(dsm[dsm["application"] == "Cement Mill"]) |
||
| 1483 | |||
| 1484 | # calculate potentials of industrial sites with cement-applications |
||
| 1485 | # using parameters by Heitkoetter et al. |
||
| 1486 | p_max, p_min, e_max, e_min = calculate_potentials( |
||
| 1487 | s_flex=S_FLEX_CEMENT, |
||
| 1488 | s_util=S_UTIL_CEMENT, |
||
| 1489 | s_inc=S_INC_CEMENT, |
||
| 1490 | s_dec=S_DEC_CEMENT, |
||
| 1491 | delta_t=DELTA_T_CEMENT, |
||
| 1492 | dsm=dsm_cement, |
||
| 1493 | ) |
||
| 1494 | |||
| 1495 | dsm_buses, dsm_links, dsm_stores = create_dsm_components( |
||
| 1496 | con, p_max, p_min, e_max, e_min, dsm_cement |
||
| 1497 | ) |
||
| 1498 | |||
| 1499 | df_dsm_buses = gpd.GeoDataFrame( |
||
| 1500 | pd.concat([df_dsm_buses, dsm_buses], ignore_index=True), |
||
| 1501 | crs="EPSG:4326", |
||
| 1502 | geometry="geom", |
||
| 1503 | ) |
||
| 1504 | df_dsm_links = pd.DataFrame( |
||
| 1505 | pd.concat([df_dsm_links, dsm_links], ignore_index=True) |
||
| 1506 | ) |
||
| 1507 | df_dsm_stores = pd.DataFrame( |
||
| 1508 | pd.concat([df_dsm_stores, dsm_stores], ignore_index=True) |
||
| 1509 | ) |
||
| 1510 | |||
| 1511 | # industry sites: ventilation in WZ23 |
||
| 1512 | |||
| 1513 | print(" ") |
||
| 1514 | print("industry sites: ventilation in WZ23") |
||
| 1515 | print(" ") |
||
| 1516 | |||
| 1517 | dsm = ind_sites_vent_data_import(ind_vent_share, wz=WZ) |
||
| 1518 | |||
| 1519 | # drop entries of Cement Mills whose DSM-potentials have already been |
||
| 1520 | # modelled |
||
| 1521 | cement = np.unique(dsm_cement["bus"].values) |
||
| 1522 | index_names = np.array(dsm[dsm["bus"].isin(cement)].index) |
||
| 1523 | dsm.drop(index_names, inplace=True) |
||
| 1524 | |||
| 1525 | # calculate potentials of ventilation in industrial sites of WZ 23 |
||
| 1526 | # using parameters by Heitkoetter et al. |
||
| 1527 | p_max, p_min, e_max, e_min = calculate_potentials( |
||
| 1528 | s_flex=S_FLEX_WZ, |
||
| 1529 | s_util=S_UTIL_WZ, |
||
| 1530 | s_inc=S_INC_WZ, |
||
| 1531 | s_dec=S_DEC_WZ, |
||
| 1532 | delta_t=DELTA_T_WZ, |
||
| 1533 | dsm=dsm, |
||
| 1534 | ) |
||
| 1535 | |||
| 1536 | dsm_buses, dsm_links, dsm_stores = create_dsm_components( |
||
| 1537 | con, p_max, p_min, e_max, e_min, dsm |
||
| 1538 | ) |
||
| 1539 | |||
| 1540 | df_dsm_buses = gpd.GeoDataFrame( |
||
| 1541 | pd.concat([df_dsm_buses, dsm_buses], ignore_index=True), |
||
| 1542 | crs="EPSG:4326", |
||
| 1543 | geometry="geom", |
||
| 1544 | ) |
||
| 1545 | df_dsm_links = pd.DataFrame( |
||
| 1546 | pd.concat([df_dsm_links, dsm_links], ignore_index=True) |
||
| 1547 | ) |
||
| 1548 | df_dsm_stores = pd.DataFrame( |
||
| 1549 | pd.concat([df_dsm_stores, dsm_stores], ignore_index=True) |
||
| 1550 | ) |
||
| 1551 | |||
| 1552 | # aggregate DSM components per substation |
||
| 1553 | dsm_buses, dsm_links, dsm_stores = aggregate_components( |
||
| 1554 | df_dsm_buses, df_dsm_links, df_dsm_stores |
||
| 1555 | ) |
||
| 1556 | |||
| 1557 | # export aggregated DSM components to database |
||
| 1558 | |||
| 1559 | delete_dsm_entries("dsm-cts") |
||
| 1560 | delete_dsm_entries("dsm-ind-osm") |
||
| 1561 | delete_dsm_entries("dsm-ind-sites") |
||
| 1562 | delete_dsm_entries("dsm") |
||
| 1563 | |||
| 1564 | data_export(dsm_buses, dsm_links, dsm_stores, carrier="dsm") |
||
| 1565 | |||
| 1566 | |||
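The function above repeats the same three-frame concatenation once per subsector (CTS, OSM industry, paper, recycled paper, pulp, cement, WZ 23), which is exactly the kind of duplication flagged for this module. A possible extraction is sketched below; the helper name and signature are illustrative only and assume the module-level `geopandas as gpd` and `pandas as pd` imports.

```python
# Sketch only: a hypothetical helper the repeated blocks in dsm_cts_ind()
# could delegate to. It appends one set of DSM components to the accumulators.
def _append_dsm_components(
    df_dsm_buses, df_dsm_links, df_dsm_stores, dsm_buses, dsm_links, dsm_stores
):
    df_dsm_buses = gpd.GeoDataFrame(
        pd.concat([df_dsm_buses, dsm_buses], ignore_index=True),
        crs="EPSG:4326",
        geometry="geom",
    )
    df_dsm_links = pd.DataFrame(
        pd.concat([df_dsm_links, dsm_links], ignore_index=True)
    )
    df_dsm_stores = pd.DataFrame(
        pd.concat([df_dsm_stores, dsm_stores], ignore_index=True)
    )
    return df_dsm_buses, df_dsm_links, df_dsm_stores
```

Each per-subsector block would then shrink to a single call, e.g. `df_dsm_buses, df_dsm_links, df_dsm_stores = _append_dsm_components(df_dsm_buses, df_dsm_links, df_dsm_stores, dsm_buses, dsm_links, dsm_stores)`.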
| 1567 | def create_table(df, table, engine=CON): |
||
| 1568 | """Drop ``table`` if it exists, recreate it and write ``df`` into it.""" |
||
| 1569 | table.__table__.drop(bind=engine, checkfirst=True) |
||
| 1570 | table.__table__.create(bind=engine, checkfirst=True) |
||
| 1571 | |||
| 1572 | df.to_sql( |
||
| 1573 | name=table.__table__.name, |
||
| 1574 | schema=table.__table__.schema, |
||
| 1575 | con=engine, |
||
| 1576 | if_exists="append", |
||
| 1577 | index=False, |
||
| 1578 | ) |
||
| 1579 | |||
| 1580 | |||
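Because the table is dropped and recreated before `df.to_sql(..., if_exists="append")`, each call to `create_table` fully replaces the previously stored rows. A hypothetical call, mirroring the ones at the end of `dsm_cts_ind_individual`:

```python
# Illustrative only: cts_df must already carry the columns declared on the
# declarative class EgonEtragoElectricityCtsDsmTimeseries.
create_table(
    df=cts_df,
    table=EgonEtragoElectricityCtsDsmTimeseries,
    engine=CON,
)
```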
| 1581 | def div_list(lst: list, div: float): |
||
| 1582 | return [v / div for v in lst] |
||
| 1583 | |||
| 1584 | |||
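`div_list` rescales a list element-wise; in `dsm_cts_ind_individual` below it divides each `p_set` series by the respective demand share, which suggests the import routines scale the load curves down by that share and the division restores the full demand. A minimal illustration with hypothetical values:

```python
# Divide every element of a list by a scalar.
div_list([1.0, 2.0, 4.0], div=2.0)  # -> [0.5, 1.0, 2.0]
```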
| 1585 | def dsm_cts_ind_individual( |
||
| 1586 | cts_cool_vent_ac_share=CTS_COOL_VENT_AC_SHARE, |
||
| 1587 | ind_vent_cool_share=IND_VENT_COOL_SHARE, |
||
| 1588 | ind_vent_share=IND_VENT_SHARE, |
||
| 1589 | ): |
||
| 1590 | """ |
||
| 1591 | Execute the methodology to create and implement DSM components, considering |
||
| 1592 | a) CTS per osm-area: combined potentials of cooling, ventilation and air |
||
| 1593 | conditioning |
||
| 1594 | b) Industry per osm-area: combined potentials of cooling and ventilation |
||
| 1595 | c) Industrial Sites: potentials of ventilation in sites of |
||
| 1596 | "Wirtschaftszweig" (WZ) 23 |
||
| 1597 | d) Industrial Sites: potentials of sites specified by subsectors |
||
| 1598 | identified by Schmidt (https://zenodo.org/record/3613767#.YTsGwVtCRhG): |
||
| 1599 | Paper, Recycled Paper, Pulp, Cement |
||
| 1600 | |||
| 1601 | Modelled using the methods by Heitkoetter et al.: |
||
| 1602 | https://doi.org/10.1016/j.adapen.2020.100001 |
||
| 1603 | |||
| 1604 | Parameters |
||
| 1605 | ---------- |
||
| 1606 | cts_cool_vent_ac_share: float |
||
| 1607 | Share of cooling, ventilation and AC in CTS demand |
||
| 1608 | ind_vent_cool_share: float |
||
| 1609 | Share of cooling and ventilation in industry demand |
||
| 1610 | ind_vent_share: float |
||
| 1611 | Share of ventilation in industry demand in sites of WZ 23 |
||
| 1612 | |||
| 1613 | """ |
||
| 1614 | |||
| 1615 | # CTS per osm-area: cooling, ventilation and air conditioning |
||
| 1616 | |||
| 1617 | print(" ") |
||
| 1618 | print("CTS per osm-area: cooling, ventilation and air conditioning") |
||
| 1619 | print(" ") |
||
| 1620 | |||
| 1621 | dsm = cts_data_import(cts_cool_vent_ac_share) |
||
| 1622 | |||
| 1623 | # calculate combined potentials of cooling, ventilation and air |
||
| 1624 | # conditioning in CTS using combined parameters by Heitkoetter et al. |
||
| 1625 | vals = calculate_potentials( |
||
| 1626 | s_flex=S_FLEX_CTS, |
||
| 1627 | s_util=S_UTIL_CTS, |
||
| 1628 | s_inc=S_INC_CTS, |
||
| 1629 | s_dec=S_DEC_CTS, |
||
| 1630 | delta_t=DELTA_T_CTS, |
||
| 1631 | dsm=dsm, |
||
| 1632 | ) |
||
| 1633 | |||
| 1634 | dsm = dsm.assign( |
||
| 1635 | p_set=dsm.p_set.apply(div_list, div=cts_cool_vent_ac_share) |
||
| 1636 | ) |
||
| 1637 | |||
| 1638 | base_columns = [ |
||
| 1639 | "bus", |
||
| 1640 | "scn_name", |
||
| 1641 | "p_set", |
||
| 1642 | "p_max", |
||
| 1643 | "p_min", |
||
| 1644 | "e_max", |
||
| 1645 | "e_min", |
||
| 1646 | ] |
||
| 1647 | |||
| 1648 | cts_df = pd.concat([dsm, *vals], axis=1, ignore_index=True) |
||
| 1649 | cts_df.columns = base_columns |
||
| 1650 | |||
| 1651 | print(" ") |
||
| 1652 | print("industry per osm-area: cooling and ventilation") |
||
| 1653 | print(" ") |
||
| 1654 | |||
| 1655 | dsm = ind_osm_data_import_individual(ind_vent_cool_share) |
||
| 1656 | |||
| 1657 | # calculate combined potentials of cooling and ventilation in industrial |
||
| 1658 | # sector using combined parameters by Heitkoetter et al. |
||
| 1659 | vals = calculate_potentials( |
||
| 1660 | s_flex=S_FLEX_OSM, |
||
| 1661 | s_util=S_UTIL_OSM, |
||
| 1662 | s_inc=S_INC_OSM, |
||
| 1663 | s_dec=S_DEC_OSM, |
||
| 1664 | delta_t=DELTA_T_OSM, |
||
| 1665 | dsm=dsm, |
||
| 1666 | ) |
||
| 1667 | |||
| 1668 | dsm = dsm.assign(p_set=dsm.p_set.apply(div_list, div=ind_vent_cool_share)) |
||
| 1669 | |||
| 1670 | columns = ["osm_id"] + base_columns |
||
| 1671 | |||
| 1672 | osm_df = pd.concat([dsm, *vals], axis=1, ignore_index=True) |
||
| 1673 | osm_df.columns = columns |
||
| 1674 | |||
| 1675 | # industry sites |
||
| 1676 | |||
| 1677 | # industry sites: different applications |
||
| 1678 | |||
| 1679 | dsm = ind_sites_data_import() |
||
| 1680 | |||
| 1681 | print(" ") |
||
| 1682 | print("industry sites: paper") |
||
| 1683 | print(" ") |
||
| 1684 | |||
| 1685 | dsm_paper = gpd.GeoDataFrame( |
||
| 1686 | dsm[ |
||
| 1687 | dsm["application"].isin( |
||
| 1688 | [ |
||
| 1689 | "Graphic Paper", |
||
| 1690 | "Packing Paper and Board", |
||
| 1691 | "Hygiene Paper", |
||
| 1692 | "Technical/Special Paper and Board", |
||
| 1693 | ] |
||
| 1694 | ) |
||
| 1695 | ] |
||
| 1696 | ) |
||
| 1697 | |||
| 1698 | # calculate potentials of industrial sites with paper-applications |
||
| 1699 | # using parameters by Heitkoetter et al. |
||
| 1700 | vals = calculate_potentials( |
||
| 1701 | s_flex=S_FLEX_PAPER, |
||
| 1702 | s_util=S_UTIL_PAPER, |
||
| 1703 | s_inc=S_INC_PAPER, |
||
| 1704 | s_dec=S_DEC_PAPER, |
||
| 1705 | delta_t=DELTA_T_PAPER, |
||
| 1706 | dsm=dsm_paper, |
||
| 1707 | ) |
||
| 1708 | |||
| 1709 | columns = ["application", "industrial_sites_id"] + base_columns |
||
| 1710 | |||
| 1711 | paper_df = pd.concat([dsm_paper, *vals], axis=1, ignore_index=True) |
||
| 1712 | paper_df.columns = columns |
||
| 1713 | |||
| 1714 | print(" ") |
||
| 1715 | print("industry sites: recycled paper") |
||
| 1716 | print(" ") |
||
| 1717 | |||
| 1718 | # calculate potentials of industrial sites with recycled paper-applications |
||
| 1719 | # using parameters by Heitkoetter et al. |
||
| 1720 | dsm_recycled_paper = gpd.GeoDataFrame( |
||
| 1721 | dsm[dsm["application"] == "Recycled Paper"] |
||
| 1722 | ) |
||
| 1723 | |||
| 1724 | vals = calculate_potentials( |
||
| 1725 | s_flex=S_FLEX_RECYCLED_PAPER, |
||
| 1726 | s_util=S_UTIL_RECYCLED_PAPER, |
||
| 1727 | s_inc=S_INC_RECYCLED_PAPER, |
||
| 1728 | s_dec=S_DEC_RECYCLED_PAPER, |
||
| 1729 | delta_t=DELTA_T_RECYCLED_PAPER, |
||
| 1730 | dsm=dsm_recycled_paper, |
||
| 1731 | ) |
||
| 1732 | |||
| 1733 | recycled_paper_df = pd.concat( |
||
| 1734 | [dsm_recycled_paper, *vals], axis=1, ignore_index=True |
||
| 1735 | ) |
||
| 1736 | recycled_paper_df.columns = columns |
||
| 1737 | |||
| 1738 | print(" ") |
||
| 1739 | print("industry sites: pulp") |
||
| 1740 | print(" ") |
||
| 1741 | |||
| 1742 | dsm_pulp = gpd.GeoDataFrame(dsm[dsm["application"] == "Mechanical Pulp"]) |
||
| 1743 | |||
| 1744 | # calculate potentials of industrial sites with pulp-applications |
||
| 1745 | # using parameters by Heitkoetter et al. |
||
| 1746 | vals = calculate_potentials( |
||
| 1747 | s_flex=S_FLEX_PULP, |
||
| 1748 | s_util=S_UTIL_PULP, |
||
| 1749 | s_inc=S_INC_PULP, |
||
| 1750 | s_dec=S_DEC_PULP, |
||
| 1751 | delta_t=DELTA_T_PULP, |
||
| 1752 | dsm=dsm_pulp, |
||
| 1753 | ) |
||
| 1754 | |||
| 1755 | pulp_df = pd.concat([dsm_pulp, *vals], axis=1, ignore_index=True) |
||
| 1756 | pulp_df.columns = columns |
||
| 1757 | |||
| 1758 | # industry sites: cement |
||
| 1759 | |||
| 1760 | print(" ") |
||
| 1761 | print("industry sites: cement") |
||
| 1762 | print(" ") |
||
| 1763 | |||
| 1764 | dsm_cement = gpd.GeoDataFrame(dsm[dsm["application"] == "Cement Mill"]) |
||
| 1765 | |||
| 1766 | # calculate potentials of industrial sites with cement-applications |
||
| 1767 | # using parameters by Heitkoetter et al. |
||
| 1768 | vals = calculate_potentials( |
||
| 1769 | s_flex=S_FLEX_CEMENT, |
||
| 1770 | s_util=S_UTIL_CEMENT, |
||
| 1771 | s_inc=S_INC_CEMENT, |
||
| 1772 | s_dec=S_DEC_CEMENT, |
||
| 1773 | delta_t=DELTA_T_CEMENT, |
||
| 1774 | dsm=dsm_cement, |
||
| 1775 | ) |
||
| 1776 | |||
| 1777 | cement_df = pd.concat([dsm_cement, *vals], axis=1, ignore_index=True) |
||
| 1778 | cement_df.columns = columns |
||
| 1779 | |||
| 1780 | ind_df = pd.concat( |
||
| 1781 | [paper_df, recycled_paper_df, pulp_df, cement_df], ignore_index=True |
||
| 1782 | ) |
||
| 1783 | |||
| 1784 | # industry sites: ventilation in WZ23 |
||
| 1785 | |||
| 1786 | print(" ") |
||
| 1787 | print("industry sites: ventilation in WZ23") |
||
| 1788 | print(" ") |
||
| 1789 | |||
| 1790 | dsm = ind_sites_vent_data_import_individual(ind_vent_share, wz=WZ) |
||
| 1791 | |||
| 1792 | # drop entries of Cement Mills whose DSM-potentials have already been |
||
| 1793 | # modelled |
||
| 1794 | cement = np.unique(dsm_cement["bus"].values) |
||
| 1795 | index_names = np.array(dsm[dsm["bus"].isin(cement)].index) |
||
| 1796 | dsm.drop(index_names, inplace=True) |
||
| 1797 | |||
| 1798 | # calculate potentials of ventilation in industrial sites of WZ 23 |
||
| 1799 | # using parameters by Heitkoetter et al. |
||
| 1800 | vals = calculate_potentials( |
||
| 1801 | s_flex=S_FLEX_WZ, |
||
| 1802 | s_util=S_UTIL_WZ, |
||
| 1803 | s_inc=S_INC_WZ, |
||
| 1804 | s_dec=S_DEC_WZ, |
||
| 1805 | delta_t=DELTA_T_WZ, |
||
| 1806 | dsm=dsm, |
||
| 1807 | ) |
||
| 1808 | |||
| 1809 | columns = ["site_id"] + base_columns |
||
| 1810 | |||
| 1811 | ind_sites_df = pd.concat([dsm, *vals], axis=1, ignore_index=True) |
||
| 1812 | ind_sites_df.columns = columns |
||
| 1813 | |||
| 1814 | # create tables |
||
| 1815 | create_table( |
||
| 1816 | df=cts_df, table=EgonEtragoElectricityCtsDsmTimeseries, engine=CON |
||
| 1817 | ) |
||
| 1818 | create_table( |
||
| 1819 | df=osm_df, |
||
| 1820 | table=EgonOsmIndLoadCurvesIndividualDsmTimeseries, |
||
| 1821 | engine=CON, |
||
| 1822 | ) |
||
| 1823 | create_table( |
||
| 1824 | df=ind_df, |
||
| 1825 | table=EgonDemandregioSitesIndElectricityDsmTimeseries, |
||
| 1826 | engine=CON, |
||
| 1827 | ) |
||
| 1828 | create_table( |
||
| 1829 | df=ind_sites_df, |
||
| 1830 | table=EgonSitesIndLoadCurvesIndividualDsmTimeseries, |
||
| 1831 | engine=CON, |
||
| 1832 | ) |
||
| 1833 | |||
| 1834 | |||
| 1835 | def dsm_cts_ind_processing(): |
||
| 1836 | dsm_cts_ind() |
||
| 1837 | |||
| 1838 | dsm_cts_ind_individual() |
||
| 1839 | |||
| 1840 | add_metadata_individual() |
||
| 1841 |
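As a closing usage note (a sketch only, not part of the module): `dsm_cts_ind_processing` is the function the pipeline is expected to run, chaining the aggregated and individual variants plus the metadata step; the individual variant can also be called directly with explicit shares.

```python
# Sketch of a direct invocation; keyword values default to the module constants.
dsm_cts_ind_individual(
    cts_cool_vent_ac_share=CTS_COOL_VENT_AC_SHARE,
    ind_vent_cool_share=IND_VENT_COOL_SHARE,
    ind_vent_share=IND_VENT_SHARE,
)

# Or run the full chain (aggregated + individual + metadata):
dsm_cts_ind_processing()
```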