Total Complexity | 113 |
Total Lines | 2476 |
Duplicated Lines | 3.27 % |
Changes | 0 |
Duplicate code is one of the most pungent code smells. A rule that is often used is to re-structure code once it is duplicated in three or more places.
Common duplication problems, and corresponding solutions are:
Complex classes like data.datasets.heat_supply.individual_heating often do a lot of different things. To break such a class down, we need to identify a cohesive component within that class. A common approach to find such a component is to look for fields/methods that share the same prefixes, or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.
1 | """The central module containing all code dealing with individual heat supply. |
||
2 | |||
3 | The following main things are done in this module: |
||
4 | |||
5 | * ?? |
||
6 | * Desaggregation of heat pump capacities to individual buildings |
||
7 | * Determination of minimum required heat pump capacity for pypsa-eur-sec |
||
8 | |||
9 | """ |
||
10 | |||
11 | from pathlib import Path |
||
12 | import os |
||
13 | import random |
||
14 | |||
15 | from airflow.operators.python import PythonOperator |
||
16 | from psycopg2.extensions import AsIs, register_adapter |
||
17 | from sqlalchemy import ARRAY, REAL, Column, Integer, String |
||
18 | from sqlalchemy.ext.declarative import declarative_base |
||
19 | import geopandas as gpd |
||
20 | import numpy as np |
||
21 | import pandas as pd |
||
22 | import saio |
||
23 | |||
24 | from egon.data import config, db, logger |
||
25 | from egon.data.datasets import Dataset, wrapped_partial |
||
26 | from egon.data.datasets.district_heating_areas import ( |
||
27 | MapZensusDistrictHeatingAreas, |
||
28 | ) |
||
29 | from egon.data.datasets.electricity_demand_timeseries.cts_buildings import ( |
||
30 | calc_cts_building_profiles, |
||
31 | ) |
||
32 | from egon.data.datasets.electricity_demand_timeseries.mapping import ( |
||
33 | EgonMapZensusMvgdBuildings, |
||
34 | ) |
||
35 | from egon.data.datasets.electricity_demand_timeseries.tools import ( |
||
36 | write_table_to_postgres, |
||
37 | ) |
||
38 | from egon.data.datasets.emobility.motorized_individual_travel.helpers import ( |
||
39 | reduce_mem_usage, |
||
40 | ) |
||
41 | from egon.data.datasets.heat_demand import EgonPetaHeat |
||
42 | from egon.data.datasets.heat_demand_timeseries.daily import ( |
||
43 | EgonDailyHeatDemandPerClimateZone, |
||
44 | EgonMapZensusClimateZones, |
||
45 | ) |
||
46 | from egon.data.datasets.heat_demand_timeseries.idp_pool import ( |
||
47 | EgonHeatTimeseries, |
||
48 | ) |
||
49 | |||
50 | # get zensus cells with district heating |
||
51 | from egon.data.datasets.zensus_mv_grid_districts import MapZensusGridDistricts |
||
52 | |||
53 | engine = db.engine() |
||
54 | Base = declarative_base() |
||
55 | |||
56 | scenarios = config.settings()["egon-data"]["--scenarios"] |
||
57 | |||
58 | |||
59 | class EgonEtragoTimeseriesIndividualHeating(Base): |
||
60 | """ |
||
61 | Class definition of table demand.egon_etrago_timeseries_individual_heating. |
||
62 | |||
63 | This table contains aggregated heat load profiles of all buildings with heat pumps |
||
64 | within an MV grid as well as of all buildings with gas boilers within an MV grid for |
||
65 | the different scenarios. The data is used in eTraGo. |
||
66 | |||
67 | """ |
||
68 | |||
69 | __tablename__ = "egon_etrago_timeseries_individual_heating" |
||
70 | __table_args__ = {"schema": "demand"} |
||
71 | bus_id = Column(Integer, primary_key=True) |
||
72 | scenario = Column(String, primary_key=True) |
||
73 | carrier = Column(String, primary_key=True) |
||
74 | dist_aggregated_mw = Column(ARRAY(REAL)) |
||
75 | |||
76 | |||
77 | class EgonHpCapacityBuildings(Base): |
||
78 | """ |
||
79 | Class definition of table demand.egon_hp_capacity_buildings. |
||
80 | |||
81 | This table contains the heat pump capacity of all buildings with a heat pump. |
||
82 | |||
83 | """ |
||
84 | |||
85 | __tablename__ = "egon_hp_capacity_buildings" |
||
86 | __table_args__ = {"schema": "demand"} |
||
87 | building_id = Column(Integer, primary_key=True) |
||
88 | scenario = Column(String, primary_key=True) |
||
89 | hp_capacity = Column(REAL) |
||
90 | |||
91 | |||
92 | class HeatPumpsPypsaEur(Dataset): |
||
93 | """ |
||
94 | Class to determine minimum heat pump capcacities per building for the PyPSA-EUR run. |
||
95 | |||
96 | The goal is to ensure that the heat pump capacities determined in PyPSA-EUR are |
||
97 | sufficient to serve the heat demand of individual buildings after the |
||
98 | desaggregation from a few nodes in PyPSA-EUR to the individual buildings. |
||
99 | As the heat peak load is not previously determined, it is as well done in this |
||
100 | dataset. Further, as determining heat peak load requires heat load |
||
101 | profiles of the buildings to be set up, this task is also utilised to set up |
||
102 | heat load profiles of all buildings with heat pumps within a grid in the eGon100RE |
||
103 | scenario used in eTraGo. |
||
104 | |||
105 | For more information see data documentation on :ref:`dec-heat-pumps-ref`. |
||
106 | |||
107 | *Dependencies* |
||
108 | * :py:class:`CtsDemandBuildings |
||
109 | <egon.data.datasets.electricity_demand_timeseries.cts_buildings.CtsDemandBuildings>` |
||
110 | * :py:class:`DistrictHeatingAreas |
||
111 | <egon.data.datasets.district_heating_areas.DistrictHeatingAreas>` |
||
112 | * :py:class:`HeatTimeSeries |
||
113 | <egon.data.datasets.heat_demand_timeseries.HeatTimeSeries>` |
||
114 | |||
115 | *Resulting tables* |
||
116 | * `input-pypsa-eur-sec/minimum_hp_capacity_mv_grid_100RE.csv` file is created, |
||
117 | containing the minimum required heat pump capacity per MV grid in MW as |
||
118 | input for PyPSA-EUR (created within :func:`export_min_cap_to_csv`) |
||
119 | * :py:class:`demand.egon_etrago_timeseries_individual_heating |
||
120 | <egon.data.datasets.heat_supply.individual_heating.EgonEtragoTimeseriesIndividualHeating>` |
||
121 | is created and filled |
||
122 | * :py:class:`demand.egon_building_heat_peak_loads |
||
123 | <egon.data.datasets.heat_supply.individual_heating.BuildingHeatPeakLoads>` |
||
124 | is created and filled |
||
125 | |||
126 | **What is the challenge?** |
||
127 | |||
128 | The main challenge lies in the set up of heat demand profiles per building in |
||
129 | :func:`aggregate_residential_and_cts_profiles()` as it takes alot of time and |
||
130 | in grids with a high number of buildings requires alot of RAM. Both runtime and RAM |
||
131 | usage needed to be improved several times. To speed up the process, tasks are set |
||
132 | up to run in parallel. This currently leads to alot of connections being opened and |
||
133 | at a certain point to a runtime error due to too many open connections. |
||
134 | |||
135 | **What are central assumptions during the data processing?** |
||
136 | |||
137 | Central assumption for determining the minimum required heat pump capacity |
||
138 | is that heat pumps can be dimensioned using an approach from the network development |
||
139 | plan that uses the building's peak heat demand and a fixed COP (see |
||
140 | data documentation on :ref:`dec-heat-pumps-ref`). |
||
141 | |||
142 | **Drawbacks and limitations of the data** |
||
143 | |||
144 | The heat demand profiles used here to determine the heat peak load have very few |
||
145 | very high peaks that lead to large heat pump capacities. This should be solved |
||
146 | somehow. Cutting off the peak is not possible, as the time series of each building |
||
147 | is not saved but generated on the fly. Also, just using smaller heat pumps would |
||
148 | lead to infeasibilities in eDisGo. |
||
149 | |||
150 | """ |
||
151 | |||
152 | #: |
||
153 | name: str = "HeatPumpsPypsaEurSec" |
||
154 | #: |
||
155 | version: str = "0.0.3" |
||
156 | |||
157 | def __init__(self, dependencies): |
||
158 | def dyn_parallel_tasks_pypsa_eur(): |
||
159 | """Dynamically generate tasks |
||
160 | The goal is to speed up tasks by parallelising bulks of mvgds. |
||
161 | |||
162 | The number of parallel tasks is defined via parameter |
||
163 | `parallel_tasks` in the dataset config `datasets.yml`. |
||
164 | |||
165 | Returns |
||
166 | ------- |
||
167 | set of airflow.PythonOperators |
||
168 | The tasks. Each element is of |
||
169 | :func:`egon.data.datasets.heat_supply.individual_heating. |
||
170 | determine_hp_cap_peak_load_mvgd_ts_pypsa_eur` |
||
171 | """ |
||
172 | parallel_tasks = config.datasets()["demand_timeseries_mvgd"].get( |
||
173 | "parallel_tasks", 1 |
||
174 | ) |
||
175 | |||
176 | tasks = set() |
||
177 | |||
178 | for i in range(parallel_tasks): |
||
179 | tasks.add( |
||
180 | PythonOperator( |
||
181 | task_id=( |
||
182 | f"individual_heating." |
||
183 | f"determine-hp-capacity-pypsa-eur-" |
||
184 | f"mvgd-bulk{i}" |
||
185 | ), |
||
186 | python_callable=split_mvgds_into_bulks, |
||
187 | op_kwargs={ |
||
188 | "n": i, |
||
189 | "max_n": parallel_tasks, |
||
190 | "func": determine_hp_cap_peak_load_mvgd_ts_pypsa_eur, # noqa: E501 |
||
191 | }, |
||
192 | ) |
||
193 | ) |
||
194 | return tasks |
||
195 | |||
196 | tasks_HeatPumpsPypsaEur = set() |
||
197 | |||
198 | if "eGon100RE" in scenarios: |
||
199 | tasks_HeatPumpsPypsaEur = ( |
||
200 | delete_pypsa_eur_sec_csv_file, |
||
201 | delete_mvgd_ts_100RE, |
||
202 | delete_heat_peak_loads_100RE, |
||
203 | {*dyn_parallel_tasks_pypsa_eur()}, |
||
204 | ) |
||
205 | else: |
||
206 | tasks_HeatPumpsPypsaEur = ( |
||
207 | PythonOperator( |
||
208 | task_id="HeatPumpsPypsaEur_skipped", |
||
209 | python_callable=skip_task, |
||
210 | op_kwargs={ |
||
211 | "scn": "eGon100RE", |
||
212 | "task": "HeatPumpsPypsaEur", |
||
213 | }, |
||
214 | ), |
||
215 | ) |
||
216 | |||
217 | super().__init__( |
||
218 | name=self.name, |
||
219 | version=self.version, |
||
220 | dependencies=dependencies, |
||
221 | tasks=tasks_HeatPumpsPypsaEur, |
||
222 | ) |
||
223 | |||
224 | |||
225 | class HeatPumpsStatusQuo(Dataset): |
||
226 | def __init__(self, dependencies): |
||
227 | def dyn_parallel_tasks_status_quo(scenario): |
||
228 | """Dynamically generate tasks |
||
229 | |||
230 | The goal is to speed up tasks by parallelising bulks of mvgds. |
||
231 | |||
232 | The number of parallel tasks is defined via parameter |
||
233 | `parallel_tasks` in the dataset config `datasets.yml`. |
||
234 | |||
235 | Returns |
||
236 | ------- |
||
237 | set of airflow.PythonOperators |
||
238 | The tasks. Each element is of |
||
239 | :func:`egon.data.datasets.heat_supply.individual_heating. |
||
240 | determine_hp_cap_peak_load_mvgd_ts_status_quo` |
||
241 | """ |
||
242 | parallel_tasks = config.datasets()["demand_timeseries_mvgd"].get( |
||
243 | "parallel_tasks", 1 |
||
244 | ) |
||
245 | |||
246 | tasks = set() |
||
247 | |||
248 | for i in range(parallel_tasks): |
||
249 | tasks.add( |
||
250 | PythonOperator( |
||
251 | task_id=( |
||
252 | "individual_heating." |
||
253 | f"determine-hp-capacity-{scenario}-" |
||
254 | f"mvgd-bulk{i}" |
||
255 | ), |
||
256 | python_callable=split_mvgds_into_bulks, |
||
257 | op_kwargs={ |
||
258 | "n": i, |
||
259 | "max_n": parallel_tasks, |
||
260 | "scenario": scenario, |
||
261 | "func": determine_hp_cap_peak_load_mvgd_ts_status_quo, |
||
262 | }, |
||
263 | ) |
||
264 | ) |
||
265 | return tasks |
||
266 | |||
267 | if any( |
||
268 | "status" in scenario |
||
269 | for scenario in config.settings()["egon-data"]["--scenarios"] |
||
270 | ): |
||
271 | tasks = () |
||
272 | |||
273 | for scenario in config.settings()["egon-data"]["--scenarios"]: |
||
274 | if "status" in scenario: |
||
275 | postfix = f"_{scenario[-4:]}" |
||
276 | |||
277 | tasks += ( |
||
278 | wrapped_partial( |
||
279 | delete_heat_peak_loads_status_quo, |
||
280 | scenario=scenario, |
||
281 | postfix=postfix, |
||
282 | ), |
||
283 | wrapped_partial( |
||
284 | delete_hp_capacity_status_quo, |
||
285 | scenario=scenario, |
||
286 | postfix=postfix, |
||
287 | ), |
||
288 | wrapped_partial( |
||
289 | delete_mvgd_ts_status_quo, |
||
290 | scenario=scenario, |
||
291 | postfix=postfix, |
||
292 | ), |
||
293 | ) |
||
294 | |||
295 | tasks += ({*dyn_parallel_tasks_status_quo(scenario)},) |
||
296 | else: |
||
297 | tasks = ( |
||
298 | PythonOperator( |
||
299 | task_id="HeatPumpsSQ_skipped", |
||
300 | python_callable=skip_task, |
||
301 | op_kwargs={"scn": "sq", "task": "HeatPumpsStatusQuo"}, |
||
302 | ), |
||
303 | ) |
||
304 | |||
305 | super().__init__( |
||
306 | name="HeatPumpsStatusQuo", |
||
307 | version="0.0.4", |
||
308 | dependencies=dependencies, |
||
309 | tasks=tasks, |
||
310 | ) |
||
311 | |||
312 | |||
313 | class HeatPumps2035(Dataset): |
||
314 | """ |
||
315 | Class for desaggregation of heat pump capcacities per MV grid district to individual |
||
316 | buildings for eGon2035 scenario. |
||
317 | |||
318 | The heat pump capacity per MV grid district is disaggregated to buildings |
||
319 | with individual heating based on the buildings heat peak demand. The buildings are |
||
320 | chosen randomly until the target capacity per MV grid district is reached. Buildings |
||
321 | with PV rooftop have a higher probability to be assigned a heat pump. As the |
||
322 | building's heat peak load is not previously determined, it is as well done in this |
||
323 | dataset. Further, as determining heat peak load requires heat load |
||
324 | profiles of the buildings to be set up, this task is also utilised to set up |
||
325 | aggregated heat load profiles of all buildings with heat pumps within a grid as |
||
326 | well as for all buildings with a gas boiler (i.e. all buildings with decentral |
||
327 | heating system minus buildings with heat pump) needed in eTraGo. |
||
328 | |||
329 | For more information see data documentation on :ref:`dec-heat-pumps-ref`. |
||
330 | |||
331 | Heat pump capacity per building in the eGon100RE scenario is set up in a separate |
||
332 | dataset, :py:class:`HeatPumps2050 <HeatPumps2050>`, as for one reason in case of the |
||
333 | eGon100RE scenario the minimum required heat pump capacity per building can directly |
||
334 | be determined using the peak heat demand per building determined in the dataset |
||
335 | :py:class:`HeatPumpsPypsaEurSec <HeatPumpsPypsaEurSec>`, whereas peak heat |
||
336 | demand data does not yet exist for the eGon2035 scenario. Another reason is, |
||
337 | that in case of the eGon100RE scenario all buildings with individual heating have a |
||
338 | heat pump whereas in the eGon2035 scenario buildings are randomly selected until the |
||
339 | installed heat pump capacity per MV grid is met. All other buildings with individual |
||
340 | heating but no heat pump are assigned a gas boiler. |
||
341 | |||
342 | *Dependencies* |
||
343 | * :py:class:`CtsDemandBuildings |
||
344 | <egon.data.datasets.electricity_demand_timeseries.cts_buildings.CtsDemandBuildings>` |
||
345 | * :py:class:`DistrictHeatingAreas |
||
346 | <egon.data.datasets.district_heating_areas.DistrictHeatingAreas>` |
||
347 | * :py:class:`HeatSupply <egon.data.datasets.heat_supply.HeatSupply>` |
||
348 | * :py:class:`HeatTimeSeries |
||
349 | <egon.data.datasets.heat_demand_timeseries.HeatTimeSeries>` |
||
350 | * :py:class:`HeatPumpsPypsaEurSec |
||
351 | <egon.data.datasets.heat_supply.individual_heating.HeatPumpsPypsaEurSec>` |
||
352 | * :py:func:`pv_rooftop_to_buildings |
||
353 | <egon.data.datasets.power_plants.pv_rooftop_buildings.pv_rooftop_to_buildings>` |
||
354 | |||
355 | *Resulting tables* |
||
356 | * :py:class:`demand.egon_hp_capacity_buildings |
||
357 | <egon.data.datasets.heat_supply.individual_heating.EgonHpCapacityBuildings>` |
||
358 | is created (if it doesn't yet exist) and filled |
||
359 | * :py:class:`demand.egon_etrago_timeseries_individual_heating |
||
360 | <egon.data.datasets.heat_supply.individual_heating.EgonEtragoTimeseriesIndividualHeating>` |
||
361 | is created (if it doesn't yet exist) and filled |
||
362 | * :py:class:`demand.egon_building_heat_peak_loads |
||
363 | <egon.data.datasets.heat_supply.individual_heating.BuildingHeatPeakLoads>` |
||
364 | is created (if it doesn't yet exist) and filled |
||
365 | |||
366 | **What is the challenge?** |
||
367 | |||
368 | The main challenge lies in the set up of heat demand profiles per building in |
||
369 | :func:`aggregate_residential_and_cts_profiles()` as it takes alot of time and |
||
370 | in grids with a high number of buildings requires alot of RAM. Both runtime and RAM |
||
371 | usage needed to be improved several times. To speed up the process, tasks are set |
||
372 | up to run in parallel. This currently leads to alot of connections being opened and |
||
373 | at a certain point to a runtime error due to too many open connections. |
||
374 | |||
375 | **What are central assumptions during the data processing?** |
||
376 | |||
377 | Central assumption for desaggregating the heat pump capacity to individual buildings |
||
378 | is that heat pumps can be dimensioned using an approach from the network development |
||
379 | plan that uses the building's peak heat demand and a fixed COP (see |
||
380 | data documentation on :ref:`dec-heat-pumps-ref`). |
||
381 | Another central assumption is, that buildings with PV rooftop plants are more likely |
||
382 | to have a heat pump than other buildings (see |
||
383 | :func:`determine_buildings_with_hp_in_mv_grid()` for details). |
||
384 | |||
385 | **Drawbacks and limitations of the data** |
||
386 | |||
387 | The heat demand profiles used here to determine the heat peak load have very few |
||
388 | very high peaks that lead to large heat pump capacities. This should be solved |
||
389 | somehow. Cutting off the peak is not possible, as the time series of each building |
||
390 | is not saved but generated on the fly. Also, just using smaller heat pumps would |
||
391 | lead to infeasibilities in eDisGo. |
||
392 | |||
393 | """ |
||
394 | |||
395 | #: |
||
396 | name: str = "HeatPumps2035" |
||
397 | #: |
||
398 | version: str = "0.0.3" |
||
399 | |||
400 | def __init__(self, dependencies): |
||
401 | def dyn_parallel_tasks_2035(): |
||
402 | """Dynamically generate tasks |
||
403 | |||
404 | The goal is to speed up tasks by parallelising bulks of mvgds. |
||
405 | |||
406 | The number of parallel tasks is defined via parameter |
||
407 | `parallel_tasks` in the dataset config `datasets.yml`. |
||
408 | |||
409 | Returns |
||
410 | ------- |
||
411 | set of airflow.PythonOperators |
||
412 | The tasks. Each element is of |
||
413 | :func:`egon.data.datasets.heat_supply.individual_heating. |
||
414 | determine_hp_cap_peak_load_mvgd_ts_2035` |
||
415 | """ |
||
416 | parallel_tasks = config.datasets()["demand_timeseries_mvgd"].get( |
||
417 | "parallel_tasks", 1 |
||
418 | ) |
||
419 | |||
420 | tasks = set() |
||
421 | |||
422 | for i in range(parallel_tasks): |
||
423 | tasks.add( |
||
424 | PythonOperator( |
||
425 | task_id=( |
||
426 | "individual_heating." |
||
427 | f"determine-hp-capacity-2035-" |
||
428 | f"mvgd-bulk{i}" |
||
429 | ), |
||
430 | python_callable=split_mvgds_into_bulks, |
||
431 | op_kwargs={ |
||
432 | "n": i, |
||
433 | "max_n": parallel_tasks, |
||
434 | "func": determine_hp_cap_peak_load_mvgd_ts_2035, |
||
435 | }, |
||
436 | ) |
||
437 | ) |
||
438 | return tasks |
||
439 | |||
440 | if "eGon2035" in scenarios: |
||
441 | tasks_HeatPumps2035 = ( |
||
442 | delete_heat_peak_loads_2035, |
||
443 | delete_hp_capacity_2035, |
||
444 | delete_mvgd_ts_2035, |
||
445 | {*dyn_parallel_tasks_2035()}, |
||
446 | ) |
||
447 | else: |
||
448 | tasks_HeatPumps2035 = ( |
||
449 | PythonOperator( |
||
450 | task_id="HeatPumps2035_skipped", |
||
451 | python_callable=skip_task, |
||
452 | op_kwargs={"scn": "eGon2035", "task": "HeatPumps2035"}, |
||
453 | ), |
||
454 | ) |
||
455 | |||
456 | super().__init__( |
||
457 | name=self.version, |
||
458 | version="0.0.3", |
||
459 | dependencies=dependencies, |
||
460 | tasks=tasks_HeatPumps2035, |
||
461 | ) |
||
462 | |||
463 | |||
464 | class HeatPumps2050(Dataset): |
||
465 | """ |
||
466 | Class for desaggregation of heat pump capcacities per MV grid district to individual |
||
467 | buildings for eGon100RE scenario. |
||
468 | |||
469 | Optimised heat pump capacity from PyPSA-EUR run is disaggregated to all buildings |
||
470 | with individual heating (as heat pumps are the only option for individual heating |
||
471 | in the eGon100RE scenario) based on buildings heat peak demand. The heat peak demand |
||
472 | per building does in this dataset, in contrast to the |
||
473 | :py:class:`HeatPumps2035 <egon.data.datasets.pypsaeursec.HeatPumps2035>` dataset, |
||
474 | not need to be determined, as it was already determined in the |
||
475 | :py:class:`PypsaEurSec <egon.data.datasets.pypsaeursec.PypsaEurSec>` dataset. |
||
476 | |||
477 | For more information see data documentation on :ref:`dec-heat-pumps-ref`. |
||
478 | |||
479 | Heat pump capacity per building for the eGon2035 scenario is set up in a separate |
||
480 | dataset, :py:class:`HeatPumps2035 <HeatPumps2035>`. See there for further |
||
481 | information as to why. |
||
482 | |||
483 | *Dependencies* |
||
484 | * :py:class:`PypsaEurSec <egon.data.datasets.pypsaeursec.PypsaEurSec>` |
||
485 | * :py:class:`HeatPumpsPypsaEurSec |
||
486 | <egon.data.datasets.heat_supply.individual_heating.HeatPumpsPypsaEurSec>` |
||
487 | * :py:class:`HeatSupply <egon.data.datasets.heat_supply.HeatSupply>` |
||
488 | |||
489 | *Resulting tables* |
||
490 | * :py:class:`demand.egon_hp_capacity_buildings |
||
491 | <egon.data.datasets.heat_supply.individual_heating.EgonHpCapacityBuildings>` |
||
492 | is created (if it doesn't yet exist) and filled |
||
493 | |||
494 | **What are central assumptions during the data processing?** |
||
495 | |||
496 | Central assumption for desaggregating the heat pump capacity to individual buildings |
||
497 | is that heat pumps can be dimensioned using an approach from the network development |
||
498 | plan that uses the building's peak heat demand and a fixed COP (see |
||
499 | data documentation on :ref:`dec-heat-pumps-ref`). |
||
500 | |||
501 | **Drawbacks and limitations of the data** |
||
502 | |||
503 | The heat demand profiles used here to determine the heat peak load have very few |
||
504 | very high peaks that lead to large heat pump capacities. This should be solved |
||
505 | somehow. Cutting off the peak is not possible, as the time series of each building |
||
506 | is not saved but generated on the fly. Also, just using smaller heat pumps would |
||
507 | lead to infeasibilities in eDisGo. |
||
508 | |||
509 | """ |
||
510 | |||
511 | #: |
||
512 | name: str = "HeatPumps2050" |
||
513 | #: |
||
514 | version: str = "0.0.3" |
||
515 | |||
516 | def __init__(self, dependencies): |
||
517 | tasks_HeatPumps2050 = set() |
||
518 | |||
519 | if "eGon100RE" in scenarios: |
||
520 | tasks_HeatPumps2050 = ( |
||
521 | delete_hp_capacity_100RE, |
||
522 | determine_hp_cap_buildings_eGon100RE, |
||
523 | ) |
||
524 | else: |
||
525 | tasks_HeatPumps2050 = ( |
||
526 | PythonOperator( |
||
527 | task_id="HeatPumps2050_skipped", |
||
528 | python_callable=skip_task, |
||
529 | op_kwargs={"scn": "eGon100RE", "task": "HeatPumps2050"}, |
||
530 | ), |
||
531 | ) |
||
532 | |||
533 | super().__init__( |
||
534 | name=self.name, |
||
535 | version=self.version, |
||
536 | dependencies=dependencies, |
||
537 | tasks=tasks_HeatPumps2050, |
||
538 | ) |
||
539 | |||
540 | |||
541 | View Code Duplication | class BuildingHeatPeakLoads(Base): |
|
|
|||
542 | """ |
||
543 | Class definition of table demand.egon_building_heat_peak_loads. |
||
544 | |||
545 | Table with peak heat demand of residential and CTS heat demand combined for |
||
546 | each building. |
||
547 | |||
548 | """ |
||
549 | |||
550 | __tablename__ = "egon_building_heat_peak_loads" |
||
551 | __table_args__ = {"schema": "demand"} |
||
552 | |||
553 | building_id = Column(Integer, primary_key=True) |
||
554 | scenario = Column(String, primary_key=True) |
||
555 | sector = Column(String, primary_key=True) |
||
556 | peak_load_in_w = Column(REAL) |
||
557 | |||
558 | |||
559 | def skip_task(scn=str, task=str): |
||
560 | logger.info( |
||
561 | f"{scn} is not in the list of scenarios. {task} dataset is skipped." |
||
562 | ) |
||
563 | |||
564 | return |
||
565 | |||
566 | |||
567 | def adapt_numpy_float64(numpy_float64): |
||
568 | return AsIs(numpy_float64) |
||
569 | |||
570 | |||
571 | def adapt_numpy_int64(numpy_int64): |
||
572 | return AsIs(numpy_int64) |
||
573 | |||
574 | |||
575 | def cascade_per_technology( |
||
576 | heat_per_mv, |
||
577 | technologies, |
||
578 | scenario, |
||
579 | distribution_level, |
||
580 | max_size_individual_chp=0.05, |
||
581 | ): |
||
582 | """Add plants for individual heat. |
||
583 | Currently only on mv grid district level. |
||
584 | |||
585 | Parameters |
||
586 | ---------- |
||
587 | mv_grid_districts : geopandas.geodataframe.GeoDataFrame |
||
588 | MV grid districts including the heat demand |
||
589 | technologies : pandas.DataFrame |
||
590 | List of supply technologies and their parameters |
||
591 | scenario : str |
||
592 | Name of the scenario |
||
593 | max_size_individual_chp : float |
||
594 | Maximum capacity of an individual chp in MW |
||
595 | Returns |
||
596 | ------- |
||
597 | mv_grid_districts : geopandas.geodataframe.GeoDataFrame |
||
598 | MV grid district which need additional individual heat supply |
||
599 | technologies : pandas.DataFrame |
||
600 | List of supply technologies and their parameters |
||
601 | append_df : pandas.DataFrame |
||
602 | List of plants per mv grid for the selected technology |
||
603 | |||
604 | """ |
||
605 | sources = config.datasets()["heat_supply"]["sources"] |
||
606 | |||
607 | tech = technologies[technologies.priority == technologies.priority.max()] |
||
608 | |||
609 | # Distribute heat pumps linear to remaining demand. |
||
610 | if tech.index == "heat_pump": |
||
611 | if distribution_level == "federal_states": |
||
612 | # Select target values per federal state |
||
613 | target = db.select_dataframe( |
||
614 | f""" |
||
615 | SELECT DISTINCT ON (gen) gen as state, capacity |
||
616 | FROM {sources['scenario_capacities']['schema']}. |
||
617 | {sources['scenario_capacities']['table']} a |
||
618 | JOIN {sources['federal_states']['schema']}. |
||
619 | {sources['federal_states']['table']} b |
||
620 | ON a.nuts = b.nuts |
||
621 | WHERE scenario_name = '{scenario}' |
||
622 | AND carrier = 'residential_rural_heat_pump' |
||
623 | """, |
||
624 | index_col="state", |
||
625 | ) |
||
626 | |||
627 | heat_per_mv["share"] = heat_per_mv.groupby( |
||
628 | "state", |
||
629 | group_keys=False, |
||
630 | ).remaining_demand.apply(lambda grp: grp / grp.sum()) |
||
631 | |||
632 | append_df = ( |
||
633 | heat_per_mv["share"] |
||
634 | .mul(target.capacity[heat_per_mv["state"]].values) |
||
635 | .reset_index() |
||
636 | ) |
||
637 | else: |
||
638 | # Select target value for Germany |
||
639 | target = db.select_dataframe( |
||
640 | f""" |
||
641 | SELECT SUM(capacity) AS capacity |
||
642 | FROM {sources['scenario_capacities']['schema']}. |
||
643 | {sources['scenario_capacities']['table']} a |
||
644 | WHERE scenario_name = '{scenario}' |
||
645 | AND carrier = 'rural_heat_pump' |
||
646 | """ |
||
647 | ) |
||
648 | |||
649 | if not target.capacity[0]: |
||
650 | target.capacity[0] = 0 |
||
651 | |||
652 | if ( |
||
653 | config.settings()["egon-data"]["--dataset-boundary"] |
||
654 | == "Schleswig-Holstein" |
||
655 | ): |
||
656 | target.capacity[0] /= 16 |
||
657 | |||
658 | heat_per_mv["share"] = ( |
||
659 | heat_per_mv.remaining_demand |
||
660 | / heat_per_mv.remaining_demand.sum() |
||
661 | ) |
||
662 | |||
663 | append_df = ( |
||
664 | heat_per_mv["share"].mul(target.capacity[0]).reset_index() |
||
665 | ) |
||
666 | |||
667 | append_df.rename( |
||
668 | {"bus_id": "mv_grid_id", "share": "capacity"}, axis=1, inplace=True |
||
669 | ) |
||
670 | |||
671 | elif (tech.index == "gas_boiler") & (scenario == "eGon2035"): |
||
672 | append_df = pd.DataFrame( |
||
673 | data={ |
||
674 | "capacity": heat_per_mv.remaining_demand.div( |
||
675 | tech.estimated_flh.values[0] |
||
676 | ), |
||
677 | "carrier": f"residential_rural_{tech.index}", |
||
678 | "mv_grid_id": heat_per_mv.index, |
||
679 | "scenario": scenario, |
||
680 | } |
||
681 | ) |
||
682 | |||
683 | elif tech.index in ("gas_boiler", "resistive_heater", "solar_thermal"): |
||
684 | # Select target value for Germany |
||
685 | target = db.select_dataframe( |
||
686 | f""" |
||
687 | SELECT SUM(capacity) AS capacity |
||
688 | FROM {sources['scenario_capacities']['schema']}. |
||
689 | {sources['scenario_capacities']['table']} a |
||
690 | WHERE scenario_name = '{scenario}' |
||
691 | AND carrier = 'rural_{tech.index[0]}' |
||
692 | """ |
||
693 | ) |
||
694 | |||
695 | if ( |
||
696 | config.settings()["egon-data"]["--dataset-boundary"] |
||
697 | == "Schleswig-Holstein" |
||
698 | ): |
||
699 | target.capacity[0] /= 16 |
||
700 | |||
701 | heat_per_mv["share"] = ( |
||
702 | heat_per_mv.remaining_demand / heat_per_mv.remaining_demand.sum() |
||
703 | ) |
||
704 | |||
705 | append_df = heat_per_mv["share"].mul(target.capacity[0]).reset_index() |
||
706 | |||
707 | append_df.rename( |
||
708 | {"bus_id": "mv_grid_id", "share": "capacity"}, axis=1, inplace=True |
||
709 | ) |
||
710 | |||
711 | else: |
||
712 | append_df = pd.DataFrame( |
||
713 | data={ |
||
714 | "capacity": heat_per_mv.remaining_demand.div( |
||
715 | tech.estimated_flh.values[0] |
||
716 | ), |
||
717 | "carrier": f"residential_rural_{tech.index}", |
||
718 | "mv_grid_id": heat_per_mv.index, |
||
719 | "scenario": scenario, |
||
720 | } |
||
721 | ) |
||
722 | |||
723 | if append_df.size > 0: |
||
724 | append_df["carrier"] = tech.index[0] |
||
725 | heat_per_mv.loc[ |
||
726 | append_df.mv_grid_id, "remaining_demand" |
||
727 | ] -= append_df.set_index("mv_grid_id").capacity.mul( |
||
728 | tech.estimated_flh.values[0] |
||
729 | ) |
||
730 | |||
731 | heat_per_mv = heat_per_mv[heat_per_mv.remaining_demand >= 0] |
||
732 | |||
733 | technologies = technologies.drop(tech.index) |
||
734 | |||
735 | return heat_per_mv, technologies, append_df |
||
736 | |||
737 | |||
738 | def cascade_heat_supply_indiv(scenario, distribution_level, plotting=True): |
||
739 | """Assigns supply strategy for individual heating in four steps. |
||
740 | 1. all small scale CHP are connected. |
||
741 | 2. If the supply can not meet the heat demand, solar thermal collectors |
||
742 | are attached. This is not implemented yet, since individual |
||
743 | solar thermal plants are not considered in eGon2035 scenario. |
||
744 | 3. If this is not suitable, the mv grid is also supplied by heat pumps. |
||
745 | 4. The last option are individual gas boilers. |
||
746 | |||
747 | Parameters |
||
748 | ---------- |
||
749 | scenario : str |
||
750 | Name of scenario |
||
751 | plotting : bool, optional |
||
752 | Choose if individual heating supply is plotted. The default is True. |
||
753 | |||
754 | Returns |
||
755 | ------- |
||
756 | resulting_capacities : pandas.DataFrame |
||
757 | List of plants per mv grid |
||
758 | |||
759 | """ |
||
760 | |||
761 | sources = config.datasets()["heat_supply"]["sources"] |
||
762 | |||
763 | # Select residential heat demand per mv grid district and federal state |
||
764 | heat_per_mv = db.select_geodataframe( |
||
765 | f""" |
||
766 | SELECT d.bus_id as bus_id, SUM(demand) as demand, |
||
767 | c.vg250_lan as state, d.geom |
||
768 | FROM {sources['heat_demand']['schema']}. |
||
769 | {sources['heat_demand']['table']} a |
||
770 | JOIN {sources['map_zensus_grid']['schema']}. |
||
771 | {sources['map_zensus_grid']['table']} b |
||
772 | ON a.zensus_population_id = b.zensus_population_id |
||
773 | JOIN {sources['map_vg250_grid']['schema']}. |
||
774 | {sources['map_vg250_grid']['table']} c |
||
775 | ON b.bus_id = c.bus_id |
||
776 | JOIN {sources['mv_grids']['schema']}. |
||
777 | {sources['mv_grids']['table']} d |
||
778 | ON d.bus_id = c.bus_id |
||
779 | WHERE scenario = '{scenario}' |
||
780 | AND a.zensus_population_id NOT IN ( |
||
781 | SELECT zensus_population_id |
||
782 | FROM {sources['map_dh']['schema']}.{sources['map_dh']['table']} |
||
783 | WHERE scenario = '{scenario}') |
||
784 | GROUP BY d.bus_id, vg250_lan, geom |
||
785 | """, |
||
786 | index_col="bus_id", |
||
787 | ) |
||
788 | |||
789 | # Store geometry of mv grid |
||
790 | geom_mv = heat_per_mv.geom.centroid.copy() |
||
791 | |||
792 | # Initalize Dataframe for results |
||
793 | resulting_capacities = pd.DataFrame( |
||
794 | columns=["mv_grid_id", "carrier", "capacity"] |
||
795 | ) |
||
796 | |||
797 | # Set technology data according to |
||
798 | # http://www.wbzu.de/seminare/infopool/infopool-bhkw |
||
799 | if scenario == "eGon2035": |
||
800 | technologies = pd.DataFrame( |
||
801 | index=["heat_pump", "gas_boiler"], |
||
802 | columns=["estimated_flh", "priority"], |
||
803 | data={"estimated_flh": [4000, 8000], "priority": [2, 1]}, |
||
804 | ) |
||
805 | elif scenario == "eGon100RE": |
||
806 | technologies = pd.DataFrame( |
||
807 | index=[ |
||
808 | "heat_pump", |
||
809 | "resistive_heater", |
||
810 | "solar_thermal", |
||
811 | "gas_boiler", |
||
812 | "oil_boiler", |
||
813 | ], |
||
814 | columns=["estimated_flh", "priority"], |
||
815 | data={ |
||
816 | "estimated_flh": [4000, 2000, 2000, 8000, 8000], |
||
817 | "priority": [5, 4, 3, 2, 1], |
||
818 | }, |
||
819 | ) |
||
820 | elif "status" in scenario: |
||
821 | technologies = pd.DataFrame( |
||
822 | index=["heat_pump"], |
||
823 | columns=["estimated_flh", "priority"], |
||
824 | data={"estimated_flh": [4000], "priority": [1]}, |
||
825 | ) |
||
826 | else: |
||
827 | raise ValueError(f"{scenario=} is not valid.") |
||
828 | |||
829 | # In the beginning, the remaining demand equals demand |
||
830 | heat_per_mv["remaining_demand"] = heat_per_mv["demand"] |
||
831 | |||
832 | # Connect new technologies, if there is still heat demand left |
||
833 | while (len(technologies) > 0) and (len(heat_per_mv) > 0): |
||
834 | # Attach new supply technology |
||
835 | heat_per_mv, technologies, append_df = cascade_per_technology( |
||
836 | heat_per_mv, technologies, scenario, distribution_level |
||
837 | ) |
||
838 | # Collect resulting capacities |
||
839 | resulting_capacities = pd.concat( |
||
840 | [resulting_capacities, append_df], ignore_index=True |
||
841 | ) |
||
842 | |||
843 | if plotting: |
||
844 | plot_heat_supply(resulting_capacities) |
||
845 | |||
846 | return gpd.GeoDataFrame( |
||
847 | resulting_capacities, |
||
848 | geometry=geom_mv[resulting_capacities.mv_grid_id].values, |
||
849 | ) |
||
850 | |||
851 | |||
852 | View Code Duplication | def get_peta_demand(mvgd, scenario): |
|
853 | """ |
||
854 | Retrieve annual peta heat demand for residential buildings for either |
||
855 | eGon2035 or eGon100RE scenario. |
||
856 | |||
857 | Parameters |
||
858 | ---------- |
||
859 | mvgd : int |
||
860 | MV grid ID. |
||
861 | scenario : str |
||
862 | Possible options are eGon2035 or eGon100RE |
||
863 | |||
864 | Returns |
||
865 | ------- |
||
866 | df_peta_demand : pd.DataFrame |
||
867 | Annual residential heat demand per building and scenario. Columns of |
||
868 | the dataframe are zensus_population_id and demand. |
||
869 | |||
870 | """ |
||
871 | |||
872 | with db.session_scope() as session: |
||
873 | query = ( |
||
874 | session.query( |
||
875 | MapZensusGridDistricts.zensus_population_id, |
||
876 | EgonPetaHeat.demand, |
||
877 | ) |
||
878 | .filter(MapZensusGridDistricts.bus_id == mvgd) |
||
879 | .filter( |
||
880 | MapZensusGridDistricts.zensus_population_id |
||
881 | == EgonPetaHeat.zensus_population_id |
||
882 | ) |
||
883 | .filter( |
||
884 | EgonPetaHeat.sector == "residential", |
||
885 | EgonPetaHeat.scenario == scenario, |
||
886 | ) |
||
887 | ) |
||
888 | |||
889 | df_peta_demand = pd.read_sql( |
||
890 | query.statement, query.session.bind, index_col=None |
||
891 | ) |
||
892 | |||
893 | return df_peta_demand |
||
894 | |||
895 | |||
896 | def get_residential_heat_profile_ids(mvgd): |
||
897 | """ |
||
898 | Retrieve 365 daily heat profiles ids per residential building and selected |
||
899 | mvgd. |
||
900 | |||
901 | Parameters |
||
902 | ---------- |
||
903 | mvgd : int |
||
904 | ID of MVGD |
||
905 | |||
906 | Returns |
||
907 | ------- |
||
908 | df_profiles_ids : pd.DataFrame |
||
909 | Residential daily heat profile ID's per building. Columns of the |
||
910 | dataframe are zensus_population_id, building_id, |
||
911 | selected_idp_profiles, buildings and day_of_year. |
||
912 | |||
913 | """ |
||
914 | with db.session_scope() as session: |
||
915 | query = ( |
||
916 | session.query( |
||
917 | MapZensusGridDistricts.zensus_population_id, |
||
918 | EgonHeatTimeseries.building_id, |
||
919 | EgonHeatTimeseries.selected_idp_profiles, |
||
920 | ) |
||
921 | .filter(MapZensusGridDistricts.bus_id == mvgd) |
||
922 | .filter( |
||
923 | MapZensusGridDistricts.zensus_population_id |
||
924 | == EgonHeatTimeseries.zensus_population_id |
||
925 | ) |
||
926 | ) |
||
927 | |||
928 | df_profiles_ids = pd.read_sql( |
||
929 | query.statement, query.session.bind, index_col=None |
||
930 | ) |
||
931 | # Add building count per cell |
||
932 | df_profiles_ids = pd.merge( |
||
933 | left=df_profiles_ids, |
||
934 | right=df_profiles_ids.groupby("zensus_population_id")["building_id"] |
||
935 | .count() |
||
936 | .rename("buildings"), |
||
937 | left_on="zensus_population_id", |
||
938 | right_index=True, |
||
939 | ) |
||
940 | |||
941 | # unnest array of ids per building |
||
942 | df_profiles_ids = df_profiles_ids.explode("selected_idp_profiles") |
||
943 | # add day of year column by order of list |
||
944 | df_profiles_ids["day_of_year"] = ( |
||
945 | df_profiles_ids.groupby("building_id").cumcount() + 1 |
||
946 | ) |
||
947 | return df_profiles_ids |
||
948 | |||
949 | |||
950 | def get_daily_profiles(profile_ids): |
||
951 | """ |
||
952 | Parameters |
||
953 | ---------- |
||
954 | profile_ids : list(int) |
||
955 | daily heat profile ID's |
||
956 | |||
957 | Returns |
||
958 | ------- |
||
959 | df_profiles : pd.DataFrame |
||
960 | Residential daily heat profiles. Columns of the dataframe are idp, |
||
961 | house, temperature_class and hour. |
||
962 | |||
963 | """ |
||
964 | |||
965 | saio.register_schema("demand", db.engine()) |
||
966 | from saio.demand import egon_heat_idp_pool |
||
967 | |||
968 | with db.session_scope() as session: |
||
969 | query = session.query(egon_heat_idp_pool).filter( |
||
970 | egon_heat_idp_pool.index.in_(profile_ids) |
||
971 | ) |
||
972 | |||
973 | df_profiles = pd.read_sql( |
||
974 | query.statement, query.session.bind, index_col="index" |
||
975 | ) |
||
976 | |||
977 | # unnest array of profile values per id |
||
978 | df_profiles = df_profiles.explode("idp") |
||
979 | # Add column for hour of day |
||
980 | df_profiles["hour"] = df_profiles.groupby(axis=0, level=0).cumcount() + 1 |
||
981 | |||
982 | return df_profiles |
||
983 | |||
984 | |||
985 | def get_daily_demand_share(mvgd): |
||
986 | """per census cell |
||
987 | Parameters |
||
988 | ---------- |
||
989 | mvgd : int |
||
990 | MVGD id |
||
991 | |||
992 | Returns |
||
993 | ------- |
||
994 | df_daily_demand_share : pd.DataFrame |
||
995 | Daily annual demand share per cencus cell. Columns of the dataframe |
||
996 | are zensus_population_id, day_of_year and daily_demand_share. |
||
997 | |||
998 | """ |
||
999 | |||
1000 | with db.session_scope() as session: |
||
1001 | query = session.query( |
||
1002 | MapZensusGridDistricts.zensus_population_id, |
||
1003 | EgonDailyHeatDemandPerClimateZone.day_of_year, |
||
1004 | EgonDailyHeatDemandPerClimateZone.daily_demand_share, |
||
1005 | ).filter( |
||
1006 | EgonMapZensusClimateZones.climate_zone |
||
1007 | == EgonDailyHeatDemandPerClimateZone.climate_zone, |
||
1008 | MapZensusGridDistricts.zensus_population_id |
||
1009 | == EgonMapZensusClimateZones.zensus_population_id, |
||
1010 | MapZensusGridDistricts.bus_id == mvgd, |
||
1011 | ) |
||
1012 | |||
1013 | df_daily_demand_share = pd.read_sql( |
||
1014 | query.statement, query.session.bind, index_col=None |
||
1015 | ) |
||
1016 | return df_daily_demand_share |
||
1017 | |||
1018 | |||
1019 | def calc_residential_heat_profiles_per_mvgd(mvgd, scenario): |
||
1020 | """ |
||
1021 | Gets residential heat profiles per building in MV grid for either eGon2035 |
||
1022 | or eGon100RE scenario. |
||
1023 | |||
1024 | Parameters |
||
1025 | ---------- |
||
1026 | mvgd : int |
||
1027 | MV grid ID. |
||
1028 | scenario : str |
||
1029 | Possible options are eGon2035 or eGon100RE. |
||
1030 | |||
1031 | Returns |
||
1032 | -------- |
||
1033 | pd.DataFrame |
||
1034 | Heat demand profiles of buildings. Columns are: |
||
1035 | * zensus_population_id : int |
||
1036 | Zensus cell ID building is in. |
||
1037 | * building_id : int |
||
1038 | ID of building. |
||
1039 | * day_of_year : int |
||
1040 | Day of the year (1 - 365). |
||
1041 | * hour : int |
||
1042 | Hour of the day (1 - 24). |
||
1043 | * demand_ts : float |
||
1044 | Building's residential heat demand in MW, for specified hour |
||
1045 | of the year (specified through columns `day_of_year` and |
||
1046 | `hour`). |
||
1047 | """ |
||
1048 | |||
1049 | columns = [ |
||
1050 | "zensus_population_id", |
||
1051 | "building_id", |
||
1052 | "day_of_year", |
||
1053 | "hour", |
||
1054 | "demand_ts", |
||
1055 | ] |
||
1056 | |||
1057 | df_peta_demand = get_peta_demand(mvgd, scenario) |
||
1058 | df_peta_demand = reduce_mem_usage(df_peta_demand) |
||
1059 | |||
1060 | # TODO maybe return empty dataframe |
||
1061 | if df_peta_demand.empty: |
||
1062 | logger.info(f"No demand for MVGD: {mvgd}") |
||
1063 | return pd.DataFrame(columns=columns) |
||
1064 | |||
1065 | df_profiles_ids = get_residential_heat_profile_ids(mvgd) |
||
1066 | |||
1067 | if df_profiles_ids.empty: |
||
1068 | logger.info(f"No profiles for MVGD: {mvgd}") |
||
1069 | return pd.DataFrame(columns=columns) |
||
1070 | |||
1071 | df_profiles = get_daily_profiles( |
||
1072 | df_profiles_ids["selected_idp_profiles"].unique() |
||
1073 | ) |
||
1074 | |||
1075 | df_daily_demand_share = get_daily_demand_share(mvgd) |
||
1076 | |||
1077 | # Merge profile ids to peta demand by zensus_population_id |
||
1078 | df_profile_merge = pd.merge( |
||
1079 | left=df_peta_demand, right=df_profiles_ids, on="zensus_population_id" |
||
1080 | ) |
||
1081 | |||
1082 | df_profile_merge.demand = df_profile_merge.demand.div( |
||
1083 | df_profile_merge.buildings |
||
1084 | ) |
||
1085 | df_profile_merge.drop("buildings", axis="columns", inplace=True) |
||
1086 | |||
1087 | # Merge daily demand to daily profile ids by zensus_population_id and day |
||
1088 | df_profile_merge = pd.merge( |
||
1089 | left=df_profile_merge, |
||
1090 | right=df_daily_demand_share, |
||
1091 | on=["zensus_population_id", "day_of_year"], |
||
1092 | ) |
||
1093 | df_profile_merge.demand = df_profile_merge.demand.mul( |
||
1094 | df_profile_merge.daily_demand_share |
||
1095 | ) |
||
1096 | df_profile_merge.drop("daily_demand_share", axis="columns", inplace=True) |
||
1097 | df_profile_merge = reduce_mem_usage(df_profile_merge) |
||
1098 | |||
1099 | # Merge daily profiles by profile id |
||
1100 | df_profile_merge = pd.merge( |
||
1101 | left=df_profile_merge, |
||
1102 | right=df_profiles[["idp", "hour"]], |
||
1103 | left_on="selected_idp_profiles", |
||
1104 | right_index=True, |
||
1105 | ) |
||
1106 | df_profile_merge = reduce_mem_usage(df_profile_merge) |
||
1107 | |||
1108 | df_profile_merge.demand = df_profile_merge.demand.mul( |
||
1109 | df_profile_merge.idp.astype(float) |
||
1110 | ) |
||
1111 | df_profile_merge.drop("idp", axis="columns", inplace=True) |
||
1112 | |||
1113 | df_profile_merge.rename( |
||
1114 | {"demand": "demand_ts"}, axis="columns", inplace=True |
||
1115 | ) |
||
1116 | |||
1117 | df_profile_merge = reduce_mem_usage(df_profile_merge) |
||
1118 | |||
1119 | return df_profile_merge.loc[:, columns] |
||
1120 | |||
1121 | |||
1122 | View Code Duplication | def plot_heat_supply(resulting_capacities): |
|
1123 | from matplotlib import pyplot as plt |
||
1124 | |||
1125 | mv_grids = db.select_geodataframe( |
||
1126 | """ |
||
1127 | SELECT * FROM grid.egon_mv_grid_district |
||
1128 | """, |
||
1129 | index_col="bus_id", |
||
1130 | ) |
||
1131 | |||
1132 | for c in ["CHP", "heat_pump"]: |
||
1133 | mv_grids[c] = ( |
||
1134 | resulting_capacities[resulting_capacities.carrier == c] |
||
1135 | .set_index("mv_grid_id") |
||
1136 | .capacity |
||
1137 | ) |
||
1138 | |||
1139 | fig, ax = plt.subplots(1, 1) |
||
1140 | mv_grids.boundary.plot(linewidth=0.2, ax=ax, color="black") |
||
1141 | mv_grids.plot( |
||
1142 | ax=ax, |
||
1143 | column=c, |
||
1144 | cmap="magma_r", |
||
1145 | legend=True, |
||
1146 | legend_kwds={ |
||
1147 | "label": f"Installed {c} in MW", |
||
1148 | "orientation": "vertical", |
||
1149 | }, |
||
1150 | ) |
||
1151 | plt.savefig(f"plots/individual_heat_supply_{c}.png", dpi=300) |
||
1152 | |||
1153 | |||
1154 | def get_zensus_cells_with_decentral_heat_demand_in_mv_grid( |
||
1155 | scenario, mv_grid_id |
||
1156 | ): |
||
1157 | """ |
||
1158 | Returns zensus cell IDs with decentral heating systems in given MV grid. |
||
1159 | |||
1160 | As cells with district heating differ between scenarios, this is also |
||
1161 | depending on the scenario. |
||
1162 | |||
1163 | Parameters |
||
1164 | ----------- |
||
1165 | scenario : str |
||
1166 | Name of scenario. Can be either "eGon2035" or "eGon100RE". |
||
1167 | mv_grid_id : int |
||
1168 | ID of MV grid. |
||
1169 | |||
1170 | Returns |
||
1171 | -------- |
||
1172 | pd.Index(int) |
||
1173 | Zensus cell IDs (as int) of buildings with decentral heating systems in |
||
1174 | given MV grid. Type is pandas Index to avoid errors later on when it is |
||
1175 | used in a query. |
||
1176 | |||
1177 | """ |
||
1178 | |||
1179 | # get zensus cells in grid |
||
1180 | zensus_population_ids = db.select_dataframe( |
||
1181 | f""" |
||
1182 | SELECT zensus_population_id |
||
1183 | FROM boundaries.egon_map_zensus_grid_districts |
||
1184 | WHERE bus_id = {mv_grid_id} |
||
1185 | """, |
||
1186 | index_col=None, |
||
1187 | ).zensus_population_id.values |
||
1188 | |||
1189 | # maybe use adapter |
||
1190 | # convert to pd.Index (otherwise type is np.int64, which will for some |
||
1191 | # reason throw an error when used in a query) |
||
1192 | zensus_population_ids = pd.Index(zensus_population_ids) |
||
1193 | |||
1194 | # get zensus cells with district heating |
||
1195 | with db.session_scope() as session: |
||
1196 | query = session.query( |
||
1197 | MapZensusDistrictHeatingAreas.zensus_population_id, |
||
1198 | ).filter( |
||
1199 | MapZensusDistrictHeatingAreas.scenario == scenario, |
||
1200 | MapZensusDistrictHeatingAreas.zensus_population_id.in_( |
||
1201 | zensus_population_ids |
||
1202 | ), |
||
1203 | ) |
||
1204 | |||
1205 | cells_with_dh = pd.read_sql( |
||
1206 | query.statement, query.session.bind, index_col=None |
||
1207 | ).zensus_population_id.values |
||
1208 | |||
1209 | # remove zensus cells with district heating |
||
1210 | zensus_population_ids = zensus_population_ids.drop( |
||
1211 | cells_with_dh, errors="ignore" |
||
1212 | ) |
||
1213 | return pd.Index(zensus_population_ids) |
||
1214 | |||
1215 | |||
1216 | def get_residential_buildings_with_decentral_heat_demand_in_mv_grid( |
||
1217 | scenario, mv_grid_id |
||
1218 | ): |
||
1219 | """ |
||
1220 | Returns building IDs of buildings with decentral residential heat demand in |
||
1221 | given MV grid. |
||
1222 | |||
1223 | As cells with district heating differ between scenarios, this is also |
||
1224 | depending on the scenario. |
||
1225 | |||
1226 | Parameters |
||
1227 | ----------- |
||
1228 | scenario : str |
||
1229 | Name of scenario. Can be either "eGon2035" or "eGon100RE". |
||
1230 | mv_grid_id : int |
||
1231 | ID of MV grid. |
||
1232 | |||
1233 | Returns |
||
1234 | -------- |
||
1235 | pd.Index(int) |
||
1236 | Building IDs (as int) of buildings with decentral heating system in |
||
1237 | given MV grid. Type is pandas Index to avoid errors later on when it is |
||
1238 | used in a query. |
||
1239 | |||
1240 | """ |
||
1241 | # get zensus cells with decentral heating |
||
1242 | zensus_population_ids = ( |
||
1243 | get_zensus_cells_with_decentral_heat_demand_in_mv_grid( |
||
1244 | scenario, mv_grid_id |
||
1245 | ) |
||
1246 | ) |
||
1247 | |||
1248 | # get buildings with decentral heat demand |
||
1249 | saio.register_schema("demand", engine) |
||
1250 | from saio.demand import egon_heat_timeseries_selected_profiles |
||
1251 | |||
1252 | with db.session_scope() as session: |
||
1253 | query = session.query( |
||
1254 | egon_heat_timeseries_selected_profiles.building_id, |
||
1255 | ).filter( |
||
1256 | egon_heat_timeseries_selected_profiles.zensus_population_id.in_( |
||
1257 | zensus_population_ids |
||
1258 | ) |
||
1259 | ) |
||
1260 | |||
1261 | buildings_with_heat_demand = pd.read_sql( |
||
1262 | query.statement, query.session.bind, index_col=None |
||
1263 | ).building_id.values |
||
1264 | |||
1265 | return pd.Index(buildings_with_heat_demand) |
||
1266 | |||
1267 | |||
1268 | def get_cts_buildings_with_decentral_heat_demand_in_mv_grid( |
||
1269 | scenario, mv_grid_id |
||
1270 | ): |
||
1271 | """ |
||
1272 | Returns building IDs of buildings with decentral CTS heat demand in |
||
1273 | given MV grid. |
||
1274 | |||
1275 | As cells with district heating differ between scenarios, this is also |
||
1276 | depending on the scenario. |
||
1277 | |||
1278 | Parameters |
||
1279 | ----------- |
||
1280 | scenario : str |
||
1281 | Name of scenario. Can be either "eGon2035" or "eGon100RE". |
||
1282 | mv_grid_id : int |
||
1283 | ID of MV grid. |
||
1284 | |||
1285 | Returns |
||
1286 | -------- |
||
1287 | pd.Index(int) |
||
1288 | Building IDs (as int) of buildings with decentral heating system in |
||
1289 | given MV grid. Type is pandas Index to avoid errors later on when it is |
||
1290 | used in a query. |
||
1291 | |||
1292 | """ |
||
1293 | |||
1294 | # get zensus cells with decentral heating |
||
1295 | zensus_population_ids = ( |
||
1296 | get_zensus_cells_with_decentral_heat_demand_in_mv_grid( |
||
1297 | scenario, mv_grid_id |
||
1298 | ) |
||
1299 | ) |
||
1300 | |||
1301 | # get buildings with decentral heat demand |
||
1302 | with db.session_scope() as session: |
||
1303 | query = session.query(EgonMapZensusMvgdBuildings.building_id).filter( |
||
1304 | EgonMapZensusMvgdBuildings.sector == "cts", |
||
1305 | EgonMapZensusMvgdBuildings.zensus_population_id.in_( |
||
1306 | zensus_population_ids |
||
1307 | ), |
||
1308 | ) |
||
1309 | |||
1310 | buildings_with_heat_demand = pd.read_sql( |
||
1311 | query.statement, query.session.bind, index_col=None |
||
1312 | ).building_id.values |
||
1313 | |||
1314 | return pd.Index(buildings_with_heat_demand) |
||
1315 | |||
1316 | |||
1317 | def get_buildings_with_decentral_heat_demand_in_mv_grid(mvgd, scenario): |
||
1318 | """ |
||
1319 | Returns building IDs of buildings with decentral heat demand in |
||
1320 | given MV grid. |
||
1321 | |||
1322 | As cells with district heating differ between scenarios, this is also |
||
1323 | depending on the scenario. CTS and residential have to be retrieved |
||
1324 | seperatly as some residential buildings only have electricity but no |
||
1325 | heat demand. This does not occure in CTS. |
||
1326 | |||
1327 | Parameters |
||
1328 | ----------- |
||
1329 | mvgd : int |
||
1330 | ID of MV grid. |
||
1331 | scenario : str |
||
1332 | Name of scenario. Can be either "eGon2035" or "eGon100RE". |
||
1333 | |||
1334 | Returns |
||
1335 | -------- |
||
1336 | pd.Index(int) |
||
1337 | Building IDs (as int) of buildings with decentral heating system in |
||
1338 | given MV grid. Type is pandas Index to avoid errors later on when it is |
||
1339 | used in a query. |
||
1340 | |||
1341 | """ |
||
1342 | # get residential buildings with decentral heating systems |
||
1343 | buildings_decentral_heating_res = ( |
||
1344 | get_residential_buildings_with_decentral_heat_demand_in_mv_grid( |
||
1345 | scenario, mvgd |
||
1346 | ) |
||
1347 | ) |
||
1348 | |||
1349 | # get CTS buildings with decentral heating systems |
||
1350 | buildings_decentral_heating_cts = ( |
||
1351 | get_cts_buildings_with_decentral_heat_demand_in_mv_grid(scenario, mvgd) |
||
1352 | ) |
||
1353 | |||
1354 | # merge residential and CTS buildings |
||
1355 | buildings_decentral_heating = buildings_decentral_heating_res.union( |
||
1356 | buildings_decentral_heating_cts |
||
1357 | ).unique() |
||
1358 | |||
1359 | return buildings_decentral_heating |
||
1360 | |||
1361 | |||
1362 | def get_total_heat_pump_capacity_of_mv_grid(scenario, mv_grid_id): |
||
1363 | """ |
||
1364 | Returns total heat pump capacity per grid that was previously defined |
||
1365 | (by NEP or pypsa-eur-sec). |
||
1366 | |||
1367 | Parameters |
||
1368 | ----------- |
||
1369 | scenario : str |
||
1370 | Name of scenario. Can be either "eGon2035" or "eGon100RE". |
||
1371 | mv_grid_id : int |
||
1372 | ID of MV grid. |
||
1373 | |||
1374 | Returns |
||
1375 | -------- |
||
1376 | float |
||
1377 | Total heat pump capacity in MW in given MV grid. |
||
1378 | |||
1379 | """ |
||
1380 | from egon.data.datasets.heat_supply import EgonIndividualHeatingSupply |
||
1381 | |||
1382 | with db.session_scope() as session: |
||
1383 | query = ( |
||
1384 | session.query( |
||
1385 | EgonIndividualHeatingSupply.mv_grid_id, |
||
1386 | EgonIndividualHeatingSupply.capacity, |
||
1387 | ) |
||
1388 | .filter(EgonIndividualHeatingSupply.scenario == scenario) |
||
1389 | .filter(EgonIndividualHeatingSupply.carrier == "heat_pump") |
||
1390 | .filter(EgonIndividualHeatingSupply.mv_grid_id == mv_grid_id) |
||
1391 | ) |
||
1392 | |||
1393 | hp_cap_mv_grid = pd.read_sql( |
||
1394 | query.statement, query.session.bind, index_col="mv_grid_id" |
||
1395 | ) |
||
1396 | if hp_cap_mv_grid.empty: |
||
1397 | return 0.0 |
||
1398 | else: |
||
1399 | return hp_cap_mv_grid.capacity.values[0] |
||
1400 | |||
1401 | |||
1402 | def get_heat_peak_demand_per_building(scenario, building_ids): |
||
1403 | """""" |
||
1404 | |||
1405 | with db.session_scope() as session: |
||
1406 | query = ( |
||
1407 | session.query( |
||
1408 | BuildingHeatPeakLoads.building_id, |
||
1409 | BuildingHeatPeakLoads.peak_load_in_w, |
||
1410 | ) |
||
1411 | .filter(BuildingHeatPeakLoads.scenario == scenario) |
||
1412 | .filter(BuildingHeatPeakLoads.building_id.in_(building_ids)) |
||
1413 | ) |
||
1414 | |||
1415 | df_heat_peak_demand = pd.read_sql( |
||
1416 | query.statement, query.session.bind, index_col=None |
||
1417 | ) |
||
1418 | |||
1419 | # TODO remove check |
||
1420 | if df_heat_peak_demand.duplicated("building_id").any(): |
||
1421 | raise ValueError("Duplicate building_id") |
||
1422 | |||
1423 | # convert to series and from W to MW |
||
1424 | df_heat_peak_demand = ( |
||
1425 | df_heat_peak_demand.set_index("building_id").loc[:, "peak_load_in_w"] |
||
1426 | * 1e6 |
||
1427 | ) |
||
1428 | return df_heat_peak_demand |
||
1429 | |||
1430 | |||
1431 | def determine_minimum_hp_capacity_per_building( |
||
1432 | peak_heat_demand, flexibility_factor=24 / 18, cop=1.7 |
||
1433 | ): |
||
1434 | """ |
||
1435 | Determines minimum required heat pump capacity. |
||
1436 | |||
1437 | Parameters |
||
1438 | ---------- |
||
1439 | peak_heat_demand : pd.Series |
||
1440 | Series with peak heat demand per building in MW. Index contains the |
||
1441 | building ID. |
||
1442 | flexibility_factor : float |
||
1443 | Factor to overdimension the heat pump to allow for some flexible |
||
1444 | dispatch in times of high heat demand. Per default, a factor of 24/18 |
||
1445 | is used, to take into account |
||
1446 | |||
1447 | Returns |
||
1448 | ------- |
||
1449 | pd.Series |
||
1450 | Pandas series with minimum required heat pump capacity per building in |
||
1451 | MW. |
||
1452 | |||
1453 | """ |
||
1454 | return peak_heat_demand * flexibility_factor / cop |
||
1455 | |||
1456 | |||
1457 | def determine_buildings_with_hp_in_mv_grid( |
||
1458 | hp_cap_mv_grid, min_hp_cap_per_building |
||
1459 | ): |
||
1460 | """ |
||
1461 | Distributes given total heat pump capacity to buildings based on their peak |
||
1462 | heat demand. |
||
1463 | |||
1464 | Parameters |
||
1465 | ----------- |
||
1466 | hp_cap_mv_grid : float |
||
1467 | Total heat pump capacity in MW in given MV grid. |
||
1468 | min_hp_cap_per_building : pd.Series |
||
1469 | Pandas series with minimum required heat pump capacity per building |
||
1470 | in MW. |
||
1471 | |||
1472 | Returns |
||
1473 | ------- |
||
1474 | pd.Index(int) |
||
1475 | Building IDs (as int) of buildings to get heat demand time series for. |
||
1476 | |||
1477 | """ |
||
1478 | building_ids = min_hp_cap_per_building.index |
||
1479 | |||
1480 | # get buildings with PV to give them a higher priority when selecting |
||
1481 | # buildings a heat pump will be allocated to |
||
1482 | saio.register_schema("supply", engine) |
||
1483 | from saio.supply import egon_power_plants_pv_roof_building |
||
1484 | |||
1485 | with db.session_scope() as session: |
||
1486 | query = session.query( |
||
1487 | egon_power_plants_pv_roof_building.building_id |
||
1488 | ).filter( |
||
1489 | egon_power_plants_pv_roof_building.building_id.in_(building_ids), |
||
1490 | egon_power_plants_pv_roof_building.scenario == "eGon2035", |
||
1491 | ) |
||
1492 | |||
1493 | buildings_with_pv = pd.read_sql( |
||
1494 | query.statement, query.session.bind, index_col=None |
||
1495 | ).building_id.values |
||
1496 | # set different weights for buildings with PV and without PV |
||
1497 | weight_with_pv = 1.5 |
||
1498 | weight_without_pv = 1.0 |
||
1499 | weights = pd.concat( |
||
1500 | [ |
||
1501 | pd.DataFrame( |
||
1502 | {"weight": weight_without_pv}, |
||
1503 | index=building_ids.drop(buildings_with_pv, errors="ignore"), |
||
1504 | ), |
||
1505 | pd.DataFrame({"weight": weight_with_pv}, index=buildings_with_pv), |
||
1506 | ] |
||
1507 | ) |
||
1508 | # normalise weights (probability needs to add up to 1) |
||
1509 | weights.weight = weights.weight / weights.weight.sum() |
||
1510 | |||
1511 | # get random order at which buildings are chosen |
||
1512 | np.random.seed(db.credentials()["--random-seed"]) |
||
1513 | buildings_with_hp_order = np.random.choice( |
||
1514 | weights.index, |
||
1515 | size=len(weights), |
||
1516 | replace=False, |
||
1517 | p=weights.weight.values, |
||
1518 | ) |
||
1519 | |||
1520 | # select buildings until HP capacity in MV grid is reached (some rest |
||
1521 | # capacity will remain) |
||
1522 | hp_cumsum = min_hp_cap_per_building.loc[buildings_with_hp_order].cumsum() |
||
1523 | buildings_with_hp = hp_cumsum[hp_cumsum <= hp_cap_mv_grid].index |
||
1524 | |||
1525 | # choose random heat pumps until remaining heat pumps are larger than |
||
1526 | # remaining heat pump capacity |
||
1527 | remaining_hp_cap = ( |
||
1528 | hp_cap_mv_grid - min_hp_cap_per_building.loc[buildings_with_hp].sum() |
||
1529 | ) |
||
1530 | min_cap_buildings_wo_hp = min_hp_cap_per_building.loc[ |
||
1531 | building_ids.drop(buildings_with_hp) |
||
1532 | ] |
||
1533 | possible_buildings = min_cap_buildings_wo_hp[ |
||
1534 | min_cap_buildings_wo_hp <= remaining_hp_cap |
||
1535 | ].index |
||
1536 | while len(possible_buildings) > 0: |
||
1537 | random.seed(db.credentials()["--random-seed"]) |
||
1538 | new_hp_building = random.choice(possible_buildings) |
||
1539 | # add new building to building with HP |
||
1540 | buildings_with_hp = buildings_with_hp.union( |
||
1541 | pd.Index([new_hp_building]) |
||
1542 | ) |
||
1543 | # determine if there are still possible buildings |
||
1544 | remaining_hp_cap = ( |
||
1545 | hp_cap_mv_grid |
||
1546 | - min_hp_cap_per_building.loc[buildings_with_hp].sum() |
||
1547 | ) |
||
1548 | min_cap_buildings_wo_hp = min_hp_cap_per_building.loc[ |
||
1549 | building_ids.drop(buildings_with_hp) |
||
1550 | ] |
||
1551 | possible_buildings = min_cap_buildings_wo_hp[ |
||
1552 | min_cap_buildings_wo_hp <= remaining_hp_cap |
||
1553 | ].index |
||
1554 | |||
1555 | return buildings_with_hp |
||
1556 | |||
1557 | |||
1558 | def desaggregate_hp_capacity(min_hp_cap_per_building, hp_cap_mv_grid): |
||
1559 | """ |
||
1560 | Desaggregates the required total heat pump capacity to buildings. |
||
1561 | |||
1562 | All buildings are previously assigned a minimum required heat pump |
||
1563 | capacity. If the total heat pump capacity exceeds this, larger heat pumps |
||
1564 | are assigned. |
||
1565 | |||
1566 | Parameters |
||
1567 | ------------ |
||
1568 | min_hp_cap_per_building : pd.Series |
||
1569 | Pandas series with minimum required heat pump capacity per building |
||
1570 | in MW. |
||
1571 | hp_cap_mv_grid : float |
||
1572 | Total heat pump capacity in MW in given MV grid. |
||
1573 | |||
1574 | Returns |
||
1575 | -------- |
||
1576 | pd.Series |
||
1577 | Pandas series with heat pump capacity per building in MW. |
||
1578 | |||
1579 | """ |
||
1580 | # distribute remaining capacity to all buildings with HP depending on |
||
1581 | # installed HP capacity |
||
1582 | |||
1583 | allocated_cap = min_hp_cap_per_building.sum() |
||
1584 | remaining_cap = hp_cap_mv_grid - allocated_cap |
||
1585 | |||
1586 | fac = remaining_cap / allocated_cap |
||
1587 | hp_cap_per_building = ( |
||
1588 | min_hp_cap_per_building * fac + min_hp_cap_per_building |
||
1589 | ) |
||
1590 | hp_cap_per_building.index.name = "building_id" |
||
1591 | |||
1592 | return hp_cap_per_building |
||
1593 | |||
1594 | |||
1595 | def determine_min_hp_cap_buildings_pypsa_eur_sec( |
||
1596 | peak_heat_demand, building_ids |
||
1597 | ): |
||
1598 | """ |
||
1599 | Determines minimum required HP capacity in MV grid in MW as input for |
||
1600 | pypsa-eur-sec. |
||
1601 | |||
1602 | Parameters |
||
1603 | ---------- |
||
1604 | peak_heat_demand : pd.Series |
||
1605 | Series with peak heat demand per building in MW. Index contains the |
||
1606 | building ID. |
||
1607 | building_ids : pd.Index(int) |
||
1608 | Building IDs (as int) of buildings with decentral heating system in |
||
1609 | given MV grid. |
||
1610 | |||
1611 | Returns |
||
1612 | -------- |
||
1613 | float |
||
1614 | Minimum required HP capacity in MV grid in MW. |
||
1615 | |||
1616 | """ |
||
1617 | if len(building_ids) > 0: |
||
1618 | peak_heat_demand = peak_heat_demand.loc[building_ids] |
||
1619 | # determine minimum required heat pump capacity per building |
||
1620 | min_hp_cap_buildings = determine_minimum_hp_capacity_per_building( |
||
1621 | peak_heat_demand |
||
1622 | ) |
||
1623 | return min_hp_cap_buildings.sum() |
||
1624 | else: |
||
1625 | return 0.0 |
||
1626 | |||
1627 | |||
1628 | def determine_hp_cap_buildings_pvbased_per_mvgd( |
||
1629 | scenario, mv_grid_id, peak_heat_demand, building_ids |
||
1630 | ): |
||
1631 | """ |
||
1632 | Determines which buildings in the MV grid will have a HP (buildings with PV |
||
1633 | rooftop are more likely to be assigned) in the eGon2035 scenario, as well |
||
1634 | as their respective HP capacity in MW. |
||
1635 | |||
1636 | Parameters |
||
1637 | ----------- |
||
1638 | mv_grid_id : int |
||
1639 | ID of MV grid. |
||
1640 | peak_heat_demand : pd.Series |
||
1641 | Series with peak heat demand per building in MW. Index contains the |
||
1642 | building ID. |
||
1643 | building_ids : pd.Index(int) |
||
1644 | Building IDs (as int) of buildings with decentral heating system in |
||
1645 | given MV grid. |
||
1646 | |||
1647 | """ |
||
1648 | |||
1649 | hp_cap_grid = get_total_heat_pump_capacity_of_mv_grid(scenario, mv_grid_id) |
||
1650 | |||
1651 | if len(building_ids) > 0 and hp_cap_grid > 0.0: |
||
1652 | peak_heat_demand = peak_heat_demand.loc[building_ids] |
||
1653 | |||
1654 | # determine minimum required heat pump capacity per building |
||
1655 | min_hp_cap_buildings = determine_minimum_hp_capacity_per_building( |
||
1656 | peak_heat_demand |
||
1657 | ) |
||
1658 | |||
1659 | # select buildings that will have a heat pump |
||
1660 | buildings_with_hp = determine_buildings_with_hp_in_mv_grid( |
||
1661 | hp_cap_grid, min_hp_cap_buildings |
||
1662 | ) |
||
1663 | |||
1664 | # distribute total heat pump capacity to all buildings with HP |
||
1665 | hp_cap_per_building = desaggregate_hp_capacity( |
||
1666 | min_hp_cap_buildings.loc[buildings_with_hp], hp_cap_grid |
||
1667 | ) |
||
1668 | |||
1669 | return hp_cap_per_building.rename("hp_capacity") |
||
1670 | |||
1671 | else: |
||
1672 | return pd.Series(dtype="float64").rename("hp_capacity") |
||
1673 | |||
1674 | |||
1675 | def determine_hp_cap_buildings_eGon100RE_per_mvgd(mv_grid_id): |
||
1676 | """ |
||
1677 | Determines HP capacity per building in eGon100RE scenario. |
||
1678 | |||
1679 | In eGon100RE scenario all buildings without district heating get a heat |
||
1680 | pump. |
||
1681 | |||
1682 | Returns |
||
1683 | -------- |
||
1684 | pd.Series |
||
1685 | Pandas series with heat pump capacity per building in MW. |
||
1686 | |||
1687 | """ |
||
1688 | |||
1689 | hp_cap_grid = get_total_heat_pump_capacity_of_mv_grid( |
||
1690 | "eGon100RE", mv_grid_id |
||
1691 | ) |
||
1692 | |||
1693 | if hp_cap_grid > 0.0: |
||
1694 | # get buildings with decentral heating systems |
||
1695 | building_ids = get_buildings_with_decentral_heat_demand_in_mv_grid( |
||
1696 | mv_grid_id, scenario="eGon100RE" |
||
1697 | ) |
||
1698 | |||
1699 | logger.info(f"MVGD={mv_grid_id} | Get peak loads from DB") |
||
1700 | df_peak_heat_demand = get_heat_peak_demand_per_building( |
||
1701 | "eGon100RE", building_ids |
||
1702 | ) |
||
1703 | |||
1704 | logger.info(f"MVGD={mv_grid_id} | Determine HP capacities.") |
||
1705 | # determine minimum required heat pump capacity per building |
||
1706 | min_hp_cap_buildings = determine_minimum_hp_capacity_per_building( |
||
1707 | df_peak_heat_demand, flexibility_factor=24 / 18, cop=1.7 |
||
1708 | ) |
||
1709 | |||
1710 | logger.info(f"MVGD={mv_grid_id} | Desaggregate HP capacities.") |
||
1711 | # distribute total heat pump capacity to all buildings with HP |
||
1712 | hp_cap_per_building = desaggregate_hp_capacity( |
||
1713 | min_hp_cap_buildings, hp_cap_grid |
||
1714 | ) |
||
1715 | |||
1716 | return hp_cap_per_building.rename("hp_capacity") |
||
1717 | else: |
||
1718 | return pd.Series(dtype="float64").rename("hp_capacity") |
||
1719 | |||
1720 | |||
1721 | def determine_hp_cap_buildings_eGon100RE(): |
||
1722 | """ |
||
1723 | Main function to determine HP capacity per building in eGon100RE scenario. |
||
1724 | |||
1725 | """ |
||
1726 | |||
1727 | # ========== Register np datatypes with SQLA ========== |
||
1728 | register_adapter(np.float64, adapt_numpy_float64) |
||
1729 | register_adapter(np.int64, adapt_numpy_int64) |
||
1730 | # ===================================================== |
||
1731 | |||
1732 | with db.session_scope() as session: |
||
1733 | query = ( |
||
1734 | session.query( |
||
1735 | MapZensusGridDistricts.bus_id, |
||
1736 | ) |
||
1737 | .filter( |
||
1738 | MapZensusGridDistricts.zensus_population_id |
||
1739 | == EgonPetaHeat.zensus_population_id |
||
1740 | ) |
||
1741 | .distinct(MapZensusGridDistricts.bus_id) |
||
1742 | ) |
||
1743 | mvgd_ids = pd.read_sql( |
||
1744 | query.statement, query.session.bind, index_col=None |
||
1745 | ) |
||
1746 | mvgd_ids = mvgd_ids.sort_values("bus_id") |
||
1747 | mvgd_ids = mvgd_ids["bus_id"].values |
||
1748 | |||
1749 | df_hp_cap_per_building_100RE_db = pd.DataFrame( |
||
1750 | columns=["building_id", "hp_capacity"] |
||
1751 | ) |
||
1752 | |||
1753 | for mvgd_id in mvgd_ids: |
||
1754 | logger.info(f"MVGD={mvgd_id} | Start") |
||
1755 | |||
1756 | hp_cap_per_building_100RE = ( |
||
1757 | determine_hp_cap_buildings_eGon100RE_per_mvgd(mvgd_id) |
||
1758 | ) |
||
1759 | |||
1760 | if not hp_cap_per_building_100RE.empty: |
||
1761 | df_hp_cap_per_building_100RE_db = pd.concat( |
||
1762 | [ |
||
1763 | df_hp_cap_per_building_100RE_db, |
||
1764 | hp_cap_per_building_100RE.reset_index(), |
||
1765 | ], |
||
1766 | axis=0, |
||
1767 | ) |
||
1768 | |||
1769 | logger.info(f"MVGD={min(mvgd_ids)} : {max(mvgd_ids)} | Write data to db.") |
||
1770 | df_hp_cap_per_building_100RE_db["scenario"] = "eGon100RE" |
||
1771 | |||
1772 | EgonHpCapacityBuildings.__table__.create(bind=engine, checkfirst=True) |
||
1773 | |||
1774 | write_table_to_postgres( |
||
1775 | df_hp_cap_per_building_100RE_db, |
||
1776 | EgonHpCapacityBuildings, |
||
1777 | drop=False, |
||
1778 | ) |
||
1779 | |||
1780 | |||
1781 | def aggregate_residential_and_cts_profiles(mvgd, scenario): |
||
1782 | """ |
||
1783 | Gets residential and CTS heat demand profiles per building and aggregates |
||
1784 | them. |
||
1785 | |||
1786 | Parameters |
||
1787 | ---------- |
||
1788 | mvgd : int |
||
1789 | MV grid ID. |
||
1790 | scenario : str |
||
1791 | Possible options are eGon2035 or eGon100RE. |
||
1792 | |||
1793 | Returns |
||
1794 | -------- |
||
1795 | pd.DataFrame |
||
1796 | Table of demand profile per building. Column names are building IDs and |
||
1797 | index is hour of the year as int (0-8759). |
||
1798 | |||
1799 | """ |
||
1800 | # ############### get residential heat demand profiles ############### |
||
1801 | df_heat_ts = calc_residential_heat_profiles_per_mvgd( |
||
1802 | mvgd=mvgd, scenario=scenario |
||
1803 | ) |
||
1804 | |||
1805 | # pivot to allow aggregation with CTS profiles |
||
1806 | df_heat_ts = df_heat_ts.pivot( |
||
1807 | index=["day_of_year", "hour"], |
||
1808 | columns="building_id", |
||
1809 | values="demand_ts", |
||
1810 | ) |
||
1811 | df_heat_ts = df_heat_ts.sort_index().reset_index(drop=True) |
||
1812 | |||
1813 | # ############### get CTS heat demand profiles ############### |
||
1814 | heat_demand_cts_ts = calc_cts_building_profiles( |
||
1815 | bus_ids=[mvgd], |
||
1816 | scenario=scenario, |
||
1817 | sector="heat", |
||
1818 | ) |
||
1819 | |||
1820 | # ############# aggregate residential and CTS demand profiles ############# |
||
1821 | df_heat_ts = pd.concat([df_heat_ts, heat_demand_cts_ts], axis=1) |
||
1822 | |||
1823 | df_heat_ts = df_heat_ts.groupby(axis=1, level=0).sum() |
||
1824 | |||
1825 | return df_heat_ts |
||
1826 | |||
1827 | |||
1828 | def export_to_db(df_peak_loads_db, df_heat_mvgd_ts_db, drop=False): |
||
1829 | """ |
||
1830 | Function to export the collected results of all MVGDs per bulk to DB. |
||
1831 | |||
1832 | Parameters |
||
1833 | ---------- |
||
1834 | df_peak_loads_db : pd.DataFrame |
||
1835 | Table of building peak loads of all MVGDs per bulk |
||
1836 | df_heat_mvgd_ts_db : pd.DataFrame |
||
1837 | Table of all aggregated MVGD profiles per bulk |
||
1838 | drop : boolean |
||
1839 | Drop and recreate table if True |
||
1840 | |||
1841 | """ |
||
1842 | |||
1843 | df_peak_loads_db = df_peak_loads_db.melt( |
||
1844 | id_vars="building_id", |
||
1845 | var_name="scenario", |
||
1846 | value_name="peak_load_in_w", |
||
1847 | ) |
||
1848 | df_peak_loads_db["building_id"] = df_peak_loads_db["building_id"].astype( |
||
1849 | int |
||
1850 | ) |
||
1851 | df_peak_loads_db["sector"] = "residential+cts" |
||
1852 | # From MW to W |
||
1853 | df_peak_loads_db["peak_load_in_w"] = ( |
||
1854 | df_peak_loads_db["peak_load_in_w"] * 1e6 |
||
1855 | ) |
||
1856 | write_table_to_postgres(df_peak_loads_db, BuildingHeatPeakLoads, drop=drop) |
||
1857 | |||
1858 | dtypes = { |
||
1859 | column.key: column.type |
||
1860 | for column in EgonEtragoTimeseriesIndividualHeating.__table__.columns |
||
1861 | } |
||
1862 | df_heat_mvgd_ts_db = df_heat_mvgd_ts_db.loc[:, dtypes.keys()] |
||
1863 | |||
1864 | if drop: |
||
1865 | logger.info( |
||
1866 | f"Drop and recreate table " |
||
1867 | f"{EgonEtragoTimeseriesIndividualHeating.__table__.name}." |
||
1868 | ) |
||
1869 | EgonEtragoTimeseriesIndividualHeating.__table__.drop( |
||
1870 | bind=engine, checkfirst=True |
||
1871 | ) |
||
1872 | EgonEtragoTimeseriesIndividualHeating.__table__.create( |
||
1873 | bind=engine, checkfirst=True |
||
1874 | ) |
||
1875 | |||
1876 | with db.session_scope() as session: |
||
1877 | df_heat_mvgd_ts_db.to_sql( |
||
1878 | name=EgonEtragoTimeseriesIndividualHeating.__table__.name, |
||
1879 | schema=EgonEtragoTimeseriesIndividualHeating.__table__.schema, |
||
1880 | con=session.connection(), |
||
1881 | if_exists="append", |
||
1882 | method="multi", |
||
1883 | index=False, |
||
1884 | dtype=dtypes, |
||
1885 | ) |
||
1886 | |||
1887 | |||
1888 | def export_min_cap_to_csv(df_hp_min_cap_mv_grid_pypsa_eur_sec): |
||
1889 | """Export minimum capacity of heat pumps for pypsa eur sec to csv""" |
||
1890 | |||
1891 | df_hp_min_cap_mv_grid_pypsa_eur_sec.index.name = "mvgd_id" |
||
1892 | df_hp_min_cap_mv_grid_pypsa_eur_sec = ( |
||
1893 | df_hp_min_cap_mv_grid_pypsa_eur_sec.to_frame( |
||
1894 | name="min_hp_capacity" |
||
1895 | ).reset_index() |
||
1896 | ) |
||
1897 | |||
1898 | folder = Path(".") / "input-pypsa-eur-sec" |
||
1899 | file = folder / "minimum_hp_capacity_mv_grid_100RE.csv" |
||
1900 | # Create the folder, if it does not exist already |
||
1901 | if not os.path.exists(folder): |
||
1902 | os.mkdir(folder) |
||
1903 | if not file.is_file(): |
||
1904 | logger.info(f"Create {file}") |
||
1905 | df_hp_min_cap_mv_grid_pypsa_eur_sec.to_csv(file, mode="w", header=True) |
||
1906 | else: |
||
1907 | df_hp_min_cap_mv_grid_pypsa_eur_sec.to_csv( |
||
1908 | file, mode="a", header=False |
||
1909 | ) |
||
1910 | |||
1911 | |||
1912 | def delete_pypsa_eur_sec_csv_file(): |
||
1913 | """Delete pypsa eur sec minimum heat pump capacity csv before new run""" |
||
1914 | |||
1915 | folder = Path(".") / "input-pypsa-eur-sec" |
||
1916 | file = folder / "minimum_hp_capacity_mv_grid_100RE.csv" |
||
1917 | if file.is_file(): |
||
1918 | logger.info(f"Delete {file}") |
||
1919 | os.remove(file) |
||
1920 | |||
1921 | |||
1922 | def catch_missing_buidings(buildings_decentral_heating, peak_load): |
||
1923 | """ |
||
1924 | Check for missing buildings and reduce the list of buildings with |
||
1925 | decentral heating if no peak loads available. This should only happen |
||
1926 | in case of cutout SH |
||
1927 | |||
1928 | Parameters |
||
1929 | ----------- |
||
1930 | buildings_decentral_heating : list(int) |
||
1931 | Array or list of buildings with decentral heating |
||
1932 | |||
1933 | peak_load : pd.Series |
||
1934 | Peak loads of all building within the mvgd |
||
1935 | |||
1936 | """ |
||
1937 | # Catch missing buildings key error |
||
1938 | # should only happen within cutout SH |
||
1939 | if not all(buildings_decentral_heating.isin(peak_load.index)): |
||
1940 | diff = buildings_decentral_heating.difference(peak_load.index) |
||
1941 | logger.warning( |
||
1942 | f"Dropped {len(diff)} building ids due to missing peak " |
||
1943 | f"loads. {len(buildings_decentral_heating)} left." |
||
1944 | ) |
||
1945 | logger.info(f"Dropped buildings: {diff.values}") |
||
1946 | buildings_decentral_heating = buildings_decentral_heating.drop(diff) |
||
1947 | |||
1948 | return buildings_decentral_heating |
||
1949 | |||
1950 | |||
1951 | def determine_hp_cap_peak_load_mvgd_ts_2035(mvgd_ids): |
||
1952 | """ |
||
1953 | Main function to determine HP capacity per building in eGon2035 scenario. |
||
1954 | Further, creates heat demand time series for all buildings with heat pumps |
||
1955 | in MV grid, as well as for all buildings with gas boilers, used in eTraGo. |
||
1956 | |||
1957 | Parameters |
||
1958 | ----------- |
||
1959 | mvgd_ids : list(int) |
||
1960 | List of MV grid IDs to determine data for. |
||
1961 | |||
1962 | """ |
||
1963 | |||
1964 | # ========== Register np datatypes with SQLA ========== |
||
1965 | register_adapter(np.float64, adapt_numpy_float64) |
||
1966 | register_adapter(np.int64, adapt_numpy_int64) |
||
1967 | # ===================================================== |
||
1968 | |||
1969 | df_peak_loads_db = pd.DataFrame() |
||
1970 | df_hp_cap_per_building_2035_db = pd.DataFrame() |
||
1971 | df_heat_mvgd_ts_db = pd.DataFrame() |
||
1972 | |||
1973 | for mvgd in mvgd_ids: |
||
1974 | logger.info(f"MVGD={mvgd} | Start") |
||
1975 | |||
1976 | # ############# aggregate residential and CTS demand profiles ##### |
||
1977 | |||
1978 | df_heat_ts = aggregate_residential_and_cts_profiles( |
||
1979 | mvgd, scenario="eGon2035" |
||
1980 | ) |
||
1981 | |||
1982 | # ##################### determine peak loads ################### |
||
1983 | logger.info(f"MVGD={mvgd} | Determine peak loads.") |
||
1984 | |||
1985 | peak_load_2035 = df_heat_ts.max().rename("eGon2035") |
||
1986 | |||
1987 | # ######## determine HP capacity per building ######### |
||
1988 | logger.info(f"MVGD={mvgd} | Determine HP capacities.") |
||
1989 | |||
1990 | buildings_decentral_heating = ( |
||
1991 | get_buildings_with_decentral_heat_demand_in_mv_grid( |
||
1992 | mvgd, scenario="eGon2035" |
||
1993 | ) |
||
1994 | ) |
||
1995 | |||
1996 | # Reduce list of decentral heating if no Peak load available |
||
1997 | # TODO maybe remove after succesfull DE run |
||
1998 | # Might be fixed in #990 |
||
1999 | buildings_decentral_heating = catch_missing_buidings( |
||
2000 | buildings_decentral_heating, peak_load_2035 |
||
2001 | ) |
||
2002 | |||
2003 | hp_cap_per_building_2035 = determine_hp_cap_buildings_pvbased_per_mvgd( |
||
2004 | "eGon2035", |
||
2005 | mvgd, |
||
2006 | peak_load_2035, |
||
2007 | buildings_decentral_heating, |
||
2008 | ) |
||
2009 | buildings_gas_2035 = pd.Index(buildings_decentral_heating).drop( |
||
2010 | hp_cap_per_building_2035.index |
||
2011 | ) |
||
2012 | |||
2013 | # ################ aggregated heat profiles ################### |
||
2014 | logger.info(f"MVGD={mvgd} | Aggregate heat profiles.") |
||
2015 | |||
2016 | df_mvgd_ts_2035_hp = df_heat_ts.loc[ |
||
2017 | :, |
||
2018 | hp_cap_per_building_2035.index, |
||
2019 | ].sum(axis=1) |
||
2020 | |||
2021 | # heat demand time series for buildings with gas boiler |
||
2022 | df_mvgd_ts_2035_gas = df_heat_ts.loc[:, buildings_gas_2035].sum(axis=1) |
||
2023 | |||
2024 | df_heat_mvgd_ts = pd.DataFrame( |
||
2025 | data={ |
||
2026 | "carrier": ["heat_pump", "CH4"], |
||
2027 | "bus_id": mvgd, |
||
2028 | "scenario": ["eGon2035", "eGon2035"], |
||
2029 | "dist_aggregated_mw": [ |
||
2030 | df_mvgd_ts_2035_hp.to_list(), |
||
2031 | df_mvgd_ts_2035_gas.to_list(), |
||
2032 | ], |
||
2033 | } |
||
2034 | ) |
||
2035 | |||
2036 | # ################ collect results ################## |
||
2037 | logger.info(f"MVGD={mvgd} | Collect results.") |
||
2038 | |||
2039 | df_peak_loads_db = pd.concat( |
||
2040 | [df_peak_loads_db, peak_load_2035.reset_index()], |
||
2041 | axis=0, |
||
2042 | ignore_index=True, |
||
2043 | ) |
||
2044 | |||
2045 | df_heat_mvgd_ts_db = pd.concat( |
||
2046 | [df_heat_mvgd_ts_db, df_heat_mvgd_ts], axis=0, ignore_index=True |
||
2047 | ) |
||
2048 | |||
2049 | df_hp_cap_per_building_2035_db = pd.concat( |
||
2050 | [ |
||
2051 | df_hp_cap_per_building_2035_db, |
||
2052 | hp_cap_per_building_2035.reset_index(), |
||
2053 | ], |
||
2054 | axis=0, |
||
2055 | ) |
||
2056 | |||
2057 | # ################ export to db ####################### |
||
2058 | logger.info(f"MVGD={min(mvgd_ids)} : {max(mvgd_ids)} | Write data to db.") |
||
2059 | |||
2060 | export_to_db(df_peak_loads_db, df_heat_mvgd_ts_db, drop=False) |
||
2061 | |||
2062 | df_hp_cap_per_building_2035_db["scenario"] = "eGon2035" |
||
2063 | |||
2064 | # TODO debug duplicated building_ids |
||
2065 | duplicates = df_hp_cap_per_building_2035_db.loc[ |
||
2066 | df_hp_cap_per_building_2035_db.duplicated("building_id", keep=False) |
||
2067 | ] |
||
2068 | |||
2069 | if not duplicates.empty: |
||
2070 | logger.info( |
||
2071 | f"Dropped duplicated buildings: " |
||
2072 | f"{duplicates.loc[:,['building_id', 'hp_capacity']]}" |
||
2073 | ) |
||
2074 | |||
2075 | df_hp_cap_per_building_2035_db.drop_duplicates("building_id", inplace=True) |
||
2076 | |||
2077 | df_hp_cap_per_building_2035_db.building_id = ( |
||
2078 | df_hp_cap_per_building_2035_db.building_id.astype(int) |
||
2079 | ) |
||
2080 | |||
2081 | write_table_to_postgres( |
||
2082 | df_hp_cap_per_building_2035_db, |
||
2083 | EgonHpCapacityBuildings, |
||
2084 | drop=False, |
||
2085 | ) |
||
2086 | |||
2087 | |||
2088 | def determine_hp_cap_peak_load_mvgd_ts_status_quo(mvgd_ids, scenario): |
||
2089 | """ |
||
2090 | Main function to determine HP capacity per building in status quo scenario. |
||
2091 | Further, creates heat demand time series for all buildings with heat pumps |
||
2092 | in MV grid, as well as for all buildings with gas boilers, used in eTraGo. |
||
2093 | |||
2094 | Parameters |
||
2095 | ----------- |
||
2096 | mvgd_ids : list(int) |
||
2097 | List of MV grid IDs to determine data for. |
||
2098 | |||
2099 | """ |
||
2100 | |||
2101 | # ========== Register np datatypes with SQLA ========== |
||
2102 | register_adapter(np.float64, adapt_numpy_float64) |
||
2103 | register_adapter(np.int64, adapt_numpy_int64) |
||
2104 | # ===================================================== |
||
2105 | |||
2106 | df_peak_loads_db = pd.DataFrame() |
||
2107 | df_hp_cap_per_building_status_quo_db = pd.DataFrame() |
||
2108 | df_heat_mvgd_ts_db = pd.DataFrame() |
||
2109 | |||
2110 | for mvgd in mvgd_ids: |
||
2111 | logger.info(f"MVGD={mvgd} | Start") |
||
2112 | |||
2113 | # ############# aggregate residential and CTS demand profiles ##### |
||
2114 | |||
2115 | df_heat_ts = aggregate_residential_and_cts_profiles( |
||
2116 | mvgd, scenario=scenario |
||
2117 | ) |
||
2118 | |||
2119 | # ##################### determine peak loads ################### |
||
2120 | logger.info(f"MVGD={mvgd} | Determine peak loads.") |
||
2121 | |||
2122 | peak_load_status_quo = df_heat_ts.max().rename(scenario) |
||
2123 | |||
2124 | # ######## determine HP capacity per building ######### |
||
2125 | logger.info(f"MVGD={mvgd} | Determine HP capacities.") |
||
2126 | |||
2127 | buildings_decentral_heating = ( |
||
2128 | get_buildings_with_decentral_heat_demand_in_mv_grid( |
||
2129 | mvgd, scenario=scenario |
||
2130 | ) |
||
2131 | ) |
||
2132 | |||
2133 | # Reduce list of decentral heating if no Peak load available |
||
2134 | # TODO maybe remove after succesfull DE run |
||
2135 | # Might be fixed in #990 |
||
2136 | buildings_decentral_heating = catch_missing_buidings( |
||
2137 | buildings_decentral_heating, peak_load_status_quo |
||
2138 | ) |
||
2139 | |||
2140 | hp_cap_per_building_status_quo = ( |
||
2141 | determine_hp_cap_buildings_pvbased_per_mvgd( |
||
2142 | scenario, |
||
2143 | mvgd, |
||
2144 | peak_load_status_quo, |
||
2145 | buildings_decentral_heating, |
||
2146 | ) |
||
2147 | ) |
||
2148 | |||
2149 | # ################ aggregated heat profiles ################### |
||
2150 | logger.info(f"MVGD={mvgd} | Aggregate heat profiles.") |
||
2151 | |||
2152 | df_mvgd_ts_status_quo_hp = df_heat_ts.loc[ |
||
2153 | :, |
||
2154 | hp_cap_per_building_status_quo.index, |
||
2155 | ].sum(axis=1) |
||
2156 | |||
2157 | df_heat_mvgd_ts = pd.DataFrame( |
||
2158 | data={ |
||
2159 | "carrier": "heat_pump", |
||
2160 | "bus_id": mvgd, |
||
2161 | "scenario": scenario, |
||
2162 | "dist_aggregated_mw": [df_mvgd_ts_status_quo_hp.to_list()], |
||
2163 | } |
||
2164 | ) |
||
2165 | |||
2166 | # ################ collect results ################## |
||
2167 | logger.info(f"MVGD={mvgd} | Collect results.") |
||
2168 | |||
2169 | df_peak_loads_db = pd.concat( |
||
2170 | [df_peak_loads_db, peak_load_status_quo.reset_index()], |
||
2171 | axis=0, |
||
2172 | ignore_index=True, |
||
2173 | ) |
||
2174 | |||
2175 | df_heat_mvgd_ts_db = pd.concat( |
||
2176 | [df_heat_mvgd_ts_db, df_heat_mvgd_ts], axis=0, ignore_index=True |
||
2177 | ) |
||
2178 | |||
2179 | df_hp_cap_per_building_status_quo_db = pd.concat( |
||
2180 | [ |
||
2181 | df_hp_cap_per_building_status_quo_db, |
||
2182 | hp_cap_per_building_status_quo.reset_index(), |
||
2183 | ], |
||
2184 | axis=0, |
||
2185 | ) |
||
2186 | |||
2187 | # ################ export to db ####################### |
||
2188 | logger.info(f"MVGD={min(mvgd_ids)} : {max(mvgd_ids)} | Write data to db.") |
||
2189 | |||
2190 | export_to_db(df_peak_loads_db, df_heat_mvgd_ts_db, drop=False) |
||
2191 | |||
2192 | df_hp_cap_per_building_status_quo_db["scenario"] = scenario |
||
2193 | |||
2194 | # TODO debug duplicated building_ids |
||
2195 | duplicates = df_hp_cap_per_building_status_quo_db.loc[ |
||
2196 | df_hp_cap_per_building_status_quo_db.duplicated( |
||
2197 | "building_id", keep=False |
||
2198 | ) |
||
2199 | ] |
||
2200 | |||
2201 | if not duplicates.empty: |
||
2202 | logger.info( |
||
2203 | f"Dropped duplicated buildings: " |
||
2204 | f"{duplicates.loc[:,['building_id', 'hp_capacity']]}" |
||
2205 | ) |
||
2206 | |||
2207 | df_hp_cap_per_building_status_quo_db.drop_duplicates( |
||
2208 | "building_id", inplace=True |
||
2209 | ) |
||
2210 | |||
2211 | df_hp_cap_per_building_status_quo_db.building_id = ( |
||
2212 | df_hp_cap_per_building_status_quo_db.building_id.astype(int) |
||
2213 | ) |
||
2214 | |||
2215 | write_table_to_postgres( |
||
2216 | df_hp_cap_per_building_status_quo_db, |
||
2217 | EgonHpCapacityBuildings, |
||
2218 | drop=False, |
||
2219 | ) |
||
2220 | |||
2221 | |||
2222 | def determine_hp_cap_peak_load_mvgd_ts_pypsa_eur(mvgd_ids): |
||
2223 | """ |
||
2224 | Main function to determine minimum required HP capacity in MV for |
||
2225 | pypsa-eur-sec. Further, creates heat demand time series for all buildings |
||
2226 | with heat pumps in MV grid in eGon100RE scenario, used in eTraGo. |
||
2227 | |||
2228 | Parameters |
||
2229 | ----------- |
||
2230 | mvgd_ids : list(int) |
||
2231 | List of MV grid IDs to determine data for. |
||
2232 | |||
2233 | """ |
||
2234 | |||
2235 | # ========== Register np datatypes with SQLA ========== |
||
2236 | register_adapter(np.float64, adapt_numpy_float64) |
||
2237 | register_adapter(np.int64, adapt_numpy_int64) |
||
2238 | # ===================================================== |
||
2239 | |||
2240 | df_peak_loads_db = pd.DataFrame() |
||
2241 | df_heat_mvgd_ts_db = pd.DataFrame() |
||
2242 | df_hp_min_cap_mv_grid_pypsa_eur_sec = pd.Series(dtype="float64") |
||
2243 | |||
2244 | for mvgd in mvgd_ids: |
||
2245 | logger.info(f"MVGD={mvgd} | Start") |
||
2246 | |||
2247 | # ############# aggregate residential and CTS demand profiles ##### |
||
2248 | |||
2249 | df_heat_ts = aggregate_residential_and_cts_profiles( |
||
2250 | mvgd, scenario="eGon100RE" |
||
2251 | ) |
||
2252 | |||
2253 | # ##################### determine peak loads ################### |
||
2254 | logger.info(f"MVGD={mvgd} | Determine peak loads.") |
||
2255 | |||
2256 | peak_load_100RE = df_heat_ts.max().rename("eGon100RE") |
||
2257 | |||
2258 | # ######## determine minimum HP capacity pypsa-eur-sec ########### |
||
2259 | logger.info(f"MVGD={mvgd} | Determine minimum HP capacity.") |
||
2260 | |||
2261 | buildings_decentral_heating = ( |
||
2262 | get_buildings_with_decentral_heat_demand_in_mv_grid( |
||
2263 | mvgd, scenario="eGon100RE" |
||
2264 | ) |
||
2265 | ) |
||
2266 | |||
2267 | # Reduce list of decentral heating if no Peak load available |
||
2268 | # TODO maybe remove after succesfull DE run |
||
2269 | buildings_decentral_heating = catch_missing_buidings( |
||
2270 | buildings_decentral_heating, peak_load_100RE |
||
2271 | ) |
||
2272 | |||
2273 | hp_min_cap_mv_grid_pypsa_eur_sec = ( |
||
2274 | determine_min_hp_cap_buildings_pypsa_eur_sec( |
||
2275 | peak_load_100RE, |
||
2276 | buildings_decentral_heating, |
||
2277 | ) |
||
2278 | ) |
||
2279 | |||
2280 | # ################ aggregated heat profiles ################### |
||
2281 | logger.info(f"MVGD={mvgd} | Aggregate heat profiles.") |
||
2282 | |||
2283 | df_mvgd_ts_hp = df_heat_ts.loc[ |
||
2284 | :, |
||
2285 | buildings_decentral_heating, |
||
2286 | ].sum(axis=1) |
||
2287 | |||
2288 | df_heat_mvgd_ts = pd.DataFrame( |
||
2289 | data={ |
||
2290 | "carrier": "heat_pump", |
||
2291 | "bus_id": mvgd, |
||
2292 | "scenario": "eGon100RE", |
||
2293 | "dist_aggregated_mw": [df_mvgd_ts_hp.to_list()], |
||
2294 | } |
||
2295 | ) |
||
2296 | |||
2297 | # ################ collect results ################## |
||
2298 | logger.info(f"MVGD={mvgd} | Collect results.") |
||
2299 | |||
2300 | df_peak_loads_db = pd.concat( |
||
2301 | [df_peak_loads_db, peak_load_100RE.reset_index()], |
||
2302 | axis=0, |
||
2303 | ignore_index=True, |
||
2304 | ) |
||
2305 | |||
2306 | df_heat_mvgd_ts_db = pd.concat( |
||
2307 | [df_heat_mvgd_ts_db, df_heat_mvgd_ts], axis=0, ignore_index=True |
||
2308 | ) |
||
2309 | |||
2310 | df_hp_min_cap_mv_grid_pypsa_eur_sec.loc[mvgd] = ( |
||
2311 | hp_min_cap_mv_grid_pypsa_eur_sec |
||
2312 | ) |
||
2313 | |||
2314 | # ################ export to db and csv ###################### |
||
2315 | logger.info(f"MVGD={min(mvgd_ids)} : {max(mvgd_ids)} | Write data to db.") |
||
2316 | |||
2317 | export_to_db(df_peak_loads_db, df_heat_mvgd_ts_db, drop=False) |
||
2318 | |||
2319 | logger.info( |
||
2320 | f"MVGD={min(mvgd_ids)} : {max(mvgd_ids)} | Write " |
||
2321 | f"pypsa-eur-sec min " |
||
2322 | f"HP capacities to csv." |
||
2323 | ) |
||
2324 | export_min_cap_to_csv(df_hp_min_cap_mv_grid_pypsa_eur_sec) |
||
2325 | |||
2326 | |||
2327 | def split_mvgds_into_bulks(n, max_n, func, scenario=None): |
||
2328 | """ |
||
2329 | Generic function to split task into multiple parallel tasks, |
||
2330 | dividing the number of MVGDs into even bulks. |
||
2331 | |||
2332 | Parameters |
||
2333 | ----------- |
||
2334 | n : int |
||
2335 | Number of bulk |
||
2336 | max_n: int |
||
2337 | Maximum number of bulks |
||
2338 | func : function |
||
2339 | The funnction which is then called with the list of MVGD as |
||
2340 | parameter. |
||
2341 | """ |
||
2342 | |||
2343 | with db.session_scope() as session: |
||
2344 | query = ( |
||
2345 | session.query( |
||
2346 | MapZensusGridDistricts.bus_id, |
||
2347 | ) |
||
2348 | .filter( |
||
2349 | MapZensusGridDistricts.zensus_population_id |
||
2350 | == EgonPetaHeat.zensus_population_id |
||
2351 | ) |
||
2352 | .distinct(MapZensusGridDistricts.bus_id) |
||
2353 | ) |
||
2354 | mvgd_ids = pd.read_sql( |
||
2355 | query.statement, query.session.bind, index_col=None |
||
2356 | ) |
||
2357 | |||
2358 | mvgd_ids = mvgd_ids.sort_values("bus_id").reset_index(drop=True) |
||
2359 | |||
2360 | mvgd_ids = np.array_split(mvgd_ids["bus_id"].values, max_n) |
||
2361 | # Only take split n |
||
2362 | mvgd_ids = mvgd_ids[n] |
||
2363 | |||
2364 | logger.info(f"Bulk takes care of MVGD: {min(mvgd_ids)} : {max(mvgd_ids)}") |
||
2365 | |||
2366 | if scenario is not None: |
||
2367 | func(mvgd_ids, scenario=scenario) |
||
2368 | else: |
||
2369 | func(mvgd_ids) |
||
2370 | |||
2371 | |||
2372 | def delete_hp_capacity(scenario): |
||
2373 | """Remove all hp capacities for the selected scenario |
||
2374 | |||
2375 | Parameters |
||
2376 | ----------- |
||
2377 | scenario : string |
||
2378 | Either eGon2035 or eGon100RE |
||
2379 | |||
2380 | """ |
||
2381 | |||
2382 | with db.session_scope() as session: |
||
2383 | # Buses |
||
2384 | session.query(EgonHpCapacityBuildings).filter( |
||
2385 | EgonHpCapacityBuildings.scenario == scenario |
||
2386 | ).delete(synchronize_session=False) |
||
2387 | |||
2388 | |||
2389 | def delete_mvgd_ts(scenario): |
||
2390 | """Remove all hp capacities for the selected scenario |
||
2391 | |||
2392 | Parameters |
||
2393 | ----------- |
||
2394 | scenario : string |
||
2395 | Either eGon2035 or eGon100RE |
||
2396 | |||
2397 | """ |
||
2398 | |||
2399 | with db.session_scope() as session: |
||
2400 | # Buses |
||
2401 | session.query(EgonEtragoTimeseriesIndividualHeating).filter( |
||
2402 | EgonEtragoTimeseriesIndividualHeating.scenario == scenario |
||
2403 | ).delete(synchronize_session=False) |
||
2404 | |||
2405 | |||
2406 | def delete_hp_capacity_100RE(): |
||
2407 | """Remove all hp capacities for the selected eGon100RE""" |
||
2408 | EgonHpCapacityBuildings.__table__.create(bind=engine, checkfirst=True) |
||
2409 | delete_hp_capacity(scenario="eGon100RE") |
||
2410 | |||
2411 | |||
2412 | def delete_hp_capacity_status_quo(scenario): |
||
2413 | """Remove all hp capacities for the selected status quo""" |
||
2414 | EgonHpCapacityBuildings.__table__.create(bind=engine, checkfirst=True) |
||
2415 | delete_hp_capacity(scenario=scenario) |
||
2416 | |||
2417 | |||
2418 | def delete_hp_capacity_2035(): |
||
2419 | """Remove all hp capacities for the selected eGon2035""" |
||
2420 | EgonHpCapacityBuildings.__table__.create(bind=engine, checkfirst=True) |
||
2421 | delete_hp_capacity(scenario="eGon2035") |
||
2422 | |||
2423 | |||
2424 | def delete_mvgd_ts_status_quo(scenario): |
||
2425 | """Remove all mvgd ts for the selected status quo""" |
||
2426 | EgonEtragoTimeseriesIndividualHeating.__table__.create( |
||
2427 | bind=engine, checkfirst=True |
||
2428 | ) |
||
2429 | delete_mvgd_ts(scenario=scenario) |
||
2430 | |||
2431 | |||
2432 | def delete_mvgd_ts_2035(): |
||
2433 | """Remove all mvgd ts for the selected eGon2035""" |
||
2434 | EgonEtragoTimeseriesIndividualHeating.__table__.create( |
||
2435 | bind=engine, checkfirst=True |
||
2436 | ) |
||
2437 | delete_mvgd_ts(scenario="eGon2035") |
||
2438 | |||
2439 | |||
2440 | def delete_mvgd_ts_100RE(): |
||
2441 | """Remove all mvgd ts for the selected eGon100RE""" |
||
2442 | EgonEtragoTimeseriesIndividualHeating.__table__.create( |
||
2443 | bind=engine, checkfirst=True |
||
2444 | ) |
||
2445 | delete_mvgd_ts(scenario="eGon100RE") |
||
2446 | |||
2447 | |||
2448 | def delete_heat_peak_loads_status_quo(scenario): |
||
2449 | """Remove all heat peak loads for status quo.""" |
||
2450 | BuildingHeatPeakLoads.__table__.create(bind=engine, checkfirst=True) |
||
2451 | with db.session_scope() as session: |
||
2452 | # Buses |
||
2453 | session.query(BuildingHeatPeakLoads).filter( |
||
2454 | BuildingHeatPeakLoads.scenario == scenario |
||
2455 | ).delete(synchronize_session=False) |
||
2456 | |||
2457 | |||
2458 | def delete_heat_peak_loads_2035(): |
||
2459 | """Remove all heat peak loads for eGon2035.""" |
||
2460 | BuildingHeatPeakLoads.__table__.create(bind=engine, checkfirst=True) |
||
2461 | with db.session_scope() as session: |
||
2462 | # Buses |
||
2463 | session.query(BuildingHeatPeakLoads).filter( |
||
2464 | BuildingHeatPeakLoads.scenario == "eGon2035" |
||
2465 | ).delete(synchronize_session=False) |
||
2466 | |||
2467 | |||
2468 | def delete_heat_peak_loads_100RE(): |
||
2469 | """Remove all heat peak loads for eGon100RE.""" |
||
2470 | BuildingHeatPeakLoads.__table__.create(bind=engine, checkfirst=True) |
||
2471 | with db.session_scope() as session: |
||
2472 | # Buses |
||
2473 | session.query(BuildingHeatPeakLoads).filter( |
||
2474 | BuildingHeatPeakLoads.scenario == "eGon100RE" |
||
2475 | ).delete(synchronize_session=False) |
||
2476 |