# -*- coding: utf-8 -*-

"""Modules for providing a convenient data structure for solph results.

Information about the possible usage is provided within the examples.

SPDX-FileCopyrightText: Uwe Krien <[email protected]>
SPDX-FileCopyrightText: Simon Hilpert
SPDX-FileCopyrightText: Cord Kaldemeyer
SPDX-FileCopyrightText: Stephan Günther
SPDX-FileCopyrightText: henhuy
SPDX-FileCopyrightText: Johannes Kochems
SPDX-FileCopyrightText: Patrik Schönfeldt <[email protected]>

SPDX-License-Identifier: MIT

"""
import itertools
import numbers
import operator
import sys
from collections import abc
from itertools import groupby
from typing import Dict
from typing import Tuple

import numpy as np
import pandas as pd
from oemof.network.network import Entity
from pyomo.core.base.piecewise import IndexedPiecewise
from pyomo.core.base.var import Var

from oemof.solph.components._generic_storage import GenericStorage

from ._plumbing import _FakeSequence
from .helpers import flatten

PERIOD_INDEXES = ("invest", "total", "old", "old_end", "old_exo")


def get_tuple(x):
    """Get oemof tuple within iterable or create it

    Tuples from Pyomo are of type `(n, n, int)`, `(n, n)` and `(n, int)`.
    For single nodes `n` a tuple with one object `(n,)` is created.
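
    Examples
    --------
    A minimal sketch with plain strings standing in for solph nodes (real
    indices contain `oemof.network` entities):

    >>> get_tuple((("source", "bus"), 0))
    ('source', 'bus')
    >>> get_tuple(("standalone_var", 0))
    ('standalone_var', 0)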
    """
    for i in x:
        if isinstance(i, tuple):
            return i
        elif issubclass(type(i), Entity):
            return (i,)

    # for standalone variables, x is used as identifying tuple
    if isinstance(x, tuple):
        return x


def get_timestep(x):
    """Get the timestep from oemof tuples

    The timestep from tuples `(n, n, int)`, `(n, n)`, `(n, int)` and `(n,)`
    is fetched as the last element. For time-independent data (scalars),
    zero is returned.
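
    Examples
    --------
    A minimal sketch with plain strings standing in for solph nodes:

    >>> get_timestep(("source", "bus", 5))
    5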
    """
    if all(issubclass(type(n), Entity) for n in x):
        return 0
    else:
        return x[-1]


def remove_timestep(x):
    """Remove the timestep from oemof tuples

    The timestep is removed from tuples of type `(n, n, int)` and `(n, int)`.
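
    Examples
    --------
    A minimal sketch with plain strings standing in for solph nodes:

    >>> remove_timestep(("source", "bus", 5))
    ('source', 'bus')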
    """
    if all(issubclass(type(n), Entity) for n in x):
        return x
    else:
        return x[:-1]


def create_dataframe(om):
    """Create a result DataFrame with all optimization data

    Results from Pyomo are written into one common pandas.DataFrame where
    separate columns are created for the variable index e.g. for tuples
    of the flows and components or the timesteps.
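
    Examples
    --------
    A sketch of the resulting layout (assuming a solved model ``om``; not
    executable as a doctest)::

        df = create_dataframe(om)
        list(df.columns)
        # ["pyomo_tuple", "value", "variable_name", "oemof_tuple",
        #  "timestep"]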
    """
    # get all pyomo variables including their block
    block_vars = list(
        set([bv.parent_component() for bv in om.component_data_objects(Var)])
    )
    var_dict = {}
    for bv in block_vars:
        # Drop the auxiliary variables introduced by pyomo's Piecewise
        parent_component = bv.parent_block().parent_component()
        if not isinstance(parent_component, IndexedPiecewise):
            try:
                idx_set = getattr(bv, "_index_set")
            except AttributeError:
                # To make it compatible with Pyomo < 6.4.1
                idx_set = getattr(bv, "_index")

            for i in idx_set:
                key = (str(bv).split(".")[0], str(bv).split(".")[-1], i)
                value = bv[i].value
                var_dict[key] = value

    # use this to create a pandas dataframe
    df = pd.DataFrame(list(var_dict.items()), columns=["pyomo_tuple", "value"])
    df["variable_name"] = df["pyomo_tuple"].str[1]

    # adapt the dataframe by separating tuple data into columns depending
    # on which dimension the variable/parameter has (scalar/sequence).
    # columns for the oemof tuple and timestep are created
    df["oemof_tuple"] = df["pyomo_tuple"].map(get_tuple)
    df = df[df["oemof_tuple"].map(lambda x: x is not None)]
    df["timestep"] = df["oemof_tuple"].map(get_timestep)
    df["oemof_tuple"] = df["oemof_tuple"].map(remove_timestep)

    # Use a second call of remove_timestep to strip the period index from
    # flow variables, as it is not needed here
    df.loc[df["variable_name"] == "flow", "oemof_tuple"] = df.loc[
        df["variable_name"] == "flow", "oemof_tuple"
    ].map(remove_timestep)

    # order the data by oemof tuple and timestep
    df = df.sort_values(["oemof_tuple", "timestep"], ascending=[True, True])

    # drop empty decision variables
    df = df.dropna(subset=["value"])

    return df


def divide_scalars_sequences(df_dict, k):
    """Split results into scalars and sequences results

    Parameters
    ----------
    df_dict: dict
        dict of pd.DataFrames, keyed by oemof tuples
    k: tuple
        oemof tuple for results processing
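
    Examples
    --------
    A sketch of the returned structure (``df_dict`` and ``k`` as built in
    ``results``; not executable as a doctest)::

        split = divide_scalars_sequences(df_dict, k)
        split["scalars"]    # pd.Series of time-independent values
        split["sequences"]  # pd.DataFrame of time-dependent values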
    """
    try:
        condition = df_dict[k][:-1].isnull().any()
        scalars = df_dict[k].loc[:, condition].dropna().iloc[0]
        sequences = df_dict[k].loc[:, ~condition]
        return {"scalars": scalars, "sequences": sequences}
    except IndexError:
        error_message = (
            "Cannot access index on result data. "
            + "Did the optimization terminate"
            + " without errors?"
        )
        raise IndexError(error_message)


def set_result_index(df_dict, k, result_index):
    """Define index for results

    Parameters
    ----------
    df_dict: dict
        dict of pd.DataFrames, keyed by oemof tuples
    k: tuple
        oemof tuple for results processing
    result_index: pd.Index
        Index to use for results
    """
    try:
        df_dict[k].index = result_index
    except ValueError:
        try:
            df_dict[k] = df_dict[k][:-1]
            df_dict[k].index = result_index
        except ValueError as e:
            msg = (
                "\nFlow: {0}-{1}. This could be caused by NaN-values "
                "in your input data."
            )
            raise type(e)(
                str(e) + msg.format(k[0].label, k[1].label)
            ).with_traceback(sys.exc_info()[2])


def set_sequences_index(df, result_index):
    """Set ``result_index`` on a sequences DataFrame; if the lengths do not
    match, retry with the last row dropped."""
    try:
        df.index = result_index
    except ValueError:
        try:
            df = df[:-1]
            df.index = result_index
        except ValueError:
            raise ValueError("Results extraction failed!")


def results(model, remove_last_time_point=False):
    """Create a nested result dictionary from the result DataFrame

    The already rearranged results from Pyomo from the result DataFrame are
    transferred into a nested dictionary of pandas objects.
    The first level key of that dictionary is a node (denoting the respective
    flow or component).

    The second level keys are "sequences" and "scalars" for a *standard
    model*:

    * A pd.DataFrame holds all results that are time-dependent, i.e. given as
      a sequence and can be indexed with the energy system's timeindex.
    * A pd.Series holds all scalar values which are applicable for timestep 0
      (i.e. investments).

    For a *multi-period model*, the second level key for "sequences" remains
    the same while instead of "scalars", the key "period_scalars" is used:

    * For sequences, see standard model.
    * Instead of a pd.Series, a pd.DataFrame holds scalar values indexed
      by periods. These hold investment-related variables.

    Examples
    --------
    * *Standard model*: `results[idx]['scalars']`
      and flows `results[n, n]['sequences']`.
    * *Multi-period model*: `results[idx]['period_scalars']`
      and flows `results[n, n]['sequences']`.
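
    A typical access pattern (sketch, assuming a solved ``model`` and two
    connected nodes ``node_a`` and ``node_b``; names are placeholders)::

        my_results = results(model)
        flow_series = my_results[(node_a, node_b)]["sequences"]["flow"]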

    Parameters
    ----------
    model : oemof.solph.Model
        A solved oemof.solph model.
    remove_last_time_point : bool
        The last time point of all TIMEPOINT variables is removed to get the
        same length as the TIMESTEP (interval) variables without getting
        nan-values. By default, the last time point is removed if it has not
        been defined by the user in the EnergySystem but inferred. If all
        time points have been defined explicitly by the user the last time
        point will not be removed by default. In that case all interval
        variables will get one row with nan-values to have the same index
        for all variables.
    """
    # Extraction steps that are the same for both model types
    df = create_dataframe(model)

    # create a dict of dataframes keyed by oemof tuples
    df_dict = {
        k if len(k) > 1 else (k[0], None): v[
            ["timestep", "variable_name", "value"]
        ]
        for k, v in df.groupby("oemof_tuple")
    }

    # Define index
    if model.es.tsa_parameters:
        for p, period_data in enumerate(model.es.tsa_parameters):
            if p == 0:
                if model.es.periods is None:
                    timeindex = model.es.timeindex
                else:
                    timeindex = model.es.periods[0]
                result_index = _disaggregate_tsa_timeindex(
                    timeindex, period_data
                )
            else:
                result_index = result_index.union(
                    _disaggregate_tsa_timeindex(
                        model.es.periods[p], period_data
                    )
                )
    else:
        if model.es.timeindex is None:
            result_index = list(range(len(model.es.timeincrement) + 1))
        else:
            result_index = model.es.timeindex

    if model.es.tsa_parameters is not None:
        df_dict = _disaggregate_tsa_result(df_dict, model.es.tsa_parameters)

    # create final result dictionary by splitting up the dataframes in the
    # dataframe dict into a series for scalar data and dataframe for sequences
    result = {}

    # Standard model results extraction
    if model.es.periods is None:
        result = _extract_standard_model_result(
            df_dict, result, result_index, remove_last_time_point
        )
        scalars_col = "scalars"

    # Results extraction for a multi-period model
    else:
        period_indexed = ["invest", "total", "old", "old_end", "old_exo"]

        result = _extract_multi_period_model_result(
            model,
            df_dict,
            period_indexed,
            result,
            result_index,
            remove_last_time_point,
        )
        scalars_col = "period_scalars"

    # add dual variables for bus constraints
    if model.dual is not None:
        grouped = groupby(
            sorted(model.BusBlock.balance.iterkeys()), lambda t: t[0]
        )
        for bus, timestep in grouped:
            duals = [
                model.dual[model.BusBlock.balance[bus, t]] for _, t in timestep
            ]
            if model.es.periods is None:
                df = pd.DataFrame({"duals": duals}, index=result_index[:-1])
            # TODO: Align with standard model
            else:
                df = pd.DataFrame({"duals": duals}, index=result_index)
            if (bus, None) not in result.keys():
                result[(bus, None)] = {
                    "sequences": df,
                    scalars_col: pd.Series(dtype=float),
                }
            else:
                result[(bus, None)]["sequences"]["duals"] = duals

    return result


def _extract_standard_model_result(
    df_dict, result, result_index, remove_last_time_point
):
    """Extract and return the results of a standard model

    * Optionally remove the last time point or include it otherwise.
    * Set index to timeindex and pivot results such that values are displayed
      for the respective variables. Reindex with the energy system's
      timeindex.
    * Filter for columns with nan values to retrieve scalar variables. Split
      up the DataFrame into sequences and scalars and return it.

    Parameters
    ----------
    df_dict : dict
        dictionary of results DataFrames
    result : dict
        dictionary to store the results
    result_index : pd.DatetimeIndex
        timeindex to use for the results (derived from EnergySystem)
    remove_last_time_point : bool
        if True, remove the last time point

    Returns
    -------
    result : dict
        dictionary with results stored
    """
    if remove_last_time_point:
        # The values of intervals belong to the time at the beginning of the
        # interval.
        for k in df_dict:
            df_dict[k].set_index("timestep", inplace=True)
            df_dict[k] = df_dict[k].pivot(
                columns="variable_name", values="value"
            )
            set_result_index(df_dict, k, result_index[:-1])
            result[k] = divide_scalars_sequences(df_dict, k)
    else:
        for k in df_dict:
            df_dict[k].set_index("timestep", inplace=True)
            df_dict[k] = df_dict[k].pivot(
                columns="variable_name", values="value"
            )
            # Add empty row with nan at the end of the table by adding 1 to
            # the last value of the numeric index.
            df_dict[k].loc[df_dict[k].index[-1] + 1, :] = np.nan
            set_result_index(df_dict, k, result_index)
            result[k] = divide_scalars_sequences(df_dict, k)

    return result


def _extract_multi_period_model_result(
    model,
    df_dict,
    period_indexed=None,
    result=None,
    result_index=None,
    remove_last_time_point=False,
):
    """Extract and return the results of a multi-period model

    The difference to the standard model lies in the way scalar values are
    extracted, since they now depend on periods.

    Parameters
    ----------
    model : oemof.solph.models.Model
        The optimization model
    df_dict : dict
        dictionary of results DataFrames
    period_indexed : list
        list of variables that are indexed by periods
    result : dict
        dictionary to store the results
    result_index : pd.DatetimeIndex
        timeindex to use for the results (derived from EnergySystem)
    remove_last_time_point : bool
        if True, remove the last time point

    Returns
    -------
    result : dict
        dictionary with results stored
    """
    for k in df_dict:
        df_dict[k].set_index("timestep", inplace=True)
        df_dict[k] = df_dict[k].pivot(columns="variable_name", values="value")
        # Split data set
        period_cols = [
            col for col in df_dict[k].columns if col in period_indexed
        ]
        # map periods to their start years for displaying period results
        d = {
            key: val + model.es.periods[0].min().year
            for key, val in enumerate(model.es.periods_years)
        }
        period_scalars = df_dict[k].loc[:, period_cols].dropna()
        sequences = df_dict[k].loc[
            :, [col for col in df_dict[k].columns if col not in period_cols]
        ]
        if remove_last_time_point:
            set_sequences_index(sequences, result_index[:-1])
        else:
            set_sequences_index(sequences, result_index)
        if period_scalars.empty:
            period_scalars = pd.DataFrame(index=d.values())
        try:
            period_scalars.rename(index=d, inplace=True)
            period_scalars.index.name = "period"
            result[k] = {
                "period_scalars": period_scalars,
                "sequences": sequences,
            }
        except IndexError:
            error_message = (
                "Some indices seem to be not matching.\n"
                "Cannot properly extract model results."
            )
            raise IndexError(error_message)

    return result


def _disaggregate_tsa_result(df_dict, tsa_parameters):
    """
    Disaggregate timeseries aggregated by TSAM

    All component flows are disaggregated using the mapping order of original
    and typical clusters in TSAM parameters. Additionally, storage SOC is
    disaggregated from inter and intra storage contents.

    Multi-period indexes are removed from results up front and added again
    after disaggregation.

    Parameters
    ----------
    df_dict : dict
        Raw results from oemof model
    tsa_parameters : list-of-dicts
        TSAM parameters holding order, occurrences and timesteps_per_period
        for each period

    Returns
    -------
    dict: Disaggregated sequences
    """
    periodic_dict = {}
    flow_dict = {}
    for key, data in df_dict.items():
        periodic_values = data[data["variable_name"].isin(PERIOD_INDEXES)]
        if not periodic_values.empty:
            periodic_dict[key] = periodic_values
        flow_dict[key] = data[~data["variable_name"].isin(PERIOD_INDEXES)]

    # Find storages and remove related entries from flow dict:
    storages, storage_keys = _get_storage_soc_flows_and_keys(flow_dict)
    for key in storage_keys:
        del flow_dict[key]

    # Find multiplexer and remove related entries from flow dict:
    multiplexer, multiplexer_keys = _get_multiplexer_flows_and_keys(flow_dict)
    for key in multiplexer_keys:
        del flow_dict[key]

    # Disaggregate flows
    for flow in flow_dict:
        disaggregated_flow_frames = []
        period_offset = 0
        for tsa_period in tsa_parameters:
            for k in tsa_period["order"]:
                flow_k = flow_dict[flow].iloc[
                    period_offset
                    + k * tsa_period["timesteps"] : period_offset
                    + (k + 1) * tsa_period["timesteps"]
                ]
                # Disaggregate segmentation
                if "segments" in tsa_period:
                    flow_k = _disaggregate_segmentation(
                        flow_k, tsa_period["segments"], k
                    )
                disaggregated_flow_frames.append(flow_k)
            period_offset += tsa_period["timesteps"] * len(
                tsa_period["occurrences"]
            )
        ts = pd.concat(disaggregated_flow_frames)
        ts.timestep = range(len(ts))
        # Have to set and reset index, as interpolation in pandas<2.1.0
        # cannot handle NaNs in the index
        ts = ts.set_index("timestep")
        flow_dict[flow] = ts.interpolate(method="pad").reset_index("timestep")

    # Add storage SOC flows:
    for storage, soc in storages.items():
        flow_dict[(storage, None)] = _calculate_soc_from_inter_and_intra_soc(
            soc, storage, tsa_parameters
        )
    # Add multiplexer boolean actives values:
    for multiplexer, values in multiplexer.items():
        flow_dict[(multiplexer, None)] = _calculate_multiplexer_actives(
            values, multiplexer, tsa_parameters
        )
    # Add periodic values (they get extracted in period extraction fct)
    for key, data in periodic_dict.items():
        flow_dict[key] = pd.concat([flow_dict[key], data])

    return flow_dict


def _disaggregate_segmentation(
    df: pd.DataFrame,
    segments: Dict[Tuple[int, int], int],
    current_period: int,
) -> pd.DataFrame:
    """Disaggregate segmentation

    For each segment, values are reindexed by segment length, holding None
    values which are interpolated in a later step (as storages need linear
    interpolation while flows need padded interpolation).

    Parameters
    ----------
    df : pd.Dataframe
        holding values for each segment
    segments : Dict[Tuple[int, int], int]
        Segmentation dict from TSAM, holding segmentation length for each
        timestep in each typical period
    current_period: int
        Typical period the data belongs to, needed to extract related
        segments

    Returns
    -------
    pd.Dataframe
        holding values for each timestep instead of each segment.
        Added timesteps contain None values and are interpolated later.
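
    Examples
    --------
    A minimal sketch: two segments of lengths 2 and 1 in typical period 0
    are stretched to three timesteps, with the gap left as NaN:

    >>> frame = pd.DataFrame({"value": [1.0, 2.0]})
    >>> segments = {(0, 0): 2, (0, 1): 1}
    >>> _disaggregate_segmentation(frame, segments, 0)["value"].tolist()
    [1.0, nan, 2.0]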
    """
    current_segments = list(
        v for ((k, s), v) in segments.items() if k == current_period
    )
    df.index = range(len(current_segments))
    segmented_index = itertools.chain.from_iterable(
        [i] + list(itertools.repeat(None, s - 1))
        for i, s in enumerate(current_segments)
    )
    disaggregated_data = df.reindex(segmented_index)
    return disaggregated_data


def _calculate_soc_from_inter_and_intra_soc(soc, storage, tsa_parameters):
    """Calculate resulting SOC from inter and intra SOC flows"""
    soc_frames = []
    i_offset = 0
    t_offset = 0
    for p, tsa_period in enumerate(tsa_parameters):
        for i, k in enumerate(tsa_period["order"]):
            inter_value = soc["inter"].iloc[i_offset + i]["value"]
            # Self-discharge has to be taken into account for calculating
            # inter SOC for each timestep in cluster
            t0 = t_offset + i * tsa_period["timesteps"]
            # Add last timesteps of simulation in order to interpolate SOC
            # for last segment correctly:
            is_last_timestep = (
                p == len(tsa_parameters) - 1
                and i == len(tsa_period["order"]) - 1
            )
            timesteps = (
                tsa_period["timesteps"] + 1
                if is_last_timestep
                else tsa_period["timesteps"]
            )
            inter_series = (
                pd.Series(
                    itertools.accumulate(
                        (
                            (
                                (1 - storage.loss_rate[t])
                                ** tsa_period["segments"][(k, t - t0)]
                                if "segments" in tsa_period
                                else 1 - storage.loss_rate[t]
                            )
                            for t in range(
                                t0,
                                t0 + timesteps - 1,
                            )
                        ),
                        operator.mul,
                        initial=1,
                    )
                )
                * inter_value
            )
            intra_series = soc["intra"][(p, k)].iloc[0:timesteps]
            soc_frame = pd.DataFrame(
                # Add raw values; aligned indexes would produce NaNs
                intra_series["value"].values + inter_series.values,
                columns=["value"],
            )

            # Disaggregate segmentation
            if "segments" in tsa_period:
                soc_disaggregated = _disaggregate_segmentation(
                    soc_frame[:-1] if is_last_timestep else soc_frame,
                    tsa_period["segments"],
                    k,
                )
                if is_last_timestep:
                    soc_disaggregated.loc[len(soc_disaggregated)] = (
                        soc_frame.iloc[-1]
                    )
                soc_frame = soc_disaggregated

            soc_frames.append(soc_frame)
        i_offset += len(tsa_period["order"])
        t_offset += i_offset * tsa_period["timesteps"]
    soc_ts = pd.concat(soc_frames)
    soc_ts["variable_name"] = "soc"
    soc_ts["timestep"] = range(len(soc_ts))

    # Disaggregate segments by linear interpolation and remove
    # last timestep afterwards (only needed for interpolation)
    interpolated_soc = soc_ts.interpolate()
    return interpolated_soc.iloc[:-1]


def _calculate_multiplexer_actives(values, multiplexer, tsa_parameters):
    """Calculate multiplexer actives"""
    actives_frames = []
    for p, tsa_period in enumerate(tsa_parameters):
        for i, k in enumerate(tsa_period["order"]):
            timesteps = tsa_period["timesteps"]
            actives_frames.append(
                pd.DataFrame(
                    values[(p, k)].iloc[0:timesteps], columns=["value"]
                )
            )
    actives_frames_ts = pd.concat(actives_frames)
    actives_frames_ts["variable_name"] = values[(p, k)][
        "variable_name"
    ].values[0]
    actives_frames_ts["timestep"] = range(len(actives_frames_ts))
    return actives_frames_ts


def _get_storage_soc_flows_and_keys(flow_dict):
    """Detect storage flows in flow dict"""
    storages = {}
    storage_keys = []
    for oemof_tuple, data in flow_dict.items():
        if not isinstance(oemof_tuple[0], GenericStorage):
            continue  # Skip components other than Storage
        if oemof_tuple[1] is not None and not isinstance(oemof_tuple[1], int):
            continue  # Skip storage output flows

        # Here we have either inter or intra storage index,
        # depending on oemof tuple length
        storage_keys.append(oemof_tuple)
        if oemof_tuple[0] not in storages:
            storages[oemof_tuple[0]] = {"inter": 0, "intra": {}}
        if len(oemof_tuple) == 2:
            # Must be filtered for variable name "storage_content_inter",
            # otherwise "init_content" variable (in non-multi-period
            # approach) interferes with SOC results
            storages[oemof_tuple[0]]["inter"] = data[
                data["variable_name"] == "storage_content_inter"
            ]
        if len(oemof_tuple) == 3:
            storages[oemof_tuple[0]]["intra"][
                (oemof_tuple[1], oemof_tuple[2])
            ] = data
    return storages, storage_keys


def _get_multiplexer_flows_and_keys(flow_dict):
    """Detect multiplexer flows in flow dict"""
    multiplexer = {}
    multiplexer_keys = []
    for oemof_tuple, data in flow_dict.items():
        if oemof_tuple[1] is not None and not isinstance(oemof_tuple[1], int):
            continue
        if "multiplexer_active" in data["variable_name"].values[0]:
            multiplexer.setdefault(oemof_tuple[0], {})
            multiplexer_keys.append(oemof_tuple)
            multiplexer[oemof_tuple[0]][
                (oemof_tuple[1], oemof_tuple[2])
            ] = data
    return multiplexer, multiplexer_keys


def _disaggregate_tsa_timeindex(period_index, tsa_parameters):
    """Disaggregate aggregated period timeindex by using TSA parameters"""
    return pd.date_range(
        start=period_index[0],
        periods=tsa_parameters["timesteps_per_period"]
        * len(tsa_parameters["order"]),
        freq=period_index.freq,
    )


def convert_keys_to_strings(result, keep_none_type=False):
    """
    Convert the dictionary keys to strings.

    All (tuple) keys of the result object e.g. results[(pp1, bus1)] are
    converted into strings that represent the object labels
    e.g. results[('pp1','bus1')].
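
    Examples
    --------
    A minimal sketch with plain keys (real keys hold solph nodes):

    >>> convert_keys_to_strings({(1, None): "data"}, keep_none_type=True)
    {('1', None): 'data'}
    >>> convert_keys_to_strings({(1, None): "data"})
    {('1', 'None'): 'data'}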
    """
    if keep_none_type:
        converted = {
            (
                tuple([str(e) if e is not None else None for e in k])
                if isinstance(k, tuple)
                else str(k) if k is not None else None
            ): v
            for k, v in result.items()
        }
    else:
        converted = {
            tuple(map(str, k)) if isinstance(k, tuple) else str(k): v
            for k, v in result.items()
        }
    return converted


def meta_results(om, undefined=False):
    """
    Fetch some metadata from the Solver. Feel free to add more keys.

    Valid keys of the resulting dictionary are: 'objective', 'problem',
    'solver'.

    Parameters
    ----------
    om : oemof.solph.Model
        A solved Model.
    undefined : bool
        By default (False) only defined keys can be found in the dictionary.
        Set to True to get also the undefined keys.

    Returns
    -------
    dict
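
    Examples
    --------
    A sketch of the expected shape (assuming a solved model ``om``; not
    executable as a doctest)::

        meta = meta_results(om)
        sorted(meta.keys())  # ['objective', 'problem', 'solver']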
    """
    meta_res = {"objective": om.objective()}

    for k1 in ["Problem", "Solver"]:
        k1 = k1.lower()
        meta_res[k1] = {}
        for k2, v2 in om.es.results[k1][0].items():
            try:
                if str(om.es.results[k1][0][k2]) == "<undefined>":
                    if undefined:
                        meta_res[k1][k2] = str(om.es.results[k1][0][k2])
                else:
                    meta_res[k1][k2] = om.es.results[k1][0][k2]
            except TypeError:
                if undefined:
                    msg = "Cannot fetch meta results of type {0}"
                    meta_res[k1][k2] = msg.format(
                        type(om.es.results[k1][0][k2])
                    )

    return meta_res


def __separate_attrs(
    system, exclude_attrs, get_flows=False, exclude_none=True
):
    """
    Create a dictionary with flow scalars and series.

    The dictionary is structured with flows as tuples and nested dictionaries
    holding the scalars and series e.g.
    {(node1, node2): {'scalars': {'attr1': scalar, 'attr2': 'text'},
    'sequences': {'attr1': iterable, 'attr2': iterable}}}

    Parameters
    ----------
    system:
        A solved oemof.solph.Model or oemof.solph.EnergySystem
    exclude_attrs: List[str]
        List of additional attributes which shall be excluded from
        parameter dict
    get_flows: bool
        Whether to include flow values or not
    exclude_none: bool
        If set, scalars and sequences containing None values are excluded

    Returns
    -------
    dict
    """

    def detect_scalars_and_sequences(com):
        scalars = {}
        sequences = {}

        default_exclusions = [
            "__",
            "_",
            "registry",
            "inputs",
            "outputs",
            "Label",
            "input",
            "output",
            "constraint_group",
        ]
        # Must be tuple in order to work with `str.startswith()`:
        exclusions = tuple(default_exclusions + exclude_attrs)
        attrs = [
            i
            for i in dir(com)
            if not (i.startswith(exclusions) or callable(getattr(com, i)))
        ]

        for a in attrs:
            attr_value = getattr(com, a)

            # Iterate through investment and add scalars and sequences with
            # "investment" prefix to component data:
            if attr_value.__class__.__name__ == "Investment":
                invest_data = detect_scalars_and_sequences(attr_value)
                scalars.update(
                    {
                        "investment_" + str(k): v
                        for k, v in invest_data["scalars"].items()
                    }
                )
                sequences.update(
                    {
                        "investment_" + str(k): v
                        for k, v in invest_data["sequences"].items()
                    }
                )
                continue

            if isinstance(attr_value, str):
                scalars[a] = attr_value
                continue

            # If the label is a tuple it is iterable, therefore it should be
            # converted to a string. Otherwise, it will be a sequence.
            if a == "label":
                attr_value = str(attr_value)

            if isinstance(attr_value, abc.Iterable):
                sequences[a] = attr_value
            elif isinstance(attr_value, _FakeSequence):
                scalars[a] = attr_value.value
            else:
                scalars[a] = attr_value

        sequences = flatten(sequences)

        com_data = {
            "scalars": scalars,
            "sequences": sequences,
        }
        move_undetected_scalars(com_data)
        if exclude_none:
            remove_nones(com_data)

        com_data = {
            "scalars": pd.Series(com_data["scalars"]),
            "sequences": pd.DataFrame(com_data["sequences"]),
        }
        return com_data

    def move_undetected_scalars(com):
        for ckey, value in list(com["sequences"].items()):
            if isinstance(value, (str, numbers.Number)):
                com["scalars"][ckey] = value
                del com["sequences"][ckey]
            elif isinstance(value, _FakeSequence):
                com["scalars"][ckey] = value.value
                del com["sequences"][ckey]
            elif len(value) == 0:
                del com["sequences"][ckey]

    def remove_nones(com):
        for ckey, value in list(com["scalars"].items()):
            if value is None:
                del com["scalars"][ckey]
        for ckey, value in list(com["sequences"].items()):
            if len(value) == 0 or value[0] is None:
                del com["sequences"][ckey]

    # Check if system is es or om:
    if system.__class__.__name__ == "EnergySystem":
        components = system.flows() if get_flows else system.nodes
    else:
        components = system.flows if get_flows else system.es.nodes

    data = {}
    for com_key in components:
        component = components[com_key] if get_flows else com_key
        component_data = detect_scalars_and_sequences(component)
        comkey = com_key if get_flows else (com_key, None)
        data[comkey] = component_data
    return data


def parameter_as_dict(system, exclude_none=True, exclude_attrs=None):
    """
    Create a result dictionary containing node parameters.

    Results are written into a dictionary of pandas objects where
    a Series holds all scalar values and a dataframe all sequences for nodes
    and flows.
    The dictionary is keyed by flows (n, n) and nodes (n, None), e.g.
    `parameter[(n, n)]['sequences']` or `parameter[(n, n)]['scalars']`.

    Parameters
    ----------
    system: energy_system.EnergySystem
        A populated energy system.
    exclude_none: bool
        If True, all scalars and sequences containing None values are
        excluded
    exclude_attrs: Optional[List[str]]
        Optional list of additional attributes which shall be excluded from
        parameter dict

    Returns
    -------
    dict: Parameters for all nodes and flows
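
    Examples
    --------
    A sketch of typical usage (assuming a populated ``energysystem`` and two
    connected nodes ``node_a`` and ``node_b``; names are placeholders)::

        parameter = parameter_as_dict(energysystem)
        parameter[(node_a, node_b)]["scalars"]    # pd.Series of parameters
        parameter[(node_a, node_b)]["sequences"]  # pd.DataFrame of sequences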
    """

    if exclude_attrs is None:
        exclude_attrs = []

    flow_data = __separate_attrs(
        system, exclude_attrs, get_flows=True, exclude_none=exclude_none
    )
    node_data = __separate_attrs(
        system, exclude_attrs, get_flows=False, exclude_none=exclude_none
    )

    flow_data.update(node_data)
    return flow_data