| Metric      | Value |
|-------------|-------|
| Conditions  | 13    |
| Total Lines | 213   |
| Code Lines  | 126   |
| Lines       | 0     |
| Ratio       | 0 %   |
| Changes     | 0     |
Small methods make your code easier to understand, in particular if combined with a good name. Besides, if your method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.
Commonly applied refactorings include:

- Extract Method

If many parameters/temporary variables are present:

- Replace Temp with Query
- Introduce Parameter Object
- Replace Method with Method Object
Complex code units like data.datasets.heat_demand_timeseries.idp_pool.idp_pool_generator() often do a lot of different things. To break such a unit down, we need to identify a cohesive component within it. A common approach to find such a component is to look for fields/methods (or, in a function, locals/nested helpers) that share the same prefixes, or suffixes.
Once you have determined the pieces that belong together, you can apply the Extract Class or Extract Method refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.
1 | from datetime import datetime |
||
90 | def idp_pool_generator(): |
||
91 | """ |
||
92 | Create List of Dataframes for each temperature class for each household stock |
||
93 | |||
94 | Returns |
||
95 | ------- |
||
96 | list |
||
97 | List of dataframes with each element representing a dataframe |
||
98 | for every combination of household stock and temperature class |
||
99 | |||
100 | """ |
||
101 | path = os.path.join( |
||
102 | os.getcwd(), |
||
103 | "data_bundle_egon_data", |
||
104 | "household_heat_demand_profiles", |
||
105 | "household_heat_demand_profiles.hdf5", |
||
106 | ) |
||
107 | index = pd.date_range(datetime(2011, 1, 1, 0), periods=8760, freq="H") |
||
108 | |||
109 | sfh = pd.read_hdf(path, key="SFH") |
||
110 | mfh = pd.read_hdf(path, key="MFH") |
||
111 | temp = pd.read_hdf(path, key="temperature") |
||
112 | |||
113 | globals()["luebeck_sfh"] = sfh[sfh.filter(like="Luebeck").columns] |
||
114 | globals()["luebeck_mfh"] = mfh[mfh.filter(like="Luebeck").columns] |
||
115 | |||
116 | globals()["kassel_sfh"] = sfh[sfh.filter(like="Kassel").columns] |
||
117 | globals()["kassel_mfh"] = mfh[mfh.filter(like="Kassel").columns] |
||
118 | |||
119 | globals()["wuerzburg_sfh"] = sfh[sfh.filter(like="Wuerzburg").columns] |
||
120 | globals()["wuerzburg_mfh"] = mfh[mfh.filter(like="Wuerzburg").columns] |
||
121 | |||
122 | globals()["chemnitz_sfh"] = sfh[sfh.filter(like="Chemnitz").columns] |
||
123 | globals()["chemnitz_mfh"] = mfh[mfh.filter(like="Chemnitz").columns] |
||
124 | |||
125 | temp_daily = pd.DataFrame() |
||
126 | for column in temp.columns: |
||
127 | temp_current = ( |
||
128 | temp[column] |
||
129 | .resample("D") |
||
130 | .mean() |
||
131 | .reindex(temp.index) |
||
132 | .fillna(method="ffill") |
||
133 | .fillna(method="bfill") |
||
134 | ) |
||
135 | temp_current_geom = temp_current |
||
136 | temp_daily = pd.concat([temp_daily, temp_current_geom], axis=1) |
||
137 | |||
138 | def round_temperature(station): |
||
139 | """ |
||
140 | Description: Create dataframe to assign temperature class to each day of TRY climate zones |
||
141 | |||
142 | Parameters |
||
143 | ---------- |
||
144 | station : str |
||
145 | Name of the location |
||
146 | |||
147 | Returns |
||
148 | ------- |
||
149 | temp_class : pandas.DataFrame |
||
150 | Each day assignd to their respective temperature class |
||
151 | |||
152 | """ |
||
153 | intervals = temperature_classes() |
||
154 | temperature_rounded = [] |
||
155 | for i in temp_daily.loc[:, station]: |
||
156 | temperature_rounded.append(ceil(i)) |
||
157 | temperature_interval = [] |
||
158 | for i in temperature_rounded: |
||
159 | temperature_interval.append(intervals[i]) |
||
160 | temp_class_dic = {f"Class_{station}": temperature_interval} |
||
161 | temp_class = pd.DataFrame.from_dict(temp_class_dic) |
||
162 | return temp_class |
||
163 | |||
164 | temp_class_luebeck = round_temperature("Luebeck") |
||
165 | temp_class_kassel = round_temperature("Kassel") |
||
166 | temp_class_wuerzburg = round_temperature("Wuerzburg") |
||
167 | temp_class_chemnitz = round_temperature("Chemnitz") |
||
168 | temp_class = pd.concat( |
||
169 | [ |
||
170 | temp_class_luebeck, |
||
171 | temp_class_kassel, |
||
172 | temp_class_wuerzburg, |
||
173 | temp_class_chemnitz, |
||
174 | ], |
||
175 | axis=1, |
||
176 | ) |
||
177 | temp_class.set_index(index, inplace=True) |
||
178 | |||
179 | def unique_classes(station): |
||
180 | """ |
||
181 | |||
182 | Returns |
||
183 | ------- |
||
184 | classes : list |
||
185 | Collection of temperature classes for each location |
||
186 | |||
187 | """ |
||
188 | classes = [] |
||
189 | for x in temp_class[f"Class_{station}"]: |
||
190 | if x not in classes: |
||
191 | classes.append(x) |
||
192 | classes.sort() |
||
193 | return classes |
||
194 | |||
195 | globals()["luebeck_classes"] = unique_classes("Luebeck") |
||
196 | globals()["kassel_classes"] = unique_classes("Kassel") |
||
197 | globals()["wuerzburg_classes"] = unique_classes("Wuerzburg") |
||
198 | globals()["chemnitz_classes"] = unique_classes("Chemnitz") |
||
199 | |||
200 | stock = ["MFH", "SFH"] |
||
201 | class_list = [2, 3, 4, 5, 6, 7, 8, 9, 10] |
||
202 | for s in stock: |
||
203 | for m in class_list: |
||
204 | globals()[f"idp_collection_class_{m}_{s}"] = pd.DataFrame( |
||
205 | index=range(24) |
||
206 | ) |
||
207 | |||
208 | def splitter(station, household_stock): |
||
209 | """ |
||
210 | |||
211 | |||
212 | Parameters |
||
213 | ---------- |
||
214 | station : str |
||
215 | Name of the location |
||
216 | household_stock : str |
||
217 | SFH or MFH |
||
218 | |||
219 | Returns |
||
220 | ------- |
||
221 | None. |
||
222 | |||
223 | """ |
||
224 | this_classes = globals()["{}_classes".format(station.lower())] |
||
225 | for classes in this_classes: |
||
226 | this_itteration = globals()[ |
||
227 | "{}_{}".format(station.lower(), household_stock.lower()) |
||
228 | ].loc[temp_class["Class_{}".format(station)] == classes, :] |
||
229 | days = list(range(int(len(this_itteration) / 24))) |
||
230 | for day in days: |
||
231 | this_day = this_itteration[day * 24 : (day + 1) * 24] |
||
232 | this_day = this_day.reset_index(drop=True) |
||
233 | globals()[ |
||
234 | f"idp_collection_class_{classes}_{household_stock}" |
||
235 | ] = pd.concat( |
||
236 | [ |
||
237 | globals()[ |
||
238 | f"idp_collection_class_{classes}_{household_stock}" |
||
239 | ], |
||
240 | this_day, |
||
241 | ], |
||
242 | axis=1, |
||
243 | ignore_index=True, |
||
244 | ) |
||
245 | |||
246 | splitter("Luebeck", "SFH") |
||
247 | splitter("Kassel", "SFH") |
||
248 | splitter("Wuerzburg", "SFH") |
||
249 | splitter("Chemnitz", "SFH") |
||
250 | splitter("Luebeck", "MFH") |
||
251 | splitter("Kassel", "MFH") |
||
252 | splitter("Chemnitz", "MFH") |
||
253 | |||
254 | def pool_normalize(x): |
||
255 | """ |
||
256 | |||
257 | |||
258 | Parameters |
||
259 | ---------- |
||
260 | x : pandas.Series |
||
261 | 24-hour profiles of IDP pool |
||
262 | |||
263 | Returns |
||
264 | ------- |
||
265 | TYPE : pandas.Series |
||
266 | Normalized to their daily total |
||
267 | |||
268 | """ |
||
269 | if x.sum() != 0: |
||
270 | c = x.sum() |
||
271 | return x / c |
||
272 | else: |
||
273 | return x |
||
274 | |||
275 | stock = ["MFH", "SFH"] |
||
276 | class_list = [2, 3, 4, 5, 6, 7, 8, 9, 10] |
||
277 | for s in stock: |
||
278 | for m in class_list: |
||
279 | df_name = globals()[f"idp_collection_class_{m}_{s}"] |
||
280 | globals()[f"idp_collection_class_{m}_{s}_norm"] = df_name.apply( |
||
281 | pool_normalize |
||
282 | ) |
||
283 | |||
284 | return [ |
||
285 | idp_collection_class_2_SFH_norm, |
||
286 | idp_collection_class_3_SFH_norm, |
||
287 | idp_collection_class_4_SFH_norm, |
||
288 | idp_collection_class_5_SFH_norm, |
||
289 | idp_collection_class_6_SFH_norm, |
||
290 | idp_collection_class_7_SFH_norm, |
||
291 | idp_collection_class_8_SFH_norm, |
||
292 | idp_collection_class_9_SFH_norm, |
||
293 | idp_collection_class_10_SFH_norm, |
||
294 | idp_collection_class_2_MFH_norm, |
||
295 | idp_collection_class_3_MFH_norm, |
||
296 | idp_collection_class_4_MFH_norm, |
||
297 | idp_collection_class_5_MFH_norm, |
||
298 | idp_collection_class_6_MFH_norm, |
||
299 | idp_collection_class_7_MFH_norm, |
||
300 | idp_collection_class_8_MFH_norm, |
||
301 | idp_collection_class_9_MFH_norm, |
||
302 | idp_collection_class_10_MFH_norm, |
||
303 | ] |
||
680 |