| Metric | Value |
| --- | --- |
| Conditions | 13 |
| Total Lines | 213 |
| Code Lines | 126 |
| Duplicated Lines | 0 |
| Duplication Ratio | 0 % |
| Changes | 0 |
Small methods make your code easier to understand, in particular if combined with a good name. Besides, if your method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.
Commonly applied refactorings include:

- Extract Method

If many parameters/temporary variables are present:

- Replace Temp with Query
- Introduce Parameter Object
- Preserve Whole Object
- Replace Method with Method Object
Complex functions like data.datasets.heat_demand_timeseries.idp_pool.idp_pool_generator() often do a lot of different things. To break such a function down, we need to identify a cohesive component within it. A common approach to find such a component is to look for variables/statements that share the same prefixes, or that are used together.
Once you have determined the statements that belong together, you can apply the Extract Method refactoring. If the extracted component makes sense as its own class, Extract Class is also a candidate, and is often faster.
1 | from datetime import datetime |
||
def idp_pool_generator():
    """
    Create a list of dataframes of 24-hour heat demand profiles (IDP pool),
    one dataframe per combination of household stock (SFH/MFH) and
    temperature class.

    Hourly heat demand profiles and matching temperature series for four
    TRY stations (Luebeck, Kassel, Wuerzburg, Chemnitz) are read from the
    data-bundle HDF5 file, each day is assigned a temperature class, the
    profiles are cut into 24-hour slices per class, and every slice is
    normalized to its daily total.

    Returns
    -------
    list
        List of dataframes with each element representing a dataframe
        for every combination of household stock and temperature class
        (classes 2-10; the nine SFH dataframes first, then the nine MFH
        dataframes — 18 in total).
    """
    # The profile bundle is resolved relative to the current working
    # directory — the process must be started from the project root.
    path = os.path.join(
        os.getcwd(),
        "data_bundle_egon_data",
        "household_heat_demand_profiles",
        "household_heat_demand_profiles.hdf5",
    )
    # Hourly index for the (non-leap) weather year 2011.
    # NOTE(review): freq="H" is deprecated in recent pandas in favour of
    # freq="h" — left unchanged here.
    index = pd.date_range(datetime(2011, 1, 1, 0), periods=8760, freq="H")

    sfh = pd.read_hdf(path, key="SFH")  # single-family house profiles
    mfh = pd.read_hdf(path, key="MFH")  # multi-family house profiles
    temp = pd.read_hdf(path, key="temperature")  # station temperatures

    # The per-station profile slices are published through globals() so
    # the nested helpers below (and presumably sibling functions in this
    # module) can look them up by name — TODO confirm before refactoring
    # these into locals.
    globals()["luebeck_sfh"] = sfh[sfh.filter(like="Luebeck").columns]
    globals()["luebeck_mfh"] = mfh[mfh.filter(like="Luebeck").columns]

    globals()["kassel_sfh"] = sfh[sfh.filter(like="Kassel").columns]
    globals()["kassel_mfh"] = mfh[mfh.filter(like="Kassel").columns]

    globals()["wuerzburg_sfh"] = sfh[sfh.filter(like="Wuerzburg").columns]
    globals()["wuerzburg_mfh"] = mfh[mfh.filter(like="Wuerzburg").columns]

    globals()["chemnitz_sfh"] = sfh[sfh.filter(like="Chemnitz").columns]
    globals()["chemnitz_mfh"] = mfh[mfh.filter(like="Chemnitz").columns]

    # Broadcast each station's daily mean temperature back onto the hourly
    # index: resample("D").mean() leaves the mean at midnight, reindex
    # restores the hourly stamps, ffill copies the daily value across the
    # day, and bfill covers any leading NaNs.
    # NOTE(review): fillna(method=...) is deprecated in recent pandas
    # (use .ffill()/.bfill()) — left unchanged here.
    temp_daily = pd.DataFrame()
    for column in temp.columns:
        temp_current = (
            temp[column]
            .resample("D")
            .mean()
            .reindex(temp.index)
            .fillna(method="ffill")
            .fillna(method="bfill")
        )
        temp_current_geom = temp_current  # plain alias; no transformation
        temp_daily = pd.concat([temp_daily, temp_current_geom], axis=1)

    def round_temperature(station):
        """
        Assign a temperature class to each timestamp of a TRY station.

        Parameters
        ----------
        station : str
            Name of the location.

        Returns
        -------
        temp_class : pandas.DataFrame
            Single column ``Class_<station>`` holding the temperature
            class of every (hourly) timestamp.
        """
        # intervals presumably maps a rounded-up temperature (°C) to its
        # class id — verify against temperature_classes().
        intervals = temperature_classes()
        temperature_rounded = []
        for i in temp_daily.loc[:, station]:
            temperature_rounded.append(ceil(i))
        temperature_interval = []
        for i in temperature_rounded:
            temperature_interval.append(intervals[i])
        temp_class_dic = {f"Class_{station}": temperature_interval}
        temp_class = pd.DataFrame.from_dict(temp_class_dic)
        return temp_class

    temp_class_luebeck = round_temperature("Luebeck")
    temp_class_kassel = round_temperature("Kassel")
    temp_class_wuerzburg = round_temperature("Wuerzburg")
    temp_class_chemnitz = round_temperature("Chemnitz")
    # One class column per station, re-indexed to the hourly 2011 index.
    temp_class = pd.concat(
        [
            temp_class_luebeck,
            temp_class_kassel,
            temp_class_wuerzburg,
            temp_class_chemnitz,
        ],
        axis=1,
    )
    temp_class.set_index(index, inplace=True)

    def unique_classes(station):
        """
        Collect the distinct temperature classes observed at a station.

        Parameters
        ----------
        station : str
            Name of the location.

        Returns
        -------
        classes : list
            Sorted list of the temperature classes occurring at this
            location.
        """
        classes = []
        for x in temp_class[f"Class_{station}"]:
            if x not in classes:
                classes.append(x)
        classes.sort()
        return classes

    globals()["luebeck_classes"] = unique_classes("Luebeck")
    globals()["kassel_classes"] = unique_classes("Kassel")
    globals()["wuerzburg_classes"] = unique_classes("Wuerzburg")
    globals()["chemnitz_classes"] = unique_classes("Chemnitz")

    # Create one empty 24-row collector dataframe per (class, stock)
    # combination; splitter() below appends day-columns to them.
    stock = ["MFH", "SFH"]
    class_list = [2, 3, 4, 5, 6, 7, 8, 9, 10]
    for s in stock:
        for m in class_list:
            globals()[f"idp_collection_class_{m}_{s}"] = pd.DataFrame(
                index=range(24)
            )

    def splitter(station, household_stock):
        """
        Slice a station's profiles into 24-hour days per temperature class.

        Selects the hours whose temperature class matches, cuts them into
        consecutive 24-hour slices, and appends each slice as a new column
        to the matching ``idp_collection_class_<class>_<stock>`` global.

        Parameters
        ----------
        station : str
            Name of the location
        household_stock : str
            SFH or MFH

        Returns
        -------
        None.
        """
        this_classes = globals()["{}_classes".format(station.lower())]
        for classes in this_classes:
            # All hourly profile rows of this station/stock that fall in
            # the current temperature class.
            this_itteration = globals()[
                "{}_{}".format(station.lower(), household_stock.lower())
            ].loc[temp_class["Class_{}".format(station)] == classes, :]
            days = list(range(int(len(this_itteration) / 24)))
            for day in days:
                this_day = this_itteration[day * 24 : (day + 1) * 24]
                this_day = this_day.reset_index(drop=True)
                globals()[
                    f"idp_collection_class_{classes}_{household_stock}"
                ] = pd.concat(
                    [
                        globals()[
                            f"idp_collection_class_{classes}_{household_stock}"
                        ],
                        this_day,
                    ],
                    axis=1,
                    ignore_index=True,
                )

    splitter("Luebeck", "SFH")
    splitter("Kassel", "SFH")
    splitter("Wuerzburg", "SFH")
    splitter("Chemnitz", "SFH")
    splitter("Luebeck", "MFH")
    splitter("Kassel", "MFH")
    # NOTE(review): splitter("Wuerzburg", "MFH") is missing here although
    # every other station/stock combination is processed — confirm whether
    # this omission is intentional before "fixing" it, as adding the call
    # changes the published MFH pools.
    splitter("Chemnitz", "MFH")

    def pool_normalize(x):
        """
        Normalize a 24-hour profile to its daily total.

        Parameters
        ----------
        x : pandas.Series
            24-hour profiles of IDP pool

        Returns
        -------
        TYPE : pandas.Series
            Normalized to their daily total; all-zero profiles are
            returned unchanged to avoid division by zero.
        """
        if x.sum() != 0:
            c = x.sum()
            return x / c
        else:
            return x

    # Normalize every collector column; results land in new
    # idp_collection_class_<m>_<s>_norm globals.
    stock = ["MFH", "SFH"]
    class_list = [2, 3, 4, 5, 6, 7, 8, 9, 10]
    for s in stock:
        for m in class_list:
            df_name = globals()[f"idp_collection_class_{m}_{s}"]
            globals()[f"idp_collection_class_{m}_{s}_norm"] = df_name.apply(
                pool_normalize
            )

    # The names below resolve through the globals() assignments made in
    # the loop above.
    return [
        idp_collection_class_2_SFH_norm,
        idp_collection_class_3_SFH_norm,
        idp_collection_class_4_SFH_norm,
        idp_collection_class_5_SFH_norm,
        idp_collection_class_6_SFH_norm,
        idp_collection_class_7_SFH_norm,
        idp_collection_class_8_SFH_norm,
        idp_collection_class_9_SFH_norm,
        idp_collection_class_10_SFH_norm,
        idp_collection_class_2_MFH_norm,
        idp_collection_class_3_MFH_norm,
        idp_collection_class_4_MFH_norm,
        idp_collection_class_5_MFH_norm,
        idp_collection_class_6_MFH_norm,
        idp_collection_class_7_MFH_norm,
        idp_collection_class_8_MFH_norm,
        idp_collection_class_9_MFH_norm,
        idp_collection_class_10_MFH_norm,
    ]
||
685 |