Conditions   | 31
Total Lines  | 186
Code Lines   | 135
Lines        | 30
Ratio        | 16.13 %
Changes      | 0
Small methods make your code easier to understand, in particular if combined with a good name. Besides, if your method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.
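In DLR_Regions() below, for instance, the comment "Use Log Law to calculate wind speed at 50m height" already names a candidate method. A minimal sketch of that extraction (the helper name calc_wind_speed_50m is made up for illustration, not part of the module):

import numpy as np

def calc_wind_speed_50m(ws_100m, roughness):
    """Scale a 100 m wind speed down to 50 m using the logarithmic wind profile."""
    return ws_100m * (np.log(50 / roughness) / np.log(100 / roughness))

# the commented line inside the flattening loop then becomes:
# ws_50m = calc_wind_speed_50m(ws_100m, rough)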
Commonly applied refactorings include:

- Extract Method
- If many parameters/temporary variables are present: Replace Method with Method Object
Complex classes like data.datasets.calculate_dlr.DLR_Regions() often do a lot of different things. To break such a class down, we need to identify a cohesive component within that class. A common approach to find such a component is to look for fields/methods that share the same prefixes, or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.
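In data.datasets.calculate_dlr one cohesive component that stands out is the NEP 2020 rating table: the four nearly identical if/elif blocks in DLR_Regions() (flagged as duplicated code) differ only in the temperature band they cover. A rough sketch of extracting that table into its own unit, with the factors and band limits copied from the code below; the class name and method are hypothetical:

class NEP2020RatingTable:
    """Map (max temperature in degC, min wind speed in m/s) to an hourly DLR factor."""

    # (upper temperature limit, factors for wind speed < 3, < 4, < 5, < 6, >= 6 m/s)
    BANDS = [
        (5, (1.30, 1.35, 1.45, 1.50, 1.50)),
        (15, (1.20, 1.25, 1.35, 1.45, 1.50)),
        (25, (1.10, 1.15, 1.20, 1.30, 1.40)),
        (35, (1.00, 1.05, 1.10, 1.15, 1.25)),
    ]

    def factor(self, temp_max, wind_min):
        for temp_limit, factors in self.BANDS:
            if temp_max <= temp_limit:
                for position, wind_limit in enumerate((3, 4, 5, 6)):
                    if wind_min < wind_limit:
                        return factors[position]
                return factors[-1]
        return 1.00  # above 35 degC no uprating is applied

For example, factor(12, 4.5) returns 1.35 (the <= 15 degC band, 4-5 m/s bin), matching the branch dlr.iloc[j, 2 + i * 3] = 1.35 in the loop below. The nested conditionals then shrink to a single call per region and hour, and the duplication warnings disappear.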
# Module-level imports this excerpt relies on (the module header is not part of
# the reviewed lines, so the exact import block is an assumption based on usage):
from pathlib import Path

import geopandas as gpd
import numpy as np
import pandas as pd
import rioxarray  # registers the .rio accessor used below
import xarray as xr
from shapely.geometry import Point


def DLR_Regions(weather_year, regions_shape_path):
    """Calculate DLR values for the given regions

    Parameters
    ----------
    weather_year: int, mandatory
        year of the ERA5 weather data used for the calculation
    regions_shape_path: str, mandatory
        path to the shape file with the shape of the regions to analyze

    """
    # load, index and sort shapefile with the 9 regions defined by NEP 2020
    regions = gpd.read_file(regions_shape_path)
    regions = regions.set_index(["Region"])
    regions = regions.sort_values(by=["Region"])

    # The data downloaded using Atlite is loaded in 'weather_data_raw'.
    file_name = f"germany-{weather_year}-era5.nc"
    weather_info_path = Path(".") / "cutouts" / file_name
    weather_data_raw = xr.open_mfdataset(str(weather_info_path))
    weather_data_raw = weather_data_raw.rio.write_crs(4326)
    weather_data_raw = weather_data_raw.rio.clip_box(
        minx=5.5, miny=47, maxx=15.5, maxy=55.5
    )
    wind_speed_raw = weather_data_raw.wnd100m.values
    temperature_raw = weather_data_raw.temperature.values
    roughness_raw = weather_data_raw.roughness.values
    index = weather_data_raw.indexes._indexes
    # 'weather_data_raw' has 3 dimensions (time, y, x); all the relevant data
    # is flattened into the 2-dimensional array 'weather_data'.
    weather_data = np.zeros(shape=(wind_speed_raw.size, 5))
    count = 0
    for hour in range(index["time"].size):
        for row in range(index["y"].size):
            for column in range(index["x"].size):
                rough = roughness_raw[hour, row, column]
                ws_100m = wind_speed_raw[hour, row, column]
                # Use Log Law to calculate wind speed at 50m height
                ws_50m = ws_100m * (np.log(50 / rough) / np.log(100 / rough))
                weather_data[count, 0] = hour
                weather_data[count, 1] = index["y"][row]
                weather_data[count, 2] = index["x"][column]
                weather_data[count, 3] = ws_50m
                weather_data[count, 4] = (
                    temperature_raw[hour, row, column] - 273.15
                )
                count += 1

    weather_data = pd.DataFrame(
        weather_data, columns=["hour", "lat", "lon", "wind_s", "temp"]
    )
    region_selec = weather_data[0 : index["x"].size * index["y"].size].copy()
    region_selec["geom"] = region_selec.apply(
        lambda x: Point(x["lon"], x["lat"]), axis=1
    )
    region_selec = gpd.GeoDataFrame(region_selec)
    region_selec = region_selec.set_geometry("geom")
    region_selec["region"] = np.zeros(index["x"].size * index["y"].size)

    # Mask weather information for each region defined by NEP 2020
    for reg in regions.index:
        weather_region = gpd.clip(region_selec, regions.loc[reg][0])
        region_selec["region"][
            region_selec.isin(weather_region).any(axis=1)
        ] = reg

    weather_data["region"] = (
        region_selec["region"].tolist() * index["time"].size
    )
    weather_data = weather_data[weather_data["region"] != 0]
    # Create data frame to save results (min wind speed, max temperature and
    # DLR factor per region over the 8760 hours of a year)
    time = pd.date_range(
        f"{weather_year}-01-01", f"{weather_year}-12-31 23:00:00", freq="H"
    )
    # time = time.transpose()
    dlr = pd.DataFrame(
        0,
        columns=[
            "R1-Wind_min",
            "R1-Temp_max",
            "R1-DLR",
            "R2-Wind_min",
            "R2-Temp_max",
            "R2-DLR",
            "R3-Wind_min",
            "R3-Temp_max",
            "R3-DLR",
            "R4-Wind_min",
            "R4-Temp_max",
            "R4-DLR",
            "R5-Wind_min",
            "R5-Temp_max",
            "R5-DLR",
            "R6-Wind_min",
            "R6-Temp_max",
            "R6-DLR",
            "R7-Wind_min",
            "R7-Temp_max",
            "R7-DLR",
            "R8-Wind_min",
            "R8-Temp_max",
            "R8-DLR",
            "R9-Wind_min",
            "R9-Temp_max",
            "R9-DLR",
        ],
        index=time,
    )
    # Calculate and save min wind speed and max temperature in a dataframe.
    # Since the dataframe generated by the function era5.weather_df_from_era5()
    # is sorted by date, it is faster to calculate the hourly results using
    # blocks of data defined by "step", instead of using a filter or a search
    # function.
    for reg, df in weather_data.groupby("region"):
        for t in range(0, len(time)):
            step = df.shape[0] / len(time)
            low_limit = int(t * step)
            up_limit = int(step * (t + 1))
            dlr.iloc[t, 0 + int(reg - 1) * 3] = min(
                df.iloc[low_limit:up_limit, 3]
            )
            dlr.iloc[t, 1 + int(reg - 1) * 3] = max(
                df.iloc[low_limit:up_limit, 4]
            )
    # The next loop uses the min wind speed and max temperature calculated
    # previously to define the hourly DLR for each region based on the table
    # given by NEP 2020, page 31.
    for i in range(0, len(regions)):
        for j in range(0, len(time)):
            if dlr.iloc[j, 1 + i * 3] <= 5:
                if dlr.iloc[j, 0 + i * 3] < 3:
                    dlr.iloc[j, 2 + i * 3] = 1.30
                elif dlr.iloc[j, 0 + i * 3] < 4:
                    dlr.iloc[j, 2 + i * 3] = 1.35
                elif dlr.iloc[j, 0 + i * 3] < 5:
                    dlr.iloc[j, 2 + i * 3] = 1.45
                else:
                    dlr.iloc[j, 2 + i * 3] = 1.50
            elif dlr.iloc[j, 1 + i * 3] <= 15:
                if dlr.iloc[j, 0 + i * 3] < 3:
                    dlr.iloc[j, 2 + i * 3] = 1.20
                elif dlr.iloc[j, 0 + i * 3] < 4:
                    dlr.iloc[j, 2 + i * 3] = 1.25
                elif dlr.iloc[j, 0 + i * 3] < 5:
                    dlr.iloc[j, 2 + i * 3] = 1.35
                elif dlr.iloc[j, 0 + i * 3] < 6:
                    dlr.iloc[j, 2 + i * 3] = 1.45
                else:
                    dlr.iloc[j, 2 + i * 3] = 1.50
            elif dlr.iloc[j, 1 + i * 3] <= 25:
                if dlr.iloc[j, 0 + i * 3] < 3:
                    dlr.iloc[j, 2 + i * 3] = 1.10
                elif dlr.iloc[j, 0 + i * 3] < 4:
                    dlr.iloc[j, 2 + i * 3] = 1.15
                elif dlr.iloc[j, 0 + i * 3] < 5:
                    dlr.iloc[j, 2 + i * 3] = 1.20
                elif dlr.iloc[j, 0 + i * 3] < 6:
                    dlr.iloc[j, 2 + i * 3] = 1.30
                else:
                    dlr.iloc[j, 2 + i * 3] = 1.40
            elif dlr.iloc[j, 1 + i * 3] <= 35:
                if dlr.iloc[j, 0 + i * 3] < 3:
                    dlr.iloc[j, 2 + i * 3] = 1.00
                elif dlr.iloc[j, 0 + i * 3] < 4:
                    dlr.iloc[j, 2 + i * 3] = 1.05
                elif dlr.iloc[j, 0 + i * 3] < 5:
                    dlr.iloc[j, 2 + i * 3] = 1.10
                elif dlr.iloc[j, 0 + i * 3] < 6:
                    dlr.iloc[j, 2 + i * 3] = 1.15
                else:
                    dlr.iloc[j, 2 + i * 3] = 1.25
            else:
                dlr.iloc[j, 2 + i * 3] = 1.00
    DLR_hourly_df_dic = {}
    for i in dlr.columns[range(2, 29, 3)]:  # columns with DLR values
        DLR_hourly_df_dic[i] = dlr[i].values

    dlr_hourly = pd.DataFrame(index=time)
    for i in range(len(regions)):
        dlr_hourly["Reg_" + str(i + 1)] = dlr.iloc[:, 3 * i + 2]

    return DLR_hourly_df_dic, dlr_hourly
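As the return statement shows, the function hands back a dict of per-region hourly DLR arrays plus a DataFrame indexed by the hourly time range. A minimal calling sketch; the year and shapefile path are placeholders, and the ERA5 cutout germany-<weather_year>-era5.nc must already exist under ./cutouts:

# Hypothetical call; the arguments are placeholders, not taken from the module.
dlr_dict, dlr_hourly = DLR_Regions(2011, "regions_nep2020.shp")

dlr_hourly.head()        # columns "Reg_1" ... "Reg_9", one DLR factor per hour
dlr_dict["R1-DLR"][:24]  # first day of hourly DLR factors for region 1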