| Conditions | 14 |
| Total Lines | 118 |
| Code Lines | 67 |
| Lines | 0 |
| Ratio | 0 % |
| Changes | 0 | ||
Small methods make your code easier to understand, in particular if combined with a good name. Besides, if your method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.
Commonly applied refactorings include:
If many parameters or temporary variables are present, consider Replace Temp with Query, Introduce Parameter Object, or Preserve Whole Object to reduce them before extracting.
Complex methods like data.datasets.calculate_dlr.dlr() often do a lot of different things. To break such a method down, we need to identify cohesive components within it. A common approach to find such a component is to look for variables and statements that share the same prefixes, suffixes, or purpose.
Once you have determined the statements that belong together, you can apply the Extract Method refactoring. If the extracted component carries its own state and makes sense as a class of its own, Extract Class is also a candidate.
| 1 | """ |
||
def dlr():
    """Calculate DLR and assign values to each line in the db.

    For every configured scenario, hourly dynamic line rating (DLR) factors
    are computed per weather region, each transmission line is mapped to the
    region(s) it crosses, and the resulting per-line ``s_max_pu`` time series
    is written to the line time-series table.

    Parameters
    ----------
    *No parameters required

    Returns
    -------
    int
        Always 0 (kept for backward compatibility with callers).
    """
    cfg = config.datasets()["dlr"]
    for scn in set(config.settings()["egon-data"]["--scenarios"]):
        weather_year = get_sector_parameters("global", scn)["weather_year"]

        regions_shape_path = (
            Path(".")
            / "data_bundle_egon_data"
            / "regions_dynamic_line_rating"
            / "Germany_regions.shp"
        )

        # Calculate hourly DLR per region
        dlr_hourly_dic, dlr_hourly = DLR_Regions(
            weather_year, regions_shape_path
        )

        regions = gpd.read_file(regions_shape_path)
        regions = regions.sort_values(by=["Region"])

        # Connect to the data base
        con = db.engine()

        sql = f"""
        SELECT scn_name, line_id, topo, s_nom FROM
        {cfg['sources']['trans_lines']['schema']}.
        {cfg['sources']['trans_lines']['table']}
        """
        df = gpd.GeoDataFrame.from_postgis(
            sql, con, crs="EPSG:4326", geom_col="topo"
        )

        # Clip the line set once per region so each line can later be tagged
        # with every region it intersects.
        trans_lines_R = {}
        for i in regions.Region:
            shape_area = regions[regions["Region"] == i]
            trans_lines_R[i] = gpd.clip(df, shape_area)
        trans_lines = df[["s_nom"]]
        trans_lines["in_regions"] = [[] for i in range(len(df))]

        trans_lines[["line_id", "geometry", "scn_name"]] = df[
            ["line_id", "topo", "scn_name"]
        ]
        trans_lines = gpd.GeoDataFrame(trans_lines)
        # Assign to each transmission line the region(s) to which it belongs.
        # BUG FIX: the previous code did
        #     trans_lines.loc[j][1] = trans_lines.loc[j][1].append(i)
        # list.append() returns None, and the chained .loc[j][1] assignment
        # targets a temporary copy of the row, so "in_regions" was never
        # reliably populated. Mutating the stored list in place through
        # label-based access is both correct and warning-free.
        for i in trans_lines_R:
            for j in trans_lines_R[i].index:
                trans_lines.loc[j, "in_regions"].append(i)
        trans_lines["crossborder"] = ~trans_lines.within(regions.unary_union)

        DLR = []

        # Assign to each transmission line the final values of DLR based on
        # location and type of line (overhead or underground)
        for i in trans_lines.index:
            # The concept of DLR does not apply to crossborder lines
            if trans_lines.loc[i, "crossborder"]:
                DLR.append([1] * 8760)
                continue
            # Underground lines have DLR = 1. A line is treated as a cable
            # when its capacity is an exact multiple of the standard cable
            # ratings 280/550/925 MVA — NOTE(review): heuristic inherited
            # from the original implementation; confirm against data model.
            s_nom = trans_lines.loc[i, "s_nom"]
            if s_nom % 280 == 0 or s_nom % 550 == 0 or s_nom % 925 == 0:
                DLR.append([1] * 8760)
                continue
            regions_crossed = trans_lines.loc[i, "in_regions"]
            # Lines completely in one of the regions have the DLR of the region
            if len(regions_crossed) == 1:
                region = int(regions_crossed[0])
                DLR.append(dlr_hourly_dic["R" + str(region) + "-DLR"])
                continue
            # For lines crossing 2 or more regions, the lowest DLR between the
            # different regions per hour is assigned.
            if len(regions_crossed) > 1:
                reg = ["Reg_" + str(j) for j in regions_crossed]
                min_DLR_reg = dlr_hourly[reg].min(axis=1)
                DLR.append(list(min_DLR_reg))

        trans_lines["s_max_pu"] = DLR

        # delete unnecessary columns
        trans_lines.drop(
            columns=["in_regions", "s_nom", "geometry", "crossborder"],
            inplace=True,
        )

        # Modify column "s_max_pu" to fit the requirement of the table
        trans_lines["s_max_pu"] = trans_lines.apply(
            lambda x: list(x["s_max_pu"]), axis=1
        )
        trans_lines["temp_id"] = 1

        # Delete existing data
        # NOTE(review): the DELETE reads the table from cfg['sources'] while
        # the INSERT below writes to cfg['targets'] — presumably both resolve
        # to the same table; confirm in the dataset configuration.
        db.execute_sql(
            f"""
            DELETE FROM {cfg['sources']['line_timeseries']['schema']}.
            {cfg['sources']['line_timeseries']['table']};
            """
        )

        # Insert into database
        trans_lines.to_sql(
            f"{cfg['targets']['line_timeseries']['table']}",
            schema=f"{cfg['targets']['line_timeseries']['schema']}",
            con=db.engine(),
            if_exists="append",
            index=False,
        )
    # Return after processing every scenario, not from inside the loop.
    return 0
||
| 172 | |||
| 360 |