| Metric | Value |
| --- | --- |
| Conditions | 3 |
| Total Lines | 69 |
| Lines | 0 |
| Ratio | 0 % |
Small methods make your code easier to understand, especially when combined with a good name. Moreover, when a method is small, finding a good name for it is usually much easier.

For example, if you find yourself adding comments inside a method's body, that is usually a good sign that the commented part should be extracted into a new method, with the comment serving as a starting point for the new method's name.
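As a minimal sketch of this idea (all names here, such as `Item`, `Customer`, and `apply_loyalty_discount`, are invented for illustration), the comment marking a block is turned into the name of an extracted method:

```python
from dataclasses import dataclass


@dataclass
class Item:
    price: float
    quantity: int


@dataclass
class Customer:
    years_active: int


# Before: an inline comment explains what the second block does.
def total_price_before(items, customer):
    total = sum(item.price * item.quantity for item in items)
    # apply loyalty discount for long-standing customers
    if customer.years_active >= 5:
        total *= 0.9
    return total


# After: the commented block becomes its own method, and the comment
# becomes the starting point for its name.
def apply_loyalty_discount(total, customer):
    if customer.years_active >= 5:
        return total * 0.9
    return total


def total_price(items, customer):
    total = sum(item.price * item.quantity for item in items)
    return apply_loyalty_discount(total, customer)
```

After the extraction, the calling method reads as a sequence of named steps and the comment itself is no longer needed.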
Commonly applied refactorings include extracting such well-named methods from the distinct steps of a long body. If many parameters or temporary variables are present, refactorings that bundle or replace them also help (see the sketch after the listing below).

Consider, for example, the following method:
```python
import datetime

# ...

def load_adjusted_array(self, columns, dates, assets, mask):
    data_query_time = self._data_query_time
    data_query_tz = self._data_query_tz
    expr = self._expr

    filtered = expr[
        expr[TS_FIELD_NAME] <=
        normalize_data_query_time(
            dates[0],
            data_query_time,
            data_query_tz,
        )
    ]
    lower = odo(
        bz.by(
            filtered[SID_FIELD_NAME],
            timestamp=filtered[TS_FIELD_NAME].max(),
        ).timestamp.min(),
        pd.Timestamp,
        **self._odo_kwargs
    )
    if pd.isnull(lower):
        # If there is no lower date, just query for data in the date
        # range. It must all be null anyways.
        lower = dates[0]

    upper = normalize_data_query_time(
        dates[-1],
        data_query_time,
        data_query_tz,
    )
    raw = odo(
        expr[
            (expr[TS_FIELD_NAME] >= lower) &
            (expr[TS_FIELD_NAME] <= upper)
        ],
        pd.DataFrame,
        **self._odo_kwargs
    )
    raw[TS_FIELD_NAME] = raw[TS_FIELD_NAME].astype('datetime64[ns]')
    sids = raw.loc[:, SID_FIELD_NAME]
    raw.drop(
        sids[~(sids.isin(assets) | sids.notnull())].index,
        inplace=True
    )
    normalize_timestamp_to_query_time(
        raw,
        data_query_time,
        data_query_tz,
        inplace=True,
        ts_field=TS_FIELD_NAME,
    )

    gb = raw.groupby(SID_FIELD_NAME)

    def mkseries(idx, raw_loc=raw.loc):
        vs = raw_loc[
            idx, [TS_FIELD_NAME, ANNOUNCEMENT_FIELD_NAME]
        ].values
        return pd.Series(
            index=pd.DatetimeIndex(vs[:, 0]),
            data=vs[:, 1],
        )

    return EarningsCalendarLoader(
        dates,
        valmap(mkseries, gb.groups),
        dataset=self._dataset,
    ).load_adjusted_array(columns, dates, assets, mask)
```
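For the case of many parameters or temporary variables, one commonly suggested option is to introduce a parameter object. The sketch below is not taken from the code above and uses invented names (`QueryCutoff`, `normalize_to_cutoff`); it merely mirrors how values such as `data_query_time` and `data_query_tz` travel together through several calls:

```python
from dataclasses import dataclass
from datetime import datetime, time
from zoneinfo import ZoneInfo


# Hypothetical parameter object: two values that always travel together
# (a wall-clock cutoff and its timezone) are grouped into one argument.
@dataclass(frozen=True)
class QueryCutoff:
    cutoff: time  # e.g. time(8, 45)
    tz: str       # IANA timezone name, e.g. "America/New_York"


def normalize_to_cutoff(day: datetime, query: QueryCutoff) -> datetime:
    """Attach the query cutoff time and timezone to a calendar day."""
    return datetime.combine(day.date(), query.cutoff, tzinfo=ZoneInfo(query.tz))


# Usage: callers pass one object instead of repeating two related parameters.
cutoff = QueryCutoff(cutoff=time(8, 45), tz="America/New_York")
lower = normalize_to_cutoff(datetime(2024, 1, 2), cutoff)
upper = normalize_to_cutoff(datetime(2024, 1, 31), cutoff)
```

Besides shortening parameter lists, grouping the pair gives the concept a name, which tends to pull related behaviour (such as the normalization above) toward the new class.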