| Metric      | Value |
| ----------- | ----- |
| Conditions  | 7     |
| Total Lines | 150   |
| Lines       | 0     |
| Ratio       | 0 %   |
Small methods make your code easier to understand, especially when combined with a good name. Moreover, when a method is small, finding a good name for it is usually much easier.
For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.
Commonly applied refactorings include *Extract Method*.
If many parameters or temporary variables are present, consider *Replace Temp with Query*, *Introduce Parameter Object*, or *Preserve Whole Object*.
1 | import os |
||
159 | def test_adjust_forward_fill_minute(self): |
||
160 | tempdir = TempDirectory() |
||
161 | try: |
||
162 | start_day = pd.Timestamp("2013-06-21", tz='UTC') |
||
163 | end_day = pd.Timestamp("2013-06-24", tz='UTC') |
||
164 | |||
165 | env = TradingEnvironment() |
||
166 | env.write_data( |
||
167 | equities_data={ |
||
168 | 0: { |
||
169 | 'start_date': start_day, |
||
170 | 'end_date': env.next_trading_day(end_day) |
||
171 | } |
||
172 | } |
||
173 | ) |
||
174 | |||
175 | minutes = env.minutes_for_days_in_range( |
||
176 | start=start_day, |
||
177 | end=end_day |
||
178 | ) |
||
179 | |||
180 | df = pd.DataFrame({ |
||
181 | # 390 bars of real data, then 100 missing bars, then 290 |
||
182 | # bars of data again |
||
183 | "open": np.array(list(range(0, 390)) + [0] * 100 + |
||
184 | list(range(390, 680))) * 1000, |
||
185 | "high": np.array(list(range(1000, 1390)) + [0] * 100 + |
||
186 | list(range(1390, 1680))) * 1000, |
||
187 | "low": np.array(list(range(2000, 2390)) + [0] * 100 + |
||
188 | list(range(2390, 2680))) * 1000, |
||
189 | "close": np.array(list(range(3000, 3390)) + [0] * 100 + |
||
190 | list(range(3390, 3680))) * 1000, |
||
191 | "volume": np.array(list(range(4000, 4390)) + [0] * 100 + |
||
192 | list(range(4390, 4680))), |
||
193 | "minute": minutes |
||
194 | }) |
||
195 | |||
196 | MinuteBarWriterFromDataFrames( |
||
197 | pd.Timestamp('2002-01-02', tz='UTC')).write( |
||
198 | tempdir.path, {0: df}) |
||
199 | |||
200 | sim_params = SimulationParameters( |
||
201 | period_start=minutes[0], |
||
202 | period_end=minutes[-1], |
||
203 | data_frequency="minute", |
||
204 | env=env |
||
205 | ) |
||
206 | |||
207 | # create a split for 6/24 |
||
208 | adjustments_path = os.path.join(tempdir.path, "adjustments.db") |
||
209 | writer = SQLiteAdjustmentWriter(adjustments_path, |
||
210 | pd.date_range(start=start_day, |
||
211 | end=end_day), |
||
212 | None) |
||
213 | |||
214 | splits = pd.DataFrame([{ |
||
215 | 'effective_date': int(end_day.value / 1e9), |
||
216 | 'ratio': 0.5, |
||
217 | 'sid': 0 |
||
218 | }]) |
||
219 | |||
220 | dividend_data = { |
||
221 | # Hackery to make the dtypes correct on an empty frame. |
||
222 | 'ex_date': np.array([], dtype='datetime64[ns]'), |
||
223 | 'pay_date': np.array([], dtype='datetime64[ns]'), |
||
224 | 'record_date': np.array([], dtype='datetime64[ns]'), |
||
225 | 'declared_date': np.array([], dtype='datetime64[ns]'), |
||
226 | 'amount': np.array([], dtype=float), |
||
227 | 'sid': np.array([], dtype=int), |
||
228 | } |
||
229 | dividends = pd.DataFrame( |
||
230 | dividend_data, |
||
231 | index=pd.DatetimeIndex([], tz='UTC'), |
||
232 | columns=['ex_date', |
||
233 | 'pay_date', |
||
234 | 'record_date', |
||
235 | 'declared_date', |
||
236 | 'amount', |
||
237 | 'sid'] |
||
238 | ) |
||
239 | |||
240 | merger_data = { |
||
241 | # Hackery to make the dtypes correct on an empty frame. |
||
242 | 'effective_date': np.array([], dtype=int), |
||
243 | 'ratio': np.array([], dtype=float), |
||
244 | 'sid': np.array([], dtype=int), |
||
245 | } |
||
246 | mergers = pd.DataFrame( |
||
247 | merger_data, |
||
248 | index=pd.DatetimeIndex([], tz='UTC') |
||
249 | ) |
||
250 | |||
251 | writer.write(splits, mergers, dividends) |
||
252 | |||
253 | equity_minute_reader = BcolzMinuteBarReader(tempdir.path) |
||
254 | |||
255 | dp = DataPortal( |
||
256 | env, |
||
257 | equity_minute_reader=equity_minute_reader, |
||
258 | sim_params=sim_params, |
||
259 | adjustment_reader=SQLiteAdjustmentReader(adjustments_path) |
||
260 | ) |
||
261 | |||
262 | # phew, finally ready to start testing. |
||
263 | for idx, minute in enumerate(minutes[:390]): |
||
264 | for field_idx, field in enumerate(["open", "high", "low", |
||
265 | "close", "volume"]): |
||
266 | self.assertEqual( |
||
267 | dp.get_spot_value( |
||
268 | 0, field, |
||
269 | dt=minute, |
||
270 | data_frequency=sim_params.data_frequency), |
||
271 | idx + (1000 * field_idx) |
||
272 | ) |
||
273 | |||
274 | for idx, minute in enumerate(minutes[390:490]): |
||
275 | # no actual data for this part, so we'll forward-fill. |
||
276 | # make sure the forward-filled values are adjusted. |
||
277 | for field_idx, field in enumerate(["open", "high", "low", |
||
278 | "close"]): |
||
279 | self.assertEqual( |
||
280 | dp.get_spot_value( |
||
281 | 0, field, |
||
282 | dt=minute, |
||
283 | data_frequency=sim_params.data_frequency), |
||
284 | (389 + (1000 * field_idx)) / 2.0 |
||
285 | ) |
||
286 | |||
287 | self.assertEqual( |
||
288 | dp.get_spot_value( |
||
289 | 0, "volume", |
||
290 | dt=minute, |
||
291 | data_frequency=sim_params.data_frequency), |
||
292 | 8778 # 4389 * 2 |
||
293 | ) |
||
294 | |||
295 | for idx, minute in enumerate(minutes[490:]): |
||
296 | # back to real data |
||
297 | for field_idx, field in enumerate(["open", "high", "low", |
||
298 | "close", "volume"]): |
||
299 | self.assertEqual( |
||
300 | dp.get_spot_value( |
||
301 | 0, field, |
||
302 | dt=minute, |
||
303 | data_frequency=sim_params.data_frequency |
||
304 | ), |
||
305 | (390 + idx + (1000 * field_idx)) |
||
306 | ) |
||
307 | finally: |
||
308 | tempdir.cleanup() |
||
309 | |||
376 |