| Metric | Value |
| --- | --- |
| Conditions | 7 |
| Total Lines | 149 |
| Lines | 0 |
| Ratio | 0 % |
Small methods make your code easier to understand, especially in combination with a good name. What's more, when a method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments inside a method's body, that is usually a sign that the commented part should be extracted into a new method, with the comment as a starting point for the new method's name.
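As a minimal sketch of that idea (the shop-checkout names below are hypothetical, invented purely for illustration), the comment becomes the name of the extracted function:

```python
from dataclasses import dataclass


@dataclass
class Item:
    price: float


@dataclass
class Customer:
    loyalty_years: int


# Before: the comment marks a block that wants to be its own function.
def checkout_before(cart, customer):
    # apply the customer's loyalty discount
    discount = 0.05 if customer.loyalty_years >= 2 else 0.0
    return sum(item.price for item in cart) * (1 - discount)


# After: the extracted function is named after the comment.
def loyalty_discount(customer):
    return 0.05 if customer.loyalty_years >= 2 else 0.0


def checkout(cart, customer):
    return sum(item.price for item in cart) * (1 - loyalty_discount(customer))


print(checkout([Item(10.0), Item(5.0)], Customer(loyalty_years=3)))  # 14.25
```

The call site now reads the way the comment did, and the extracted piece can be tested on its own.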
Commonly applied refactorings include:

- Extract Method: pull a coherent block, such as a commented section, into a new method of its own, as in the sketch above.
- Replace Method with Method Object: if many parameters/temporary variables are present, turn the method into an object of its own (see the sketch after this list).
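Replace Method with Method Object moves the body into a dedicated class whose fields take over the role of the parameters and temporaries, so the extracted steps can share state without long parameter lists. A minimal sketch, again with hypothetical names:

```python
class PriceReport:
    """Method object: former parameters and temporaries become fields."""

    def __init__(self, prices, tax_rate):
        self.prices = prices      # was a parameter
        self.tax_rate = tax_rate  # was a parameter

    def run(self):
        subtotal = self._subtotal()
        return subtotal + self._tax(subtotal)

    # Extracted steps read the fields directly instead of each
    # receiving a long argument list.
    def _subtotal(self):
        return sum(self.prices)

    def _tax(self, subtotal):
        return subtotal * self.tax_rate


total = PriceReport([10.0, 20.0], tax_rate=0.19).run()  # 35.7
```

For contrast, here is a real-world example of a method that has outgrown all of this advice: a test from the zipline backtesting library that weighs in at the 149 lines reported in the metrics table above.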
```python
def test_adjust_forward_fill_minute(self):
    tempdir = TempDirectory()
    try:
        start_day = pd.Timestamp("2013-06-21", tz='UTC')
        end_day = pd.Timestamp("2013-06-24", tz='UTC')

        env = TradingEnvironment()
        env.write_data(
            equities_data={
                0: {
                    'start_date': start_day,
                    'end_date': env.next_trading_day(end_day)
                }
            }
        )

        minutes = env.minutes_for_days_in_range(
            start=start_day,
            end=end_day
        )

        df = pd.DataFrame({
            # 390 bars of real data, then 100 missing bars, then 290
            # bars of data again
            "open": np.array(list(range(0, 390)) + [0] * 100 +
                             list(range(390, 680))) * 1000,
            "high": np.array(list(range(1000, 1390)) + [0] * 100 +
                             list(range(1390, 1680))) * 1000,
            "low": np.array(list(range(2000, 2390)) + [0] * 100 +
                            list(range(2390, 2680))) * 1000,
            "close": np.array(list(range(3000, 3390)) + [0] * 100 +
                              list(range(3390, 3680))) * 1000,
            "volume": np.array(list(range(4000, 4390)) + [0] * 100 +
                               list(range(4390, 4680))),
            "minute": minutes
        })

        MinuteBarWriterFromDataFrames(
            pd.Timestamp('2002-01-02', tz='UTC')).write(
                tempdir.path, {0: df})

        sim_params = SimulationParameters(
            period_start=minutes[0],
            period_end=minutes[-1],
            data_frequency="minute",
            env=env
        )

        # create a split for 6/24
        adjustments_path = os.path.join(tempdir.path, "adjustments.db")
        writer = SQLiteAdjustmentWriter(adjustments_path,
                                        pd.date_range(start=start_day,
                                                      end=end_day),
                                        None)

        splits = pd.DataFrame([{
            'effective_date': int(end_day.value / 1e9),
            'ratio': 0.5,
            'sid': 0
        }])

        dividend_data = {
            # Hackery to make the dtypes correct on an empty frame.
            'ex_date': np.array([], dtype='datetime64[ns]'),
            'pay_date': np.array([], dtype='datetime64[ns]'),
            'record_date': np.array([], dtype='datetime64[ns]'),
            'declared_date': np.array([], dtype='datetime64[ns]'),
            'amount': np.array([], dtype=float),
            'sid': np.array([], dtype=int),
        }
        dividends = pd.DataFrame(
            dividend_data,
            index=pd.DatetimeIndex([], tz='UTC'),
            columns=['ex_date',
                     'pay_date',
                     'record_date',
                     'declared_date',
                     'amount',
                     'sid']
        )

        merger_data = {
            # Hackery to make the dtypes correct on an empty frame.
            'effective_date': np.array([], dtype=int),
            'ratio': np.array([], dtype=float),
            'sid': np.array([], dtype=int),
        }
        mergers = pd.DataFrame(
            merger_data,
            index=pd.DatetimeIndex([], tz='UTC')
        )

        writer.write(splits, mergers, dividends)

        equity_minute_reader = BcolzMinuteBarReader(tempdir.path)

        dp = DataPortal(
            env,
            equity_minute_reader=equity_minute_reader,
            adjustment_reader=SQLiteAdjustmentReader(adjustments_path)
        )

        # phew, finally ready to start testing.
        for idx, minute in enumerate(minutes[:390]):
            for field_idx, field in enumerate(["open", "high", "low",
                                               "close", "volume"]):
                self.assertEqual(
                    dp.get_spot_value(
                        0, field,
                        dt=minute,
                        data_frequency=sim_params.data_frequency),
                    idx + (1000 * field_idx)
                )

        for idx, minute in enumerate(minutes[390:490]):
            # no actual data for this part, so we'll forward-fill.
            # make sure the forward-filled values are adjusted.
            for field_idx, field in enumerate(["open", "high", "low",
                                               "close"]):
                self.assertEqual(
                    dp.get_spot_value(
                        0, field,
                        dt=minute,
                        data_frequency=sim_params.data_frequency),
                    (389 + (1000 * field_idx)) / 2.0
                )

            self.assertEqual(
                dp.get_spot_value(
                    0, "volume",
                    dt=minute,
                    data_frequency=sim_params.data_frequency),
                8778  # 4389 * 2
            )

        for idx, minute in enumerate(minutes[490:]):
            # back to real data
            for field_idx, field in enumerate(["open", "high", "low",
                                               "close", "volume"]):
                self.assertEqual(
                    dp.get_spot_value(
                        0, field,
                        dt=minute,
                        data_frequency=sim_params.data_frequency
                    ),
                    (390 + idx + (1000 * field_idx))
                )
    finally:
        tempdir.cleanup()
```