| Conditions | 7 | 
| Total Lines | 149 | 
| Lines | 0 | 
| Ratio | 0 % | 
Small methods make your code easier to understand, especially when combined with a good name. Moreover, if your method is small, finding a good name for it is usually much easier.
For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.
Commonly applied refactorings include:
If many parameters/temporary variables are present:
| 1 | import os  | 
            ||
    def test_adjust_forward_fill_minute(self):
        """Forward-filled minute bars must be split-adjusted.

        Writes two trading days (2013-06-21 and 2013-06-24) of minute data
        for sid 0, with a 100-bar gap of zeros in the middle, and a 2:1
        split (ratio 0.5) effective on the second day.  Then checks, via
        ``DataPortal.get_spot_value``:

        * bars before the gap come back unadjusted,
        * bars inside the gap are forward-filled AND adjusted for the
          split (prices halved, volume doubled),
        * bars after the gap come back as written (post-split data needs
          no adjustment).

        NOTE(review): price columns are written scaled by 1000 but asserted
        unscaled — presumably the minute-bar reader divides by an OHLC
        ratio of 1000; confirm against the writer/reader implementation.
        """
        tempdir = TempDirectory()
        try:
            start_day = pd.Timestamp("2013-06-21", tz='UTC')
            end_day = pd.Timestamp("2013-06-24", tz='UTC')

            # Register sid 0 as alive over the test range so the portal
            # will serve data for it.
            env = TradingEnvironment()
            env.write_data(
                equities_data={
                    0: {
                        'start_date': start_day,
                        'end_date': env.next_trading_day(end_day)
                    }
                }
            )

            minutes = env.minutes_for_days_in_range(
                start=start_day,
                end=end_day
            )

            df = pd.DataFrame({
                # 390 bars of real data, then 100 missing bars, then 290
                # bars of data again
                "open": np.array(list(range(0, 390)) + [0] * 100 +
                                 list(range(390, 680))) * 1000,
                "high": np.array(list(range(1000, 1390)) + [0] * 100 +
                                 list(range(1390, 1680))) * 1000,
                "low": np.array(list(range(2000, 2390)) + [0] * 100 +
                                list(range(2390, 2680))) * 1000,
                "close": np.array(list(range(3000, 3390)) + [0] * 100 +
                                  list(range(3390, 3680))) * 1000,
                "volume": np.array(list(range(4000, 4390)) + [0] * 100 +
                                   list(range(4390, 4680))),
                "minute": minutes
            })

            MinuteBarWriterFromDataFrames(
                pd.Timestamp('2002-01-02', tz='UTC')).write(
                    tempdir.path, {0: df})

            sim_params = SimulationParameters(
                period_start=minutes[0],
                period_end=minutes[-1],
                data_frequency="minute",
                env=env
            )

            # create a split for 6/24
            adjustments_path = os.path.join(tempdir.path, "adjustments.db")
            writer = SQLiteAdjustmentWriter(adjustments_path,
                                            pd.date_range(start=start_day,
                                                          end=end_day),
                                            None)

            # effective_date is stored as epoch seconds (Timestamp.value
            # is nanoseconds, hence the 1e9 divide).
            splits = pd.DataFrame([{
                'effective_date': int(end_day.value / 1e9),
                'ratio': 0.5,
                'sid': 0
            }])

            dividend_data = {
                # Hackery to make the dtypes correct on an empty frame.
                'ex_date': np.array([], dtype='datetime64[ns]'),
                'pay_date': np.array([], dtype='datetime64[ns]'),
                'record_date': np.array([], dtype='datetime64[ns]'),
                'declared_date': np.array([], dtype='datetime64[ns]'),
                'amount': np.array([], dtype=float),
                'sid': np.array([], dtype=int),
            }
            dividends = pd.DataFrame(
                dividend_data,
                index=pd.DatetimeIndex([], tz='UTC'),
                columns=['ex_date',
                         'pay_date',
                         'record_date',
                         'declared_date',
                         'amount',
                         'sid']
            )

            merger_data = {
                # Hackery to make the dtypes correct on an empty frame.
                'effective_date': np.array([], dtype=int),
                'ratio': np.array([], dtype=float),
                'sid': np.array([], dtype=int),
            }
            mergers = pd.DataFrame(
                merger_data,
                index=pd.DatetimeIndex([], tz='UTC')
            )

            writer.write(splits, mergers, dividends)

            equity_minute_reader = BcolzMinuteBarReader(tempdir.path)

            dp = DataPortal(
                env,
                equity_minute_reader=equity_minute_reader,
                adjustment_reader=SQLiteAdjustmentReader(adjustments_path)
            )

            # phew, finally ready to start testing.

            # Day 1 (pre-split, real data): each field's written base
            # value is 1000 * its position in the OHLCV order, plus the
            # minute index.
            for idx, minute in enumerate(minutes[:390]):
                for field_idx, field in enumerate(["open", "high", "low",
                                                  "close", "volume"]):
                    self.assertEqual(
                        dp.get_spot_value(
                            0, field,
                            dt=minute,
                            data_frequency=sim_params.data_frequency),
                        idx + (1000 * field_idx)
                    )

            for idx, minute in enumerate(minutes[390:490]):
                # no actual data for this part, so we'll forward-fill.
                # make sure the forward-filled values are adjusted.
                # Last real bar was minute index 389, so prices forward-
                # fill from (389 + 1000 * field_idx), halved by the split.
                for field_idx, field in enumerate(["open", "high", "low",
                                                  "close"]):
                    self.assertEqual(
                        dp.get_spot_value(
                            0, field,
                            dt=minute,
                            data_frequency=sim_params.data_frequency),
                        (389 + (1000 * field_idx)) / 2.0
                    )

                # Volume adjusts in the opposite direction from price:
                # last real volume 4389 divided by the 0.5 ratio.
                self.assertEqual(
                    dp.get_spot_value(
                        0, "volume",
                        dt=minute,
                        data_frequency=sim_params.data_frequency),
                    8778  # 4389 * 2
                )

            # Remaining 290 minutes of day 2: written post-split, so
            # served exactly as written (no adjustment applied).
            for idx, minute in enumerate(minutes[490:]):
                # back to real data
                for field_idx, field in enumerate(["open", "high", "low",
                                                  "close", "volume"]):
                    self.assertEqual(
                        dp.get_spot_value(
                            0, field,
                            dt=minute,
                            data_frequency=sim_params.data_frequency
                        ),
                        (390 + idx + (1000 * field_idx))
                    )
        finally:
            tempdir.cleanup()
            ||
| 310 | |||
| 379 |