| Conditions | 18 |
| Total Lines | 105 |
| Lines | 0 |
| Ratio | 0 % |
Small methods make your code easier to understand, in particular if combined with a good name. Besides, if your method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.
Commonly applied refactorings include Extract Method. If many parameters or temporary variables are present, consider Replace Temp with Query, Introduce Parameter Object, or Preserve Whole Object to reduce them first.
Complex methods like zipline.gens.AlgorithmSimulator.transform() often do a lot of different things. To break such a method down, we need to identify a cohesive component within its enclosing class. A common approach to find such a component is to look for fields/methods that share the same prefixes, or suffixes.
Once you have determined the fields that belong together, you can apply the Extract Class refactoring. If the component makes sense as a sub-class, Extract Subclass is also a candidate, and is often faster.
| 1 | # |
||
def transform(self):
    """
    Main generator work loop.

    Consumes (dt, action) pairs from ``self.clock`` and dispatches:

    * DATA_AVAILABLE: process fills/commissions from the previous bar,
      run the algorithm's ``handle_data``, and record any new orders.
    * ONCE_A_DAY: per-day bookkeeping — ``before_trading_start`` and
      split handling for held positions / open orders.
    * CALC_DAILY_PERFORMANCE: record the benchmark return, then yield
      the daily performance message.
    * CALC_MINUTE_PERFORMANCE: record the benchmark return, then yield
      the minute message (and a daily message when one is produced).

    After the clock is exhausted, yields the final risk message from
    ``algo.perf_tracker.handle_simulation_end()``.
    """
    algo = self.algo
    algo.data_portal = self.data_portal
    # cache hot lookups as locals; these are stable for the whole run
    handle_data = algo.event_manager.handle_data
    current_data = self.current_data

    data_portal = self.data_portal

    # can't cache a pointer to algo.perf_tracker because we're not
    # guaranteed that the algo doesn't swap out perf trackers during
    # its lifetime.
    # likewise, we can't cache a pointer to the blotter.

    algo.perf_tracker.position_tracker.data_portal = data_portal

    def inner_loop(dt_to_use):
        # Called every tick (minute or day): advance all simulation
        # clocks before touching blotter/perf state.

        data_portal.current_dt = dt_to_use
        self.simulation_dt = dt_to_use
        algo.on_dt_changed(dt_to_use)

        # re-read each tick (see the caching note above)
        blotter = algo.blotter
        perf_tracker = algo.perf_tracker

        # handle any transactions and commissions coming out of new
        # orders placed in the last bar
        new_transactions, new_commissions = \
            blotter.get_transactions(data_portal)

        for transaction in new_transactions:
            perf_tracker.process_transaction(transaction)

            # since this order was modified, record it
            order = blotter.orders[transaction.order_id]
            perf_tracker.process_order(order)

        if new_commissions:
            for commission in new_commissions:
                perf_tracker.process_commission(commission)

        # run user code for this bar AFTER fills/commissions are booked
        handle_data(algo, current_data, dt_to_use)

        # grab any new orders from the blotter, then clear the list.
        # this includes cancelled orders.
        new_orders = blotter.new_orders
        blotter.new_orders = []

        # if we have any new orders, record them so that we know
        # in what perf period they were placed.
        if new_orders:
            for new_order in new_orders:
                perf_tracker.process_order(new_order)

    def once_a_day(midnight_dt):
        # set all the timestamps
        self.simulation_dt = midnight_dt
        algo.on_dt_changed(midnight_dt)
        data_portal.current_day = midnight_dt

        # call before trading start
        algo.before_trading_start(current_data)

        perf_tracker = algo.perf_tracker

        # handle any splits that impact any positions or any open orders.
        sids_we_care_about = \
            list(set(list(perf_tracker.position_tracker.positions.keys()) +
                     list(algo.blotter.open_orders.keys())))

        if len(sids_we_care_about) > 0:
            splits = data_portal.get_splits(sids_we_care_about,
                                            midnight_dt)
            if len(splits) > 0:
                # apply splits to open orders first, then to positions
                algo.blotter.process_splits(splits)
                perf_tracker.position_tracker.handle_splits(splits)

    def handle_benchmark(date):
        # record the benchmark's return for this date so performance
        # messages can include relative metrics
        algo.perf_tracker.all_benchmark_returns[date] = \
            self.benchmark_source.get_value(date)

    with self.processor, ZiplineAPI(self.algo):
        for dt, action in self.clock:
            if action == DATA_AVAILABLE:
                inner_loop(dt)
            elif action == ONCE_A_DAY:
                once_a_day(dt)
            elif action == CALC_DAILY_PERFORMANCE:
                handle_benchmark(dt)
                yield self._get_daily_message(dt, algo, algo.perf_tracker)
            elif action == CALC_MINUTE_PERFORMANCE:
                handle_benchmark(dt)
                minute_msg, daily_msg = \
                    self._get_minute_message(dt, algo, algo.perf_tracker)

                yield minute_msg

                if daily_msg:
                    yield daily_msg

        # NOTE(review): placed inside the `with` so the processor and
        # ZiplineAPI contexts are still active during simulation-end
        # handling — confirm against the original file's indentation,
        # which the report format does not preserve.
        risk_message = algo.perf_tracker.handle_simulation_end()
        yield risk_message
||
| 187 | |||
| 211 |