| Metric | Value |
| --- | --- |
| Conditions | 18 |
| Total Lines | 105 |
| Lines | 0 |
| Ratio | 0 % |
Small methods make your code easier to understand, particularly when combined with a good name; and when a method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, that is usually a sign that the commented part should be extracted into a new method, with the comment serving as a starting point for the new method's name.
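A minimal before/after sketch of that idea, using made-up names (ReportBefore, ReportAfter, and write_daily_summary are illustrative and not taken from zipline):

```python
import json


# Before: a comment explains what the middle of the method does.
class ReportBefore:
    def __init__(self, pnl, positions):
        self.pnl = pnl
        self.positions = positions

    def close_day(self, path):
        # write the daily summary to disk
        summary = {"pnl": self.pnl, "positions": len(self.positions)}
        with open(path, "w") as f:
            json.dump(summary, f)


# After: the commented block is its own method, named after the comment.
class ReportAfter:
    def __init__(self, pnl, positions):
        self.pnl = pnl
        self.positions = positions

    def close_day(self, path):
        self.write_daily_summary(path)

    def write_daily_summary(self, path):
        summary = {"pnl": self.pnl, "positions": len(self.positions)}
        with open(path, "w") as f:
            json.dump(summary, f)
```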
Commonly applied refactorings include:

- Extract Method, to pull a commented or otherwise cohesive block of statements into its own well-named method.
- If many parameters/temporary variables are present: Replace Method with Method Object (see the sketch after this list).
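A minimal sketch of Replace Method with Method Object, assuming made-up names (PriceLoader and _LoadOperation are illustrative, not zipline classes): the method's temporaries become fields of a small helper object, after which the body can be split into named steps.

```python
class PriceLoader:
    """Toy example only; none of these names come from zipline."""

    def load(self, sids, start, end):
        # Delegate to a method object whose fields replace the
        # temporaries that used to clutter this method.
        return _LoadOperation(sids, start, end).run()


class _LoadOperation:
    def __init__(self, sids, start, end):
        self.sids = sids
        self.start = start
        self.end = end
        self.frames = []  # former temporary variable, now a field

    def run(self):
        self._collect()
        return self._merge()

    def _collect(self):
        for sid in self.sids:
            self.frames.append((sid, self.start, self.end))

    def _merge(self):
        return {sid: (start, end) for sid, start, end in self.frames}
```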
Complex methods like zipline.gens.AlgorithmSimulator.transform() often do a lot of different things. To break such a method down, we need to identify a cohesive component within it. A common approach is to look for blocks of code that operate on the same fields or local variables, or for names that share the same prefixes or suffixes.
Once you have determined which pieces belong together, you can apply the Extract Method refactoring, or Extract Class if those pieces share enough state to form a collaborator of their own. If the component makes sense as a sub-class, Extract Subclass is also a candidate and is often quicker to apply. A sketch of Extract Class follows the listing below.
```python
def transform(self):
    """
    Main generator work loop.
    """
    algo = self.algo
    algo.data_portal = self.data_portal
    handle_data = algo.event_manager.handle_data
    current_data = self.current_data

    data_portal = self.data_portal

    # can't cache a pointer to algo.perf_tracker because we're not
    # guaranteed that the algo doesn't swap out perf trackers during
    # its lifetime.
    # likewise, we can't cache a pointer to the blotter.

    algo.perf_tracker.position_tracker.data_portal = data_portal

    def every_bar(dt_to_use):
        # called every tick (minute or day).

        self.simulation_dt = dt_to_use
        algo.on_dt_changed(dt_to_use)

        blotter = algo.blotter
        perf_tracker = algo.perf_tracker

        # handle any transactions and commissions coming out new orders
        # placed in the last bar
        new_transactions, new_commissions = \
            blotter.get_transactions(data_portal)

        for transaction in new_transactions:
            perf_tracker.process_transaction(transaction)

            # since this order was modified, record it
            order = blotter.orders[transaction.order_id]
            perf_tracker.process_order(order)

        if new_commissions:
            for commission in new_commissions:
                perf_tracker.process_commission(commission)

        handle_data(algo, current_data, dt_to_use)

        # grab any new orders from the blotter, then clear the list.
        # this includes cancelled orders.
        new_orders = blotter.new_orders
        blotter.new_orders = []

        # if we have any new orders, record them so that we know
        # in what perf period they were placed.
        if new_orders:
            for new_order in new_orders:
                perf_tracker.process_order(new_order)

    def once_a_day(midnight_dt):
        # set all the timestamps
        self.simulation_dt = midnight_dt
        algo.on_dt_changed(midnight_dt)
        data_portal.current_day = midnight_dt

        # call before trading start
        algo.before_trading_start(current_data)

        perf_tracker = algo.perf_tracker

        # handle any splits that impact any positions or any open orders.
        sids_we_care_about = \
            list(set(list(perf_tracker.position_tracker.positions.keys()) +
                     list(algo.blotter.open_orders.keys())))

        if len(sids_we_care_about) > 0:
            splits = data_portal.get_splits(sids_we_care_about,
                                            midnight_dt)
            if len(splits) > 0:
                algo.blotter.process_splits(splits)
                perf_tracker.position_tracker.handle_splits(splits)

    def handle_benchmark(date):
        algo.perf_tracker.all_benchmark_returns[date] = \
            self.benchmark_source.get_value(date)

    with self.processor, ZiplineAPI(self.algo):
        for dt, action in self.clock:
            if action == BAR:
                every_bar(dt)
            elif action == DAY_START:
                once_a_day(dt)
            elif action == DAY_END:
                # End of the day.
                handle_benchmark(dt)
                yield self._get_daily_message(dt, algo, algo.perf_tracker)
            elif action == MINUTE_END:
                handle_benchmark(dt)
                minute_msg, daily_msg = \
                    self._get_minute_message(dt, algo, algo.perf_tracker)

                yield minute_msg

                if daily_msg:
                    yield daily_msg

        risk_message = algo.perf_tracker.handle_simulation_end()
        yield risk_message
```
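In transform() above, the nested functions every_bar, once_a_day, and handle_benchmark all operate on the same handful of objects (algo, data_portal, the blotter, and the perf tracker), which makes them a cohesive candidate for Extract Class. A hedged sketch under that assumption; BarEventHandler is a hypothetical name and not part of zipline's API:

```python
class BarEventHandler:
    """Hypothetical collaborator that owns the state shared by the
    nested closures in transform()."""

    def __init__(self, algo, data_portal, benchmark_source):
        self.algo = algo
        self.data_portal = data_portal
        self.benchmark_source = benchmark_source

    def every_bar(self, dt):
        self.algo.on_dt_changed(dt)
        # the transaction, commission, and order bookkeeping from the
        # every_bar closure above would move here
        ...

    def once_a_day(self, midnight_dt):
        self.algo.on_dt_changed(midnight_dt)
        self.data_portal.current_day = midnight_dt
        # before_trading_start and split handling would move here
        ...

    def handle_benchmark(self, dt):
        tracker = self.algo.perf_tracker
        tracker.all_benchmark_returns[dt] = self.benchmark_source.get_value(dt)
```

transform() would then shrink to building one such handler and dispatching the clock's (dt, action) pairs to it, leaving each piece of bookkeeping in a small, separately nameable and testable method.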