| Metric | Value |
| --- | --- |
| Conditions | 6 |
| Total Lines | 114 |
| Code Lines | 72 |
| Lines | 0 |
| Ratio | 0 % |
| Changes | 0 |
Small methods make your code easier to understand, especially when combined with a good name. And if your method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, that is usually a sign that you should extract the commented part into a new method and use the comment as a starting point for naming it.
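As a small illustration of this idea (the function and field names here are made up for the example, not taken from the code discussed below), a commented block can be promoted to a method whose name is derived from the comment:

```python
# Before: the comment carries the meaning.
def report(orders):
    # compute the total revenue per customer
    totals = {}
    for order in orders:
        totals[order["customer"]] = totals.get(order["customer"], 0.0) + order["amount"]
    print(totals)


# After: the commented block becomes a method, named after the comment.
def total_revenue_per_customer(orders):
    totals = {}
    for order in orders:
        totals[order["customer"]] = totals.get(order["customer"], 0.0) + order["amount"]
    return totals


def report(orders):
    print(total_revenue_per_customer(orders))
```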
Commonly applied refactorings include extracting such commented blocks into their own, well-named methods. Long methods with many parameters and temporary variables are typical candidates, as in the following example:
```python
# coding=utf-8
# ...


def train(
    gpu: str,
    config_path: Union[str, List[str]],
    gpu_allow_growth: bool,
    ckpt_path: str,
    exp_name: str = "",
    log_dir: str = "logs",
    max_epochs: int = -1,
):
    """
    Function to train a model.

    :param gpu: which local gpu to use to train.
    :param config_path: path to configuration set up.
    :param gpu_allow_growth: whether to allocate whole GPU memory for training.
    :param ckpt_path: where to store training checkpoints.
    :param log_dir: path of the log directory.
    :param exp_name: experiment name.
    :param max_epochs: if max_epochs > 0, will use it to overwrite the configuration.
    """
    # set env variables
    os.environ["CUDA_VISIBLE_DEVICES"] = gpu
    os.environ["TF_FORCE_GPU_ALLOW_GROWTH"] = "true" if gpu_allow_growth else "false"

    # load config
    config, log_dir, ckpt_path = build_config(
        config_path=config_path,
        log_dir=log_dir,
        exp_name=exp_name,
        ckpt_path=ckpt_path,
        max_epochs=max_epochs,
    )

    # build dataset
    data_loader_train, dataset_train, steps_per_epoch_train = build_dataset(
        dataset_config=config["dataset"],
        preprocess_config=config["train"]["preprocess"],
        mode="train",
        training=True,
        repeat=True,
    )
    assert data_loader_train is not None  # train data should not be None
    data_loader_val, dataset_val, steps_per_epoch_val = build_dataset(
        dataset_config=config["dataset"],
        preprocess_config=config["train"]["preprocess"],
        mode="valid",
        training=False,
        repeat=True,
    )

    # use strategy to support multiple GPUs
    # the network is mirrored in each GPU so that we can use larger batch size
    # https://www.tensorflow.org/guide/distributed_training
    # only model, optimizer and metrics need to be defined inside the strategy
    num_devices = max(len(tf.config.list_physical_devices("GPU")), 1)
    batch_size = config["train"]["preprocess"]["batch_size"]
    if num_devices > 1:  # pragma: no cover
        strategy = tf.distribute.MirroredStrategy()
        if batch_size % num_devices != 0:
            raise ValueError(
                f"batch size {batch_size} can not be divided evenly "
                f"by the number of devices."
            )
    else:
        strategy = tf.distribute.get_strategy()
    with strategy.scope():
        model: tf.keras.Model = REGISTRY.build_model(
            config=dict(
                name=config["train"]["method"],
                moving_image_size=data_loader_train.moving_image_shape,
                fixed_image_size=data_loader_train.fixed_image_shape,
                index_size=data_loader_train.num_indices,
                labeled=config["dataset"]["labeled"],
                batch_size=batch_size,
                config=config["train"],
            )
        )
        optimizer = opt.build_optimizer(optimizer_config=config["train"]["optimizer"])
        model.compile(optimizer=optimizer)
        model.plot_model(output_dir=log_dir)

    # build callbacks
    tensorboard_callback = tf.keras.callbacks.TensorBoard(
        log_dir=log_dir,
        histogram_freq=config["train"]["save_period"],
        update_freq=config["train"].get("update_freq", "epoch"),
    )
    ckpt_callback, initial_epoch = build_checkpoint_callback(
        model=model,
        dataset=dataset_train,
        log_dir=log_dir,
        save_period=config["train"]["save_period"],
        ckpt_path=ckpt_path,
    )
    callbacks = [tensorboard_callback, ckpt_callback]

    # train
    # it's necessary to define the steps_per_epoch
    # and validation_steps to prevent errors like
    # BaseCollectiveExecutor::StartAbort Out of range: End of sequence
    model.fit(
        x=dataset_train,
        steps_per_epoch=steps_per_epoch_train,
        initial_epoch=initial_epoch,
        epochs=config["train"]["epochs"],
        validation_data=dataset_val,
        validation_steps=steps_per_epoch_val,
        callbacks=callbacks,
    )

    # close file loaders in data loaders after training
    data_loader_train.close()
    if data_loader_val is not None:
        data_loader_val.close()
```