Conditions | 7 |
Total Lines | 94 |
Code Lines | 57 |
Lines | 0 |
Ratio | 0 % |
Changes | 0 |
Small methods make your code easier to understand, in particular if combined with a good name. Besides, if your method is small, finding a good name is usually much easier.
For example, if you find yourself adding comments to a method's body, this is usually a good sign to extract the commented part to a new method, and use the comment as a starting point when coming up with a good name for this new method.
Commonly applied refactorings include Extract Method. If many parameters or temporary variables are present, consider Replace Temp with Query, Introduce Parameter Object, or Preserve Whole Object.
1 | """ |
||
def test_config_sanity_check(caplog):
    """
    Check that config_sanity_check raises the expected errors for invalid
    configurations and logs the expected warnings for incomplete ones.

    :param caplog: pytest fixture capturing log records, used to inspect
        warning messages.
    """
    # an unrecognised dataset type must be rejected
    with pytest.raises(ValueError) as exc_info:
        config_sanity_check(config={"dataset": {"type": "type"}})
    assert "data type must be paired / unpaired / grouped" in str(exc_info.value)

    # an unrecognised dataset format must be rejected
    with pytest.raises(ValueError) as exc_info:
        config_sanity_check(
            config={"dataset": {"type": "paired", "format": "format"}}
        )
    assert "data format must be nifti / h5" in str(exc_info.value)

    # "dir" missing entirely from the dataset config
    with pytest.raises(AssertionError):
        config_sanity_check(config={"dataset": {"type": "paired", "format": "h5"}})

    # "dir" present but without train/valid/test entries
    with pytest.raises(AssertionError):
        config_sanity_check(
            config={"dataset": {"type": "paired", "format": "h5", "dir": {}}}
        )

    # each dir entry must be a string or a list of strings
    with pytest.raises(ValueError) as exc_info:
        config_sanity_check(
            config={
                "dataset": {
                    "type": "paired",
                    "format": "h5",
                    "dir": {"train": 1, "valid": None, "test": None},
                    "labeled": True,
                },
                "train": {"model": {"method": "ddf"}},
            }
        )
    assert "data_dir for mode train must be string or list of strings" in str(
        exc_info.value
    )

    # a conditional model cannot be trained on unlabeled data
    with pytest.raises(ValueError) as exc_info:
        config_sanity_check(
            config={
                "dataset": {
                    "type": "paired",
                    "format": "h5",
                    "dir": {"train": None, "valid": None, "test": None},
                    "labeled": False,
                },
                "train": {
                    "method": "conditional",
                    "loss": {},
                    "preprocess": {},
                    "optimizer": {"name": "Adam"},
                },
            }
        )
    assert "For conditional model, data have to be labeled, got unlabeled data." in str(
        exc_info.value
    )

    # warnings: every data directory undefined and every loss weight <= 0
    caplog.clear()  # drop records emitted by the earlier calls
    config_sanity_check(
        config={
            "dataset": {
                "type": "paired",
                "format": "h5",
                "dir": {"train": None, "valid": None, "test": None},
                "labeled": False,
            },
            "train": {
                "method": "ddf",
                "loss": {
                    "image": {"name": "lncc", "weight": 0.0},
                    "label": {"name": "ssd", "weight": 0.0},
                    "regularization": {"name": "bending", "weight": 0.0},
                },
                "preprocess": {},
                "optimizer": {"name": "Adam"},
            },
        }
    )
    # all warning messages accumulate in caplog.text, so one call suffices
    assert "Data directory for train is not defined." in caplog.text
    assert "Data directory for valid is not defined." in caplog.text
    assert "Data directory for test is not defined." in caplog.text