#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Unit tests for the Regularized Cannon model class and associated functions.
"""

import numpy as np
import unittest
from AnniesLasso import regularized, utils


class TestRegularizedCannonModel(unittest.TestCase):

    def setUp(self):
        # Initialise some faux data and labels.
        labels = "ABCDE"
        N_labels = len(labels)
        N_stars = np.random.randint(1, 500)
        N_pixels = np.random.randint(1, 10000)
        shape = (N_stars, N_pixels)

        self.valid_training_labels = np.rec.array(
            np.random.uniform(size=(N_stars, N_labels)),
            dtype=[(label, '<f8') for label in labels])

        self.valid_fluxes = np.random.uniform(size=shape)
        self.valid_flux_uncertainties = np.random.uniform(size=shape)

    def get_model(self):
        return regularized.RegularizedCannonModel(
            self.valid_training_labels, self.valid_fluxes,
            self.valid_flux_uncertainties)

    def test_init(self):
        self.assertIsNotNone(self.get_model())

    def test_remind_myself_to_write_unit_tests_for_these_functions(self):
        m = self.get_model()
        m.label_vector = "A + B + C"
        self.assertIsNotNone(m.label_vector)

        # Cannot train without regularization term.
        with self.assertRaises(TypeError):
            m.train()

        # Regularization must be positive and finite.
        for each in (-1, np.nan, +np.inf, -np.inf):
            with self.assertRaises(ValueError):
                m.regularization = each

        # Regularization must be a float or match the dispersion size.
        with self.assertRaises(ValueError):
            m.regularization = [0., 1.]
        m.regularization = np.zeros_like(m.dispersion)
        m.train()

Duplicated code is one of the most pungent code smells. If you need to duplicate the same code in three or more different places, we strongly encourage you to look into extracting the code into a single class or operation.
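As a minimal, self-contained sketch of that kind of extraction (hypothetical class, helper, and test names, not code from this repository), the repeated fixture construction lives in one helper that every test calls, much as get_model() above keeps the RegularizedCannonModel constructor call out of the individual test methods:

import unittest

import numpy as np


class ExampleWithoutDuplication(unittest.TestCase):
    """Hypothetical illustration: the shared fixture is built in one place."""

    def _make_fluxes(self, n_stars=5, n_pixels=50):
        # The single extracted operation; each test calls this instead of
        # pasting the same np.random.uniform(...) construction inline.
        return np.random.uniform(size=(n_stars, n_pixels))

    def test_shape(self):
        self.assertEqual(self._make_fluxes().shape, (5, 50))

    def test_bounds(self):
        fluxes = self._make_fluxes()
        self.assertTrue(np.all((0 <= fluxes) & (fluxes < 1)))

    def test_dtype(self):
        self.assertEqual(self._make_fluxes().dtype, np.float64)


if __name__ == "__main__":
    unittest.main()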