Completed: Push to master (5ad295...e683ec) by Andy (31s)
CannonModel.censored_design_matrix() (grade: C)

Complexity:   Conditions: 7
Size:         Total lines: 32
Duplication:  Lines: 0 (Ratio: 0 %)
Importance:   Changes: 3, Bugs: 1, Features: 0

Metric  Value
------  -----
cc      7
c       3
b       1
f       0
dl      0
loc     32
rs      5.5
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
The Cannon.
"""

from __future__ import (division, print_function, absolute_import,
                        unicode_literals)

__all__ = ["CannonModel"]

import logging
import multiprocessing as mp
import numpy as np
import os
import pickle
import scipy.optimize as op
from collections import OrderedDict
from copy import deepcopy
from datetime import datetime
from functools import wraps
from six import string_types
from sys import version_info
from scipy.spatial import Delaunay
from time import time

from .vectorizer.base import BaseVectorizer
from . import (censoring, fitting, utils, vectorizer as vectorizer_module, __version__)


logger = logging.getLogger(__name__)


def requires_training(method):
    """
    A decorator for model methods that require training before being run.

    :param method:
        A method belonging to CannonModel.
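
    For example: calling a decorated method on an untrained model raises a
    TypeError (an illustrative sketch; `model`, `flux` and `ivar` are
    placeholders):

    >>> model.test(flux, ivar)  # doctest: +SKIP
    TypeError: the model requires training first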
    """
    @wraps(method)
    def wrapper(model, *args, **kwargs):
        if not model.is_trained:
            raise TypeError("the model requires training first")
        return method(model, *args, **kwargs)
    return wrapper


class CannonModel(object):
    """
    A model for The Cannon which includes L1 regularization and pixel censoring.

    :param training_set_labels:
        A set of objects with labels known to high fidelity. This can be
        given as a numpy structured array, or an astropy table.

    :param training_set_flux:
        An array of normalised fluxes for stars in the labelled set, given
        as shape `(num_stars, num_pixels)`. The `num_stars` should match the
        number of rows in `training_set_labels`.

    :param training_set_ivar:
        An array of inverse variances on the normalized fluxes for stars in
        the training set. The shape of the `training_set_ivar` array should
        match that of `training_set_flux`.

    :param vectorizer:
        A vectorizer to take input labels and produce a design matrix. This
        should be a sub-class of `vectorizer.BaseVectorizer`.

    :param dispersion: [optional]
        The dispersion values corresponding to the given pixels. If provided,
        this should have a size of `num_pixels`.

    :param regularization: [optional]
        The strength of the L1 regularization. This should either be `None`,
        a float-type value for a single regularization strength for all pixels,
        or a float-like array of length `num_pixels`.

    :param censors: [optional]
        A dictionary containing label names as keys and boolean censoring
        masks as values.
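
    For example (an illustrative sketch only: the label names are arbitrary,
    `labels`, `flux` and `ivar` are placeholder data, and a polynomial
    vectorizer is assumed to be available from the `vectorizer` module):

    >>> model = CannonModel(labels, flux, ivar,
    ...     vectorizer=vectorizer_module.PolynomialVectorizer(
    ...         ("TEFF", "LOGG", "FE_H"), 2))  # doctest: +SKIP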
    """

    _data_attributes = \
        ("training_set_labels", "training_set_flux", "training_set_ivar")

    # Descriptive attributes are needed to train *and* test the model.
    _descriptive_attributes = \
        ("vectorizer", "censors", "regularization", "dispersion")

    # Trained attributes are set only at training time.
    _trained_attributes = ("theta", "s2")

    def __init__(self, training_set_labels, training_set_flux, training_set_ivar,
        vectorizer, dispersion=None, regularization=None, censors=None, **kwargs):

        # Save the vectorizer.
        if not isinstance(vectorizer, BaseVectorizer):
            raise TypeError(
                "vectorizer must be a sub-class of vectorizer.BaseVectorizer")

        self._vectorizer = vectorizer

        if training_set_flux is None and training_set_ivar is None:

            # Must be reading in a model that does not have the training set
            # spectra saved.
            self._training_set_flux = None
            self._training_set_ivar = None
            self._training_set_labels = training_set_labels

        else:
            self._training_set_flux = np.atleast_2d(training_set_flux)
            self._training_set_ivar = np.atleast_2d(training_set_ivar)

            if isinstance(training_set_labels, np.ndarray) \
            and training_set_labels.shape[0] == self._training_set_flux.shape[0] \
            and training_set_labels.shape[1] == len(vectorizer.label_names):
                # A valid array was given as the training set labels, not a table.
                self._training_set_labels = training_set_labels
            else:
                self._training_set_labels = np.array(
                    [training_set_labels[ln] for ln in vectorizer.label_names]).T

            # Check that the flux and ivar are valid.
            self._verify_training_data(**kwargs)

        # Set regularization, censoring, dispersion.
        self.regularization = regularization
        self.censors = censors
        self.dispersion = dispersion

        # Set useful private attributes: label scales span the central 95 per
        # cent interval, and fiducials are the median labels.
        __scale_labels_function = kwargs.get("__scale_labels_function",
            lambda l: np.ptp(np.percentile(l, [2.5, 97.5], axis=0), axis=0))
        __fiducial_labels_function = kwargs.get("__fiducial_labels_function",
            lambda l: np.percentile(l, 50, axis=0))

        self._scales = __scale_labels_function(self.training_set_labels)
        self._fiducials = __fiducial_labels_function(self.training_set_labels)
        self._design_matrix = vectorizer(
            (self.training_set_labels - self._fiducials)/self._scales).T

        self.reset()

        return None


    # Representations.


    def __str__(self):
        return "<{module}.{name} of {K} labels {trained}with a training set "\
               "of {N} stars each with {M} pixels>".format(
                    module=self.__module__,
                    name=type(self).__name__,
                    trained="trained " if self.is_trained else "",
                    K=self.training_set_labels.shape[1],
                    N=self.training_set_labels.shape[0],
                    M=self.training_set_flux.shape[1])


    def __repr__(self):
        return "<{0}.{1} object at {2}>".format(self.__module__,
            type(self).__name__, hex(id(self)))


    # Model attributes that cannot (well, should not) be changed.


    @property
    def training_set_labels(self):
        """ Return the labels in the training set. """
        return self._training_set_labels


    @property
    def training_set_flux(self):
        """ Return the training set fluxes. """
        return self._training_set_flux


    @property
    def training_set_ivar(self):
        """ Return the inverse variances of the training set fluxes. """
        return self._training_set_ivar


    @property
    def vectorizer(self):
        """ Return the vectorizer for this model. """
        return self._vectorizer


    @property
    def design_matrix(self):
        """ Return the design matrix for this model. """
        return self._design_matrix


    def censored_design_matrix(self, pixel_index):
        """
        Return a censored design matrix for the given pixel index, where
        entries that depend on censored labels are non-finite.

        :param pixel_index:
            The zero-indexed pixel.

        :returns:
            The design matrix for this pixel, with entries that depend on
            censored labels set to NaN.
        """

        if not self.censors:
            return self.design_matrix

        data = self.training_set_labels.copy()
        for i, label_name in enumerate(self.vectorizer.label_names):
            try:
                use = self.censors[label_name][pixel_index]

            except KeyError:
                # No censoring mask for this label.
                continue

            else:
                if not use:
                    data[:, i] = np.nan

        # Scale and offset the labels in the same way as the training design
        # matrix, so the two are directly comparable.
        return self.vectorizer((data - self._fiducials)/self._scales).T


    @property
    def theta(self):
        """ Return the theta coefficients (spectral model derivatives). """
        return self._theta


    @property
    def s2(self):
        """ Return the intrinsic variance (s^2) for all pixels. """
        return self._s2


    # Model attributes that can be changed after initiation.


    @property
    def censors(self):
        """ Return the wavelength censor masks for the labels. """
        return self._censors


    @censors.setter
    def censors(self, censors):
        """
        Set label censoring masks for each pixel.

        :param censors:
            A dictionary-like object with label names as keys, and boolean arrays
            as values.
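
        For example (illustrative; assumes a label named "LOGG" and a mask
        where `True` means the label is used at that pixel):

        >>> mask = np.zeros(model.training_set_flux.shape[1], dtype=bool)
        >>> mask[500:600] = True
        >>> model.censors = {"LOGG": mask}  # doctest: +SKIP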
        """

        censors = {} if censors is None else censors
        if isinstance(censors, censoring.Censors):
            # Could be a censoring dictionary from a different model,
            # with different label names and pixels.

            # But more likely: we are loading a model from disk.
            self._censors = censors

        elif isinstance(censors, dict):
            self._censors = censoring.Censors(
                self.vectorizer.label_names, self.training_set_flux.shape[1],
                censors)

        else:
            raise TypeError(
                "censors must be a dictionary or a censoring.Censors object")


    @property
    def dispersion(self):
        """ Return the dispersion points for all pixels. """
        return self._dispersion


    @dispersion.setter
    def dispersion(self, dispersion):
        """
        Set the dispersion values for all the pixels.

        :param dispersion:
            An array of the dispersion values.
        """
        if dispersion is None:
            self._dispersion = None
            return None

        dispersion = np.array(dispersion).flatten()
        if self.training_set_flux is not None \
        and dispersion.size != self.training_set_flux.shape[1]:
            raise ValueError("dispersion provided does not match the number "
                             "of pixels per star ({0} != {1})".format(
                                dispersion.size, self.training_set_flux.shape[1]))

        if dispersion.dtype.kind not in "iuf":
            raise ValueError("dispersion values are not float-like")

        if not np.all(np.isfinite(dispersion)):
            raise ValueError("dispersion values must be finite")

        self._dispersion = dispersion
        return None


    @property
    def regularization(self):
        """ Return the strength of the L1 regularization for this model. """
        return self._regularization


    @regularization.setter
    def regularization(self, regularization):
        """
        Specify the strength of the regularization for the model, either as a
        single value for all pixels, or a different strength for each pixel.

        :param regularization:
            The L1-regularization strength for the model.
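
        For example, a single strength for every pixel, or one per pixel
        (illustrative values; `P` stands for `num_pixels`):

        >>> model.regularization = 100.0  # doctest: +SKIP
        >>> model.regularization = 100.0 * np.ones(P)  # doctest: +SKIP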
        """

        if regularization is None:
            self._regularization = None
            return None

        regularization = np.array(regularization).flatten()
        if regularization.size == 1:
            regularization = regularization[0]
            if 0 > regularization or not np.isfinite(regularization):
                raise ValueError("regularization must be positive and finite")

        elif regularization.size != self.training_set_flux.shape[1]:
            raise ValueError("regularization array must be of size `num_pixels`")

        elif np.any(0 > regularization) \
        or not np.all(np.isfinite(regularization)):
            raise ValueError("regularization must be positive and finite")

        self._regularization = regularization
        return None


    # Convenient functions and properties.


    @property
    def is_trained(self):
        """ Return true or false for whether the model is trained. """
        return all(getattr(self, attr, None) is not None \
            for attr in self._trained_attributes)


    def reset(self):
        """ Clear any attributes that have been trained. """
        for attribute in self._trained_attributes:
            setattr(self, "_{}".format(attribute), None)
        return None


    def _pixel_access(self, array, index, default=None):
        """
        Safely access a (potentially per-pixel) attribute of the model.

        :param array:
            Either `None`, a float value, or an array the size of the dispersion
            array.

        :param index:
            The zero-indexed pixel to attempt to access.

        :param default: [optional]
            The default value to return if `array` is None.
        """

        if array is None:
            return default
        try:
            return array[index]
        except (IndexError, TypeError):
            return array


    def _verify_training_data(self, rho_warning=0.90):
        """
        Verify the training data for the appropriate shape and content.

        :param rho_warning: [optional]
            Maximum correlation value between labels before a warning is given.
        """

        if self.training_set_flux.shape != self.training_set_ivar.shape:
            raise ValueError("the training set flux and inverse variance arrays"
                             " for the labelled set must have the same shape")

        if len(self.training_set_labels) != self.training_set_flux.shape[0]:
            raise ValueError(
                "the first axis of the training set flux array (N_stars, "
                "N_pixels) must match the number of rows in the labelled set")

        if not np.all(np.isfinite(self.training_set_labels)):
            raise ValueError("training set labels are not all finite")

        if not np.all(np.isfinite(self.training_set_flux)):
            raise ValueError("training set fluxes are not all finite")

        if not np.all(self.training_set_ivar >= 0) \
        or not np.all(np.isfinite(self.training_set_ivar)):
            raise ValueError("training set ivars are not all positive and finite")

        # Look for very high correlation coefficients between labels, which
        # could make training very slow.
        rho = np.corrcoef(self.training_set_labels.T)

        # Set the diagonal indices to zero.
        K = rho.shape[0]
        rho[np.diag_indices(K)] = 0.0
        indices = np.argsort(rho.flatten())[::-1]

        for index in indices:
            x, y = (index % K, int(index / K))
            rho_xy = rho[x, y]
            if rho_xy >= rho_warning:
                if x > y: # One warning per correlated label pair.
                    logger.warning(
                        "Labels '{X}' and '{Y}' are highly correlated "
                        "(rho = {rho_xy:.2}). This may cause very slow training "
                        "times. Are both labels needed?".format(
                            X=self.vectorizer.label_names[x],
                            Y=self.vectorizer.label_names[y],
                            rho_xy=rho_xy))
            else:
                break
        return None


    def in_convex_hull(self, labels):
        """
        Return whether the provided labels are inside the convex hull constructed
        from the labelled set.

        :param labels:
            A `NxK` array of `N` sets of `K` labels, where `K` is the number of
            labels that make up the vectorizer.

        :returns:
            A boolean array as to whether the points are in the convex hull of
            the labelled set.
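
        For example (illustrative; a three-label model with values given in
        the order of the vectorizer label names):

        >>> model.in_convex_hull([[5777, 4.4, 0.0]])  # doctest: +SKIP
        array([ True])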
        """

        labels = np.atleast_2d(labels)
        if labels.shape[1] != self.training_set_labels.shape[1]:
            raise ValueError("expected {} labels; got {}".format(
                self.training_set_labels.shape[1], labels.shape[1]))

        hull = Delaunay(self.training_set_labels)
        return hull.find_simplex(labels) >= 0


    def write(self, path, include_training_set_spectra=False, overwrite=False,
        protocol=-1):
        """
        Serialise the trained model and save it to disk. This will save all
        relevant training attributes, and optionally, the training data.

        :param path:
            The path to save the model to.

        :param include_training_set_spectra: [optional]
            Save the labelled set, normalised flux and inverse variance used to
            train the model.

        :param overwrite: [optional]
            Overwrite the existing file path, if it already exists.

        :param protocol: [optional]
            The Python pickling protocol to employ. Use 2 for compatibility with
            previous Python releases, -1 for performance.
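
        For example (the path is illustrative):

        >>> model.write("the_cannon.model", include_training_set_spectra=True,
        ...             overwrite=True)  # doctest: +SKIP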
        """

        if os.path.exists(path) and not overwrite:
            raise IOError("path already exists: {0}".format(path))

        attributes = list(self._descriptive_attributes) \
                   + list(self._trained_attributes) \
                   + list(self._data_attributes)

        if "metadata" in attributes:
            logger.warning("'metadata' is a protected attribute. Ignoring.")
            attributes.remove("metadata")

        # Store up all the trained attributes and a hash of the training set.
        state = {}
        for attribute in attributes:

            value = getattr(self, attribute)

            try:
                # If it's a vectorizer or censoring dict, etc, get the state.
                value = value.__getstate__()
            except AttributeError:
                pass

            state[attribute] = value

        # Create a metadata dictionary.
        state["metadata"] = dict(
            version=__version__,
            model_class=type(self).__name__,
            modified=str(datetime.now()),
            data_attributes=self._data_attributes,
            descriptive_attributes=self._descriptive_attributes,
            trained_attributes=self._trained_attributes,
            training_set_hash=utils.short_hash(
                getattr(self, attr) for attr in self._data_attributes),
        )

        if not include_training_set_spectra:
            state.pop("training_set_flux")
            state.pop("training_set_ivar")

            if not self.is_trained:
                logger.warning(
                    "The training set spectra won't be saved, and this model "
                    "is not already trained. The saved model will not be able "
                    "to be trained when loaded!")

        with open(path, "wb") as fp:
            pickle.dump(state, fp, protocol)
        return None


    @classmethod
    def read(cls, path, **kwargs):
        """
        Read a saved model from disk.

        :param path:
            The path where to load the model from.
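
        For example (the path is illustrative):

        >>> model = CannonModel.read("the_cannon.model")  # doctest: +SKIP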
        """

        encodings = ("utf-8", "latin-1")
        for encoding in encodings:
            kwds = {"encoding": encoding} if version_info[0] >= 3 else {}
            try:
                with open(path, "rb") as fp:
                    state = pickle.load(fp, **kwds)

            except UnicodeDecodeError:
                # Try the next encoding, unless this was the last one.
                if encoding == encodings[-1]:
                    raise

            else:
                break

        # Parse the state.
        metadata = state.get("metadata", {})
        version_saved = metadata.get("version", "0.1.0")
        if version_saved >= "0.2.0": # Refactor'd.

            init_attributes = list(metadata["data_attributes"]) \
                            + list(metadata["descriptive_attributes"])

            kwds = dict([(a, state.get(a, None)) for a in init_attributes])

            # Initiate the vectorizer.
            vectorizer_class, vectorizer_kwds = kwds["vectorizer"]
            klass = getattr(vectorizer_module, vectorizer_class)
            kwds["vectorizer"] = klass(**vectorizer_kwds)

            # Initiate the censors.
            kwds["censors"] = censoring.Censors(**kwds["censors"])

            model = cls(**kwds)

            # Set training attributes.
            for attr in metadata["trained_attributes"]:
                setattr(model, "_{}".format(attr), state.get(attr, None))

            return model

        else:
            raise NotImplementedError(
                "Cannot auto-convert old model files yet; "
                "contact Andy Casey <[email protected]> if you need this")


    def train(self, threads=None, **kwargs):
        """
        Train the model.

        :param threads: [optional]
            The number of parallel threads to use.

        :returns:
            A three-length tuple containing the spectral coefficients `theta`,
            the squared scatter term at each pixel `s2`, and metadata related to
            the training of each pixel.
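
        For example (the thread count is illustrative):

        >>> theta, s2, metadata = model.train(threads=4)  # doctest: +SKIP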
        """

        if self.training_set_flux is None or self.training_set_ivar is None:
            raise TypeError(
                "cannot train: training set spectra not saved with the model")

        S, P = self.training_set_flux.shape
        T = self.design_matrix.shape[1]

        logger.info("Training {0}-label {1} with {2} stars and {3} pixels/star"\
            .format(len(self.vectorizer.label_names), type(self).__name__, S, P))

        # Parallelise out.
        if threads in (1, None):
            mapper, pool = (map, None)

        else:
            pool = mp.Pool(threads)
            mapper = pool.map

        func = utils.wrapper(fitting.fit_pixel_fixed_scatter, None, kwargs, P)

        meta = []
        theta = np.nan * np.ones((P, T))
        s2 = np.nan * np.ones(P)

        for pixel, (flux, ivar) \
        in enumerate(zip(self.training_set_flux.T, self.training_set_ivar.T)):

            args = (
                flux, ivar,
                self._initial_theta(pixel),
                self.censored_design_matrix(pixel),
                self._pixel_access(self.regularization, pixel, 0.0),
                None
            )
            (pixel_theta, pixel_s2, pixel_meta), = mapper(func, [args])

            meta.append(pixel_meta)
            theta[pixel], s2[pixel] = (pixel_theta, pixel_s2)

        self._theta, self._s2 = (theta, s2)

        if pool is not None:
            pool.close()
            pool.join()

        return (theta, s2, meta)


    @requires_training
    def __call__(self, labels):
        """
        Return spectral fluxes, given the labels.

        :param labels:
            An array of stellar labels.
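
        For example (illustrative label values, ordered as the vectorizer
        label names):

        >>> flux = model([5777, 4.4, 0.0])  # doctest: +SKIP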
        """

        # Scale and offset the labels.
        scaled_labels = (np.atleast_2d(labels) - self._fiducials)/self._scales
        flux = np.dot(self.theta, self.vectorizer(scaled_labels)).T
        return flux[0] if flux.shape[0] == 1 else flux


    @requires_training
    def test(self, flux, ivar, initial_labels=None, threads=None, **kwargs):
        """
        Run the test step on spectra.

        :param flux:
            The (pseudo-continuum-normalized) spectral flux.

        :param ivar:
            The inverse variance values for the spectral fluxes.

        :param initial_labels: [optional]
            The initial labels to try for each spectrum. This can be a single
            set of initial values, or one set of initial values for each star.

        :param threads: [optional]
            The number of parallel threads to use.
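
        For example (illustrative; `flux` and `ivar` are placeholder spectra):

        >>> labels, cov, metadata = model.test(flux, ivar)  # doctest: +SKIP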
        """

        if threads in (1, None):
            mapper, pool = (map, None)

        else:
            pool = mp.Pool(threads)
            mapper = pool.map

        flux, ivar = (np.atleast_2d(flux), np.atleast_2d(ivar))
        S, P = flux.shape

        if ivar.shape != flux.shape:
            raise ValueError("flux and ivar arrays must be the same shape")

        if initial_labels is None:
            initial_labels = self._fiducials

        initial_labels = np.atleast_2d(initial_labels)
        if initial_labels.shape[0] != S and len(initial_labels.shape) == 2:
            initial_labels = np.tile(initial_labels.flatten(), S)\
                             .reshape(S, -1, len(self._fiducials))

        func = utils.wrapper(fitting.fit_spectrum,
            (self.vectorizer, self.theta, self.s2, self._fiducials, self._scales),
            kwargs, S, message="Running test step on {} spectra".format(S))

        labels, cov, meta = zip(*mapper(func, zip(flux, ivar, initial_labels)))

        if pool is not None:
            pool.close()
            pool.join()

        return (np.array(labels), np.array(cov), meta)


    def _initial_theta(self, pixel_index, **kwargs):
        """
        Return a list of guesses of the spectral coefficients for the given
        pixel index. Initial values are sourced in the following preference
        order:

            (1) a previously trained `theta` value for this pixel,
            (2) an estimate of `theta` using linear algebra,
            (3) a neighbouring pixel's `theta` value,
            (4) the fiducial value of [1, 0, ..., 0].

        :param pixel_index:
            The zero-indexed integer of the pixel.

        :returns:
            A list of initial theta guesses, and the source of each guess.
        """

        guesses = []

        if self.theta is not None:
            # Previously trained theta value.
            if np.all(np.isfinite(self.theta[pixel_index])):
                guesses.append((self.theta[pixel_index], "previously_trained"))

        # Estimate from linear algebra.
        theta, cov = fitting.fit_theta_by_linalg(
            self.training_set_flux[:, pixel_index],
            self.training_set_ivar[:, pixel_index],
            s2=kwargs.get("s2", 0.0), design_matrix=self.design_matrix)

        if np.all(np.isfinite(theta)):
            guesses.append((theta, "linear_algebra"))

        if self.theta is not None:
            # Neighbouring pixel values.
            for neighbour_pixel_index in set(np.clip(
                [pixel_index - 1, pixel_index + 1],
                0, self.training_set_flux.shape[1] - 1)):

                if np.all(np.isfinite(self.theta[neighbour_pixel_index])):
                    guesses.append(
                        (self.theta[neighbour_pixel_index], "neighbour_pixel"))

        # Fiducial value.
        fiducial = np.hstack([1.0, np.zeros(len(self.vectorizer.terms))])
        guesses.append((fiducial, "fiducial"))

        return guesses