Completed
Push — master (e683ec...dcbe58) by Andy · 30s · created

CannonModel._censored_design_matrix()  (grade C)

Complexity:    Conditions 7
Size:          Total Lines 30
Duplication:   Lines 0, Ratio 0 %
Importance:    Changes 1, Bugs 0, Features 0

Metric   Value
cc       7      (cyclomatic complexity; the "Conditions" count above)
c        1      (changes)
b        0      (bugs)
f        0      (features)
dl       0      (duplicated lines)
loc      30     (total lines)
rs       5.5
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
The Cannon.
"""

from __future__ import (division, print_function, absolute_import,
                        unicode_literals)

__all__ = ["CannonModel"]

import logging
import multiprocessing as mp
import numpy as np
import os
import pickle
import scipy.optimize as op
from collections import OrderedDict
from copy import deepcopy
from datetime import datetime
from functools import wraps
from six import string_types
from sys import version_info
from scipy.spatial import Delaunay
from time import time

from .vectorizer.base import BaseVectorizer
from . import (censoring, fitting, utils, vectorizer as vectorizer_module,
               __version__)


logger = logging.getLogger(__name__)


def requires_training(method):
    """
    A decorator for model methods that require training before being run.

    :param method:
        A method belonging to CannonModel.
    """
    @wraps(method)
    def wrapper(model, *args, **kwargs):
        if not model.is_trained:
            raise TypeError("the model requires training first")
        return method(model, *args, **kwargs)
    return wrapper
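
# For example (illustrative only; `Example` is hypothetical): a decorated
# method raises a TypeError until `is_trained` becomes True.
#
#     class Example(object):
#         is_trained = False
#
#         @requires_training
#         def predict(self):
#             return 1.0
#
#     Example().predict()  # raises TypeError("the model requires training first")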


class CannonModel(object):
    """
    A model for The Cannon which includes L1 regularization and pixel censoring.

    :param training_set_labels:
        A set of objects with labels known to high fidelity. This can be
        given as a numpy structured array, or an astropy table.

    :param training_set_flux:
        An array of normalised fluxes for stars in the labelled set, given
        as shape `(num_stars, num_pixels)`. The `num_stars` should match the
        number of rows in `training_set_labels`.

    :param training_set_ivar:
        An array of inverse variances on the normalised fluxes for stars in
        the training set. The shape of the `training_set_ivar` array should
        match that of `training_set_flux`.

    :param vectorizer:
        A vectorizer to take input labels and produce a design matrix. This
        should be a sub-class of `vectorizer.BaseVectorizer`.

    :param dispersion: [optional]
        The dispersion values corresponding to the given pixels. If provided,
        this should have a size of `num_pixels`.

    :param regularization: [optional]
        The strength of the L1 regularization. This should either be `None`,
        a float-like value specifying a single regularization strength for
        all pixels, or a float-like array of length `num_pixels`.

    :param censors: [optional]
        A dictionary containing label names as keys and boolean censoring
        masks as values.
    """

    _data_attributes = \
        ("training_set_labels", "training_set_flux", "training_set_ivar")

    # Descriptive attributes are needed to train *and* test the model.
    _descriptive_attributes = \
        ("vectorizer", "censors", "regularization", "dispersion")

    # Trained attributes are set only at training time.
    _trained_attributes = ("theta", "s2")

    def __init__(self, training_set_labels, training_set_flux, training_set_ivar,
        vectorizer, dispersion=None, regularization=None, censors=None, **kwargs):

        # Save the vectorizer.
        if not isinstance(vectorizer, BaseVectorizer):
            raise TypeError(
                "vectorizer must be a sub-class of vectorizer.BaseVectorizer")

        self._vectorizer = vectorizer

        if training_set_flux is None and training_set_ivar is None:

            # Must be reading in a model that does not have the training set
            # spectra saved.
            self._training_set_flux = None
            self._training_set_ivar = None
            self._training_set_labels = training_set_labels

        else:
            self._training_set_flux = np.atleast_2d(training_set_flux)
            self._training_set_ivar = np.atleast_2d(training_set_ivar)

            if isinstance(training_set_labels, np.ndarray) \
            and training_set_labels.shape[0] == self._training_set_flux.shape[0] \
            and training_set_labels.shape[1] == len(vectorizer.label_names):
                # A valid array was given as the training set labels, not a table.
                self._training_set_labels = training_set_labels
            else:
                self._training_set_labels = np.array(
                    [training_set_labels[ln] for ln in vectorizer.label_names]).T

            # Check that the flux and ivar are valid.
            self._verify_training_data(**kwargs)

        # Set regularization, censoring, dispersion.
        self.regularization = regularization
        self.censors = censors
        self.dispersion = dispersion

        # Set useful private attributes.
        __scale_labels_function = kwargs.get("__scale_labels_function",
            lambda l: np.ptp(np.percentile(l, [2.5, 97.5], axis=0), axis=0))
        __fiducial_labels_function = kwargs.get("__fiducial_labels_function",
            lambda l: np.percentile(l, 50, axis=0))

        self._scales = __scale_labels_function(self.training_set_labels)
        self._fiducials = __fiducial_labels_function(self.training_set_labels)
        self._design_matrix = vectorizer(
            (self.training_set_labels - self._fiducials)/self._scales).T

        self.reset()

        return None


    # Representations.


    def __str__(self):
        return "<{module}.{name} of {K} labels {trained}with a training set "\
               "of {N} stars each with {M} pixels>".format(
                    module=self.__module__,
                    name=type(self).__name__,
                    trained="trained " if self.is_trained else "",
                    K=self.training_set_labels.shape[1],
                    N=self.training_set_labels.shape[0],
                    M=self.training_set_flux.shape[1])


    def __repr__(self):
        return "<{0}.{1} object at {2}>".format(self.__module__,
            type(self).__name__, hex(id(self)))


    # Model attributes that cannot (well, should not) be changed.


    @property
    def training_set_labels(self):
        """ Return the labels in the training set. """
        return self._training_set_labels


    @property
    def training_set_flux(self):
        """ Return the training set fluxes. """
        return self._training_set_flux


    @property
    def training_set_ivar(self):
        """ Return the inverse variances of the training set fluxes. """
        return self._training_set_ivar


    @property
    def vectorizer(self):
        """ Return the vectorizer for this model. """
        return self._vectorizer


    @property
    def design_matrix(self):
        """ Return the design matrix for this model. """
        return self._design_matrix


    def _censored_design_matrix(self, pixel_index, fill_value=np.nan):
        """
        Return a censored design matrix for the given pixel index.

        :param pixel_index:
            The zero-indexed pixel.

        :returns:
            The design matrix for this pixel, where labels censored at this
            pixel have been replaced with `fill_value`.
        """

        if not self.censors \
        or len(set(self.censors).intersection(self.vectorizer.label_names)) == 0:
            return self.design_matrix

        data = (self.training_set_labels.copy() - self._fiducials)/self._scales
        for i, label_name in enumerate(self.vectorizer.label_names):
            try:
                use = self.censors[label_name][pixel_index]

            except KeyError:
                continue

            if not use:
                data[:, i] = fill_value

        return self.vectorizer(data).T
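
    # Illustrative sketch (names are hypothetical): if "FE_H" is censored at
    # pixel 7, that label's scaled values are replaced by `fill_value` before
    # the vectorizer builds this pixel's design matrix.
    #
    #     model.censors = {"FE_H": pixel_mask}  # boolean mask, one per pixel
    #     X_7 = model._censored_design_matrix(7)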


    @property
    def theta(self):
        """ Return the theta coefficients (spectral model derivatives). """
        return self._theta


    @property
    def s2(self):
        """ Return the intrinsic variance (s^2) for all pixels. """
        return self._s2


    # Model attributes that can be changed after initiation.


    @property
    def censors(self):
        """ Return the wavelength censor masks for the labels. """
        return self._censors


    @censors.setter
    def censors(self, censors):
        """
        Set label censoring masks for each pixel.

        :param censors:
            A dictionary-like object with label names as keys, and boolean arrays
            as values.
        """

        censors = {} if censors is None else censors
        if isinstance(censors, censoring.Censors):
            # Could be a censoring dictionary from a different model,
            # with different label names and pixels.

            # But more likely: we are loading a model from disk.
            self._censors = censors

        elif isinstance(censors, dict):
            self._censors = censoring.Censors(
                self.vectorizer.label_names, self.training_set_flux.shape[1],
                censors)

        else:
            raise TypeError(
                "censors must be a dictionary or a censoring.Censors object")


    @property
    def dispersion(self):
        """ Return the dispersion points for all pixels. """
        return self._dispersion


    @dispersion.setter
    def dispersion(self, dispersion):
        """
        Set the dispersion values for all the pixels.

        :param dispersion:
            An array of the dispersion values.
        """
        if dispersion is None:
            self._dispersion = None
            return None

        dispersion = np.array(dispersion).flatten()
        if self.training_set_flux is not None \
        and dispersion.size != self.training_set_flux.shape[1]:
            raise ValueError("dispersion provided does not match the number "
                             "of pixels per star ({0} != {1})".format(
                                dispersion.size, self.training_set_flux.shape[1]))

        if dispersion.dtype.kind not in "iuf":
            raise ValueError("dispersion values are not float-like")

        if not np.all(np.isfinite(dispersion)):
            raise ValueError("dispersion values must be finite")

        self._dispersion = dispersion
        return None


    @property
    def regularization(self):
        """ Return the strength of the L1 regularization for this model. """
        return self._regularization


    @regularization.setter
    def regularization(self, regularization):
        """
        Specify the strength of the regularization for the model, either as a
        single value for all pixels, or a different strength for each pixel.

        :param regularization:
            The L1-regularization strength for the model.
        """

        if regularization is None:
            self._regularization = None
            return None

        regularization = np.array(regularization).flatten()
        if regularization.size == 1:
            regularization = regularization[0]

        elif regularization.size != self.training_set_flux.shape[1]:
            raise ValueError("regularization array must be of size `num_pixels`")

        # This check was previously unreachable (it sat after a raise); it
        # applies to both the scalar and per-pixel cases.
        if np.any(0 > regularization) \
        or not np.all(np.isfinite(regularization)):
            raise ValueError("regularization must be positive and finite")

        self._regularization = regularization
        return None
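
    # For example (illustrative; `num_pixels` is hypothetical): either a
    # single strength for every pixel, or one strength per pixel.
    #
    #     model.regularization = 100.0                     # scalar
    #     model.regularization = np.full(num_pixels, 1e3)  # per-pixel array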


    # Convenient functions and properties.


    @property
    def is_trained(self):
        """ Return true or false for whether the model is trained. """
        return all(getattr(self, attr, None) is not None \
            for attr in self._trained_attributes)


    def reset(self):
        """ Clear any attributes that have been trained. """
        for attribute in self._trained_attributes:
            setattr(self, "_{}".format(attribute), None)
        return None


    def _pixel_access(self, array, index, default=None):
        """
        Safely access a (potentially per-pixel) attribute of the model.

        :param array:
            Either `None`, a float value, or an array the size of the dispersion
            array.

        :param index:
            The zero-indexed pixel to attempt to access.

        :param default: [optional]
            The default value to return if `array` is None.
        """

        if array is None:
            return default
        try:
            return array[index]
        except (IndexError, TypeError):
            return array
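
    # Behaviour sketch (illustrative): per-pixel arrays are indexed, scalars
    # fall through via the TypeError branch, and None yields the default.
    #
    #     self._pixel_access(None, 3, 0.0)      # -> 0.0
    #     self._pixel_access(100.0, 3)          # -> 100.0
    #     self._pixel_access(np.arange(10), 3)  # -> 3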


    def _verify_training_data(self, rho_warning=0.90):
        """
        Verify the training data for the appropriate shape and content.

        :param rho_warning: [optional]
            Maximum correlation value between labels before a warning is given.
        """

        if self.training_set_flux.shape != self.training_set_ivar.shape:
            raise ValueError("the training set flux and inverse variance arrays"
                             " for the labelled set must have the same shape")

        if len(self.training_set_labels) != self.training_set_flux.shape[0]:
            raise ValueError(
                "the first axis of the training set flux array should "
                "match the number of rows in the labelled set "
                "(N_stars, N_pixels)")

        if not np.all(np.isfinite(self.training_set_labels)):
            raise ValueError("training set labels are not all finite")

        if not np.all(np.isfinite(self.training_set_flux)):
            raise ValueError("training set fluxes are not all finite")

        if not np.all(self.training_set_ivar >= 0) \
        or not np.all(np.isfinite(self.training_set_ivar)):
            raise ValueError("training set ivars are not all positive and finite")

        # Look for very high correlation coefficients between labels, which
        # can make training very slow.
        rho = np.corrcoef(self.training_set_labels.T)

        # Set the diagonal indices to zero.
        K = rho.shape[0]
        rho[np.diag_indices(K)] = 0.0
        indices = np.argsort(rho.flatten())[::-1]

        for index in indices:
            x, y = (index % K, int(index / K))
            rho_xy = rho[x, y]
            if rho_xy >= rho_warning:
                if x > y: # One warning per correlated label pair.
                    logger.warning("Labels '{X}' and '{Y}' are highly correlated ("\
                        "rho = {rho_xy:.2}). This may cause very slow training "\
                        "times. Are both labels needed?".format(
                            X=self.vectorizer.label_names[x],
                            Y=self.vectorizer.label_names[y],
                            rho_xy=rho_xy))
            else:
                break
        return None
448
449
    def in_convex_hull(self, labels):
450
        """
451
        Return whether the provided labels are inside a complex hull constructed
452
        from the labelled set.
453
454
        :param labels:
455
            A `NxK` array of `N` sets of `K` labels, where `K` is the number of
456
            labels that make up the vectorizer.
457
458
        :returns:
459
            A boolean array as to whether the points are in the complex hull of
460
            the labelled set.
461
        """
462
463
        labels = np.atleast_2d(labels)
464
        if labels.shape[1] != self.training_set_labels.shape[1]:
465
            raise ValueError("expected {} labels; got {}".format(
466
                self.training_set_labels.shape[1], labels.shape[1]))
467
468
        hull = Delaunay(self.training_set_labels)
469
        return hull.find_simplex(labels) >= 0
470
471
472
    def write(self, path, include_training_set_spectra=False, overwrite=False,
473
        protocol=-1):
474
        """
475
        Serialise the trained model and save it to disk. This will save all
476
        relevant training attributes, and optionally, the training data.
477
478
        :param path:
479
            The path to save the model to.
480
481
        :param include_training_set_spectra: [optional]
482
            Save the labelled set, normalised flux and inverse variance used to
483
            train the model.
484
485
        :param overwrite: [optional]
486
            Overwrite the existing file path, if it already exists.
487
488
        :param protocol: [optional]
489
            The Python pickling protocol to employ. Use 2 for compatibility with
490
            previous Python releases, -1 for performance.
491
        """
492
493
        if os.path.exists(path) and not overwrite:
494
            raise IOError("path already exists: {0}".format(path))
495
496
        attributes = list(self._descriptive_attributes) \
497
                   + list(self._trained_attributes) \
498
                   + list(self._data_attributes)
499
500
        if "metadata" in attributes:
501
            logger.warn("'metadata' is a protected attribute. Ignoring.")
502
            attributes.remote("metadata")
503
504
        # Store up all the trained attributes and a hash of the training set.
505
        state = {}
506
        for attribute in attributes:
507
508
            value = getattr(self, attribute)
509
510
            try:
511
                # If it's a vectorizer or censoring dict, etc, get the state.
512
                value = value.__getstate__()
513
            except:
514
                None
515
516
            state[attribute] = value
517
518
        # Create a metadata dictionary.
519
        state["metadata"] = dict(
520
            version=__version__,
521
            model_class=type(self).__name__,
522
            modified=str(datetime.now()),
523
            data_attributes=self._data_attributes,
524
            descriptive_attributes=self._descriptive_attributes,
525
            trained_attributes=self._trained_attributes,
526
            training_set_hash=utils.short_hash(
527
                getattr(self, attr) for attr in self._data_attributes),
528
        )
529
530
        if not include_training_set_spectra:
531
            state.pop("training_set_flux")
532
            state.pop("training_set_ivar")
533
534
        elif not self.is_trained:
535
            logger.warn("The training set spectra won't be saved, and this model"\
536
                        "is not already trained. The saved model will not be "\
537
                        "able to be trained when loaded!")
538
539
        with open(path, "wb") as fp:
540
            pickle.dump(state, fp, protocol) 
541
        return None
542
543
544
    @classmethod
545
    def read(cls, path, **kwargs):
546
        """
547
        Read a saved model from disk.
548
549
        :param path:
550
            The path where to load the model from.
551
        """
552
553
        encodings = ("utf-8", "latin-1")
554
        for encoding in encodings:
555
            kwds = {"encoding": encoding} if version_info[0] >= 3 else {}
556
            try:
557
                with open(path, "rb") as fp:        
558
                    state = pickle.load(fp, **kwds)
559
560
            except UnicodeDecodeError:
561
                if encoding == encodings:
562
                    raise
563
564
        # Parse the state.
565
        metadata = state.get("metadata", {})
566
        version_saved = metadata.get("version", "0.1.0")
567
        if version_saved >= "0.2.0": # Refactor'd.
568
569
            init_attributes = list(metadata["data_attributes"]) \
570
                            + list(metadata["descriptive_attributes"])
571
572
            kwds = dict([(a, state.get(a, None)) for a in init_attributes])
573
574
            # Initiate the vectorizer.
575
            vectorizer_class, vectorizer_kwds = kwds["vectorizer"]
576
            klass = getattr(vectorizer_module, vectorizer_class)
577
            kwds["vectorizer"] = klass(**vectorizer_kwds)
578
579
            # Initiate the censors.
580
            kwds["censors"] = censoring.Censors(**kwds["censors"])
581
582
            model = cls(**kwds)
583
584
            # Set training attributes.
585
            for attr in metadata["trained_attributes"]:
586
                setattr(model, "_{}".format(attr), state.get(attr, None))
587
588
            return model
589
            
590
        else:
591
            raise NotImplementedError(
592
                "Cannot auto-convert old model files yet; "
593
                "contact Andy Casey <[email protected]> if you need this")
594
595
596
    def train(self, threads=None, **kwargs):
597
        """
598
        Train the model.
599
600
        :param threads: [optional]
601
            The number of parallel threads to use.
602
603
        :returns:
604
            A three-length tuple containing the spectral coefficients `theta`,
605
            the squared scatter term at each pixel `s2`, and metadata related to
606
            the training of each pixel.
607
        """
608
609
        if self.training_set_flux is None or self.training_set_ivar is None:
610
            raise TypeError(
611
                "cannot train: training set spectra not saved with the model")
612
613
        S, P = self.training_set_flux.shape
614
        T = self.design_matrix.shape[1]
615
616
        logger.info("Training {0}-label {1} with {2} stars and {3} pixels/star"\
617
            .format(len(self.vectorizer.label_names), type(self).__name__, S, P))
618
619
        # Parallelise out.
620
        if threads in (1, None):
621
            mapper, pool = (map, None)
622
623
        else:
624
            pool = mp.Pool(threads)
625
            mapper = pool.map
626
627
        func = utils.wrapper(fitting.fit_pixel_fixed_scatter, None, kwargs, P)
628
629
        meta = []
630
        theta = np.nan * np.ones((P, T))
631
        s2 = np.nan * np.ones(P)
632
633
        for pixel, (flux, ivar) \
634
        in enumerate(zip(self.training_set_flux.T, self.training_set_ivar.T)):
635
636
            args = (
637
                flux, ivar, 
638
                self._initial_theta(pixel),
639
                self._censored_design_matrix(pixel),
640
                self._pixel_access(self.regularization, pixel, 0.0),
641
                None
642
            )
643
            (pixel_theta, pixel_s2, pixel_meta), = mapper(func, [args])
644
645
            meta.append(pixel_meta)
646
            theta[pixel], s2[pixel] = (pixel_theta, pixel_s2)
647
648
        self._theta, self._s2 = (theta, s2)
649
650
        if pool is not None:
651
            pool.close()
652
            pool.join()
653
654
        return (theta, s2, meta)
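
    # Typical workflow sketch (illustrative):
    #
    #     theta, s2, meta = model.train(threads=4)
    #     model.theta.shape  # (num_pixels, num_coefficients)
    #     model.s2.shape     # (num_pixels,)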


    @requires_training
    def __call__(self, labels):
        """
        Return spectral fluxes, given the labels.

        :param labels:
            An array of stellar labels.
        """

        # Scale and offset the labels.
        scaled_labels = (np.atleast_2d(labels) - self._fiducials)/self._scales
        flux = np.dot(self.theta, self.vectorizer(scaled_labels)).T
        return flux[0] if flux.shape[0] == 1 else flux
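
    # For example (illustrative; the label values are made up): a trained
    # three-label model returns one model spectrum per row of labels, and a
    # single spectrum when given a single set of labels.
    #
    #     flux = model([5777.0, 4.4, 0.0])
    #     fluxes = model([[5777.0, 4.4, 0.0], [4800.0, 2.5, -0.3]])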


    @requires_training
    def test(self, flux, ivar, initial_labels=None, threads=None, **kwargs):
        """
        Run the test step on spectra.

        :param flux:
            The (pseudo-continuum-normalised) spectral flux.

        :param ivar:
            The inverse variance values for the spectral fluxes.

        :param initial_labels: [optional]
            The initial labels to try for each spectrum. This can be a single
            set of initial values, or one set of initial values for each star.

        :param threads: [optional]
            The number of parallel threads to use.
        """

        if threads in (1, None):
            mapper, pool = (map, None)

        else:
            pool = mp.Pool(threads)
            mapper = pool.map

        flux, ivar = (np.atleast_2d(flux), np.atleast_2d(ivar))
        S, P = flux.shape

        if ivar.shape != flux.shape:
            raise ValueError("flux and ivar arrays must be the same shape")

        if initial_labels is None:
            initial_labels = self._fiducials

        initial_labels = np.atleast_2d(initial_labels)
        if initial_labels.shape[0] != S and len(initial_labels.shape) == 2:
            initial_labels = np.tile(initial_labels.flatten(), S)\
                             .reshape(S, -1, len(self._fiducials))

        func = utils.wrapper(fitting.fit_spectrum,
            (self.vectorizer, self.theta, self.s2, self._fiducials, self._scales),
            kwargs, S, message="Running test step on {} spectra".format(S))

        labels, cov, meta = zip(*mapper(func, zip(flux, ivar, initial_labels)))

        if pool is not None:
            pool.close()
            pool.join()

        return (np.array(labels), np.array(cov), meta)
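
    # For example (illustrative): run the test step on the training set
    # itself as a sanity check; near one-to-one label recovery is expected.
    #
    #     labels, cov, meta = model.test(model.training_set_flux,
    #                                    model.training_set_ivar)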


    def _initial_theta(self, pixel_index, **kwargs):
        """
        Return a list of guesses of the spectral coefficients for the given
        pixel index. Initial values are sourced in the following preference
        order:

            (1) a previously trained `theta` value for this pixel,
            (2) an estimate of `theta` using linear algebra,
            (3) a neighbouring pixel's `theta` value,
            (4) the fiducial value of [1, 0, ..., 0].

        :param pixel_index:
            The zero-indexed integer of the pixel.

        :returns:
            A list of initial theta guesses, and the source of each guess.
        """

        guesses = []

        if self.theta is not None:
            # Previously trained theta value.
            if np.all(np.isfinite(self.theta[pixel_index])):
                guesses.append((self.theta[pixel_index], "previously_trained"))

        # Estimate from linear algebra.
        theta, cov = fitting.fit_theta_by_linalg(
            self.training_set_flux[:, pixel_index],
            self.training_set_ivar[:, pixel_index],
            s2=kwargs.get("s2", 0.0), design_matrix=self.design_matrix)

        if np.all(np.isfinite(theta)):
            guesses.append((theta, "linear_algebra"))

        if self.theta is not None:
            # Neighbouring pixel's value.
            for neighbour_pixel_index in set(np.clip(
                [pixel_index - 1, pixel_index + 1],
                0, self.training_set_flux.shape[1] - 1)):

                if np.all(np.isfinite(self.theta[neighbour_pixel_index])):
                    guesses.append(
                        (self.theta[neighbour_pixel_index], "neighbour_pixel"))

        # Fiducial value.
        fiducial = np.hstack([1.0, np.zeros(len(self.vectorizer.terms))])
        guesses.append((fiducial, "fiducial"))

        return guesses