Completed
Push — master (b89561...0d02f6) by Andy, created 01:18

CannonModel.censored_design_matrix()    Grade: B

Complexity:   Conditions 6
Size:         Total Lines 26
Duplication:  Lines 0 (Ratio 0 %)
Importance:   Changes 1, Bugs 0, Features 0

Metric   Value
cc       6
c        1
b        0
f        0
dl       0
loc      26
rs       7.5384
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
The Cannon.
"""

from __future__ import (division, print_function, absolute_import,
                        unicode_literals)

__all__ = ["CannonModel"]

import logging
import multiprocessing as mp
import numpy as np
import os
import pickle
import scipy.optimize as op
from collections import OrderedDict
from copy import deepcopy
from datetime import datetime
from functools import wraps
from six import string_types
from sys import version_info
from scipy.spatial import Delaunay
from time import time

from .vectorizer.base import BaseVectorizer
from . import (censoring, fitting, utils, vectorizer as vectorizer_module,
               __version__)


logger = logging.getLogger(__name__)

def requires_training(method):
    """
    A decorator for model methods that require training before being run.

    :param method:
        A method belonging to CannonModel.
    """
    @wraps(method)
    def wrapper(model, *args, **kwargs):
        if not model.is_trained:
            raise TypeError("the model requires training first")
        return method(model, *args, **kwargs)
    return wrapper

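# Usage sketch (illustrative; `predict` is a hypothetical method name): any
# `CannonModel` method decorated with `requires_training` raises a TypeError
# until the model has been trained.
#
#     @requires_training
#     def predict(self, labels):
#         ...
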
class CannonModel(object):
    """
    A model for The Cannon which includes L1 regularization and pixel censoring.

    :param training_set_labels:
        A set of objects with labels known to high fidelity. This can be
        given as a numpy structured array, or an astropy table.

    :param training_set_flux:
        An array of normalised fluxes for stars in the labelled set, given
        as shape `(num_stars, num_pixels)`. The `num_stars` should match the
        number of rows in `training_set_labels`.

    :param training_set_ivar:
        An array of inverse variances on the normalised fluxes for stars in
        the training set. The shape of the `training_set_ivar` array should
        match that of `training_set_flux`.

    :param vectorizer:
        A vectorizer to take input labels and produce a design matrix. This
        should be a sub-class of `vectorizer.BaseVectorizer`.

    :param dispersion: [optional]
        The dispersion values corresponding to the given pixels. If provided,
        this should have a size of `num_pixels`.

    :param regularization: [optional]
        The strength of the L1 regularization. This should either be `None`,
        a float-like value for a single regularization strength for all pixels,
        or a float-like array of length `num_pixels`.

    :param censors: [optional]
        A dictionary containing label names as keys and boolean censoring
        masks as values.
    """

    _data_attributes = \
        ("training_set_labels", "training_set_flux", "training_set_ivar")

    # Descriptive attributes are needed to train *and* test the model.
    _descriptive_attributes = \
        ("vectorizer", "censors", "regularization", "dispersion")

    # Trained attributes are set only at training time.
    _trained_attributes = ("theta", "s2")

    def __init__(self, training_set_labels, training_set_flux, training_set_ivar,
        vectorizer, dispersion=None, regularization=None, censors=None, **kwargs):

        # Save the vectorizer.
        if not isinstance(vectorizer, BaseVectorizer):
            raise TypeError(
                "vectorizer must be a sub-class of vectorizer.BaseVectorizer")

        self._vectorizer = vectorizer

        if training_set_flux is None and training_set_ivar is None:

            # Must be reading in a model that does not have the training set
            # spectra saved.
            self._training_set_flux = None
            self._training_set_ivar = None
            self._training_set_labels = training_set_labels

        else:
            self._training_set_flux = np.atleast_2d(training_set_flux)
            self._training_set_ivar = np.atleast_2d(training_set_ivar)

            if isinstance(training_set_labels, np.ndarray) \
            and training_set_labels.shape[0] == self._training_set_flux.shape[0] \
            and training_set_labels.shape[1] == len(vectorizer.label_names):
                # A valid array was given as the training set labels, not a table.
                self._training_set_labels = training_set_labels
            else:
                self._training_set_labels = np.array(
                    [training_set_labels[ln] for ln in vectorizer.label_names]).T

            # Check that the flux and ivar are valid.
            self._verify_training_data(**kwargs)

        # Set regularization, censoring, dispersion.
        self.regularization = regularization
        self.censors = censors
        self.dispersion = dispersion

        # Set useful private attributes.
        __scale_labels_function = kwargs.get("__scale_labels_function",
            lambda l: np.ptp(np.percentile(l, [2.5, 97.5], axis=0), axis=0))
        __fiducial_labels_function = kwargs.get("__fiducial_labels_function",
            lambda l: np.percentile(l, 50, axis=0))

        self._scales = __scale_labels_function(self.training_set_labels)
        self._fiducials = __fiducial_labels_function(self.training_set_labels)
        self._design_matrix = vectorizer(
            (self.training_set_labels - self._fiducials)/self._scales).T

        self.reset()

        return None

    # Representations.


    def __str__(self):
        return "<{module}.{name} of {K} labels {trained}with a training set "\
               "of {N} stars each with {M} pixels>".format(
                    module=self.__module__,
                    name=type(self).__name__,
                    trained="trained " if self.is_trained else "",
                    K=self.training_set_labels.shape[1],
                    N=self.training_set_labels.shape[0],
                    M=self.training_set_flux.shape[1])


    def __repr__(self):
        return "<{0}.{1} object at {2}>".format(self.__module__,
            type(self).__name__, hex(id(self)))


    # Model attributes that cannot (well, should not) be changed.


    @property
    def training_set_labels(self):
        """ Return the labels in the training set. """
        return self._training_set_labels


    @property
    def training_set_flux(self):
        """ Return the training set fluxes. """
        return self._training_set_flux


    @property
    def training_set_ivar(self):
        """ Return the inverse variances of the training set fluxes. """
        return self._training_set_ivar


    @property
    def vectorizer(self):
        """ Return the vectorizer for this model. """
        return self._vectorizer


    @property
    def design_matrix(self):
        """ Return the design matrix for this model. """
        return self._design_matrix


    @property
    def censored_design_matrix(self):
        """ Return a censored design matrix. """
        if not self.censors:
            return self.design_matrix

        columns = []
        for index, label_name in enumerate(self.vectorizer.label_names):
            column = self.training_set_labels[:, index].copy()

            try:
                use = self.censors[label_name]

            except KeyError:
                pass

            else:
                # Where the pixel mask is False, set the data as NaN.
                column[~use] = np.nan

            columns.append(column)

        design_matrix = self.vectorizer(np.vstack(columns).T)
        #design_matrix[~np.isfinite(design_matrix)] = 0

        return design_matrix


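    # Sketch of the censoring masks this property consumes (illustrative
    # values; the label name and `num_pixels` are placeholders):
    #
    #     >>> use = np.ones(num_pixels, dtype=bool)
    #     >>> use[100:200] = False            # censor these pixels for TEFF
    #     >>> model.censors = {"TEFF": use}

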
    @property
    def theta(self):
        """ Return the theta coefficients (spectral model derivatives). """
        return self._theta


    @property
    def s2(self):
        """ Return the intrinsic variance (s^2) for all pixels. """
        return self._s2


    # Model attributes that can be changed after initialisation.


    @property
    def censors(self):
        """ Return the wavelength censor masks for the labels. """
        return self._censors


    @censors.setter
    def censors(self, censors):
        """
        Set label censoring masks for each pixel.

        :param censors:
            A dictionary-like object with label names as keys, and boolean
            arrays as values.
        """

        censors = {} if censors is None else censors
        if isinstance(censors, censoring.Censors):
            # Could be a censoring dictionary from a different model,
            # with different label names and pixels.

            # But more likely: we are loading a model from disk.
            self._censors = censors

        elif isinstance(censors, dict):
            self._censors = censoring.Censors(
                self.vectorizer.label_names, self.training_set_flux.shape[1],
                censors)

        else:
            raise TypeError(
                "censors must be a dictionary or a censoring.Censors object")


    @property
    def dispersion(self):
        """ Return the dispersion points for all pixels. """
        return self._dispersion


    @dispersion.setter
    def dispersion(self, dispersion):
        """
        Set the dispersion values for all the pixels.

        :param dispersion:
            An array of the dispersion values.
        """
        if dispersion is None:
            self._dispersion = None
            return None

        dispersion = np.array(dispersion).flatten()
        if self.training_set_flux is not None \
        and dispersion.size != self.training_set_flux.shape[1]:
            raise ValueError("dispersion provided does not match the number "
                             "of pixels per star ({0} != {1})".format(
                                dispersion.size, self.training_set_flux.shape[1]))

        if dispersion.dtype.kind not in "iuf":
            raise ValueError("dispersion values are not float-like")

        if not np.all(np.isfinite(dispersion)):
            raise ValueError("dispersion values must be finite")

        self._dispersion = dispersion
        return None


    @property
    def regularization(self):
        """ Return the strength of the L1 regularization for this model. """
        return self._regularization


    @regularization.setter
    def regularization(self, regularization):
        """
        Specify the strength of the regularization for the model, either as a
        single value for all pixels, or a different strength for each pixel.

        :param regularization:
            The L1 regularization strength for the model.
        """

        if regularization is None:
            self._regularization = None
            return None

        regularization = np.array(regularization).flatten()
        if regularization.size == 1:
            regularization = regularization[0]
            if 0 > regularization or not np.isfinite(regularization):
                raise ValueError("regularization must be positive and finite")

        elif regularization.size != self.training_set_flux.shape[1]:
            raise ValueError("regularization array must be of size `num_pixels`")

        elif np.any(0 > regularization) \
        or not np.all(np.isfinite(regularization)):
            raise ValueError("regularization must be positive and finite")

        self._regularization = regularization
        return None


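    # Both forms the setter accepts, sketched (values are illustrative):
    #
    #     >>> model.regularization = 1000.0               # one strength for every pixel
    #     >>> model.regularization = np.ones(num_pixels)  # a per-pixel strength

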
    # Convenient functions and properties.


    @property
    def is_trained(self):
        """ Return True or False for whether the model is trained. """
        return all(getattr(self, attr, None) is not None
            for attr in self._trained_attributes)


    def reset(self):
        """ Clear any attributes that have been trained. """
        for attribute in self._trained_attributes:
            setattr(self, "_{}".format(attribute), None)
        return None


    def _pixel_access(self, array, index, default=None):
        """
        Safely access a (potentially per-pixel) attribute of the model.

        :param array:
            Either `None`, a float value, or an array the size of the dispersion
            array.

        :param index:
            The zero-indexed pixel to attempt to access.

        :param default: [optional]
            The default value to return if `array` is None.
        """

        if array is None:
            return default
        try:
            return array[index]
        except (IndexError, TypeError):
            return array


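    # `_pixel_access` semantics, sketched:
    #
    #     >>> model._pixel_access(None, 3, default=0.0)  # -> 0.0 (nothing set)
    #     >>> model._pixel_access(1000.0, 3)             # -> 1000.0 (scalar applies to all pixels)
    #     >>> model._pixel_access(np.arange(P), 3)       # -> 3 (per-pixel array is indexed)

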
    def _verify_training_data(self, rho_warning=0.90):
        """
        Verify the training data for the appropriate shape and content.

        :param rho_warning: [optional]
            Maximum correlation value between labels before a warning is given.
        """

        if self.training_set_flux.shape != self.training_set_ivar.shape:
            raise ValueError("the training set flux and inverse variance arrays"
                             " for the labelled set must have the same shape")

        if len(self.training_set_labels) != self.training_set_flux.shape[0]:
            raise ValueError(
                "the first axis of the training set flux array should "
                "have the same length as the number of rows in the labelled "
                "set (N_stars, N_pixels)")

        if not np.all(np.isfinite(self.training_set_labels)):
            raise ValueError("training set labels are not all finite")

        if not np.all(np.isfinite(self.training_set_flux)):
            raise ValueError("training set fluxes are not all finite")

        if not np.all(self.training_set_ivar >= 0) \
        or not np.all(np.isfinite(self.training_set_ivar)):
            raise ValueError("training set ivars are not all positive and finite")

        # Look for very high correlation coefficients between labels, which
        # can make training very slow.
        rho = np.corrcoef(self.training_set_labels.T)

        # Set the diagonal indices to zero.
        K = rho.shape[0]
        rho[np.diag_indices(K)] = 0.0
        indices = np.argsort(rho.flatten())[::-1]

        for index in indices:
            x, y = (index % K, index // K)
            rho_xy = rho[x, y]
            if rho_xy >= rho_warning:
                if x > y: # One warning per correlated label pair.
                    logger.warning(
                        "Labels '{X}' and '{Y}' are highly correlated "
                        "(rho = {rho_xy:.2}). This may cause very slow training "
                        "times. Are both labels needed?".format(
                            X=self.vectorizer.label_names[x],
                            Y=self.vectorizer.label_names[y],
                            rho_xy=rho_xy))
            else:
                break
        return None


    def in_convex_hull(self, labels):
        """
        Return whether the provided labels are inside a convex hull constructed
        from the labelled set.

        :param labels:
            A `NxK` array of `N` sets of `K` labels, where `K` is the number of
            labels that make up the vectorizer.

        :returns:
            A boolean array as to whether the points are in the convex hull of
            the labelled set.
        """

        labels = np.atleast_2d(labels)
        if labels.shape[1] != self.training_set_labels.shape[1]:
            raise ValueError("expected {} labels; got {}".format(
                self.training_set_labels.shape[1], labels.shape[1]))

        hull = Delaunay(self.training_set_labels)
        return hull.find_simplex(labels) >= 0


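    # Usage sketch (the label values and their ordering are hypothetical):
    #
    #     >>> in_hull = model.in_convex_hull([[5777.0, 4.44, 0.0]])
    #     >>> # -> boolean array of length 1

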
    def write(self, path, include_training_set_spectra=False, overwrite=False,
        protocol=-1):
        """
        Serialise the trained model and save it to disk. This will save all
        relevant training attributes, and optionally, the training data.

        :param path:
            The path to save the model to.

        :param include_training_set_spectra: [optional]
            Save the labelled set, normalised flux and inverse variance used to
            train the model.

        :param overwrite: [optional]
            Overwrite the existing file path, if it already exists.

        :param protocol: [optional]
            The Python pickling protocol to employ. Use 2 for compatibility with
            previous Python releases, -1 for performance.
        """

        if os.path.exists(path) and not overwrite:
            raise IOError("path already exists: {0}".format(path))

        attributes = list(self._descriptive_attributes) \
                   + list(self._trained_attributes) \
                   + list(self._data_attributes)

        if "metadata" in attributes:
            logger.warning("'metadata' is a protected attribute. Ignoring.")
            attributes.remove("metadata")

        # Store up all the trained attributes and a hash of the training set.
        state = {}
        for attribute in attributes:

            value = getattr(self, attribute)

            try:
                # If it's a vectorizer or censoring dict, etc, get the state.
                value = value.__getstate__()
            except:
                pass

            state[attribute] = value

        # Create a metadata dictionary.
        state["metadata"] = dict(
            version=__version__,
            model_class=type(self).__name__,
            modified=str(datetime.now()),
            data_attributes=self._data_attributes,
            descriptive_attributes=self._descriptive_attributes,
            trained_attributes=self._trained_attributes,
            training_set_hash=utils.short_hash(
                getattr(self, attr) for attr in self._data_attributes),
        )

        if not include_training_set_spectra:
            state.pop("training_set_flux")
            state.pop("training_set_ivar")

            if not self.is_trained:
                logger.warning(
                    "The training set spectra won't be saved, and this model "
                    "is not already trained. The saved model will not be "
                    "able to be trained when loaded!")

        with open(path, "wb") as fp:
            pickle.dump(state, fp, protocol)
        return None


    @classmethod
    def read(cls, path, **kwargs):
        """
        Read a saved model from disk.

        :param path:
            The path where to load the model from.
        """

        encodings = ("utf-8", "latin-1")
        for encoding in encodings:
            kwds = {"encoding": encoding} if version_info[0] >= 3 else {}
            try:
                with open(path, "rb") as fp:
                    state = pickle.load(fp, **kwds)

            except UnicodeDecodeError:
                if encoding == encodings[-1]:
                    raise

            else:
                break

        # Parse the state.
        metadata = state.get("metadata", {})
        version_saved = metadata.get("version", "0.1.0")
        if version_saved >= "0.2.0": # Refactor'd.

            init_attributes = list(metadata["data_attributes"]) \
                            + list(metadata["descriptive_attributes"])

            kwds = dict([(a, state.get(a, None)) for a in init_attributes])

            # Initiate the vectorizer.
            vectorizer_class, vectorizer_kwds = kwds["vectorizer"]
            klass = getattr(vectorizer_module, vectorizer_class)
            kwds["vectorizer"] = klass(**vectorizer_kwds)

            # Initiate the censors.
            kwds["censors"] = censoring.Censors(**kwds["censors"])

            model = cls(**kwds)

            # Set training attributes.
            for attr in metadata["trained_attributes"]:
                setattr(model, "_{}".format(attr), state.get(attr, None))

            return model

        else:
            raise NotImplementedError(
                "Cannot auto-convert old model files yet; "
                "contact Andy Casey <[email protected]> if you need this")


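    # Serialisation round-trip, sketched (the path is a placeholder):
    #
    #     >>> model.write("model.pkl", include_training_set_spectra=True,
    #     ...             overwrite=True)
    #     >>> restored = CannonModel.read("model.pkl")

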
    def train(self, threads=None, **kwargs):
        """
        Train the model.

        :param threads: [optional]
            The number of parallel threads to use.

        :returns:
            A three-length tuple containing the spectral coefficients `theta`,
            the squared scatter term at each pixel `s2`, and metadata related to
            the training of each pixel.
        """

        if self.training_set_flux is None or self.training_set_ivar is None:
            raise TypeError(
                "cannot train: training set spectra not saved with the model")

        S, P = self.training_set_flux.shape
        T = self.design_matrix.shape[1]

        logger.info("Training {0}-label {1} with {2} stars and {3} pixels/star"\
            .format(len(self.vectorizer.label_names), type(self).__name__, S, P))

        # Parallelise out.
        if threads in (1, None):
            mapper, pool = (map, None)

        else:
            pool = mp.Pool(threads)
            mapper = pool.map

        func = utils.wrapper(fitting.fit_pixel_fixed_scatter, None, kwargs, P)

        meta = []
        theta = np.nan * np.ones((P, T))
        s2 = np.nan * np.ones(P)

        for pixel, (flux, ivar) \
        in enumerate(zip(self.training_set_flux.T, self.training_set_ivar.T)):

            args = (
                flux, ivar,
                self._initial_theta(pixel),
                self.design_matrix,
                self._pixel_access(self.regularization, pixel, 0.0),
                None
            )
            (pixel_theta, pixel_s2, pixel_meta), = mapper(func, [args])

            meta.append(pixel_meta)
            theta[pixel], s2[pixel] = (pixel_theta, pixel_s2)

        self._theta, self._s2 = (theta, s2)

        if pool is not None:
            pool.close()
            pool.join()

        return (theta, s2, meta)


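    # Training sketch: fit every pixel, optionally in parallel (the thread
    # count is illustrative):
    #
    #     >>> theta, s2, meta = model.train(threads=4)

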
    @requires_training
    def __call__(self, labels):
        """
        Return spectral fluxes, given the labels.

        :param labels:
            An array of stellar labels.
        """

        # Scale and offset the labels.
        scaled_labels = (np.atleast_2d(labels) - self._fiducials)/self._scales
        flux = np.dot(self.theta, self.vectorizer(scaled_labels)).T
        return flux[0] if flux.shape[0] == 1 else flux


    @requires_training
    def test(self, flux, ivar, initial_labels=None, threads=None, **kwargs):
        """
        Run the test step on spectra.

        :param flux:
            The (pseudo-continuum-normalised) spectral flux.

        :param ivar:
            The inverse variance values for the spectral fluxes.

        :param initial_labels: [optional]
            The initial labels to try for each spectrum. This can be a single
            set of initial values, or one set of initial values for each star.

        :param threads: [optional]
            The number of parallel threads to use.
        """

        if threads in (1, None):
            mapper, pool = (map, None)

        else:
            pool = mp.Pool(threads)
            mapper = pool.map

        flux, ivar = (np.atleast_2d(flux), np.atleast_2d(ivar))
        S, P = flux.shape

        if ivar.shape != flux.shape:
            raise ValueError("flux and ivar arrays must be the same shape")

        if initial_labels is None:
            initial_labels = self._fiducials

        initial_labels = np.atleast_2d(initial_labels)
        if initial_labels.shape[0] != S and len(initial_labels.shape) == 2:
            initial_labels = np.tile(initial_labels.flatten(), S)\
                             .reshape(S, -1, len(self._fiducials))

        func = utils.wrapper(fitting.fit_spectrum,
            (self.vectorizer, self.theta, self.s2, self._fiducials, self._scales),
            kwargs, S, message="Running test step on {} spectra".format(S))

        labels, cov, meta = zip(*mapper(func, zip(flux, ivar, initial_labels)))

        if pool is not None:
            pool.close()
            pool.join()

        return (np.array(labels), np.array(cov), meta)


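    # Test-step sketch (`flux` and `ivar` are observed spectra prepared
    # elsewhere, with the same pixel sampling as the training set):
    #
    #     >>> labels, cov, meta = model.test(flux, ivar)
    #     >>> model_flux = model(labels[0])   # predicted spectrum via __call__

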
    def _initial_theta(self, pixel_index, **kwargs):
        """
        Return a list of guesses of the spectral coefficients for the given
        pixel index. Initial values are sourced in the following preference
        order:

            (1) a previously trained `theta` value for this pixel,
            (2) an estimate of `theta` using linear algebra,
            (3) a neighbouring pixel's `theta` value,
            (4) the fiducial value of [1, 0, ..., 0].

        :param pixel_index:
            The zero-indexed integer of the pixel.

        :returns:
            A list of initial theta guesses, and the source of each guess.
        """

        guesses = []

        if self.theta is not None:
            # Previously trained theta value.
            if np.all(np.isfinite(self.theta[pixel_index])):
                guesses.append((self.theta[pixel_index], "previously_trained"))

        # Estimate from linear algebra.
        theta, cov = fitting.fit_theta_by_linalg(
            self.training_set_flux[:, pixel_index],
            self.training_set_ivar[:, pixel_index],
            s2=kwargs.get("s2", 0.0), design_matrix=self.design_matrix)

        if np.all(np.isfinite(theta)):
            guesses.append((theta, "linear_algebra"))

        if self.theta is not None:
            # Neighbouring pixels' theta values.
            for neighbour_pixel_index in set(np.clip(
                [pixel_index - 1, pixel_index + 1],
                0, self.training_set_flux.shape[1] - 1)):

                if np.all(np.isfinite(self.theta[neighbour_pixel_index])):
                    guesses.append(
                        (self.theta[neighbour_pixel_index], "neighbour_pixel"))

        # Fiducial value.
        fiducial = np.hstack([1.0, np.zeros(len(self.vectorizer.terms))])
        guesses.append((fiducial, "fiducial"))

        return guesses