Passed
Push — master (edd9bf...c7c6d0)
by Stefan
04:22
created

sciapy.regress.__main__.save_samples_netcdf()   (rated B)

Complexity

Conditions 7

Size

Total Lines 43
Code Lines 37

Duplication

Lines 43
Ratio 100 %

Code Coverage

Tests 1
CRAP Score 49.3305

Importance

Changes 0
Metric   Value
cc       7        (cyclomatic complexity)
eloc     37       (executable lines of code)
nop      8        (number of parameters)
dl       43       (duplicated lines)
loc      43       (lines of code)
ccs      1
cts      21
cp       0.0476   (coverage ratio)
crap     49.3305  (CRAP score)
rs       7.592
c        0
b        0
f        0
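
For context, the CRAP score combines cyclomatic complexity with test coverage. Assuming the standard definition CRAP = cc^2 * (1 - coverage)^3 + cc, the metrics above reproduce the reported value:

cc = 7             # cyclomatic complexity ("cc" above)
coverage = 0.0476  # coverage ratio ("cp" above)
print(cc**2 * (1 - coverage)**3 + cc)  # -> 49.3306, the reported 49.3305 up to rounding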

How to fix: Many Parameters

Methods with many parameters are not only hard to understand; their parameters also tend to become inconsistent when you need more or different data.

There are several approaches to avoid long parameter lists (the first is sketched below):

- Introduce a parameter object that groups values which belong together.
- Pass a whole object instead of several of its attributes.
- Replace a parameter with a method call when the callee can obtain the value itself.
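A minimal sketch of the parameter-object approach, applied to save_samples_netcdf(); the NetcdfTarget container and the _v2 wrapper are illustrative only and not part of sciapy:

from collections import namedtuple

# Hypothetical parameter object bundling the output-related arguments.
NetcdfTarget = namedtuple(
	"NetcdfTarget",
	["filename", "alt", "lat", "scale", "lnpost", "compressed"],
)

def save_samples_netcdf_v2(target, model, samples):
	# Delegate to the original function; only the call sites change,
	# and the related values now travel together.
	return save_samples_netcdf(
		target.filename, model, target.alt, target.lat, samples,
		scale=target.scale, lnpost=target.lnpost,
		compressed=target.compressed,
	)

# usage (values are placeholders):
# target = NetcdfTarget("samples.nc", 70, 65, 1e-6, None, True)
# save_samples_netcdf_v2(target, gpmodel, samples)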

# -*- coding: utf-8 -*-
# vim:fileencoding=utf-8
#
# Copyright (c) 2017-2018 Stefan Bender
#
# This module is part of sciapy.
# sciapy is free software: you can redistribute it or modify
# it under the terms of the GNU General Public License as published
# by the Free Software Foundation, version 2.
# See accompanying LICENSE file or http://www.gnu.org/licenses/gpl-2.0.html.
"""SCIAMACHY data regression command line interface

Command line main program for regression analysis of SCIAMACHY
daily zonal mean time series (NO for now).
"""
import ctypes
import logging

import numpy as np
import scipy.optimize as op
from scipy.interpolate import interp1d

import george
import celerite

import matplotlib as mpl
# switch off X11 rendering
mpl.use("Agg")

from .load_data import load_solar_gm_table, load_scia_dzm
from .models_cel import trace_gas_model
from .mcmc import mcmc_sample_model
from .statistics import mcmc_statistics

from ._gpkernels import (george_solvers,
		setup_george_kernel, setup_celerite_terms)
from ._plot import (plot_single_sample_and_residuals,
		plot_residual, plot_random_samples)
from ._options import parser

def save_samples_netcdf(filename, model, alt, lat, samples,
		scale=1e-6,
		lnpost=None, compressed=False):
	from xarray import Dataset
	smpl_ds = Dataset(dict([(pname, (["lat", "alt", "sample"],
				samples[..., i].reshape(1, 1, -1)))
				for i, pname in enumerate(model.get_parameter_names())]
				# + [("lpost", (["lat", "alt", "sample"], lnp.reshape(1, 1, -1)))]
			),
			coords={"lat": [lat], "alt": [alt]})

	for modn in model.mean.models:
		modl = model.mean.models[modn]
		if hasattr(modl, "mean"):
			smpl_ds.attrs[modn + ":mean"] = modl.mean

	units = {"kernel": {
				"log": "log(10$^{{{0:.0f}}}$ cm$^{{-3}}$)"
						.format(-np.log10(scale))},
			"mean": {
				"log": "log(10$^{{{0:.0f}}}$ cm$^{{-3}}$)"
						.format(-np.log10(scale)),
				"cos": "10$^{{{0:.0f}}}$ cm$^{{-3}}$".format(-np.log10(scale)),
				"sin": "10$^{{{0:.0f}}}$ cm$^{{-3}}$".format(-np.log10(scale)),
				"val": "10$^{{{0:.0f}}}$ cm$^{{-3}}$".format(-np.log10(scale)),
				"amp": "10$^{{{0:.0f}}}$ cm$^{{-3}}$".format(-np.log10(scale)),
				"tau": "d"}}
	for pname in smpl_ds.data_vars:
		_pp = pname.split(':')
		for _n, _u in units[_pp[0]].items():
			if _pp[-1].startswith(_n):
				logging.debug("units for %s: %s", pname, _u)
				smpl_ds[pname].attrs["units"] = _u

	smpl_ds["alt"].attrs = {"long_name": "altitude", "units": "km"}
	smpl_ds["lat"].attrs = {"long_name": "latitude", "units": "degrees_north"}

	_encoding = None
	if compressed:
		_encoding = {var: {"zlib": True, "complevel": 1}
					for var in smpl_ds.data_vars}
	smpl_ds.to_netcdf(filename, encoding=_encoding)
	smpl_ds.close()
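
# The saved file can be inspected with xarray; the file name here is a
# placeholder:
#
#   import xarray as xr
#   ds = xr.open_dataset("samples.nc")
#   # one variable per model parameter, with dimensions
#   # (lat: 1, alt: 1, sample: <number of samples>)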


def _train_test_split(times, data, errs, train_frac,
		test_frac, randomize):
	# split the data into training and test subsets according to the
	# fraction given (default is 1, i.e. no splitting)
	ndata = len(times)
	train_size = int(ndata * train_frac)
	test_size = min(ndata - train_size, int(ndata * test_frac))
	# randomize if requested
	if randomize:
		permut_idx = np.random.permutation(np.arange(ndata))
	else:
		permut_idx = np.arange(ndata)
	train_idx = np.sort(permut_idx[:train_size])
	test_idx = np.sort(permut_idx[train_size:train_size + test_size])
	times_train = times[train_idx]
	data_train = data[train_idx]
	errs_train = errs[train_idx]
	if test_size > 0:
		times_test = times[test_idx]
		data_test = data[test_idx]
		errs_test = errs[test_idx]
	else:
		# an empty test split falls back to the full data set
		times_test = times
		data_test = data
		errs_test = errs
	logging.info("using %s of %s samples for training.", len(times_train), ndata)
	logging.info("using %s of %s samples for testing.", len(times_test), ndata)
	return (times_train, data_train, errs_train,
			times_test, data_test, errs_test)
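
# Example (illustrative numbers): with ndata = 100, train_frac = 0.8,
# test_frac = 0.2, and randomize = False, the first 80 points form the
# training set and the following 20 the test set; a zero test fraction
# returns the full data set as the "test" data.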


def _r_sun_earth(time, tfmt="jyear"):
	"""First order approximation of the Sun-Earth distance

	The Sun-to-Earth distance can be used to (un-)normalize proxies
	to the actual distance to the Sun instead of 1 AU.

	Parameters
	----------
	time : float
		Time value in the units given by 'tfmt'.
	tfmt : str, optional
		The units of 'time' as supported by the
		astropy.time time formats. Default: 'jyear'.

	Returns
	-------
	dist : float
		The Sun-Earth distance at the given day of year in AU.
	"""
	from astropy.time import Time
	tdoy = Time(time, format=tfmt)
	tdoy.format = "yday"
	doy = int(tdoy.value.split(':')[1])
	return 1 - 0.01672 * np.cos(2 * np.pi / 365.256363 * (doy - 4))
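
# Quick check of the approximation above: at perihelion (doy near 4) the
# distance is 1 - 0.01672 ~ 0.98328 AU; half a year later (doy near 187)
# it is about 1.01672 AU.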


def main():
	logging.basicConfig(level=logging.WARNING,
			format="[%(levelname)-8s] (%(asctime)s) "
				"%(filename)s:%(lineno)d %(message)s",
			datefmt="%Y-%m-%d %H:%M:%S %z")

	args = parser.parse_args()

	logging.info("command line arguments: %s", args)
	if args.quiet:
		logging.getLogger().setLevel(logging.ERROR)
	elif args.verbose:
		logging.getLogger().setLevel(logging.INFO)
	else:
		logging.getLogger().setLevel(args.loglevel)

	from numpy.distutils.system_info import get_info
	for oblas_path in get_info("openblas")["library_dirs"]:
		oblas_name = "{0}/libopenblas.so".format(oblas_path)
		logging.info("Trying %s", oblas_name)
		try:
			oblas_lib = ctypes.cdll.LoadLibrary(oblas_name)
			oblas_cores = oblas_lib.openblas_get_num_threads()
			oblas_lib.openblas_set_num_threads(args.openblas_threads)
			logging.info("Using %s/%s Openblas thread(s).",
					oblas_lib.openblas_get_num_threads(), oblas_cores)
		except (OSError, AttributeError):
			# avoid a bare `except:`; failing to load the library or to
			# look up the thread functions are the expected error modes
			logging.info("Setting number of openblas threads failed.")

	if args.random_seed is not None:
		np.random.seed(args.random_seed)

	if args.proxies:
		# proxies are passed as comma-separated "name:file" pairs,
		# e.g. "name1:file1,name2:file2"
		proxies = args.proxies.split(',')
		proxy_dict = dict(_p.split(':') for _p in proxies)
	else:
		proxy_dict = {}
	lag_dict = {pn: 0 for pn in proxy_dict.keys()}

	# Post-processing of arguments...
	# List of proxy lag fits from csv
	fit_lags = args.fit_lags.split(',')
	# List of proxy lifetime fits from csv
	fit_lifetimes = args.fit_lifetimes.split(',')
	fit_annlifetimes = args.fit_annlifetimes.split(',')
	# List of proxy lag times from csv
	lag_dict.update(dict(_ls.split(':') for _ls in args.lag_times.split(',')))
	# List of cycles (frequencies in 1/year) from argument list (csv)
	try:
		freqs = list(map(float, args.freqs.split(',')))
	except ValueError:
		freqs = []
	args.freqs = freqs
	# List of initial parameter values
	initial = None
	if args.initial is not None:
		try:
			initial = list(map(float, args.initial.split(',')))
		except ValueError:
			pass
	# List of GP kernels from argument list (csv)
	kernls = args.kernels.split(',')

	lat = args.latitude
	alt = args.altitude
	logging.info("location: %.0f°N %.0f km", lat, alt)

	no_ys, no_dens, no_errs, no_szas = load_scia_dzm(args.file, alt, lat,
			tfmt=args.time_format,
			scale=args.scale,
			#subsample_factor=args.random_subsample,
			#subsample_method="random",
			akd_threshold=args.akd_threshold,
			cnt_threshold=args.cnt_threshold,
			center=args.center_data,
			season=args.season,
			SPEs=args.exclude_spe)

	(no_ys_train, no_dens_train, no_errs_train,
		no_ys_test, no_dens_test, no_errs_test) = _train_test_split(
				no_ys, no_dens, no_errs, args.train_fraction,
				args.test_fraction, args.random_train_test)

	sza_intp = interp1d(no_ys, no_szas, bounds_error=False)

	max_amp = 1e10 * args.scale
	max_days = 100

	proxy_config = {}
	for pn, pf in proxy_dict.items():
		pt, pp = load_solar_gm_table(pf, cols=[0, 1], names=["time", pn], tfmt=args.time_format)
		# use log of proxy values
		pv = np.log(pp[pn]) if pn in args.log_proxies.split(',') else pp[pn]
		# normalize to sun--earth distance squared
		if pn in args.norm_proxies_distSEsq.split(','):
			rad_sun_earth = np.vectorize(_r_sun_earth)(pt, tfmt=args.time_format)
			pv /= rad_sun_earth**2
		# normalize by cos(SZA)
		if pn in args.norm_proxies_SZA.split(',') and sza_intp is not None:
			pv *= np.cos(np.radians(sza_intp(pt)))
		proxy_config.update({pn:
			dict(times=pt, values=pv,
				center=pn in args.center_proxies.split(','),
				positive=pn in args.positive_proxies.split(','),
				lag=float(lag_dict[pn]),
				max_amp=max_amp, max_days=max_days,
				sza_intp=sza_intp if args.use_sza else None,
			)}
		)

	model = trace_gas_model(constant=args.fit_offset,
			proxy_config=proxy_config, **vars(args))

	logging.debug("model dict: %s", model.get_parameter_dict())
	model.freeze_all_parameters()
	# thaw parameters according to requested fits
	for pn in proxy_dict.keys():
		model.thaw_parameter("{0}:amp".format(pn))
		if pn in fit_lags:
			model.thaw_parameter("{0}:lag".format(pn))
		if pn in fit_lifetimes:
			model.set_parameter("{0}:tau0".format(pn), 1e-3)
			model.thaw_parameter("{0}:tau0".format(pn))
			if pn in fit_annlifetimes:
				model.thaw_parameter("{0}:taucos1".format(pn))
				model.thaw_parameter("{0}:tausin1".format(pn))
		else:
			model.set_parameter("{0}:ltscan".format(pn), 0)
	for freq in freqs:
		if not args.fit_phase:
			model.thaw_parameter("f{0:.0f}:cos".format(freq))
			model.thaw_parameter("f{0:.0f}:sin".format(freq))
		else:
			model.thaw_parameter("f{0:.0f}:amp".format(freq))
			model.thaw_parameter("f{0:.0f}:phase".format(freq))
	if args.fit_offset:
		#model.set_parameter("offset:value", -100.)
		#model.set_parameter("offset:value", 0)
		model.thaw_parameter("offset:value")

	if initial is not None:
		model.set_parameter_vector(initial)
	# model.thaw_parameter("GM:ltscan")
	logging.debug("params: %s", model.get_parameter_dict())
	logging.debug("param names: %s", model.get_parameter_names())
	logging.debug("param vector: %s", model.get_parameter_vector())
	logging.debug("param bounds: %s", model.get_parameter_bounds())
	#logging.debug("model value: %s", model.get_value(no_ys))
	#logging.debug("default log likelihood: %s", model.log_likelihood(model.vector))

	# setup the Gaussian Process kernel
	kernel_base = (1e7 * args.scale)**2
	ksub = args.name_suffix

	solver = "basic"
	skwargs = {}
	if args.HODLR_Solver:
		solver = "HODLR"
		#skwargs = {"tol": 1e-3}

	if args.george:
		gpname, kernel = setup_george_kernel(kernls,
				kernel_base=kernel_base, fit_bias=args.fit_bias)
		gpmodel = george.GP(kernel, mean=model,
			white_noise=1.e-25, fit_white_noise=args.fit_white,
			solver=george_solvers[solver], **skwargs)
		# the george interface does not allow setting the bounds in
		# the kernel initialization so we prepare simple default bounds
		kernel_bounds = [(-0.3 * max_amp, 0.3 * max_amp)
				for _ in gpmodel.kernel.get_parameter_names()]
		bounds = gpmodel.mean.get_parameter_bounds() + kernel_bounds
	else:
		gpname, cel_terms = setup_celerite_terms(kernls,
				fit_bias=args.fit_bias, fit_white=args.fit_white)
		gpmodel = celerite.GP(cel_terms, mean=model,
			fit_white_noise=args.fit_white,
			fit_mean=True)
		bounds = gpmodel.get_parameter_bounds()
	gpmodel.compute(no_ys_train, no_errs_train)
	logging.debug("gpmodel params: %s", gpmodel.get_parameter_dict())
	logging.debug("gpmodel bounds: %s", bounds)
	logging.debug("initial log likelihood: %s", gpmodel.log_likelihood(no_dens_train))
	if isinstance(gpmodel, celerite.GP):
		logging.info("(GP) jitter: %s", gpmodel.kernel.jitter)
	model_name = "_".join(gpmodel.mean.get_parameter_names()).replace(':', '')
	gpmodel_name = model_name + gpname
	logging.info("GP model name: %s", gpmodel_name)

	pre_opt = False
	if args.optimize > 0:
		def gpmodel_mean(x, *p):
			gpmodel.set_parameter_vector(p)
			return gpmodel.mean.get_value(x)

		def gpmodel_res(x, *p):
			gpmodel.set_parameter_vector(p)
			return (gpmodel.mean.get_value(x) - no_dens_train) / no_errs_train

		def lpost(p, y, gp):
			gp.set_parameter_vector(p)
			return gp.log_likelihood(y, quiet=True) + gp.log_prior()

		def nlpost(p, y, gp):
			lp = lpost(p, y, gp)
			return -lp if np.isfinite(lp) else 1e25

		def grad_nlpost(p, y, gp):
			gp.set_parameter_vector(p)
			grad_ll = gp.grad_log_likelihood(y)
			if isinstance(grad_ll, tuple):
				# celerite
				return -grad_ll[1]
			# george
			return -grad_ll

		# use `elif` and fail loudly on unknown modes so that `resop_gp`
		# is always defined when it is used below
		if args.optimize == 1:
			resop_gp = op.minimize(
				nlpost,
				gpmodel.get_parameter_vector(),
				args=(no_dens_train, gpmodel),
				bounds=bounds,
				# method="l-bfgs-b", options=dict(disp=True, maxcor=100, eps=1e-9, ftol=2e-15, gtol=1e-8))
				method="l-bfgs-b", jac=grad_nlpost)
				# method="tnc", options=dict(disp=True, maxiter=500, xtol=1e-12))
				# method="nelder-mead", options=dict(disp=True, maxfev=100000, fatol=1.49012e-8, xatol=1.49012e-8))
				# method="Powell", options=dict(ftol=1.49012e-08, xtol=1.49012e-08))
		elif args.optimize == 2:
			resop_gp = op.differential_evolution(
				nlpost,
				bounds=bounds,
				args=(no_dens_train, gpmodel),
				popsize=2 * args.walkers, tol=0.01)
		elif args.optimize == 3:
			resop_bh = op.basinhopping(
				nlpost,
				gpmodel.get_parameter_vector(),
				niter=200,
				minimizer_kwargs=dict(
					args=(no_dens_train, gpmodel),
					bounds=bounds,
					# method="tnc"))
					# method="l-bfgs-b", options=dict(maxcor=100)))
					method="l-bfgs-b", jac=grad_nlpost))
					# method="Nelder-Mead"))
					# method="BFGS"))
					# method="Powell", options=dict(ftol=1.49012e-08, xtol=1.49012e-08)))
			logging.debug("optimization result: %s", resop_bh)
			resop_gp = resop_bh.lowest_optimization_result
		elif args.optimize == 4:
			# NOTE: curve_fit returns a plain parameter array, not an
			# `OptimizeResult`; the `.message`/`.x`/`.success` accesses
			# below assume modes 1-3
			resop_gp, cov_gp = op.curve_fit(
				gpmodel_mean,
				no_ys_train, no_dens_train, gpmodel.get_parameter_vector(),
				bounds=tuple(np.array(bounds).T),
				# method='lm',
				# absolute_sigma=True,
				sigma=no_errs_train)
			print(resop_gp, np.sqrt(np.diag(cov_gp)))
		else:
			raise ValueError("unsupported optimize mode: {0}".format(args.optimize))
		logging.info("%s", resop_gp.message)
		logging.debug("optimization result: %s", resop_gp)
		logging.info("gpmodel dict: %s", gpmodel.get_parameter_dict())
		logging.info("log posterior trained: %s", lpost(gpmodel.get_parameter_vector(), no_dens_train, gpmodel))
		gpmodel.compute(no_ys_test, no_errs_test)
		logging.info("log posterior test: %s", lpost(gpmodel.get_parameter_vector(), no_dens_test, gpmodel))
		gpmodel.compute(no_ys, no_errs)
		logging.info("log posterior all: %s", lpost(gpmodel.get_parameter_vector(), no_dens, gpmodel))
		# cross check to make sure that the gpmodel parameter vector is really
		# set to the fitted parameters
		logging.info("opt. model vector: %s", resop_gp.x)
		gpmodel.compute(no_ys_train, no_errs_train)
		logging.debug("opt. log posterior trained 1: %s", lpost(resop_gp.x, no_dens_train, gpmodel))
		gpmodel.compute(no_ys_test, no_errs_test)
		logging.debug("opt. log posterior test 1: %s", lpost(resop_gp.x, no_dens_test, gpmodel))
		gpmodel.compute(no_ys, no_errs)
		logging.debug("opt. log posterior all 1: %s", lpost(resop_gp.x, no_dens, gpmodel))
		logging.debug("opt. model vector: %s", gpmodel.get_parameter_vector())
		gpmodel.compute(no_ys_train, no_errs_train)
		logging.debug("opt. log posterior trained 2: %s", lpost(gpmodel.get_parameter_vector(), no_dens_train, gpmodel))
		gpmodel.compute(no_ys_test, no_errs_test)
		logging.debug("opt. log posterior test 2: %s", lpost(gpmodel.get_parameter_vector(), no_dens_test, gpmodel))
		gpmodel.compute(no_ys, no_errs)
		logging.debug("opt. log posterior all 2: %s", lpost(gpmodel.get_parameter_vector(), no_dens, gpmodel))
		pre_opt = resop_gp.success
	try:
		logging.info("GM lt: %s", gpmodel.get_parameter("mean:GM:tau0"))
	except ValueError:
		pass
	logging.info("(GP) model: %s", gpmodel.kernel)
	if isinstance(gpmodel, celerite.GP):
		logging.info("(GP) jitter: %s", gpmodel.kernel.jitter)

	bestfit = gpmodel.get_parameter_vector()
	filename_base = ("NO_regress_fit_{0}_{1:.0f}_{2:.0f}_{{0}}_{3}"
					.format(gpmodel_name, lat * 10, alt, ksub))

	if args.mcmc:
		gpmodel.compute(no_ys_train, no_errs_train)
		samples, lnp = mcmc_sample_model(gpmodel,
				no_dens_train,
				beta=1.0,
				nwalkers=args.walkers, nburnin=args.burn_in,
				nprod=args.production, nthreads=args.threads,
				show_progress=args.progress,
				optimized=pre_opt, bounds=bounds, return_logpost=True)

		if args.train_fraction < 1. or args.test_fraction < 1.:
			logging.info("Statistics for the test samples")
			mcmc_statistics(gpmodel,
					no_ys_test, no_dens_test, no_errs_test,
					no_ys_train, no_dens_train, no_errs_train,
					samples, lnp,
			)
		logging.info("Statistics for all samples")
		mcmc_statistics(gpmodel,
				no_ys, no_dens, no_errs,
				no_ys_train, no_dens_train, no_errs_train,
				samples, lnp,
		)

		sampl_percs = np.percentile(samples, [2.5, 50, 97.5], axis=0)
		if args.plot_corner:
			import corner
			# Corner plot of the sampled parameters
			fig = corner.corner(samples,
					quantiles=[0.025, 0.5, 0.975],
					show_titles=True,
					labels=gpmodel.get_parameter_names(),
					truths=bestfit,
					hist_args=dict(normed=True))
			fig.savefig(filename_base.format("corner") + ".pdf", transparent=True)

		if args.save_samples:
			if args.samples_format in ["npz"]:
				# save the samples compressed to save space.
				np.savez_compressed(filename_base.format("sampls") + ".npz",
					samples=samples)
			if args.samples_format in ["nc", "netcdf4"]:
				save_samples_netcdf(filename_base.format("sampls") + ".nc",
					gpmodel, alt, lat, samples, scale=args.scale, compressed=True)
			if args.samples_format in ["h5", "hdf5"]:
				save_samples_netcdf(filename_base.format("sampls") + ".h5",
					gpmodel, alt, lat, samples, scale=args.scale, compressed=True)
		# MCMC finished here
	# set the model times and errors to use the full data set for plotting
	gpmodel.compute(no_ys, no_errs)
	if args.save_model:
		try:
			# python 2
			import cPickle as pickle
		except ImportError:
			# python 3
			import pickle
		# pickle and save the model
		with open(filename_base.format("model") + ".pkl", "wb") as f:
			pickle.dump(gpmodel, f, -1)

	# the sample-based plots and summaries below need `samples`, `lnp`,
	# and `sampl_percs`, which exist only if MCMC sampling was run
	if args.plot_samples and args.mcmc:
		plot_random_samples(gpmodel, no_ys, no_dens, no_errs,
				samples, args.scale,
				filename_base.format("sampls") + ".pdf",
				size=4, extra_years=[4, 2])

	if args.plot_median and args.mcmc:
		plot_single_sample_and_residuals(gpmodel, no_ys, no_dens, no_errs,
				sampl_percs[1],
				filename_base.format("median") + ".pdf")
	if args.plot_residuals and args.mcmc:
		plot_residual(gpmodel, no_ys, no_dens, no_errs,
				sampl_percs[1], args.scale,
				filename_base.format("medres") + ".pdf")
	if args.plot_maxlnp and args.mcmc:
		plot_single_sample_and_residuals(gpmodel, no_ys, no_dens, no_errs,
				samples[np.argmax(lnp)],
				filename_base.format("maxlnp") + ".pdf")
	if args.plot_maxlnpres and args.mcmc:
		plot_residual(gpmodel, no_ys, no_dens, no_errs,
				samples[np.argmax(lnp)], args.scale,
				filename_base.format("mlpres") + ".pdf")

	if args.mcmc:
		labels = gpmodel.get_parameter_names()
		logging.info("param percentiles [2.5, 50, 97.5]:")
		for pc, label in zip(sampl_percs.T, labels):
			median = pc[1]
			pc_minus = median - pc[0]
			pc_plus = pc[2] - median
			logging.debug("%s: %s", label, pc)
			logging.info("%s: %.6f (- %.6f) (+ %.6f)", label,
					median, pc_minus, pc_plus)

	logging.info("Finished successfully.")


if __name__ == "__main__":
	main()