diff --git a/enterprise_extensions/altpol/altmodel.py b/enterprise_extensions/altpol/altmodel.py
deleted file mode 100644
index ae8bcc1f..00000000
--- a/enterprise_extensions/altpol/altmodel.py
+++ /dev/null
@@ -1,137 +0,0 @@
-def altpol_model(psrs,
-                 noise_dict,
-                 MG = '1000',
-                 crn_bins = 14,
-                 prior = 'log-uniform',
-                 kappa = None,
-                 pdist = None,
-                 gamma_list = [4.333, 5,5,5],
-                 amp_list = None):
-
-    Npulsars = len(psrs)
-    Tspan = model_utils.get_tspan(psrs)
-    pols = ['TT', 'ST', 'VL', 'SL']
-    names = [psr.name for psr in psrs]
-    mask = [bool(int(m)) for m in MG]
-    freqs = np.arange(1/Tspan, (crn_bins+.1)/Tspan, 1/Tspan)
-    fnorm = np.repeat(freqs * kpc_to_meter/light_speed,2)
-
-    #Amplitudes
-    if not amp_list:
-        amp_list = []
-        if prior == 'log-uniform':
-            log10_As = [parameter.Uniform(-18,-11)('gw_log10_A_{}'.format(pol)) for pol in pols]
-        elif prior == 'lin-exp':
-            log10_As = [parameter.LinearExp(-18,-11)('gw_log10_A_{}'.format(pol)) for pol in pols]
-        else:
-            raise ValueError("The prior given is invalid. Choose between 'log-uniform' and 'lin-exp'.")
-        for m,l in zip(mask, log10_As):
-            if m:
-                amp_list.append(l)
-            else:
-                amp_list.append(-np.inf)
-
-    #Spectral Indicies
-    if not gamma_list:
-        gamma_list = []
-        sindex_list = [parameter.Uniform(0, 7)('gamma_' + pol) for pol in pols]
-        for m,g in zip(mask, sindex_list):
-            if m:
-                gamma_list.append(g)
-
-    #Timing model
-    s = gp_signals.MarginalizingTimingModel(use_svd = True)
-
-    #White noise
-    s += blocks.white_noise_block(vary=False, inc_ecorr= True)
-
-    #Pulsar red noise
-    s += blocks.red_noise_block(prior='log-uniform', Tspan=Tspan, components=30)
-
-
-    #ORF Functions
-    orf_funcs = [model_orfs.hd_orf, model_orfs.st_orf, model_orfs.vl_orf, model_orfs.sl_orf]
-
-    #Pulsar distances
-    param_list = {'{}_p_dist'.format(names[ii]):parameter.Normal(pdist[0,ii], pdist[1,ii])('{}_gw_p_dist'.format(names[ii])) for ii in range(Npulsars)}
-    param_list.update({'fnorm': fnorm})
-    param_list.update({'pnames': names})
-
-    #PSD
-    kappa = parameter.Uniform(0, 10)('kappa')
-    p_dist_dummy = parameter.Normal(1, .2)
-
-    ##TT Powerlaw
-    pl_tt = model_orfs.generalized_gwpol_psd(log10_A_tt = amp_list[0],
-                                             log10_A_st=-np.inf,
-                                             log10_A_vl=-np.inf,
-                                             log10_A_sl=-np.inf,
-                                             alpha_tt = (3 - gamma_list[0])/2,
-                                             alpha_alt = (3 - gamma_list[1])/2,
-                                             kappa = kappa,
-                                             p_dist = p_dist_dummy)
-    ##ST Powerlaw
-    pl_st = model_orfs.generalized_gwpol_psd(log10_A_tt = -np.inf,
-                                             log10_A_st= amp_list[1],
-                                             log10_A_vl=-np.inf,
-                                             log10_A_sl=-np.inf,
-                                             alpha_tt = (3 - gamma_list[0])/2,
-                                             alpha_alt = (3 - gamma_list[1])/2,
-                                             kappa = kappa,
-                                             p_dist = p_dist_dummy)
-    ##VL Powerlaw
-    pl_vl = model_orfs.generalized_gwpol_psd(log10_A_tt = -np.inf,
-                                             log10_A_st=-np.inf,
-                                             log10_A_vl=amp_list[2],
-                                             log10_A_sl=-np.inf,
-                                             alpha_tt = (3 - gamma_list[0])/2,
-                                             alpha_alt = (3 - gamma_list[1])/2,
-                                             kappa = kappa,
-                                             p_dist = p_dist_dummy)
-    ##SL Powerlaw
-    pl_sl = model_orfs.generalized_gwpol_psd(log10_A_tt = -np.inf,
-                                             log10_A_st=-np.inf,
-                                             log10_A_vl=-np.inf,
-                                             log10_A_sl=amp_list[3],
-                                             alpha_tt = (3 - gamma_list[0])/2,
-                                             alpha_alt = (3 - gamma_list[1])/2,
-                                             kappa = kappa,
-                                             p_dist = p_dist_dummy)
-
-
-    #GW red noise
-    s_gw = []
-    s_gw.append(gp_signals.FourierBasisCommonGP(spectrum=pl_tt, components=crn_bins,Tspan=Tspan, name='{}_gw'.format(pols[0]),
-                                                orf = model_orfs.hd_orf()))
-
-    s_gw.append(gp_signals.FourierBasisCommonGP(spectrum=pl_st, components=crn_bins,Tspan=Tspan, name='{}_gw'.format(pols[1]),
-                                                orf = model_orfs.st_orf()))
-
-    s_gw.append(gp_signals.FourierBasisCommonGP(spectrum=pl_vl, components=crn_bins,Tspan=Tspan, name='{}_gw'.format(pols[2]),
-                                                orf = model_orfs.vl_orf(**param_list)))
-
-    s_gw.append(gp_signals.FourierBasisGP(spectrum=pl_sl, components=crn_bins,Tspan=Tspan, name='{}_gw'.format(pols[3]))),
-
-    for m,sg in zip(mask, s_gw):
-        if m:
-            s+=sg
-
-    #Constructing the PTA object
-    model_list = []
-    for p in psrs:
-        model_list.append(s(p))
-    pta = signal_base.PTA(model_list)
-    pta.set_default_params(noise_dict)
-
-    #Setting the parameters to their given values
-    ii = 0
-    if pdist.shape[0] == 2:
-        for param in pta.params:
-            if 'p_dist' in param.name:
-                mu = pdist[0][ii]
-                sigma = pdist[1][ii]
-                param._prior = parameter.Normal(mu=mu,sigma=sigma)
-                param._mu = mu
-                param._sigma = sigma
-                ii += 1
-    return pta
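For context, a hypothetical pre-removal call to `altpol_model` would have looked like the sketch below. The `psrs`, `noise_dict`, and `pdist` inputs are assumptions here (a list of enterprise `Pulsar` objects, a white-noise parameter dictionary, and a 2 x Npulsars array of pulsar-distance means and widths); `MG` is the TT/ST/VL/SL on/off mask string parsed by `mask = [bool(int(m)) for m in MG]`.

```python
# Hypothetical usage sketch of the removed API; psrs and noise_dict are
# assumed to exist already and are not defined in this diff.
import numpy as np
from enterprise_extensions.altpol.altmodel import altpol_model  # pre-removal import path

# 2 x Npulsars array: row 0 = distance means (kpc), row 1 = distance sigmas
pdist = np.vstack([np.ones(len(psrs)), 0.2 * np.ones(len(psrs))])

pta = altpol_model(psrs, noise_dict,
                   MG='1100',          # enable TT and ST, disable VL and SL
                   crn_bins=14,
                   prior='log-uniform',
                   pdist=pdist)
print(pta.param_names)
```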
diff --git a/enterprise_extensions/altpol/altorfs.py b/enterprise_extensions/altpol/altorfs.py
deleted file mode 100644
index 88f70acf..00000000
--- a/enterprise_extensions/altpol/altorfs.py
+++ /dev/null
@@ -1,45 +0,0 @@
-import numpy as np
-import scipy.special as ss
-euler_const = 0.5772156649
-kpc_to_meter = 3.086e19
-light_speed = 299792458
-
-
-def HD_ORF (angle):
-    return 3/2*( (1/3 + ((1-np.cos(angle))/2) * (np.log((1-np.cos(angle))/2) - 1/6)))
-
-def ST_ORF (angle):
-    return 1/8 * (3 + np.cos(angle))
-
-def autovl(f, l):
-    return (6*np.log(4*np.pi*f*l) - 14 + 6 * euler_const)
-
-def critical_loc(ga,gb, tol = .95):
-    gdiff = ga - gb
-    if np.all(gdiff == 0.0) :
-        c = 2 * (-7 + 3*(np.log(2*ga) + 3*euler_const)) * tol
-    else:
-        c = (-7 + 3*(np.log(2 * ga * gb/(gdiff)) + euler_const + ss.sici(2 * gdiff)[1])) * tol
-    return np.arccos(np.real(1/4*(4 + 3 * ss.lambertw(-8/3 * np.exp(-7/3 - np.float64(c)/3), k = 0))))
-
-def VL_ORF(ang_with_info, f_norm):
-
-    xi = ang_with_info[:,2].astype(dtype = float)
-    cos = np.cos(xi)
-    orf = 3*np.log(2/(1-np.cos(xi))) -4*np.cos(xi) - 3
-
-    g1 = 2*np.pi * ang_with_info[:,0].astype(dtype = float) * f_norm
-    g2 = 2*np.pi * ang_with_info[:,1].astype(dtype = float) * f_norm
-    c_loc = critical_loc(g1,g2)
-    mask = np.logical_not(xi > c_loc)
-    g1 = g1[mask]
-    g2 = g2[mask]
-    gdiff = g1-g2
-    cos = cos[mask]
-
-    if np.all(gdiff == 0):
-        orf[mask] = 2 * (-4*cos - 3 + 3*(np.log(2 * g1) + euler_const + ss.sici(2 * g1)[1])*cos)
-    else:
-        orf[mask] = (-4*cos - 3 + 3*(np.log(2 * g1 * g2/(gdiff)) + euler_const + ss.sici(2 * gdiff)[1])*cos)
-
-    return orf
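A minimal sketch of the two closed-form overlap reduction functions removed above, with the formulas reproduced inline from the deleted altorfs.py so the snippet is self-contained:

```python
# Self-contained sanity plot of the TT and ST overlap reduction functions,
# copied from the deleted altorfs.py. HD_ORF -> 0.5 as the separation -> 0
# and equals 0.25 at 180 degrees; ST_ORF is the simple (3 + cos)/8 curve.
import numpy as np
import matplotlib.pyplot as plt

def hd_orf(angle):
    # Hellings-Downs curve for the tensor-transverse (TT) mode
    x = (1 - np.cos(angle)) / 2
    return 3 / 2 * (1 / 3 + x * (np.log(x) - 1 / 6))

def st_orf(angle):
    # Scalar-transverse (ST, "breathing") mode curve
    return 1 / 8 * (3 + np.cos(angle))

angles = np.linspace(1e-3, np.pi, 200)  # avoid log(0) at zero separation
plt.plot(np.degrees(angles), hd_orf(angles), label='TT (Hellings-Downs)')
plt.plot(np.degrees(angles), st_orf(angles), label='ST')
plt.xlabel('Angular separation (deg)')
plt.ylabel('Overlap reduction function')
plt.legend()
plt.show()
```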
diff --git a/enterprise_extensions/altpol/altutils.py b/enterprise_extensions/altpol/altutils.py
deleted file mode 100644
index c5d0f36c..00000000
--- a/enterprise_extensions/altpol/altutils.py
+++ /dev/null
@@ -1,232 +0,0 @@
-import numpy as np
-from la_forge.core import Core
-import pickle, os, json, glob, copy, time
-from enterprise.pulsar import Pulsar
-from enterprise_extensions.altpol import altorfs, altutils
-from enterprise_extensions import model_utils
-import matplotlib.pyplot as plt
-
-def ml_finder(chaindir, savedir = False):
-    c = Core(label = '', chaindir = chaindir)
-    params = c.params[:-4]
-    burn = int(.25 * c.chain.shape[0])
-    chain = c.chain[burn:,:]
-    ml_params = {}
-    for ii, name in enumerate(params):
-        idx = np.argmax(chain[:, -4])
-        max_ll_val = chain[:, ii][idx]
-        ml_params[name] = max_ll_val
-
-    ml_params_orig = copy.deepcopy(ml_params)
-    keys = list(ml_params.keys())
-    if not 'gw_log10_A_TT' in keys:
-        ml_params['gw_log10_A_TT'] = -np.inf
-    if not 'gw_log10_A_ST' in keys:
-        ml_params['gw_log10_A_ST'] = -np.inf
-    if not 'gw_log10_A_VL' in keys:
-        ml_params['gw_log10_A_VL'] = -np.inf
-
-    if savedir:
-        os.makedirs(savedir, exist_ok = True)
-        with open(savedir + '/ml_params.json', 'w' ) as fout:
-            json.dump({'orig': ml_params_orig, 'added': ml_params}, fout, sort_keys=True,
-                      indent=4, separators=(',', ': '))
-    return ml_params_orig
-
-def med_finder(chaindir, savedir = False):
-    c = Core(label = '', chaindir = chaindir)
-    params = c.params[:-4]
-    burn = int(.25 * c.chain.shape[0])
-    chain = c.chain[burn:,:]
-    param_dict = {}
-    params = list(params)
-    for p in params:
-        param_dict.update({p: np.median(chain[:, params.index(p)])})
-
-    param_dict_orig = copy.deepcopy(param_dict)
-
-    keys = list(param_dict.keys())
-    if not 'gw_log10_A_TT' in keys:
-        param_dict['gw_log10_A_TT'] = -np.inf
-    if not 'gw_log10_A_ST' in keys:
-        param_dict['gw_log10_A_ST'] = -np.inf
-    if not 'gw_log10_A_VL' in keys:
-        param_dict['gw_log10_A_VL'] = -np.inf
-
-    if savedir:
-        with open(savedir + '/med_params.json', 'w' ) as fout:
-            json.dump({'orig': param_dict_orig, 'added': param_dict}, fout, sort_keys=True,
-                      indent=4, separators=(',', ': '))
-    return param_dict_orig
-
-def crn_bins_finder(chaindir, Tspan):
-    ml_params = ml_finder(chaindir)
-    freqs = np.arange(1/Tspan, 30/Tspan, 1/Tspan)
-    fb = 10**ml_params['gw_log10_fb']
-    crn_bins = np.argmin(np.abs(freqs - fb)) + 1
-    if crn_bins > 5:
-        return crn_bins
-    else:
-        return 5
-
-def psrs_slicer(psrs, min_year = 10, interval = 1):
-    day_sec = 86400
-    year_sec = 365.25 * day_sec
-    a = []; sliced_psrs = {}
-    max_year = int(round(model_utils.get_tspan(psrs)/(year_sec)))
-    for psr in psrs: a.append(min(psr._toas)/day_sec)
-    start_time = min(a)
-    for bct,base in enumerate(np.arange(max_year,min_year,-interval)):
-        if bct == 0:
-            sliced_psrs.update({'{}'.format(max_year): copy.deepcopy(psrs)})
-        else:
-            psrs_moded = []
-            end_time = start_time + base * (365.25)
-            for psr in copy.deepcopy(psrs):
-                psr.filter_data(start_time = start_time, end_time = end_time)
-                if (psr.toas.size == 0) or (model_utils.get_tspan([psr]) < min_year * year_sec):
-                    continue
-                else:
-                    psrs_moded.append(psr)
-            sliced_psrs.update({'{}'.format(max_year - bct): psrs_moded})
-
-    return sliced_psrs
-
-def liklihood_check(pta, n = 10):
-    st = time.time()
-    for _ in range(n):
-        x0 = np.hstack(p.sample() for p in pta.params)
-        print(pta.get_lnlikelihood(x0))
-    print('***Time Elapsed(s): ***', time.time() - st)
-
-def weightedavg(rho, sig):
-    weights, avg = 0., 0.
-    for r,s in zip(rho,sig):
-        weights += 1./(s*s)
-        avg += r/(s*s)
-    return avg/weights, np.sqrt(1./weights)
-
-def bin_crosscorr(zeta, xi, rho, sig):
-    rho_avg, sig_avg = np.zeros(len(zeta)), np.zeros(len(zeta))
-    for i,z in enumerate(zeta[:-1]):
-        myrhos, mysigs = [], []
-        for x,r,s in zip(xi,rho,sig):
-            if x >= z and x < (z+10.):
-                myrhos.append(r)
-                mysigs.append(s)
-        rho_avg[i], sig_avg[i] = weightedavg(myrhos, mysigs)
-    return rho_avg, sig_avg
-
-def binned_corr_Maker(xi, rho, sig, bins):
-    n_pulsars_per_bin,bin_loc = np.histogram(xi, density = False, bins = bins)
-    xi_mean = []; xi_err = []; rho_avg = []; sig_avg = []
-    for ii in range (len(bin_loc) - 1):
-        mask = np.logical_and(xi >= bin_loc[ii] , xi < bin_loc[ii+1])
-        if not rho[mask].size == 0:
-            r, s = weightedavg(rho[mask], sig[mask])
-            rho_avg.append(r); sig_avg.append(s)
-            xi_mean.append(np.mean(xi[mask]))
-            xi_err.append(np.std(xi[mask]))
-    return np.array(xi_mean), np.array(xi_err), np.array(rho_avg), np.array(sig_avg)
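To illustrate the inverse-variance binning performed by `weightedavg` and `binned_corr_Maker` above, here is a minimal sketch on synthetic data; `weightedavg` is vectorized here for brevity but computes the same weighted mean and uncertainty as the deleted loop version.

```python
# Minimal sketch: inverse-variance weighted binning of synthetic pulsar-pair
# correlations. The data are made up; only the weighting scheme is the point.
import numpy as np

def weightedavg(rho, sig):
    # vectorized equivalent of the deleted altutils.weightedavg
    w = 1.0 / np.asarray(sig) ** 2
    avg = np.sum(np.asarray(rho) * w) / np.sum(w)
    return avg, np.sqrt(1.0 / np.sum(w))

rng = np.random.default_rng(0)
xi = rng.uniform(0, np.pi, 500)        # pair separations (rad)
sig = rng.uniform(0.1, 0.5, 500)       # per-pair uncertainties
rho = rng.normal(0.2, sig)             # noisy correlation estimates

bins = np.quantile(xi, np.linspace(0, 1, 9))   # 8 equal-count bins
for lo, hi in zip(bins[:-1], bins[1:]):
    m = (xi >= lo) & (xi < hi)
    avg, err = weightedavg(rho[m], sig[m])
    print(f"{np.degrees(lo):5.1f}-{np.degrees(hi):5.1f} deg: {avg:+.3f} +/- {err:.3f}")
```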
-def get_HD_curve(zeta):
-    coszeta = np.cos(zeta*np.pi/180.)
-    xip = (1.-coszeta) / 2.
-    HD = 3.*( 1./3. + xip * ( np.log(xip) -1./6.) )
-    return HD/2
-
-def get_ST_curve(zeta):
-    coszeta = np.cos(zeta*np.pi/180.)
-    return 1/8 * (3 + coszeta)
-
-def get_VL_curve(zeta):
-    coszeta = np.cos(zeta*np.pi/180.)
-    return 3*np.log(2/(1-coszeta)) -4*coszeta - 3
-
-def gt( x, tau, mono ) :
-    if np.all(x) == 0:
-        return 1 + mono
-    else:
-        cos_ang = np.cos(x)
-        k = 1/2*(1-cos_ang)
-        return 1/8 * (3+cos_ang) + (1-tau)*3/4*k*np.log(k) + mono
-
-def vl_search(x,a,b,c):
-    cos_ang = np.cos(x)
-    return a*np.log(2/(1-cos_ang)) -b*cos_ang - c
-
-def adjacent_values(vals, q1, q3):
-    upper_adjacent_value = q3 + (q3 - q1) * 1.5
-    upper_adjacent_value = np.clip(upper_adjacent_value, q3, vals[-1])
-
-    lower_adjacent_value = q1 - (q3 - q1) * 1.5
-    lower_adjacent_value = np.clip(lower_adjacent_value, vals[0], q1)
-    return lower_adjacent_value, upper_adjacent_value
-
-
-def set_axis_style(ax, labels):
-    ax.xaxis.set_tick_params(direction='out')
-    ax.xaxis.set_ticks_position('bottom')
-    ax.set_xticks(np.arange(1, len(labels) + 1), labels=labels)
-    ax.set_xlim(0.25, len(labels) + 0.75)
-
-def BinModelTearDrop(data, realization, xi, nbins, split_first = False, orf = altorfs.HD_ORF,
-                     orfname = 'HD', savedir = None, step = 'Binmodel', masterdir = None,
-                     facecolor = 'lightblue', barcolor = 'blue', edgecolor = 'black',
-                     marker_color = 'red', orf_color = 'red', alpha = 1):
-    rad_to_deg = 180/np.pi
-    if not data:
-        data = []
-        if split_first:
-            pars = ['gw_orf_bin_{}'.format(_) for _ in range(nbins+1)]
-        else:
-            pars = ['gw_orf_bin_{}'.format(_) for _ in range(nbins)]
-        if realization == 'all':
-            x = altutils.chain_loader(masterdir,
-                                      step = step,
-                                      realization = 'all')
-            for _ in range(len(x[step])):
-                p = x[step][str(_)]
-                data.append(p[0].get_param(pars))
-        else:
-            x = altutils.chain_loader(masterdir,
-                                      step = step,
-                                      realization = realization)
-            p = x[step][str(realization)]
-            data.append(p[0].get_param(pars))
-
-    data = list(np.hstack([list(data[ii].T) for ii in range(len(data))]))
-    bins = np.quantile(xi, np.linspace(0,1,nbins+1))
-    if split_first:
-        bins = np.insert(bins, 1, (bins[0] + bins[1])/2)
-    widths = np.diff(bins) * rad_to_deg
-    #widths = np.diff(np.insert(bins, len(bins), np.pi)) * rad_to_deg
-    orfsigs = np.sqrt(.5 * (orf(xi)**2 + 4 * orf(.00001)**2))
-    xi_mean, xi_err, rho_avg, sig_avg = binned_corr_Maker(xi, orf(xi), orfsigs, bins)
-    fig, ax = plt.subplots(nrows=1, ncols=1, figsize=(12, 6), sharey=True, dpi = 170)
-
-    parts = ax.violinplot(data, bw_method = 0.5,
-                          positions = xi_mean * rad_to_deg,
-                          widths = widths,
-                          showmedians = True)
-
-    for pc in parts['bodies']:
-        pc.set_facecolor(facecolor)
-        pc.set_edgecolor(edgecolor)
-        pc.set_alpha(1)
-    parts['cmedians'].set_color(barcolor)
-    parts['cbars'].set_color(barcolor)
-    parts['cmins'].set_color(barcolor)
-    parts['cmaxes'].set_color(barcolor)
-    angs = np.linspace(.001, np.pi, 100)
-    plt.plot(angs * 180/np.pi, orf(angs), lw = 2, ls = '--', color = orf_color)
-    plt.errorbar(xi_mean*180/np.pi, rho_avg, xerr=xi_err*180/np.pi, yerr=sig_avg, marker='o', ls='', alpha = alpha,
-                 color=marker_color, capsize=4, elinewidth=1.2,
-                 label = 'Theoretical {}'.format(orfname))
-
-    plt.xlabel('Angular Separation (deg)')
-    plt.ylabel('Correlations')
-    plt.legend()
-    if savedir:
-        plt.savefig(savedir, dpi = 600)
-    plt.show()
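Note that `get_HD_curve` takes separations in degrees and, despite the different grouping (a leading 3 and a trailing /2 in place of the 3/2 prefactor), reduces to the same expression as `HD_ORF` in the deleted altorfs.py, which takes radians. A quick self-contained check, with both formulas copied from the deleted code:

```python
# Consistency check: get_HD_curve (degrees) equals HD_ORF (radians).
import numpy as np

def HD_ORF(angle):
    x = (1 - np.cos(angle)) / 2
    return 3 / 2 * (1 / 3 + x * (np.log(x) - 1 / 6))

def get_HD_curve(zeta):
    coszeta = np.cos(zeta * np.pi / 180.0)
    xip = (1.0 - coszeta) / 2.0
    HD = 3.0 * (1.0 / 3.0 + xip * (np.log(xip) - 1.0 / 6.0))
    return HD / 2

zeta_deg = np.linspace(1, 179, 50)
assert np.allclose(get_HD_curve(zeta_deg), HD_ORF(np.radians(zeta_deg)))
```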
diff --git a/enterprise_extensions/gibbs_sampling/gibbs.py b/enterprise_extensions/gibbs_sampling/gibbs.py
index 015f6d09..81188199 100644
--- a/enterprise_extensions/gibbs_sampling/gibbs.py
+++ b/enterprise_extensions/gibbs_sampling/gibbs.py
@@ -9,7 +9,7 @@ from scipy.linalg import solve_triangular as st_solve
 from scipy.linalg import cho_factor, cho_solve
-
+

 class BayesPowerSingle(object):
     """
@@ -31,19 +31,21 @@ class BayesPowerSingle(object):
     N. Laal
     """

-    def __init__(self,
-                 psr = None,
-                 Tspan = None,
-                 select = 'backend',
-                 white_vary = False,
-                 inc_ecorr = False,
-                 ecorr_type = 'kernel',
-                 noise_dict = None,
-                 tm_marg = False,
-                 freq_bins = None,
-                 tnequad = True,
-                 log10rhomin = -9., log10rhomax = -4.):
-
+    def __init__(
+        self,
+        psr=None,
+        Tspan=None,
+        select="backend",
+        white_vary=False,
+        inc_ecorr=False,
+        ecorr_type="kernel",
+        noise_dict=None,
+        tm_marg=False,
+        freq_bins=None,
+        tnequad=True,
+        log10rhomin=-9.0,
+        log10rhomax=-4.0,
+    ):
         """
         Parameters
         -----------
@@ -96,54 +98,82 @@ def __init__(self,
         self.ecorr_type = ecorr_type
         self.white_vary = white_vary
         self.tm_marg = tm_marg
-        self.wn_names = ['efac', 'equad', 'ecorr']
+        self.wn_names = ["efac", "equad", "ecorr"]
         self.rhomin = log10rhomin
         self.rhomax = log10rhomax
         self.freq_bins = freq_bins
-        self.low = 10**(2 * self.rhomin)
-        self.high = 10**(2 * self.rhomax)
-
+        self.low = 10 ** (2 * self.rhomin)
+        self.high = 10 ** (2 * self.rhomax)
+
         ##Making the pta object
         if self.tm_marg:
             tm = gp_signals.MarginalizingTimingModel(use_svd=True)
             if self.white_vary:
-                warnings.warn('***FYI: the timing model is marginalized for. This will slow down the WN sampling!!***')
+                warnings.warn(
+                    "***FYI: the timing model is marginalized for. This will slow down the WN sampling!!***"
+                )
         else:
             tm = gp_signals.TimingModel(use_svd=True)

-        if self.ecorr_type == 'basis':
-            wn = blocks.white_noise_block(vary=self.white_vary, inc_ecorr=self.inc_ecorr, gp_ecorr=True, select = select, tnequad = tnequad)
+        if self.ecorr_type == "basis":
+            wn = blocks.white_noise_block(
+                vary=self.white_vary,
+                inc_ecorr=self.inc_ecorr,
+                gp_ecorr=True,
+                select=select,
+                tnequad=tnequad,
+            )
         else:
-            wn = blocks.white_noise_block(vary=self.white_vary, inc_ecorr=self.inc_ecorr, gp_ecorr=False, select = select, tnequad = tnequad)
-
-        rn = blocks.common_red_noise_block(psd='spectrum', prior='log-uniform', Tspan=self.Tspan,logmin=self.rhomin, logmax=self.rhomax,
-                                           components=freq_bins, gamma_val=None, name = 'gw')
+            wn = blocks.white_noise_block(
+                vary=self.white_vary,
+                inc_ecorr=self.inc_ecorr,
+                gp_ecorr=False,
+                select=select,
+                tnequad=tnequad,
+            )
+
+        rn = blocks.common_red_noise_block(
+            psd="spectrum",
+            prior="log-uniform",
+            Tspan=self.Tspan,
+            logmin=self.rhomin,
+            logmax=self.rhomax,
+            components=freq_bins,
+            gamma_val=None,
+            name="gw",
+        )
         s = tm + wn + rn
-        self.pta = signal_base.PTA([s(p) for p in self.psr], lnlikelihood = signal_base.LogLikelihoodDenseCholesky)
+        self.pta = signal_base.PTA(
+            [s(p) for p in self.psr],
+            lnlikelihood=signal_base.LogLikelihoodDenseCholesky,
+        )
         if not white_vary:
             self.pta.set_default_params(noise_dict)
-            self.Nmat = self.pta.get_ndiag(params = {})[0]
-            self.TNr = self.pta.get_TNr(params = {})[0]
-            self.TNT = self.pta.get_TNT(params = {})[0]
+            self.Nmat = self.pta.get_ndiag(params={})[0]
+            self.TNr = self.pta.get_TNr(params={})[0]
+            self.TNT = self.pta.get_TNT(params={})[0]
         else:
             self.Nmat = None

-        if self.inc_ecorr and 'basis' in self.ecorr_type:
+        if self.inc_ecorr and "basis" in self.ecorr_type:
             # grabbing priors on ECORR params
             for ct, par in enumerate(self.pta.params):
-                if 'ecorr' in str(par): ind = ct
+                if "ecorr" in str(par):
+                    ind = ct
             ecorr_priors = str(self.pta.params[ind].params[0])
-            ecorr_priors = ecorr_priors.split('(')[1].split(')')[0].split(', ')
-            self.ecorrmin, self.ecorrmax = (10**(2*float(ecorr_priors[0].split('=')[1])),
-                                            10**(2*float(ecorr_priors[1].split('=')[1])))
+            ecorr_priors = ecorr_priors.split("(")[1].split(")")[0].split(", ")
+            self.ecorrmin, self.ecorrmax = (
+                10 ** (2 * float(ecorr_priors[0].split("=")[1])),
+                10 ** (2 * float(ecorr_priors[1].split("=")[1])),
+            )

         # Getting residuals
         self._residuals = self.pta.get_residuals()[0]
         # Intial guess for the model params
-        self._xs = np.array([p.sample() for p in self.pta.params], dtype = object)
+        self._xs = np.array([p.sample() for p in self.pta.params], dtype=object)
         # Initializign the b-coefficients. The shape is 2*freq_bins if tm_marg = True.
         self._b = np.zeros(self.pta.get_basis(self._xs)[0].shape[1])
-        self.Tmat = self.pta.get_basis(params = {})[0]
+        self.Tmat = self.pta.get_basis(params={})[0]
         self.phiinv = None

         # find basis indices of GW process
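For orientation, constructing the sampler with the signature reformatted above would look roughly like this; `psr` (an enterprise `Pulsar`) and `noise_dict` (a fixed white-noise parameter dictionary) are assumed inputs not defined in this diff.

```python
# Hypothetical construction of the single-pulsar Gibbs sampler; psr and
# noise_dict are assumed to exist already.
from enterprise_extensions.gibbs_sampling.gibbs import BayesPowerSingle

bp = BayesPowerSingle(
    psr=psr,
    Tspan=None,            # default per the signature above
    select="backend",
    white_vary=False,      # keep white noise fixed at the noise_dict values
    inc_ecorr=True,
    ecorr_type="kernel",
    noise_dict=noise_dict,
    tm_marg=False,
    freq_bins=30,
    log10rhomin=-9.0,
    log10rhomax=-4.0,
)
```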
@@ -152,13 +182,13 @@ def __init__(self,
         psigs = [sig for sig in self.pta.signals.keys() if self.name in sig]
         for sig in psigs:
             Fmat = self.pta.signals[sig].get_basis()
-            if 'gw' in self.pta.signals[sig].name:
-                self.gwid.append(ct + np.arange(0,Fmat.shape[1]))
+            if "gw" in self.pta.signals[sig].name:
+                self.gwid.append(ct + np.arange(0, Fmat.shape[1]))
             # Avoid None-basis processes.
             # Also assume red + GW signals share basis.
-            if Fmat is not None and 'red' not in sig:
+            if Fmat is not None and "red" not in sig:
                 ct += Fmat.shape[1]
-
+
     @cached_property
     def params(self):
         return self.pta.params
@@ -174,20 +204,20 @@ def map_params(self, xs):
     def get_red_param_indices(self):
         ind = []
         for ct, par in enumerate(self.param_names):
-            if 'log10_A' in par or 'gamma' in par or 'rho' in par:
+            if "log10_A" in par or "gamma" in par or "rho" in par:
                 ind.append(ct)
         return np.array(ind)

     @cached_property
     def get_efacequad_indices(self):
         ind = []
-        if 'basis' in self.ecorr_type:
+        if "basis" in self.ecorr_type:
             for ct, par in enumerate(self.param_names):
-                if 'efac' in par or 'equad' in par:
+                if "efac" in par or "equad" in par:
                     ind.append(ct)
         else:
             for ct, par in enumerate(self.param_names):
-                if 'ecorr' in par or 'efac' in par or 'equad' in par:
+                if "ecorr" in par or "efac" in par or "equad" in par:
                     ind.append(ct)
         return np.array(ind)

@@ -195,27 +225,27 @@ def get_efacequad_indices(self):
     def get_basis_ecorr_indices(self):
         ind = []
         for ct, par in enumerate(self.param_names):
-            if 'ecorr' in par:
+            if "ecorr" in par:
                 ind.append(ct)
         return np.array(ind)

     def update_red_params(self, xs):
-        '''
+        """
         Function to perform log10_rho updates given the Fourier coefficients.
-        '''
-        tau = self._b[tuple(self.gwid)]**2
+        """
+        tau = self._b[tuple(self.gwid)] ** 2
         tau = (tau[0::2] + tau[1::2]) / 2
-        Norm = 1/(np.exp(-tau/self.high) - np.exp(-tau/self.low))
-        x = np.random.default_rng().uniform(0, 1, size = tau.shape)
-        rhonew = -tau/np.log(x/Norm + np.exp(-tau/self.low))
-        xs[-1] = 0.5*np.log10(rhonew)
+        Norm = 1 / (np.exp(-tau / self.high) - np.exp(-tau / self.low))
+        x = np.random.default_rng().uniform(0, 1, size=tau.shape)
+        rhonew = -tau / np.log(x / Norm + np.exp(-tau / self.low))
+        xs[-1] = 0.5 * np.log10(rhonew)
        return xs
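The update above draws each rho directly by inverting its conditional CDF: on [low, high] the conditional density is p(rho) proportional to (tau/rho^2) exp(-tau/rho), whose CDF inverts in closed form to the `rhonew` expression. A standalone sketch mirroring that transformation (tau and the bounds here are synthetic stand-ins):

```python
# Standalone sketch of the inverse-CDF draw in update_red_params.
# F(rho) = (exp(-tau/rho) - exp(-tau/low)) * Norm inverts to the line below.
import numpy as np

rng = np.random.default_rng(42)
tau = 1e-13                                   # synthetic (b^2 pairs averaged)
low, high = 10 ** (2 * -9.0), 10 ** (2 * -4.0)  # rho bounds from log10rhomin/max

Norm = 1 / (np.exp(-tau / high) - np.exp(-tau / low))
x = rng.uniform(0, 1, size=100_000)
rho = -tau / np.log(x / Norm + np.exp(-tau / low))
log10_rho = 0.5 * np.log10(rho)               # what the chain actually stores

# check: empirical CDF at the sample median matches the analytic CDF (~0.5)
rtest = np.median(rho)
F_analytic = (np.exp(-tau / rtest) - np.exp(-tau / low)) * Norm
print(np.mean(rho <= rtest), F_analytic)
```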
     def update_b(self, xs):
-        '''
+        """
         Function to perform updates on Fourier coefficients given other model
         parameters.
-        '''
+        """
         params = self.pta.map_params(np.hstack(xs))
         self._phiinv = self.pta.get_phiinv(params, logdet=False)[0]

@@ -223,80 +253,100 @@
         TNT = self.TNT.copy()
         except:
             T = self.Tmat
-            TNT = self.Nmat.solve(T, left_array = T)
+            TNT = self.Nmat.solve(T, left_array=T)
         try:
             TNr = self.TNr.copy()
         except:
             T = self.Tmat
-            TNr = self.Nmat.solve(self._residuals, left_array = T)
+            TNr = self.Nmat.solve(self._residuals, left_array=T)

         np.fill_diagonal(TNT, TNT.diagonal() + self._phiinv)
         try:
             chol = cho_factor(TNT, lower=True, overwrite_a=False, check_finite=False)
-            mean = cho_solve(chol, b = TNr, overwrite_b=False, check_finite=False)
-            self._b = mean + st_solve(chol[0], np.random.normal(loc=0, scale=1, size=TNT.shape[0]),
-                                      lower=True, unit_diagonal=False, overwrite_b=False, check_finite=False, trans=1)
+            mean = cho_solve(chol, b=TNr, overwrite_b=False, check_finite=False)
+            self._b = mean + st_solve(
+                chol[0],
+                np.random.normal(loc=0, scale=1, size=TNT.shape[0]),
+                lower=True,
+                unit_diagonal=False,
+                overwrite_b=False,
+                check_finite=False,
+                trans=1,
+            )
         except np.linalg.LinAlgError:
             if self.bchain.any():
-                self._b = self.bchain[np.random.default_rng().integers(0, len(self.bchain))]
+                self._b = self.bchain[
+                    np.random.default_rng().integers(0, len(self.bchain))
+                ]
             else:
-                bchain = np.memmap(self._savepath + '/chain_1', dtype='float32', mode='r', shape = (self.niter, self.len_x + self.len_b))[:, -len(self._b):]
+                bchain = np.memmap(
+                    self._savepath + "/chain_1",
+                    dtype="float32",
+                    mode="r",
+                    shape=(self.niter, self.len_x + self.len_b),
+                )[:, -len(self._b) :]
                 self._b = bchain[np.random.default_rng().integers(0, len(bchain))]
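The draw above is the standard conditional-Gaussian update: with A = TNT + diag(phiinv) and lower Cholesky factor A = L L^T, the coefficients follow b ~ N(A^-1 TNr, A^-1), and a sample is mean + L^-T z with z standard normal (the `trans=1` triangular solve). A self-contained sketch with synthetic matrices:

```python
# Sketch of the Gaussian draw in update_b, on synthetic stand-in matrices.
import numpy as np
from scipy.linalg import cho_factor, cho_solve, solve_triangular

rng = np.random.default_rng(1)
n = 6
M = rng.normal(size=(n, n))
A = M @ M.T + n * np.eye(n)       # synthetic SPD stand-in for TNT + phiinv
TNr = rng.normal(size=n)          # synthetic projected residuals

chol = cho_factor(A, lower=True)
mean = cho_solve(chol, TNr)       # conditional mean A^-1 TNr
z = rng.standard_normal(n)
b = mean + solve_triangular(chol[0], z, lower=True, trans=1)  # solves L^T x = z

# sanity check: many draws reproduce the covariance A^-1
draws = mean + solve_triangular(
    chol[0], rng.standard_normal((n, 200_000)), lower=True, trans=1
).T
print(np.allclose(np.cov(draws.T), np.linalg.inv(A), atol=1e-2))
```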
     def update_white_params(self, xs, iters=10):
-        '''
+        """
         Function to perform WN updates given other model parameters.
         If kernel ecorr is chosen, WN includes ecorr as well.
-        '''
+        """
         # get white noise parameter indices
         wind = self.get_efacequad_indices
         xnew = xs
         x0 = xnew[wind].copy()
-        lnlike0, lnprior0 = self.get_lnlikelihood_white(x0), self.get_wn_lnprior(x0)
+        lnlike0, lnprior0 = self.get_lnlikelihood_white(x0), self.get_wn_lnprior(x0)
         lnprob0 = lnlike0 + lnprior0

         for ii in range(self.start_wn_iter + 1, self.start_wn_iter + iters + 1):
-            x0, lnlike0, lnprob0 = self.sampler_wn.PTMCMCOneStep(x0, lnlike0, lnprob0, ii)
+            x0, lnlike0, lnprob0 = self.sampler_wn.PTMCMCOneStep(
+                x0, lnlike0, lnprob0, ii
+            )
         xnew[wind] = x0
         self.start_wn_iter = ii

         ##Do some caching of "later needed" parameters for improved performance
         self.Nmat = self.pta.get_ndiag(self.map_params(xnew))[0]
         Tmat = self.Tmat
-        if not 'basis' in self.ecorr_type:
-            self.TNT = self.Nmat.solve(Tmat, left_array = Tmat)
+        if not "basis" in self.ecorr_type:
+            self.TNT = self.Nmat.solve(Tmat, left_array=Tmat)
         else:
-            TN = Tmat/self.Nmat[:,None]
+            TN = Tmat / self.Nmat[:, None]
             self.TNT = Tmat.T @ TN
             residuals = self._residuals
-            self.rNr = np.sum(residuals**2/self.Nmat)
+            self.rNr = np.sum(residuals**2 / self.Nmat)
             self.logdet_N = np.sum(np.log(self.Nmat))
             self.d = TN.T @ residuals

         return xnew

     def update_basis_ecorr_params(self, xs, iters=10):
-        '''
+        """
         Function to perform basis ecorr updates.
-        '''
+        """
         # get white noise parameter indices
         eind = self.get_basis_ecorr_indices
         xnew = xs
         x0 = xnew[eind].copy()
-        lnlike0, lnprior0 = self.get_basis_ecorr_lnlikelihood(x0), self.get_basis_ecorr_lnprior(x0)
+        lnlike0, lnprior0 = self.get_basis_ecorr_lnlikelihood(
+            x0
+        ), self.get_basis_ecorr_lnprior(x0)
         lnprob0 = lnlike0 + lnprior0

         for ii in range(self.start_ec_iter + 1, self.start_ec_iter + iters + 1):
-            x0, lnlike0, lnprob0 = self.sampler_ec.PTMCMCOneStep(x0, lnlike0, lnprob0, ii)
+            x0, lnlike0, lnprob0 = self.sampler_ec.PTMCMCOneStep(
+                x0, lnlike0, lnprob0, ii
+            )
         xnew[eind] = x0
         self.start_ec_iter = ii

         return xnew

     def get_lnlikelihood_white(self, xs):
-        '''
+        """
         Function to calculate WN log-liklihood.
-        '''
+        """
         x0 = self._xs.copy()
         x0[self.get_efacequad_indices] = xs

@@ -305,10 +355,10 @@ def get_lnlikelihood_white(self, xs):
         # whitened residuals
         yred = self._residuals - self.Tmat @ self._b
         try:
-            if not 'basis' in self.ecorr_type:
-                rNr, logdet_N = Nmat.solve(yred, left_array=yred, logdet = True)
+            if not "basis" in self.ecorr_type:
+                rNr, logdet_N = Nmat.solve(yred, left_array=yred, logdet=True)
             else:
-                rNr = np.sum(yred**2/Nmat)
+                rNr = np.sum(yred**2 / Nmat)
                 logdet_N = np.sum(np.log(Nmat))
         except:
             return -np.inf
@@ -317,11 +367,10 @@ def get_lnlikelihood_white(self, xs):
         return loglike

-
     def get_basis_ecorr_lnlikelihood(self, xs):
-        '''
+        """
         Function to calculate basis ecorr log-liklihood.
-        '''
+        """
         x0 = np.hstack(self._xs.copy())
         x0[self.get_basis_ecorr_indices] = xs

@@ -329,7 +378,7 @@ def get_basis_ecorr_lnlikelihood(self, xs):
         # start likelihood calculations
         loglike = 0
         # get auxiliaries
-        phiinv, logdet_phi = self.pta.get_phiinv(params,logdet=True)[0]
+        phiinv, logdet_phi = self.pta.get_phiinv(params, logdet=True)[0]
         # first component of likelihood function
         loglike += -0.5 * (self.logdet_N + self.rNr)
         # Red noise piece
@@ -341,35 +390,42 @@ def get_basis_ecorr_lnlikelihood(self, xs):
             return -np.inf

         logdet_sigma = np.sum(2 * np.log(np.diag(cf[0])))
-        loglike += 0.5 * (self.d @ expval -
-                          logdet_sigma - logdet_phi)
+        loglike += 0.5 * (self.d @ expval - logdet_sigma - logdet_phi)

         return loglike
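The pieces cached in `update_white_params` (rNr, logdet_N, and d = T^T N^-1 r) combine in the likelihood above into the standard marginalized Gaussian-process form. A toy recomputation with dense numpy, on synthetic inputs:

```python
# Toy recomputation of the marginalized likelihood assembled above:
# lnL = -1/2 (r^T N^-1 r + ln|N|) + 1/2 (d^T Sigma^-1 d - ln|Sigma| - ln|Phi|),
# with d = T^T N^-1 r and Sigma = T^T N^-1 T + Phi^-1. All inputs are synthetic.
import numpy as np
from scipy.linalg import cho_factor, cho_solve

rng = np.random.default_rng(3)
nobs, nbasis = 100, 10
T = rng.normal(size=(nobs, nbasis))   # basis matrix (stand-in for Tmat)
N = rng.uniform(0.5, 2.0, nobs)       # diagonal white-noise covariance
phi = rng.uniform(0.1, 1.0, nbasis)   # diagonal prior covariance Phi
r = rng.normal(size=nobs)             # residuals

TN = T / N[:, None]
d = TN.T @ r
Sigma = T.T @ TN + np.diag(1 / phi)
cf = cho_factor(Sigma, lower=True)
expval = cho_solve(cf, d)
logdet_sigma = np.sum(2 * np.log(np.diag(cf[0])))

lnlike = -0.5 * (np.sum(r**2 / N) + np.sum(np.log(N)))
lnlike += 0.5 * (d @ expval - logdet_sigma - np.sum(np.log(phi)))
print(lnlike)
```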
     def get_wn_lnprior(self, xs):
-        '''
+        """
         Function to calculate WN log-prior.
-        '''
+        """
         x0 = self._xs.copy()
         x0[self.get_efacequad_indices] = xs

-        return np.sum([p.get_logpdf(value = x0[ct]) for ct, p in enumerate(self.params)])
+        return np.sum([p.get_logpdf(value=x0[ct]) for ct, p in enumerate(self.params)])

     def get_basis_ecorr_lnprior(self, xs):
-        '''
+        """
         Function to calculate basis ecorr log-prior.
-        '''
+        """
         x0 = self._xs.copy()
         x0[self.get_basis_ecorr_indices] = xs

-        return np.sum([p.get_logpdf(value = x0[ct]) for ct, p in enumerate(self.params)])
-
-
-    def sample(self, niter=int(1e4), wniters = 30, eciters = 10,
-               savepath = None,
-               SCAMweight=30, AMweight=15, DEweight=50,
-               covUpdate = 1000, burn = 10000, **kwargs):
-        '''
+        return np.sum([p.get_logpdf(value=x0[ct]) for ct, p in enumerate(self.params)])
+
+    def sample(
+        self,
+        niter=int(1e4),
+        wniters=30,
+        eciters=10,
+        savepath=None,
+        SCAMweight=30,
+        AMweight=15,
+        DEweight=50,
+        covUpdate=1000,
+        burn=10000,
+        **kwargs
+    ):
+        """
         Gibbs Sampling

         Parameters
@@ -400,44 +456,81 @@ def sample(self, niter=int(1e4), wniters = 30, eciters = 10,
         kwargs: dict
             PTMCMC initialization settings not mentioned above
-        '''
+        """
         self.start_wn_iter = 0
         self.start_ec_iter = 0
-        os.makedirs(savepath, exist_ok = True)
+        os.makedirs(savepath, exist_ok=True)

         if self.white_vary:
-
-            isave = int(4e9) ## large number to avoid saving the white noise choice in a txt file
+            isave = int(
+                4e9
+            )  ## large number to avoid saving the white noise choice in a txt file
             thin = 1
             Niter = int(niter * wniters + 1)

             x0 = self._xs[self.get_efacequad_indices]
             ndim = len(x0)
-            cov = np.diag(np.ones(ndim) * 0.01**2) # helps to tune MCMC proposal distribution
-            self.sampler_wn = ptmcmc(ndim, self.get_lnlikelihood_white, self.get_wn_lnprior, cov,
-                                     outDir = savepath,
-                                     resume=False)
-            self.sampler_wn.initialize(Niter = Niter, isave = isave, thin = thin, SCAMweight=SCAMweight,
-                                       AMweight=AMweight, DEweight=DEweight, covUpdate = covUpdate,
-                                       burn = burn, **kwargs)
-
-        if 'basis' in self.ecorr_type and self.white_vary:
+            cov = np.diag(
+                np.ones(ndim) * 0.01**2
+            )  # helps to tune MCMC proposal distribution
+            self.sampler_wn = ptmcmc(
+                ndim,
+                self.get_lnlikelihood_white,
+                self.get_wn_lnprior,
+                cov,
+                outDir=savepath,
+                resume=False,
+            )
+            self.sampler_wn.initialize(
+                Niter=Niter,
+                isave=isave,
+                thin=thin,
+                SCAMweight=SCAMweight,
+                AMweight=AMweight,
+                DEweight=DEweight,
+                covUpdate=covUpdate,
+                burn=burn,
+                **kwargs
+            )
+
+        if "basis" in self.ecorr_type and self.white_vary:
             x0 = self._xs[self.get_basis_ecorr_indices]
             ndim = len(x0)
             cov = np.diag(np.ones(ndim) * 0.01**2)
-            self.sampler_ec = ptmcmc(ndim, self.get_basis_ecorr_lnlikelihood, self.get_basis_ecorr_lnprior, cov,
-                                     outDir = savepath,
-                                     resume=False)
-            self.sampler_ec.initialize(Niter = Niter, isave = isave, thin = thin, SCAMweight=SCAMweight,
-                                       AMweight=AMweight, DEweight=DEweight, covUpdate = covUpdate,
-                                       burn = burn, **kwargs)
-
-        np.savetxt(savepath + '/pars.txt',list(map(str, self.pta.param_names)), fmt='%s')
-        np.savetxt(savepath +'/priors.txt',list(map(lambda x: str(x.__repr__()), self.pta.params)), fmt='%s')
-        freqs = np.arange(1/self.Tspan, (self.freq_bins + .001)/self.Tspan, 1/self.Tspan)
-        np.save(savepath +'/freqs.npy',freqs)
-        [os.remove(dpa) for dpa in glob.glob(savepath + '/*jump.txt')]
+            self.sampler_ec = ptmcmc(
+                ndim,
+                self.get_basis_ecorr_lnlikelihood,
+                self.get_basis_ecorr_lnprior,
+                cov,
+                outDir=savepath,
+                resume=False,
+            )
+            self.sampler_ec.initialize(
+                Niter=Niter,
+                isave=isave,
+                thin=thin,
+                SCAMweight=SCAMweight,
+                AMweight=AMweight,
+                DEweight=DEweight,
+                covUpdate=covUpdate,
+                burn=burn,
+                **kwargs
+            )
+
+        np.savetxt(
+            savepath + "/pars.txt", list(map(str, self.pta.param_names)), fmt="%s"
+        )
+        np.savetxt(
+            savepath + "/priors.txt",
+            list(map(lambda x: str(x.__repr__()), self.pta.params)),
+            fmt="%s",
+        )
+        freqs = np.arange(
+            1 / self.Tspan, (self.freq_bins + 0.001) / self.Tspan, 1 / self.Tspan
+        )
+        np.save(savepath + "/freqs.npy", freqs)
+        [os.remove(dpa) for dpa in glob.glob(savepath + "/*jump.txt")]

         xnew = self._xs.copy()
@@ -445,24 +538,25 @@
         len_x = len(np.hstack(self._xs))
         self._savepath = savepath

-        fp = np.lib.format.open_memmap(savepath + '/chain_1.npy',
-                                       mode='w+',
-                                       dtype='float32',
-                                       shape=(niter, len_x + len_b),
-                                       fortran_order=False)
-
+        fp = np.lib.format.open_memmap(
+            savepath + "/chain_1.npy",
+            mode="w+",
+            dtype="float32",
+            shape=(niter, len_x + len_b),
+            fortran_order=False,
+        )
+
         pbar = tqdm(range(niter), colour="GREEN")
         pbar.set_description("Sampling %s" % self.name)
         for ii in pbar:
-
             if self.white_vary:
                 xnew = self.update_white_params(xnew, iters=wniters)
-            if self.inc_ecorr and 'basis' in self.ecorr_type:
+            if self.inc_ecorr and "basis" in self.ecorr_type:
                 xnew = self.update_basis_ecorr_params(xnew, iters=eciters)
-            self.update_b(xs = xnew)
-            xnew = self.update_red_params(xs = xnew)
+            self.update_b(xs=xnew)
+            xnew = self.update_red_params(xs=xnew)

             fp[ii, -len_b:] = self._b
-            fp[ii, 0:len_x] = np.hstack(xnew)
\ No newline at end of file
+            fp[ii, 0:len_x] = np.hstack(xnew)
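Taken together, an end-to-end run and a look at the outputs would be roughly as follows; the save path and iteration counts are illustrative, `bp` is the `BayesPowerSingle` instance from the constructor sketch above, and the chain layout (model parameters first, then the b coefficients) follows the write pattern in the loop just shown.

```python
# Illustrative end-to-end run; savepath and niter are arbitrary choices.
import numpy as np

bp.sample(niter=int(1e4), wniters=30, eciters=10, savepath="./gibbs_out")

# sample() writes chain_1.npy (float32 memmap), pars.txt, priors.txt, freqs.npy
chain = np.load("./gibbs_out/chain_1.npy", mmap_mode="r")
pars = np.loadtxt("./gibbs_out/pars.txt", dtype=str)
freqs = np.load("./gibbs_out/freqs.npy")

burn = chain.shape[0] // 4
params_chain = chain[burn:, :len(pars)]  # leading columns: model parameters
b_chain = chain[burn:, len(pars):]       # trailing columns: Fourier coefficients
print(chain.shape, freqs.shape)
```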