ACT DR4 calibration options
msyriac committed Jan 31, 2024
1 parent 2a5d9d9 commit 8da6daa
Showing 1 changed file with 57 additions and 14 deletions.
act_dr6_lenslike/act_dr6_lenslike.py: 71 changes (57 additions & 14 deletions)
@@ -81,7 +81,11 @@ def pp_to_kk(clpp,ell):
     return clpp * (ell*(ell+1.))**2. / 4.
 
 def get_corrected_clkk(data_dict,clkk,cltt,clte,clee,clbb,suff='',
-                       do_N1kk_corr=True, do_N1cmb_corr=True):
+                       do_norm_corr=True, do_N1kk_corr=True, do_N1cmb_corr=True,
+                       act_calib=False, no_like_cmb_corrections=False):
+    if no_like_cmb_corrections:
+        do_norm_corr = False
+        do_N1cmb_corr = False
     clkk_fid = data_dict['fiducial_cl_kk']
     cl_dict = {'tt':cltt,'te':clte,'ee':clee,'bb':clbb}
     if do_N1kk_corr:
@@ -92,15 +96,29 @@ def get_corrected_clkk(data_dict,clkk,cltt,clte,clee,clbb,suff='',
     fid_norm = data_dict[f'fAL{suff}']
     N1_cmb_corr = 0.
     norm_corr = 0.
+
+    if act_calib and not('planck' in suff):
+        ocl = cl_dict['tt']
+        fcl = data_dict[f'fiducial_cl_tt']
+        ols = np.arange(ocl.size)
+        cal_ell_min = 1000
+        cal_ell_max = 2000
+        sel = np.s_[np.logical_and(ols>cal_ell_min,ols<cal_ell_max)]
+        cal_fact = (ocl[sel]/fcl[sel]).mean()
+    else:
+        cal_fact = 1.0
+
     for i,s in enumerate(['tt','ee','bb','te']):
-        cldiff = (cl_dict[s]-data_dict[f'fiducial_cl_{s}'])
+        icl = cl_dict[s]
+        cldiff = ((icl/cal_fact)-data_dict[f'fiducial_cl_{s}'])
         if do_N1cmb_corr:
             N1_cmb_corr = N1_cmb_corr + (data_dict[f'dN1_{s}{suff}']@cldiff)
-        c = - 2. * (dNorm[i] @ cldiff)
-        if i==0:
-            ls = np.arange(c.size)
-            c[ls>=2] = c[ls>=2] / fid_norm[ls>=2]
-        norm_corr = norm_corr + c
+        if do_norm_corr:
+            c = - 2. * (dNorm[i] @ cldiff)
+            if i==0:
+                ls = np.arange(c.size)
+                c[ls>=2] = c[ls>=2] / fid_norm[ls>=2]
+            norm_corr = norm_corr + c
     nclkk = clkk + norm_corr*clkk_fid + N1_kk_corr + N1_cmb_corr
     return nclkk
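For orientation, a minimal sketch of what the new act_calib branch computes: a single calibration factor, taken as the mean ratio of the supplied TT spectrum to the fiducial TT spectrum over 1000 < ell < 2000, by which every input CMB spectrum is divided before the normalization and N1 corrections are formed (the branch is skipped for the Planck variant). The spectra below are toy stand-ins, not ACT products.

# Illustration only: the calibration factor derived by the act_calib branch above,
# using synthetic spectra in place of the real input and fiducial TT.
import numpy as np

ells = np.arange(3000)
fid_cl_tt = 1.0 / (ells + 10.0)**2      # toy fiducial TT spectrum
input_cl_tt = 1.02 * fid_cl_tt          # toy input TT, 2% high overall

cal_ell_min, cal_ell_max = 1000, 2000
sel = np.logical_and(ells > cal_ell_min, ells < cal_ell_max)
cal_fact = (input_cl_tt[sel] / fid_cl_tt[sel]).mean()
print(cal_fact)  # ~1.02; each input CMB spectrum is then divided by this factor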

@@ -214,7 +232,7 @@ def load_data(variant, ddir=None,
               lens_only=False,
               apply_hartlap=True,like_corrections=True,mock=False,
               nsims_act=796,nsims_planck=400,trim_lmax=2998,scale_cov=None,
-              version=None):
+              version=None, act_cmb_rescale=False, act_calib=False):
     """
     Given a data directory path, this function loads into a dictionary
     the data products necessary for evaluating the DR6 lensing likelihood.
@@ -249,6 +267,7 @@
 
     print(f"Loading ACT DR6 lensing likelihood {version}...")
     v,baseline,include_planck = parse_variant(variant)
+    if include_planck and act_cmb_rescale: raise ValueError
 
 
     # output data
@@ -299,6 +318,7 @@
     data_act = y[start:end].copy()
     d['data_binned_clkk'] = data_act
     nbins_act = data_act.size
+
 
     binmat = np.loadtxt(f'{ddir}/binning_matrix_act.txt')
     d['full_binmat_act'] = binmat.copy()
@@ -308,7 +328,22 @@
     d['binmat_act'] = standardize(ls,binmat[start:end,:],trim_lmax,extra_dims="xy")
     d['bcents_act'] = bcents[start:end].copy()
 
+    if act_cmb_rescale:
+        # load A_L_fid / A_L_ACT and standardize it
+        r = np.loadtxt("ratio_fid_over_act_wmap.txt")
+        rls = np.arange(r.size)
+        r[rls<2] = 0
+        rs = standardize(rls,r,trim_lmax)
+        # bin it
+        rb = d['binmat_act'] @ rs
+        # correct data
+        print("Binned ACT rescaling corrections: ", rb)
+        d['data_binned_clkk'] = d['data_binned_clkk'] / rb**2
+
+
     if lens_only:
+        if act_cmb_rescale: raise ValueError
+        if act_calib: raise ValueError
         if include_planck:
             if v not in [None,'cinpaint']: raise ValueError(f"Combination of {v} with Planck is not available")
             fcov = np.loadtxt(f'{ddir}/covmat_actplanck_cmbmarg.txt')
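A rough sketch of the bookkeeping in the act_cmb_rescale block above, with toy arrays standing in for ratio_fid_over_act_wmap.txt, the standardize helper, and the ACT binning matrix: the per-multipole ratio is binned with the same matrix used for the band powers, and the binned data are divided by the square of that binned ratio.

# Illustration only: act_cmb_rescale mechanics with toy inputs.
import numpy as np

nbins, lmax = 5, 46
ratio = np.full(lmax + 1, 1.01)          # stand-in for the A_L_fid / A_L_ACT ratio
ratio[:2] = 0                            # ell < 2 zeroed, as in the code above

binmat = np.zeros((nbins, lmax + 1))     # toy top-hat binning matrix
for i in range(nbins):
    binmat[i, 2 + 9 * i : 2 + 9 * (i + 1)] = 1.0 / 9.0

data_binned_clkk = np.full(nbins, 1e-7)  # stand-in band powers
rb = binmat @ ratio                      # binned rescaling factors, ~1.01 each
data_binned_clkk = data_binned_clkk / rb**2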
@@ -378,8 +413,7 @@
     nsims = min(nsims_act,nsims_planck) if include_planck else nsims_act
     hartlap_correction = (nsims-nbins-2.)/(nsims-1.)
     if apply_hartlap:
-        pass
-        #warnings.warn("Hartlap correction to cinv: ", hartlap_correction)
+        warnings.warn(f"Hartlap correction to cinv: {hartlap_correction}")
     else:
         warnings.warn(f"Disabled Hartlap correction to cinv: {hartlap_correction}")
         hartlap_correction = 1.0
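The restored warning above reports the Hartlap factor that multiplies the inverse covariance, (nsims - nbins - 2)/(nsims - 1). A quick numeric check, where nbins = 40 is only a placeholder rather than the actual number of DR6 band powers:

# Placeholder numbers: nsims from the load_data default, nbins chosen arbitrarily.
nsims, nbins = 796., 40.
hartlap_correction = (nsims - nbins - 2.) / (nsims - 1.)
print(hartlap_correction)  # about 0.948, i.e. cinv is shrunk by roughly 5%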
@@ -399,7 +433,8 @@
     return d
 
 
-def generic_lnlike(data_dict,ell_kk,cl_kk,ell_cmb,cl_tt,cl_ee,cl_te,cl_bb,trim_lmax=2998,return_theory=False):
+def generic_lnlike(data_dict,ell_kk,cl_kk,ell_cmb,cl_tt,cl_ee,cl_te,cl_bb,trim_lmax=2998,
+                   return_theory=False,do_norm_corr=True,act_calib=False,no_actlike_cmb_corrections=False):
 
     cl_kk = standardize(ell_kk,cl_kk,trim_lmax)
     cl_tt = standardize(ell_cmb,cl_tt,trim_lmax)
@@ -409,7 +444,9 @@ def generic_lnlike(data_dict,ell_kk,cl_kk,ell_cmb,cl_tt,cl_ee,cl_te,cl_bb,trim_l
 
     d = data_dict
     cinv = d['cinv']
-    clkk_act = get_corrected_clkk(data_dict,cl_kk,cl_tt,cl_te,cl_ee,cl_bb) if d['likelihood_corrections'] else cl_kk
+    clkk_act = get_corrected_clkk(data_dict,cl_kk,cl_tt,cl_te,cl_ee,cl_bb,
+                                  do_norm_corr=do_norm_corr,act_calib=act_calib,
+                                  no_like_cmb_corrections=no_actlike_cmb_corrections) if d['likelihood_corrections'] else cl_kk
     bclkk = d['binmat_act'] @ clkk_act
     if d['include_planck']:
         clkk_planck = get_corrected_clkk(data_dict,cl_kk,cl_tt,cl_te,cl_ee,cl_bb,'_planck') if d['likelihood_corrections'] else cl_kk
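The remainder of generic_lnlike lies outside this hunk; schematically it evaluates a Gaussian log-likelihood from the binned theory, the data band powers, and cinv. A sketch with illustrative variable names, not copied from the source:

# Schematic only: Gaussian log-likelihood from binned data, binned theory and cinv.
import numpy as np

def gaussian_lnlike(data_binned, theory_binned, cinv):
    delta = data_binned - theory_binned
    return -0.5 * delta @ cinv @ delta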
@@ -434,6 +471,7 @@ class ACTDR6LensLike(InstallableLikelihood):
     nsims_act = 792. # Number of sims used for covmat; used in Hartlap correction
     nsims_planck = 400. # Number of sims used for covmat; used in Hartlap correction
     no_like_corrections = False
+    no_actlike_cmb_corrections = False
     lens_only = False
     # Any ells above this will be discarded; likelihood must at least request ells up to this
     trim_lmax = 2998
@@ -447,14 +485,17 @@
     scale_cov = None
     varying_cmb_alens = False # Whether to divide the theory spectrum by Alens
     version = None
+    act_cmb_rescale = False
+    act_calib = False
 
     def initialize(self):
         if self.lens_only: self.no_like_corrections = True
         if self.lmax<self.trim_lmax: raise ValueError(f"An lmax of at least {self.trim_lmax} is required.")
         self.data = load_data(variant=self.variant,lens_only=self.lens_only,
                               like_corrections=not(self.no_like_corrections),apply_hartlap=self.apply_hartlap,
                               mock=self.mock,nsims_act=self.nsims_act,nsims_planck=self.nsims_planck,
-                              trim_lmax=self.trim_lmax,scale_cov=self.scale_cov,version=self.version)
+                              trim_lmax=self.trim_lmax,scale_cov=self.scale_cov,version=self.version,
+                              act_cmb_rescale=self.act_cmb_rescale,act_calib=self.act_calib)
 
         if self.no_like_corrections:
             self.requested_cls = ["pp"]
@@ -493,7 +534,9 @@ def loglike(self, cl, **params_values):
         else:
             cl_kk = pp_to_kk(clpp,ell)
 
-        logp = generic_lnlike(self.data,ell,cl_kk,ell,cl['tt'],cl['ee'],cl['te'],cl['bb'],self.trim_lmax)
+        logp = generic_lnlike(self.data,ell,cl_kk,ell,cl['tt'],cl['ee'],cl['te'],cl['bb'],self.trim_lmax,
+                              do_norm_corr=not(self.act_cmb_rescale),act_calib=self.act_calib,
+                              no_actlike_cmb_corrections=self.no_actlike_cmb_corrections)
         self.log.debug(
             f"ACT-DR6-lensing-like lnLike value = {logp} (chisquare = {-2 * logp})")
         return logp
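For anyone wanting to exercise the new switches from Cobaya, a hypothetical snippet along these lines should work; the variant name and everything outside the likelihood block are placeholders, not part of this commit. Note that in loglike above, act_cmb_rescale also turns off the normalization correction via do_norm_corr=not(self.act_cmb_rescale).

# Hypothetical usage sketch (Python interface to Cobaya); settings are placeholders.
info = {
    "likelihood": {
        "act_dr6_lenslike.ACTDR6LensLike": {
            "variant": "act_baseline",            # placeholder variant name
            "lens_only": False,
            "act_calib": True,                    # new: TT-derived calibration of input CMB spectra
            "act_cmb_rescale": False,             # new: divide binned ACT Clkk data by rb**2
            "no_actlike_cmb_corrections": False,  # new: skip norm/N1 CMB corrections for ACT
        }
    },
}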
