Skip to content

Commit

Permalink
Merge branch 'main' into update-processor
Browse files Browse the repository at this point in the history
  • Loading branch information
cmantill authored May 22, 2024
2 parents 1b6de05 + 64959c6 commit f7d40a1
Show file tree
Hide file tree
Showing 6 changed files with 92 additions and 24 deletions.
37 changes: 37 additions & 0 deletions src/HH4b/combine/prepare_snapshots.sh
Original file line number Diff line number Diff line change
@@ -0,0 +1,37 @@
#!/bin/bash
# Build combine workspaces with background-only fit snapshots injected.
#
# For each analysis category (combined, optional VBF, and pass bins 1-3) this:
#   1. runs the blinded b-only fit to produce a MultiDimFit snapshot,
#   2. extracts the fitted parameters to a JSON file,
#   3. combines the relevant datacards into an unmasked card,
#   4. converts that card to a RooFit workspace,
#   5. injects the extracted fit result back into the workspace.
#
# Expects the per-region datacards (fail.txt, passbin{1,2,3}.txt and optionally
# passvbf.txt) in the current directory; the VBF steps run only if passvbf.txt
# exists.

# make_workspace LABEL PASSBIN CARDS
#   LABEL   -> basename for the output files ({LABEL}_nomasks.txt/.root, inject_{LABEL}.json)
#   PASSBIN -> value passed to run_blinded_hh4b.sh --passbin
#   CARDS   -> space-separated channel=card.txt arguments for combineCards.py
#              (intentionally expanded unquoted below so each pair becomes its
#              own argument; none of the card names contain whitespace)
make_workspace() {
    local label="$1"
    local passbin="$2"
    local cards="$3"

    run_blinded_hh4b.sh --workspace --bfit --passbin="${passbin}"
    extract_fit_result.py higgsCombineSnapshot.MultiDimFit.mH125.root "w:MultiDimFit" "inject_${label}.json" --keep '*'
    # shellcheck disable=SC2086
    combineCards.py ${cards} > "${label}_nomasks.txt"
    text2workspace.py "${label}_nomasks.txt"
    inject_fit_result.py "inject_${label}.json" "${label}_nomasks.root" w
}

# Combined card: include the VBF channel only when its datacard is present.
if [ -f "passvbf.txt" ]; then
    make_workspace combined 0 "fail=fail.txt passbin1=passbin1.txt passbin2=passbin2.txt passbin3=passbin3.txt passvbf=passvbf.txt"
else
    make_workspace combined 0 "fail=fail.txt passbin1=passbin1.txt passbin2=passbin2.txt passbin3=passbin3.txt"
fi

# Standalone VBF category (optional).
if [ -f "passvbf.txt" ]; then
    make_workspace passvbf vbf "fail=fail.txt passvbf=passvbf.txt"
fi

# Individual ggF pass bins, each paired with the shared fail region.
make_workspace passbin1 1 "fail=fail.txt passbin1=passbin1.txt"
make_workspace passbin2 2 "fail=fail.txt passbin2=passbin2.txt"
make_workspace passbin3 3 "fail=fail.txt passbin3=passbin3.txt"
6 changes: 2 additions & 4 deletions src/HH4b/combine/run_inference_impacts_hh4b.sh
Original file line number Diff line number Diff line change
@@ -1,9 +1,8 @@
#!/bin/bash
# shellcheck disable=SC2086,SC2034

card_dir=/uscms/home/jduarte1/nobackup/HH4b/src/HH4b/cards/run3-bdt-may9-msd40-v2
datacards=$card_dir/combined_snapshot.root
masks="mask_passbin1=0:mask_passbin2=0:mask_passbin3=0:mask_fail=0:mask_passbin1MCBlinded=1:mask_passbin2MCBlinded=1:mask_passbin3MCBlinded=1:mask_failMCBlinded=1"
card_dir=./
datacards=$card_dir/combined_nomasks.root
model=hh_model.model_default@noNNLOscaling@noklDependentUnc
campaign="61 fb$^{-1}$, 2022-2023 (13.6 TeV)"

Expand All @@ -13,7 +12,6 @@ law run PlotPullsAndImpacts \
--datacards $datacards \
--pois r \
--mc-stats \
--parameter-values "$masks" \
--parameter-ranges r=-20,20 \
--PullsAndImpacts-workflow "htcondor" \
--PullsAndImpacts-tasks-per-job 10 \
Expand Down
19 changes: 14 additions & 5 deletions src/HH4b/combine/run_inference_upper_limits_hh4b.sh
Original file line number Diff line number Diff line change
@@ -1,18 +1,27 @@
#!/bin/bash

card_dir=/uscms/home/jduarte1/nobackup/HH4b/src/HH4b/cards/run3-bdt-may9-msd40-v2-ntf012
datacards=$card_dir/passbin3_snapshot.root:$card_dir/passbin2_snapshot.root:$card_dir/passbin1_snapshot.root:$card_dir/combined_snapshot.root
masks="mask_passbin1=0:mask_passbin2=0:mask_passbin3=0:mask_fail=0:mask_passbin1MCBlinded=1:mask_passbin2MCBlinded=1:mask_passbin3MCBlinded=1:mask_failMCBlinded=1"
card_dir=./
if [ -f "passvbf.txt" ]; then
datacards=$card_dir/passbin3_nomasks.root:$card_dir/passbin2_nomasks.root:$card_dir/passbin1_nomasks.root:$card_dir/passvbf_nomasks.root:$card_dir/combined_nomasks.root
datacard_names="Category 3,Category 2,Category 1,VBF Category,Combined"
xmin="0.03"
parameters="C2V=0"
else
datacards=$card_dir/passbin3_nomasks.root:$card_dir/passbin2_nomasks.root:$card_dir/passbin1_nomasks.root:$card_dir/combined_nomasks.root
datacard_names="Category 3,Category 2,Category 1,Combined"
xmin="0.75"
parameters="C2V=1"
fi
model=hh_model.model_default@noNNLOscaling@noklDependentUnc
campaign="61 fb$^{-1}$, 2022-2023 (13.6 TeV)"
datacard_names="Category 3,Category 2,Category 1,Combined"

law run PlotUpperLimitsAtPoint \
--version dev \
--multi-datacards "$datacards" \
--parameter-values "$masks" \
--parameter-values "$parameters" \
--h-lines 1 \
--x-log True \
--x-min "$xmin" \
--hh-model "$model" \
--datacard-names "$datacard_names" \
--remove-output 1,a,y \
Expand Down
14 changes: 7 additions & 7 deletions src/HH4b/postprocessing/CombineTemplates.ipynb
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@
},
{
"cell_type": "code",
"execution_count": 1,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
Expand All @@ -23,7 +23,7 @@
},
{
"cell_type": "code",
"execution_count": 2,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
Expand All @@ -33,11 +33,11 @@
},
{
"cell_type": "code",
"execution_count": 3,
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"templates_path = Path(\"src/HH4b/postprocessing/templates/24May16\")"
"templates_path = Path(\"src/HH4b/postprocessing/templates/24May16VBFPriorityCat1\")"
]
},
{
Expand Down Expand Up @@ -185,15 +185,15 @@
"source": [
"warnings.filterwarnings(\"ignore\")\n",
"\n",
"# sig_key = \"hh4b\"\n",
"sig_key = \"hh4b\"\n",
"# sig_key = \"vbfhh4b-k2v0\"\n",
"sig_key = \"ttbar\"\n",
"# sig_key = \"ttbar\"\n",
"\n",
"for rname, region in selection_regions.items():\n",
" plot_dir = main_plot_dir / year / \"wshifts\"\n",
" plot_dir.mkdir(exist_ok=True, parents=True)\n",
" # for wshift in list((jecs).keys()):\n",
" for wshift in [\"ttbarSF\", \"trigger\"]:\n",
" for wshift in [\"trigger\"]:\n",
" if wshift in jecs:\n",
" # adding jshift-ed histograms into the same histogram\n",
" cjshift_templates = [templates[rname]]\n",
Expand Down
28 changes: 21 additions & 7 deletions src/HH4b/postprocessing/CreateDatacard.py
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@
)

add_bool_arg(parser, "sig-separate", "separate templates for signals and bgs", default=False)
add_bool_arg(parser, "do-jshifts", "Do JEC/JMC corrections.", default=False)
add_bool_arg(parser, "do-jshifts", "Do JEC/JMC corrections.", default=True)

parser.add_argument("--cards-dir", default="cards", type=str, help="output card directory")

Expand All @@ -71,7 +71,7 @@
)

add_bool_arg(parser, "only-sm", "Only add SM HH samples", default=False)
parser.add_argument("--sig-sample", default="hh4b", type=str, help="specify signal")
parser.add_argument("--sig-samples", default="hh4b", nargs="*", type=str, help="specify signals")

parser.add_argument(
"--nTF",
Expand Down Expand Up @@ -147,7 +147,7 @@

for key in all_sig_keys:
# check in case single sig sample is specified
if args.sig_sample is None or key == args.sig_sample:
if args.sig_samples is None or key in args.sig_samples:
# TODO: change names to match HH combination convention
mc_samples[key] = key
sig_keys.append(key)
Expand Down Expand Up @@ -234,8 +234,13 @@
# ),
# TODO: separate into individual
"JES": Syst(name="CMS_scale_j", prior="shape", samples=sig_keys), # TODO: update to all_mc
"ttbarSF": Syst(name=f"{CMS_PARAMS_LABEL}_ttbar_sf", prior="shape", samples=["ttbar"]),
"trigger": Syst(name=f"{CMS_PARAMS_LABEL}_trigger", prior="shape", samples=all_mc),
"ttbarSF": Syst(
name=f"{CMS_PARAMS_LABEL}_ttbar_sf",
prior="shape",
samples=["ttbar"],
convert_shape_to_lnN=True,
),
# "trigger": Syst(name=f"{CMS_PARAMS_LABEL}_trigger", prior="shape", samples=all_mc), # TODO: fix
# "txbb": Syst(
# name=f"{CMS_PARAMS_LABEL}_PNetHbbScaleFactors_correlated",
# prior="shape",
Expand Down Expand Up @@ -266,7 +271,10 @@

shape_systs_dict = {}
for skey, syst in corr_year_shape_systs.items():
shape_systs_dict[skey] = rl.NuisanceParameter(syst.name, "shape")
if syst.convert_shape_to_lnN:
shape_systs_dict[skey] = rl.NuisanceParameter(syst.name, "lnN")
else:
shape_systs_dict[skey] = rl.NuisanceParameter(syst.name, "shape")
for skey, syst in uncorr_year_shape_systs.items():
for year in years:
if year in syst.uncorr_years:
Expand Down Expand Up @@ -481,7 +489,13 @@ def fill_regions(
logger = logging.getLogger(f"validate_shapes_{region}_{sample_name}_{skey}")

effect_up, effect_down = get_effect_updown(
values_nominal, values_up, values_down, mask, logger, args.epsilon
values_nominal,
values_up,
values_down,
mask,
logger,
args.epsilon,
syst.convert_shape_to_lnN,
)
sample.setParamEffect(shape_systs_dict[skey], effect_up, effect_down)

Expand Down
12 changes: 11 additions & 1 deletion src/HH4b/postprocessing/datacardHelpers.py
Original file line number Diff line number Diff line change
Expand Up @@ -34,6 +34,7 @@ class Syst:
# in case of uncorrelated unc., which years to split into
uncorr_years: list[str] = field(default_factory=lambda: all_years)
pass_only: bool = False # is it applied only in the pass regions
convert_shape_to_lnN: bool = False # take shape uncertainty and convert to lnN

def __post_init__(self):
if isinstance(self.value, dict) and not (self.diff_regions or self.diff_samples):
Expand Down Expand Up @@ -180,7 +181,16 @@ def _shape_checks(values_up, values_down, values_nominal, effect_up, effect_down
logger.warning("Up and Down vary norm in the same direction")


def get_effect_updown(values_nominal, values_up, values_down, mask, logger, epsilon):
def get_effect_updown(
values_nominal, values_up, values_down, mask, logger, epsilon, convert_shape_to_lnN=False
):
if convert_shape_to_lnN:
effect_up = np.sum(values_up) / np.sum(values_nominal)
effect_down = np.sum(values_down) / np.sum(values_nominal)
logging.debug(f"effect_up : {effect_up}")
logging.debug(f"effect_down: {effect_down}")
return effect_up, effect_down

effect_up = np.ones_like(values_nominal)
effect_down = np.ones_like(values_nominal)

Expand Down

0 comments on commit f7d40a1

Please sign in to comment.