diff --git a/.travis.yml b/.travis.yml
index a929f319e83..7ebe0e9e4e7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -1,10 +1,13 @@
 language: cpp
 before_install:
+ - sudo apt-add-repository -y ppa:git-core/ppa
+ - sudo apt-get update -qq
+ - sudo apt-get install git
 - cd .. && mkdir -p src && mv $OLDPWD src/CombineHarvester
 - export CMSSW_BASE=$PWD
 - cd src/CombineHarvester
install:
- - export DOX=doxygen-1.8.8
+ - export DOX=doxygen-1.8.10
 - wget -O - http://ftp.stack.nl/pub/users/dimitri/${DOX}.linux.bin.tar.gz | tar xz -C ${TMPDIR-/tmp} ${DOX}/bin/doxygen
 - sudo install -m 755 ${TMPDIR-/tmp}/${DOX}/bin/doxygen /usr/local/bin/doxygen
 script:
diff --git a/CombinePdfs/bin/AZhMorphing.cpp b/CombinePdfs/bin/AZhMorphing.cpp
deleted file mode 100644
index 255f21dc458..00000000000
--- a/CombinePdfs/bin/AZhMorphing.cpp
+++ /dev/null
@@ -1,232 +0,0 @@
-#include <string>
-#include <map>
-#include <set>
-#include <iostream>
-#include <utility>
-#include <vector>
-#include <cstdlib>
-#include "boost/filesystem.hpp"
-#include "CombineHarvester/CombineTools/interface/CombineHarvester.h"
-#include "CombineHarvester/CombineTools/interface/Utilities.h"
-#include "CombineHarvester/CombineTools/interface/HttSystematics.h"
-#include "CombineHarvester/CombineTools/interface/BinByBin.h"
-#include "CombineHarvester/CombinePdfs/interface/MorphFunctions.h"
-#include "CombineHarvester/CombineTools/interface/TFileIO.h"
-
-#include "RooWorkspace.h"
-#include "RooRealVar.h"
-#include "TH2.h"
-#include "RooDataHist.h"
-#include "RooHistFunc.h"
-#include "RooFormulaVar.h"
-#include "RooProduct.h"
-
-using namespace std;
-
-int main() {
-  ch::CombineHarvester cb;
-
-  // cb.SetVerbosity(1);
-
-  typedef vector<pair<int, string>> Categories;
-  typedef vector<string> VString;
-
-  // RooFit will be quite noisy if we don't set this
-  RooMsgService::instance().setGlobalKillBelow(RooFit::WARNING);
-
-
-  string auxiliaries = string(getenv("CMSSW_BASE")) + "/src/auxiliaries/";
-  string aux_shapes = auxiliaries +"shapes/";
-  string aux_pruning = auxiliaries +"pruning/";
-
-  // In the first part of this code we will construct our MSSM signal model.
-  // This means declaring mA and tanb as our free parameters, and defining all
-  // other Higgs masses, cross sections and branching ratios as a function of
-  // these. Note that this example is very verbose and explicit in the
-  // construction - one could imagine developing a more generic and efficient
-  // tool to do this.
-
-  // Define mA and tanb as RooRealVars
-  // Here I do not declare a range and it is decided automatically from
-  // the inputs once the first RooDataHist is created.
-  // There were some issues found with declaring a range by hand.
-  RooRealVar mA("mA", "mA", 300.);
-  RooRealVar tanb("tanb", "tanb", 1.);
-
-  // All the other inputs we need to build the model can be found in one of the
-  // LHCXSWG scans. Here we use the low-tanb-high scenario. This file contains a
-  // large number of 2D histograms, all binned in mA and tanb, that each provide
-  // a different mass, xsec or BR.
-  TFile inputs(TString(auxiliaries) +
-               "models/out.low-tb-high-8TeV-tanbAll-nnlo.root");
-  // Get the TH2Fs for the masses of the h and H bosons.
-  //TH2F h_mH = ch::OpenFromTFile<TH2F>(&inputs, "h_mH");
-  //TH2F h_mh = ch::OpenFromTFile<TH2F>(&inputs, "h_mh");
-  // Now two more steps are needed: we have to convert each TH2F to a
-  // RooDataHist, then build a RooHistFunc from the RooDataHist. These steps
-  // will be repeated for every TH2F we import below.
-  //RooDataHist dh_mH("dh_mH", "dh_mH", RooArgList(mA, tanb),
-  //                  RooFit::Import(h_mH));
-  //RooDataHist dh_mh("dh_mh", "dh_mh", RooArgList(mA, tanb),
-  //                  RooFit::Import(h_mh));
-  //RooHistFunc mH("mH", "mH", RooArgList(mA, tanb), dh_mH);
-  //RooHistFunc mh("mh", "mh", RooArgList(mA, tanb), dh_mh);
-  // There is also the possibility to interpolate mass values rather than just
-  // take the value of the enclosing histogram bin, but we'll leave this off for
-  // now.
-  // mH.setInterpolationOrder(1);
-
-  // Now we'll do the same for the cross section
-  TH2F h_ggF_xsec_A = ch::OpenFromTFile<TH2F>(&inputs, "h_ggF_xsec_A");
-  RooDataHist dh_ggF_xsec_A("dh_ggF_xsec_A", "dh_ggF_xsec_A",
-                            RooArgList(mA, tanb), RooFit::Import(h_ggF_xsec_A));
-  RooHistFunc ggF_xsec_A("ggF_xsec_A", "ggF_xsec_A", RooArgList(mA, tanb),
-                         dh_ggF_xsec_A);
-
-  // Now the branching ratios to hh, tau tau and bb
-  TH2F h_brZh0_A = ch::OpenFromTFile<TH2F>(&inputs, "h_brZh0_A");
-  // Add factor of 2 here for h->tautau, h->bb and reverse
-  h_brZh0_A = 0.10099*h_brZh0_A;
-  TH2F h_brtautau_h = ch::OpenFromTFile<TH2F>(&inputs, "h_brtautau_h");
-  RooDataHist dh_brZh0_A("dh_brZh0_A", "dh_brZh0_A",
-                         RooArgList(mA, tanb),
-                         RooFit::Import(h_brZh0_A));
-  RooDataHist dh_brtautau_h("dh_brtautau_h", "dh_brtautau_h",
-                            RooArgList(mA, tanb),
-                            RooFit::Import(h_brtautau_h));
-  RooHistFunc brZh0_A("brZh0_A", "brZh0_A", RooArgList(mA, tanb),
-                      dh_brZh0_A);
-  RooHistFunc brtautau_h("brtautau_h", "brtautau_h", RooArgList(mA, tanb),
-                         dh_brtautau_h);
-  // We can build the xsec * BR products
-  RooProduct ggF_xsec_br_A("ggF_xsec_br_A", "",
-                           RooArgList(ggF_xsec_A, brZh0_A, brtautau_h));
-  // Let's print out the values of all of these objects
-  cout << "mA: " << mA.getVal() << "\n";
-  cout << "tanb: " << tanb.getVal() << "\n\n";
-  cout << "ggF_xsec_A: " << ggF_xsec_A.getVal() << "\n";
-  cout << "brZh0_A: " << brZh0_A.getVal()/0.10099 << "\n";
-  cout << "brtautau_h: " << brtautau_h.getVal() << "\n";
-  cout << "ggF_xsec_br_A: " << ggF_xsec_br_A.getVal() << "\n";
-
-  map<string, RooAbsReal *> xs_map;
-  xs_map["AZh"] = &ggF_xsec_br_A;
-
-  VString chns =
-      {"et", "mt","tt", "em"};
-
-  string input_folders = "ULB";
-
-  map<string, VString> bkg_procs;
-  bkg_procs["et"] = {"ZZ","GGToZZ2L2L","TTZ","WWZ","ZZZ","WZZ","Zjets"};
-  bkg_procs["mt"] = {"ZZ","GGToZZ2L2L","TTZ","WWZ","ZZZ","WZZ","Zjets"};
-  bkg_procs["em"] = {"ZZ","GGToZZ2L2L","TTZ","WWZ","ZZZ","WZZ","Zjets"};
-  bkg_procs["tt"] = {"ZZ","GGToZZ2L2L","TTZ","WWZ","ZZZ","WZZ","Zjets"};
-
-  map<string, VString> sm_procs;
-  sm_procs["et"] = {"ZH_ww125","ZH_tt125"};
-
-
-  VString sig_procs={"AZh"};
-
-  map<string, Categories> cats;
-  cats["et_8TeV"] = {
-      {0, "eeet_zh"}, {1, "mmet_zh"}};
-  cats["mt_8TeV"] = {
-      {0, "eemt_zh"}, {1, "mmmt_zh"}};
-  cats["em_8TeV"] = {
-      {0, "eeem_zh"}, {1, "mmme_zh"}};
-  cats["tt_8TeV"] = {
-      {0, "eett_zh"}, {1, "mmtt_zh"}};
-
-  vector<string> masses = ch::MassesFromRange("220-350:10");
-
-  cout << ">> Creating processes and observations...\n";
-  for (string era : {"8TeV"}) {
-    for (auto chn : chns) {
-      cb.AddObservations(
-        {"*"}, {"htt"}, {era}, {chn}, cats[chn+"_"+era]);
-      cb.AddProcesses(
-        {"*"}, {"htt"}, {era}, {chn}, bkg_procs[chn], cats[chn+"_"+era], false);
-      // cb.AddProcesses(
-      //   {"*"},{"htt"}, {era}, {chn}, sm_procs[chn], cats[chn+"_"+era],false);
-      cb.AddProcesses(
-        masses, {"htt"}, {era}, {chn}, sig_procs, cats[chn+"_"+era], true);
-    }
-  }
-
-
-  cout << ">> Adding systematic uncertainties...\n";
-  ch::AddSystematics_AZh(cb);
-
-  cout << ">> Extracting histograms from input root files...\n";
-  for (string era : {"8TeV"}) {
-    for (string chn : chns) {
-      string file = aux_shapes + input_folders + "/htt_AZh" +
-                    ".inputs-AZh-" + era + ".root";
-      cb.cp().channel({chn}).era({era}).backgrounds().ExtractShapes(
-          file, "$BIN/$PROCESS", "$BIN/$PROCESS_$SYSTEMATIC");
-      cb.cp().channel({chn}).era({era}).signals().ExtractShapes(
-          file, "$BIN/$PROCESS$MASS", "$BIN/$PROCESS$MASS_$SYSTEMATIC");
-    }
-  }
-
-
-  cout << ">> Generating bbb uncertainties...\n";
-  auto bbb = ch::BinByBinFactory()
-      .SetAddThreshold(0.1)
-      .SetFixNorm(true);
-
-  bbb.AddBinByBin(cb.cp().process({"Zjets"}), cb);
-
-
-  cout << ">> Setting standardised bin names...\n";
-  ch::SetStandardBinNames(cb);
-
-
-  RooWorkspace ws("htt", "htt");
-
-  TFile demo("AZh_demo.root", "RECREATE");
-
-  bool do_morphing = true;
-  string postfix = "_eff_acc";
-  map<string, RooAbsReal *> mass_var = {
-      {"AZh", &mA}
-  };
-  if (do_morphing) {
-    auto bins = cb.bin_set();
-    for (auto b : bins) {
-      auto procs = cb.cp().bin({b}).signals().process_set();
-      for (auto p : procs) {
-        string pdf_name = b + "_" + p + "_morph";
-        ch::BuildRooMorphing(ws, cb, b, p, *(mass_var[p]),
-                             "eff_acc", true, true, false, &demo);
-        std::string prod_name = pdf_name + "_eff_acc";
-        RooAbsReal *norm = ws.function(prod_name.c_str());
-        RooProduct full_norm((pdf_name + "_norm").c_str(), "",
-                             RooArgList(*norm, *(xs_map[p])));
-        ws.import(full_norm, RooFit::RecycleConflictNodes());
-      }
-    }
-  }
-  demo.Close();
-  cb.AddWorkspace(ws);
-  cb.cp().signals().ExtractPdfs(cb, "htt", "$BIN_$PROCESS_morph");
-  cb.PrintAll();
-
-
-  string folder = "output/AZh_cards";
-  boost::filesystem::create_directories(folder);
-
-  TFile output((folder + "/htt_input.AZh.root").c_str(), "RECREATE");
-  cb.cp().mass({"*"}).WriteDatacard(folder + "/htt_cmb_AZh.txt", output);
-  auto bins = cb.bin_set();
-  for (auto b : bins) {
-    cb.cp().bin({b}).mass({"*"}).WriteDatacard(
-        folder + "/" + b + ".txt", output);
-  }
-  output.Close();
-
-
-  cout << "\n>> Done!\n";
-}
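Each of the deleted CombinePdfs examples in this diff builds its signal model the same way: every (mA, tanb)-binned TH2 input is wrapped in a RooDataHist and a RooHistFunc, and the cross-section and branching-ratio functions are then multiplied into a RooProduct that later scales the morphing pdf normalisation. A minimal standalone sketch of that pattern follows; the input file name ("model_inputs.root") and histogram names ("h_xsec", "h_br") are hypothetical placeholders, not names from the repository.

    // Sketch only: TH2 -> RooDataHist -> RooHistFunc -> RooProduct, as repeated
    // throughout the deleted examples. All file/histogram names are placeholders.
    #include "TFile.h"
    #include "TH2.h"
    #include "RooRealVar.h"
    #include "RooArgList.h"
    #include "RooDataHist.h"
    #include "RooHistFunc.h"
    #include "RooProduct.h"
    #include "RooGlobalFunc.h"

    int main() {
      RooRealVar mA("mA", "mA", 300.);
      RooRealVar tanb("tanb", "tanb", 1.);
      TFile f("model_inputs.root");           // hypothetical input file
      TH2F *h_xsec = (TH2F*)f.Get("h_xsec");  // xsec binned in (mA, tanb)
      TH2F *h_br   = (TH2F*)f.Get("h_br");    // BR binned in (mA, tanb)
      // Turn each histogram into a function of (mA, tanb)
      RooDataHist dh_xsec("dh_xsec", "", RooArgList(mA, tanb), RooFit::Import(*h_xsec));
      RooDataHist dh_br("dh_br", "", RooArgList(mA, tanb), RooFit::Import(*h_br));
      RooHistFunc xsec("xsec", "", RooArgList(mA, tanb), dh_xsec);
      RooHistFunc br("br", "", RooArgList(mA, tanb), dh_br);
      // xsec x BR re-evaluates automatically whenever mA or tanb change
      RooProduct xsec_br("xsec_br", "", RooArgList(xsec, br));
      xsec_br.Print();
      return 0;
    }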
diff --git a/CombinePdfs/bin/BuildFile.xml b/CombinePdfs/bin/BuildFile.xml
index 841ae2feac6..ab3dcdb2d82 100644
--- a/CombinePdfs/bin/BuildFile.xml
+++ b/CombinePdfs/bin/BuildFile.xml
@@ -1,13 +1,4 @@
-
-
-
-
-
-
-
-
-
diff --git a/CombinePdfs/bin/HhhMSSMCombinationExample.cpp b/CombinePdfs/bin/HhhMSSMCombinationExample.cpp
deleted file mode 100644
index 5887d506388..00000000000
--- a/CombinePdfs/bin/HhhMSSMCombinationExample.cpp
+++ /dev/null
@@ -1,429 +0,0 @@
-#include <string>
-#include <map>
-#include <set>
-#include <iostream>
-#include <utility>
-#include <vector>
-#include <cstdlib>
-#include "boost/filesystem.hpp"
-#include "CombineHarvester/CombineTools/interface/CombineHarvester.h"
-#include "CombineHarvester/CombineTools/interface/Utilities.h"
-#include "CombineHarvester/CombineTools/interface/HttSystematics.h"
-#include "CombineHarvester/CombinePdfs/interface/MorphFunctions.h"
-#include "CombineHarvester/CombineTools/interface/TFileIO.h"
-
-#include "RooWorkspace.h"
-#include "RooRealVar.h"
-#include "TH2.h"
-#include "RooDataHist.h"
-#include "RooHistFunc.h"
-#include "RooFormulaVar.h"
-#include "RooProduct.h"
-
-using namespace std;
-
-TH2F SantanderMatching(TH2F const& h4f, TH2F const& h5f, TH2F const* mass) {
-  TH2F res = h4f;
-  for (int x = 1; x <= h4f.GetNbinsX(); ++x) {
-    for (int y = 1; y <= h4f.GetNbinsY(); ++y) {
-      double mh =
-          mass ? mass->GetBinContent(x, y) : h4f.GetXaxis()->GetBinCenter(x);
-      double t = log(mh / 4.75) - 2.;
-      double fourflav = h4f.GetBinContent(x, y);
-      double fiveflav = h5f.GetBinContent(x, y);
-      double sigma = (1. / (1. + t)) * (fourflav + t * fiveflav);
-      res.SetBinContent(x, y, sigma);
-    }
-  }
-  return res;
-}
-
-TH1F RestoreBinning(TH1F const& src, TH1F const& ref) {
-  TH1F res = ref;
-  res.Reset();
-  for (int x = 1; x <= res.GetNbinsX(); ++x) {
-    res.SetBinContent(x, src.GetBinContent(x));
-  }
-  return res;
-}
-
-int main() {
-  ch::CombineHarvester cb;
-
-  // cb.SetVerbosity(1);
-
-  typedef vector<pair<int, string>> Categories;
-  typedef vector<string> VString;
-
-  string auxiliaries = string(getenv("CMSSW_BASE")) + "/src/auxiliaries/";
-  string aux_shapes = auxiliaries +"shapes/";
-  string aux_pruning = auxiliaries +"pruning/";
-
-  // RooFit will be quite noisy if we don't set this
-  RooMsgService::instance().setGlobalKillBelow(RooFit::WARNING);
-
-  // In the first part of this code we will construct our MSSM signal model.
-  // This means declaring mA and tanb as our free parameters, and defining all
-  // other Higgs masses, cross sections and branching ratios as a function of
-  // these. Note that this example is very verbose and explicit in the
-  // construction - one could imagine developing a more generic and efficient
-  // tool to do this.
-
-  // Define mA and tanb as RooRealVars
-  // Here I do not declare a range and it is decided automatically from
-  // the inputs once the first RooDataHist is created.
-  // There were some issues found with declaring a range by hand.
-  RooRealVar mA("mA", "mA", 300.);
-  RooRealVar tanb("tanb", "tanb", 1.);
-
-  // All the other inputs we need to build the model can be found in one of the
-  // LHCXSWG scans. Here we use the low-tanb-high scenario. This file contains a
-  // large number of 2D histograms, all binned in mA and tanb, that each provide
-  // a different mass, xsec or BR.
-  TFile inputs(TString(auxiliaries) +
-               "models/out.low-tb-high-8TeV-tanbAll-nnlo.root");
-  // Get the TH2Fs for the masses of the h and H bosons.
-  TH2F h_mH = ch::OpenFromTFile<TH2F>(&inputs, "h_mH");
-  TH2F h_mh = ch::OpenFromTFile<TH2F>(&inputs, "h_mh");
-  // Now two more steps are needed: we have to convert each TH2F to a
-  // RooDataHist, then build a RooHistFunc from the RooDataHist. These steps
-  // will be repeated for every TH2F we import below.
-  RooDataHist dh_mH("dh_mH", "dh_mH", RooArgList(mA, tanb),
-                    RooFit::Import(h_mH));
-  RooDataHist dh_mh("dh_mh", "dh_mh", RooArgList(mA, tanb),
-                    RooFit::Import(h_mh));
-  RooHistFunc mH("mH", "mH", RooArgList(mA, tanb), dh_mH);
-  RooHistFunc mh("mh", "mh", RooArgList(mA, tanb), dh_mh);
-  // There is also the possibility to interpolate mass values rather than just
-  // take the value of the enclosing histogram bin, but we'll leave this off for
-  // now.
-  // mH.setInterpolationOrder(1);
-
-  // Now we'll do the same for the cross section
-  TH2F h_ggF_xsec_H = ch::OpenFromTFile<TH2F>(&inputs, "h_ggF_xsec_H");
-  RooDataHist dh_ggF_xsec_H("dh_ggF_xsec_H", "dh_ggF_xsec_H",
-                            RooArgList(mA, tanb), RooFit::Import(h_ggF_xsec_H));
-  RooHistFunc ggF_xsec_H("ggF_xsec_H", "ggF_xsec_H", RooArgList(mA, tanb),
-                         dh_ggF_xsec_H);
-  // Extra histograms needed for MSSM part
-  TH2F h_ggF_xsec_h = ch::OpenFromTFile<TH2F>(&inputs, "h_ggF_xsec_h");
-  RooDataHist dh_ggF_xsec_h("dh_ggF_xsec_h", "dh_ggF_xsec_h",
-                            RooArgList(mA, tanb), RooFit::Import(h_ggF_xsec_h));
-  RooHistFunc ggF_xsec_h("ggF_xsec_h", "ggF_xsec_h", RooArgList(mA, tanb),
-                         dh_ggF_xsec_h);
-  TH2F h_ggF_xsec_A = ch::OpenFromTFile<TH2F>(&inputs, "h_ggF_xsec_A");
-  RooDataHist dh_ggF_xsec_A("dh_ggF_xsec_A", "dh_ggF_xsec_A",
-                            RooArgList(mA, tanb), RooFit::Import(h_ggF_xsec_A));
-  RooHistFunc ggF_xsec_A("ggF_xsec_A", "ggF_xsec_A", RooArgList(mA, tanb),
-                         dh_ggF_xsec_A);
-
-  // And the same for the b-associated production - but with the caveat that
-  // we must first do the Santander-matching of the 4FS and 5FS.
-  TH2F h_bbH5f_xsec_h = ch::OpenFromTFile<TH2F>(&inputs, "h_bbH_xsec_h");
-  TH2F h_bbH5f_xsec_H = ch::OpenFromTFile<TH2F>(&inputs, "h_bbH_xsec_H");
-  TH2F h_bbH5f_xsec_A = ch::OpenFromTFile<TH2F>(&inputs, "h_bbH_xsec_A");
-  TH2F h_bbH4f_xsec_h = ch::OpenFromTFile<TH2F>(&inputs, "h_bbH4f_xsec_h");
-  TH2F h_bbH4f_xsec_H = ch::OpenFromTFile<TH2F>(&inputs, "h_bbH4f_xsec_H");
-  TH2F h_bbH4f_xsec_A = ch::OpenFromTFile<TH2F>(&inputs, "h_bbH4f_xsec_A");
-  TH2F h_bbH_xsec_h = SantanderMatching(h_bbH4f_xsec_h, h_bbH5f_xsec_h, &h_mh);
-  TH2F h_bbH_xsec_H = SantanderMatching(h_bbH4f_xsec_H, h_bbH5f_xsec_H, &h_mH);
-  TH2F h_bbH_xsec_A = SantanderMatching(h_bbH4f_xsec_A, h_bbH5f_xsec_A, nullptr);
-  RooDataHist dh_bbH_xsec_h("dh_bbH_xsec_h", "dh_bbH_xsec_h",
-                            RooArgList(mA, tanb), RooFit::Import(h_bbH_xsec_h));
-  RooDataHist dh_bbH_xsec_H("dh_bbH_xsec_H", "dh_bbH_xsec_H",
-                            RooArgList(mA, tanb), RooFit::Import(h_bbH_xsec_H));
-  RooDataHist dh_bbH_xsec_A("dh_bbH_xsec_A", "dh_bbH_xsec_A",
-                            RooArgList(mA, tanb), RooFit::Import(h_bbH_xsec_A));
-  RooHistFunc bbH_xsec_h("bbH_xsec_h", "bbH_xsec_h", RooArgList(mA, tanb),
-                         dh_bbH_xsec_h);
-  RooHistFunc bbH_xsec_H("bbH_xsec_H", "bbH_xsec_H", RooArgList(mA, tanb),
-                         dh_bbH_xsec_H);
-  RooHistFunc bbH_xsec_A("bbH_xsec_A", "bbH_xsec_A", RooArgList(mA, tanb),
-                         dh_bbH_xsec_A);
-
-  // Now the branching ratios to hh, tau tau and bb
-  TH2F h_brh0h0_H = ch::OpenFromTFile<TH2F>(&inputs, "h_brh0h0_H");
-  // Add factor of 2 here for h->tautau, h->bb and reverse
-  h_brh0h0_H = 2*h_brh0h0_H;
-  TH2F h_brtautau_h = ch::OpenFromTFile<TH2F>(&inputs, "h_brtautau_h");
-  TH2F h_brbb_h = ch::OpenFromTFile<TH2F>(&inputs, "h_brbb_h");
-  RooDataHist dh_brh0h0_H("dh_brh0h0_H", "dh_brh0h0_H",
-                          RooArgList(mA, tanb),
-                          RooFit::Import(h_brh0h0_H));
-  RooDataHist dh_brtautau_h("dh_brtautau_h", "dh_brtautau_h",
-                            RooArgList(mA, tanb),
-                            RooFit::Import(h_brtautau_h));
-  RooDataHist dh_brbb_h("dh_brbb_h", "dh_brbb_h",
-                        RooArgList(mA, tanb),
-                        RooFit::Import(h_brbb_h));
-  RooHistFunc brh0h0_H("brh0h0_H", "brh0h0_H", RooArgList(mA, tanb),
-                       dh_brh0h0_H);
-  RooHistFunc brtautau_h("brtautau_h", "brtautau_h", RooArgList(mA, tanb),
-                         dh_brtautau_h);
-  RooHistFunc brbb_h("brbb_h", "brbb_h", RooArgList(mA, tanb),
-                     dh_brbb_h);
-  // Extra histograms needed for MSSM part
-  TH2F h_brtautau_H = ch::OpenFromTFile<TH2F>(&inputs, "h_brtautau_H");
-  TH2F h_brtautau_A = ch::OpenFromTFile<TH2F>(&inputs, "h_brtautau_A");
-  RooDataHist dh_brtautau_H("dh_brtautau_H", "dh_brtautau_H",
-                            RooArgList(mA, tanb),
-                            RooFit::Import(h_brtautau_H));
-  RooDataHist dh_brtautau_A("dh_brtautau_A", "dh_brtautau_A",
-                            RooArgList(mA, tanb),
-                            RooFit::Import(h_brtautau_A));
-  RooHistFunc brtautau_H("brtautau_H", "brtautau_H", RooArgList(mA, tanb),
-                         dh_brtautau_H);
-  RooHistFunc brtautau_A("brtautau_A", "brtautau_A", RooArgList(mA, tanb),
-                         dh_brtautau_A);
-
-
-  // We can build the xsec * BR products
-  RooProduct ggF_xsec_br_H_hh("ggF_xsec_br_H_hh", "",
-                              RooArgList(ggF_xsec_H, brh0h0_H, brtautau_h, brbb_h));
-  RooProduct ggF_xsec_br_h("ggF_xsec_br_h", "",
-                           RooArgList(ggF_xsec_h, brtautau_h));
-  RooProduct ggF_xsec_br_H("ggF_xsec_br_H", "",
-                           RooArgList(ggF_xsec_H, brtautau_H));
-  RooProduct ggF_xsec_br_A("ggF_xsec_br_A", "",
-                           RooArgList(ggF_xsec_A, brtautau_A));
-  RooProduct bbH_xsec_br_h("bbH_xsec_br_h", "",
-                           RooArgList(bbH_xsec_h, brtautau_h));
-  RooProduct bbH_xsec_br_H("bbH_xsec_br_H", "",
-                           RooArgList(bbH_xsec_H, brtautau_H));
-  RooProduct bbH_xsec_br_A("bbH_xsec_br_A", "",
-                           RooArgList(bbH_xsec_A, brtautau_A));
-
-
-  // Let's print out the values of all of these objects
-  cout << "mA: " << mA.getVal() << "\n";
-  cout << "tanb: " << tanb.getVal() << "\n\n";
-  cout << "mH: " << mH.getVal() << "\n";
-  cout << "mh: " << mh.getVal() << "\n";
-  cout << "ggF_xsec_H: " << ggF_xsec_H.getVal() << "\n";
-  cout << "brh0h0_H: " << brh0h0_H.getVal()/2 << "\n";
-  cout << "brtautau_h: " << brtautau_h.getVal() << "\n";
-  cout << "brbb_h: " << brbb_h.getVal() << "\n";
-  cout << "ggF_xsec_br_H_hh: " << ggF_xsec_br_H_hh.getVal() << "\n";
-  cout << "ggF_xsec_h: " << ggF_xsec_h.getVal() << "\n";
-  cout << "ggF_xsec_H: " << ggF_xsec_H.getVal() << "\n";
-  cout << "ggF_xsec_A: " << ggF_xsec_A.getVal() << "\n";
-  cout << "bbH_xsec_h: " << bbH_xsec_h.getVal() << "\n";
-  cout << "bbH_xsec_H: " << bbH_xsec_H.getVal() << "\n";
-  cout << "bbH_xsec_A: " << bbH_xsec_A.getVal() << "\n";
-  cout << "brtautau_h: " << brtautau_h.getVal() << "\n";
-  cout << "brtautau_H: " << brtautau_H.getVal() << "\n";
-  cout << "brtautau_A: " << brtautau_A.getVal() << "\n\n";
-  cout << "ggF_xsec_br_h: " << ggF_xsec_br_h.getVal() << "\n";
-  cout << "ggF_xsec_br_H: " << ggF_xsec_br_H.getVal() << "\n";
-  cout << "ggF_xsec_br_A: " << ggF_xsec_br_A.getVal() << "\n\n";
-  cout << "bbH_xsec_br_h: " << bbH_xsec_br_h.getVal() << "\n";
-  cout << "bbH_xsec_br_H: " << bbH_xsec_br_H.getVal() << "\n";
-  cout << "bbH_xsec_br_A: " << bbH_xsec_br_A.getVal() << "\n\n";
-
-  map<string, RooAbsReal *> xs_map;
-  xs_map["ggHTohhTo2Tau2B"] = &ggF_xsec_br_H_hh;
-  xs_map["ggh"] = &ggF_xsec_br_h;
-  xs_map["ggH"] = &ggF_xsec_br_H;
-  xs_map["ggA"] = &ggF_xsec_br_A;
-  xs_map["bbh"] = &bbH_xsec_br_h;
-  xs_map["bbH"] = &bbH_xsec_br_H;
-  xs_map["bbA"] = &bbH_xsec_br_A;
-
-  VString chns =
-      {"et", "mt","tt"};
-
-  map<string, string> input_folders = {
-      {"et", "Imperial"},
-      {"mt", "Imperial"},
-      {"tt", "Italians"}
-  };
-
-  map<string, VString> bkg_procs;
-  bkg_procs["et"] = {"ZTT", "W", "QCD", "ZL", "ZJ", "TT", "VV"};
-  bkg_procs["mt"] = {"ZTT", "W", "QCD", "ZL", "ZJ", "TT", "VV"};
-  bkg_procs["tt"] = {"ZTT", "W", "QCD", "ZLL", "TT", "VV"};
-
-
-  map<string, Categories> cats;
-  cats["et_8TeV"] = {
-      {0, "eleTau_2jet0tag"}, {1, "eleTau_2jet1tag"},
-      {2, "eleTau_2jet2tag"}};
-
-  cats["mt_8TeV"] = {
-      {0, "muTau_2jet0tag"}, {1, "muTau_2jet1tag"},
-//      {2, "muTau_2jet2tag"}};
-      {2, "muTau_2jet2tag"},
-      {8, "muTau_nobtag"},
-      {9, "muTau_btag"}};
-
-  cats["tt_8TeV"] = {
-      {0, "tauTau_2jet0tag"}, {1, "tauTau_2jet1tag"},
-      {2, "tauTau_2jet2tag"}};
-
-  VString sig_procs = {"ggHTohhTo2Tau2B"};
-
-  vector<string> masses = ch::MassesFromRange("260-350:10");
-
-  // Create the backgrounds and observations for all categories
-  cout << ">> Creating processes and observations...\n";
-  for (string era : {"8TeV"}) {
-    for (auto chn : chns) {
-      cb.AddObservations(
-        {"*"}, {"htt"}, {era}, {chn}, cats[chn+"_"+era]);
-      cb.AddProcesses(
-        {"*"}, {"htt"}, {era}, {chn}, bkg_procs[chn], cats[chn+"_"+era], false);
-    }
-  }
-  // Create the H->hh signal for the H->hh categories
-  cb.AddProcesses(masses, {"htt"}, {"8TeV"}, {"et"},
-                  sig_procs, {cats["et_8TeV"]}, true);
-  cb.AddProcesses(masses, {"htt"}, {"8TeV"}, {"tt"},
-                  sig_procs, {cats["tt_8TeV"]}, true);
-  cb.AddProcesses(masses, {"htt"}, {"8TeV"}, {"mt"},
-                  sig_procs, {cats["mt_8TeV"][0]}, true);
-  cb.AddProcesses(masses, {"htt"}, {"8TeV"}, {"mt"},
-                  sig_procs, {cats["mt_8TeV"][1]}, true);
-  cb.AddProcesses(masses, {"htt"}, {"8TeV"}, {"mt"},
-                  sig_procs, {cats["mt_8TeV"][2]}, true);
-
-  // create the phi->tautau signal for the MSSM categories
-  map<string, VString> signal_types = {
-      {"ggH", {"ggh", "ggH", "ggA"}},
-      {"bbH", {"bbh", "bbH", "bbA"}}
-  };
-  auto mssm_masses = ch::MassesFromRange(
-      "90,100,120-140:10,140-200:20,200-500:50,600-1000:100");
-  cb.AddProcesses(mssm_masses, {"htt"}, {"8TeV"}, {"mt"},
-                  signal_types["ggH"], {cats["mt_8TeV"][3]}, true);
-  cb.AddProcesses(mssm_masses, {"htt"}, {"8TeV"}, {"mt"},
-                  signal_types["bbH"], {cats["mt_8TeV"][4]}, true);
-  cb.AddProcesses(mssm_masses, {"htt"}, {"8TeV"}, {"mt"},
-                  signal_types["bbH"], {cats["mt_8TeV"][3]}, true);
-  cb.AddProcesses(mssm_masses, {"htt"}, {"8TeV"}, {"mt"},
-                  signal_types["ggH"], {cats["mt_8TeV"][4]}, true);
-
-  //Remove W background from 2jet1tag and 2jet2tag categories for tt channel
-  cb.FilterProcs([](ch::Process const* p) {
-    return (p->bin() == "tauTau_2jet1tag") && p->process() == "W";
-  });
-  cb.FilterProcs([](ch::Process const* p) {
-    return (p->bin() == "tauTau_2jet2tag") && p->process() == "W";
-  });
-
-  cout << ">> Adding systematic uncertainties...\n";
-  ch::AddSystematics_hhh_et_mt(cb,cb.cp().bin_id({0,1,2}));
-  ch::AddSystematics_hhh_tt(cb,cb.cp().bin_id({0,1,2}));
-  ch::AddMSSMSystematics(cb,cb.cp().bin_id({8,9}));
-
-  cout << ">> Extracting histograms from input root files...\n";
-  for (string era : {"8TeV"}) {
-    for (string chn : chns) {
-      string file = aux_shapes + input_folders[chn] + "/htt_" + chn +
-                    ".inputs-Hhh-" + era + ".root";
-      cb.cp().channel({chn}).era({era}).bin_id({0,1,2}).backgrounds().ExtractShapes(
-          file, "$BIN/$PROCESS", "$BIN/$PROCESS_$SYSTEMATIC");
-      cb.cp().channel({chn}).era({era}).bin_id({0,1,2}).signals().ExtractShapes(
-          file, "$BIN/$PROCESS$MASS", "$BIN/$PROCESS$MASS_$SYSTEMATIC");
-    }
-  }
-  //Read the MSSM part, just for mutau for now
-  // We have to map each Higgs signal process to the same histogram, i.e:
-  // {ggh, ggH, ggA} --> ggH
-  // {bbh, bbH, bbA} --> bbH
-  for (string era : {"8TeV"}) {
-    string file = aux_shapes + input_folders["mt"] + "/htt_mt.inputs-mssm-" + era + "-0.root";
-    cb.cp().channel({"mt"}).era({era}).bin_id({8,9}).backgrounds().ExtractShapes(
-        file, "$BIN/$PROCESS", "$BIN/$PROCESS_$SYSTEMATIC");
-    cb.cp().channel({"mt"}).era({era}).bin_id({8,9}).process(signal_types["ggH"]).ExtractShapes(
-        file, "$CHANNEL/ggH$MASS", "$CHANNEL/ggH$MASS_$SYSTEMATIC");
-    cb.cp().channel({"mt"}).era({era}).bin_id({8,9}).process(signal_types["bbH"]).ExtractShapes(
-        file, "$CHANNEL/bbH$MASS", "$CHANNEL/bbH$MASS_$SYSTEMATIC");
-  }
-
-  cout << ">> Merging bin errors...\n";
-  ch::CombineHarvester cb_et = move(cb.cp().channel({"et"}));
-  for (string era : {"8TeV"}) {
-    cb_et.cp().era({era}).bin_id({0, 1, 2}).process({"QCD","W","ZL","ZJ","VV","ZTT","TT"})
-        .MergeBinErrors(0.1, 0.5);
-  }
-  ch::CombineHarvester cb_mt = move(cb.cp().channel({"mt"}));
-  for (string era : {"8TeV"}) {
-    cb_mt.cp().era({era}).bin_id({0, 1, 2}).process({"QCD","W","ZL","ZJ","VV","ZTT","TT"})
-        .MergeBinErrors(0.1, 0.5);
-  }
-  ch::CombineHarvester cb_tt = move(cb.cp().channel({"tt"}));
-  for (string era : {"8TeV"}) {
-    cb_tt.cp().era({era}).bin_id({0, 1, 2}).process({"QCD","W","ZLL","VV","ZTT","TT"})
-        .MergeBinErrors(0.1, 0.5);
-  }
-
-  cout << ">> Generating bbb uncertainties...\n";
-  cb_mt.cp().bin_id({0, 1, 2}).process({"ZL","ZJ","W", "QCD","TT", "ZTT","VV"})
-      .AddBinByBin(0.1, true, &cb);
-
-  cb_et.cp().bin_id({0, 1, 2}).process({"ZL", "ZJ", "QCD", "W", "TT","ZTT","VV"})
-      .AddBinByBin(0.1, true, &cb);
-
-  cb_tt.cp().bin_id({0, 1, 2}).era({"8TeV"}).process({"ZL", "ZJ","W", "TT", "QCD", "ZTT","VV"})
-      .AddBinByBin(0.1, true, &cb);
-
-  cout << ">> Setting standardised bin names...\n";
-  ch::SetStandardBinNames(cb);
-
-
-  RooWorkspace ws("htt", "htt");
-
-  TFile demo("Hhh_demo.root", "RECREATE");
-
-  bool do_morphing = true;
-  string postfix = "_eff_acc";
-  map<string, RooAbsReal *> mass_var = {
-      {"ggHTohhTo2Tau2B", &mH}, {"ggh", &mh}, {"ggH", &mH}, {"ggA", &mA},
-      {"bbh", &mh}, {"bbH", &mH}, {"bbA", &mA}
-  };
-
-  if (do_morphing) {
-    auto bins = cb.bin_set();
-    for (auto b : bins) {
-      auto procs = cb.cp().bin({b}).signals().process_set();
-      std::cout << "morphing bin: " << b << std::endl;
-      for (auto p : procs) {
-        std::cout << "morphing process: " << p << std::endl;
-        string pdf_name = b + "_" + p + "_morph";
-        // Set option to force signal to 0 outside of template range for H->hh only
-        bool force_template_limit = true;
-        if(b.find("8")!=string::npos && b.find("9")!=string::npos) force_template_limit=false;
-        ch::BuildRooMorphing(ws, cb, b, p, *(mass_var[p]),
-                             "eff_acc", true, true, force_template_limit, &demo);
-        std::string prod_name = pdf_name + "_eff_acc";
-        RooAbsReal *norm = ws.function(prod_name.c_str());
-        RooProduct full_norm((pdf_name + "_norm").c_str(), "",
-                             RooArgList(*norm, *(xs_map[p])));
-        ws.import(full_norm, RooFit::RecycleConflictNodes());
-      }
-    }
-  }
-  demo.Close();
-  cb.AddWorkspace(ws);
-  cb.cp().signals().ExtractPdfs(cb, "htt", "$BIN_$PROCESS_morph");
-  cb.PrintAll();
-
-
-  string folder = "output/mssm_hhh_cards";
-  boost::filesystem::create_directories(folder);
-
-  TFile output((folder + "/htt_input.mssm.Hhh.root").c_str(), "RECREATE");
-  cb.cp().mass({"*"}).WriteDatacard(folder + "/htt_cmb_mssm_Hhh.txt", output);
-  auto bins = cb.bin_set();
-  for (auto b : bins) {
-    std::cout << "Writing datacard for bin: " << b << std::endl;
-    cb.cp().bin({b}).mass({"*"}).WriteDatacard(
-        folder + "/" + b + ".txt", output);
-  }
-
-
-
-  output.Close();
-
-  cout << "\n>> Done!\n";
-}
"RooDataHist.h" -#include "RooHistFunc.h" -#include "RooFormulaVar.h" -#include "RooProduct.h" - -using namespace std; - -int main() { - ch::CombineHarvester cb; - - // cb.SetVerbosity(1); - - typedef vector> Categories; - typedef vector VString; - - string auxiliaries = string(getenv("CMSSW_BASE")) + "/src/auxiliaries/"; - string aux_shapes = auxiliaries +"shapes/"; - string aux_pruning = auxiliaries +"pruning/"; - - // RooFit will be quite noisy if we don't set this - RooMsgService::instance().setGlobalKillBelow(RooFit::WARNING); - - // In the first part of this code we will construct our MSSM signal model. - // This means declaring mA and tanb as our free parameters, and defining all - // other Higgs masses, cross sections and branching ratios as a function of - // these. Note that this example is very verbose and explicit in the - // construction - one could imagine developing a more generic and efficient - // tool to do this. - - // Define mA and tanb as RooRealVars - // Here I do not declare a range and it is decided automatically from - // the inputs once the first RooDataHist is created. - // There were some issues found with declaring a range by hand. - RooRealVar mA("mA", "mA", 300.); - RooRealVar tanb("tanb", "tanb", 1.); - - // All the other inputs we need to build the model can be found in one of the - // LHCXSWG scans. Here we use the low-tanb-high scenario. This file contains a - // large number of 2D histograms, all binned in mA and tanb, that each provide - // a different mass, xsec or BR. - TFile inputs(TString(auxiliaries) + - "models/out.low-tb-high-8TeV-tanbAll-nnlo.root"); - // Get the TH2Fs for the masses of the h and H bosons. - TH2F h_mH = ch::OpenFromTFile(&inputs, "h_mH"); - TH2F h_mh = ch::OpenFromTFile(&inputs, "h_mh"); - // Now two more steps are needed: we have to convert each TH2F to a - // RooDataHist, then build a RooHistFunc from the RooDataHist. These steps - // will be repeated for every TH2F we import below. - RooDataHist dh_mH("dh_mH", "dh_mH", RooArgList(mA, tanb), - RooFit::Import(h_mH)); - RooDataHist dh_mh("dh_mh", "dh_mh", RooArgList(mA, tanb), - RooFit::Import(h_mh)); - RooHistFunc mH("mH", "mH", RooArgList(mA, tanb), dh_mH); - RooHistFunc mh("mh", "mh", RooArgList(mA, tanb), dh_mh); - // There is also the possibility to interpolate mass values rather than just - // take the value of the enclosing histogram bin, but we'll leave this off for - // now. 
- // mH.setInterpolationOrder(1); - - // Now we'll do the same for the cross section - TH2F h_ggF_xsec_H = ch::OpenFromTFile(&inputs, "h_ggF_xsec_H"); - RooDataHist dh_ggF_xsec_H("dh_ggF_xsec_H", "dh_ggF_xsec_H", - RooArgList(mA, tanb), RooFit::Import(h_ggF_xsec_H)); - RooHistFunc ggF_xsec_H("ggF_xsec_H", "ggF_xsec_H", RooArgList(mA, tanb), - dh_ggF_xsec_H); - - // Now the branching ratios to hh, tau tau and bb - TH2F h_brh0h0_H = ch::OpenFromTFile(&inputs, "h_brh0h0_H"); - // Add factor of 2 here for h->tautau, h->bb and reverse - h_brh0h0_H = 2*h_brh0h0_H; - TH2F h_brtautau_h = ch::OpenFromTFile(&inputs, "h_brtautau_h"); - TH2F h_brbb_h = ch::OpenFromTFile(&inputs, "h_brbb_h"); - RooDataHist dh_brh0h0_H("dh_brh0h0_H", "dh_brh0h0_H", - RooArgList(mA, tanb), - RooFit::Import(h_brh0h0_H)); - RooDataHist dh_brtautau_h("dh_brtautau_h", "dh_brtautau_h", - RooArgList(mA, tanb), - RooFit::Import(h_brtautau_h)); - RooDataHist dh_brbb_h("dh_brbb_h", "dh_brbb_h", - RooArgList(mA, tanb), - RooFit::Import(h_brbb_h)); - RooHistFunc brh0h0_H("brh0h0_H", "brh0h0_H", RooArgList(mA, tanb), - dh_brh0h0_H); - RooHistFunc brtautau_h("brtautau_h", "brtautau_h", RooArgList(mA, tanb), - dh_brtautau_h); - RooHistFunc brbb_h("brbb_h", "brbb_h", RooArgList(mA, tanb), - dh_brbb_h); - // We can build the xsec * BR products - RooProduct ggF_xsec_br_H("ggF_xsec_br_H", "", - RooArgList(ggF_xsec_H, brh0h0_H, brtautau_h, brbb_h)); - // Let's print out the values of all of these objects - cout << "mA: " << mA.getVal() << "\n"; - cout << "tanb: " << tanb.getVal() << "\n\n"; - cout << "mH: " << mH.getVal() << "\n"; - cout << "mh: " << mh.getVal() << "\n"; - cout << "ggF_xsec_H: " << ggF_xsec_H.getVal() << "\n"; - cout << "brh0h0_H: " << brh0h0_H.getVal()/2<< "\n"; - cout << "brtautau_h: " << brtautau_h.getVal() << "\n"; - cout << "brbb_h: " << brbb_h.getVal() << "\n"; - cout << "ggF_xsec_br_H: " << ggF_xsec_br_H.getVal() << "\n"; - - map xs_map; - xs_map["ggHTohhTo2Tau2B"] = &ggF_xsec_br_H; - - VString chns = - {"et", "mt","tt"}; - - map input_folders = { - {"et", "Imperial"}, - {"mt", "Imperial"}, - {"tt", "Italians"} - }; - - map bkg_procs; - bkg_procs["et"] = {"ZTT", "W", "QCD", "ZL", "ZJ", "TT", "VV"}; - bkg_procs["mt"] = {"ZTT", "W", "QCD", "ZL", "ZJ", "TT", "VV"}; - bkg_procs["tt"] = {"ZTT", "W", "QCD", "ZLL", "TT", "VV"}; - - /*map sm_procs; - sm_procs["et"] = {"ggH_SM125","VH_SM125","qqH_SM125"}; - sm_procs["mt"] = {"ggH_SM125","VH_SM125","qqH_SM125"}; - sm_procs["tt"] = {"ggH_SM125","VH_SM125","qqH_SM125"}; - */ - - VString sig_procs={"ggHTohhTo2Tau2B"}; - - map cats; - cats["et_8TeV"] = { - {0, "eleTau_2jet0tag"}, {1, "eleTau_2jet1tag"}, - {2, "eleTau_2jet2tag"}}; - - cats["mt_8TeV"] = { - {0, "muTau_2jet0tag"}, {1, "muTau_2jet1tag"}, - {2, "muTau_2jet2tag"}}; - - cats["tt_8TeV"] = { - {0, "tauTau_2jet0tag"}, {1, "tauTau_2jet1tag"}, - {2, "tauTau_2jet2tag"}}; - - - - vector masses = ch::MassesFromRange("260-350:10"); - - cout << ">> Creating processes and observations...\n"; - for (string era : {"8TeV"}) { - for (auto chn : chns) { - cb.AddObservations( - {"*"}, {"htt"}, {era}, {chn}, cats[chn+"_"+era]); - cb.AddProcesses( - {"*"}, {"htt"}, {era}, {chn}, bkg_procs[chn], cats[chn+"_"+era], false); - // cb.AddProcesses( - // {"*"},{"htt"}, {era}, {chn}, sm_procs[chn], cats[chn+"_"+era],false); - cb.AddProcesses( - masses, {"htt"}, {era}, {chn}, sig_procs, cats[chn+"_"+era], true); - } - } - - //Remove W background from 2jet1tag and 2jet2tag categories for tt channel - cb.FilterProcs([](ch::Process const* p) { - 
return (p->bin() == "tauTau_2jet1tag") && p->process() == "W"; - }); - cb.FilterProcs([](ch::Process const* p) { - return (p->bin() == "tauTau_2jet2tag") && p->process() == "W"; - }); - - cout << ">> Adding systematic uncertainties...\n"; - ch::AddSystematics_hhh_et_mt(cb); - ch::AddSystematics_hhh_tt(cb); - - cout << ">> Extracting histograms from input root files...\n"; - for (string era : {"8TeV"}) { - for (string chn : chns) { - string file = aux_shapes + input_folders[chn] + "/htt_" + chn + - ".inputs-Hhh-" + era + ".root"; - cb.cp().channel({chn}).era({era}).backgrounds().ExtractShapes( - file, "$BIN/$PROCESS", "$BIN/$PROCESS_$SYSTEMATIC"); - cb.cp().channel({chn}).era({era}).signals().ExtractShapes( - file, "$BIN/$PROCESS$MASS", "$BIN/$PROCESS$MASS_$SYSTEMATIC"); - } - } - - - cout << ">> Merging bin errors...\n"; - ch::CombineHarvester cb_et = move(cb.cp().channel({"et"})); - for (string era : {"8TeV"}) { - cb_et.cp().era({era}).bin_id({0, 1, 2}).process({"QCD","W","ZL","ZJ","VV","ZTT","TT"}) - .MergeBinErrors(0.1, 0.5); - } - ch::CombineHarvester cb_mt = move(cb.cp().channel({"mt"})); - for (string era : {"8TeV"}) { - cb_mt.cp().era({era}).bin_id({0, 1, 2}).process({"QCD","W","ZL","ZJ","VV","ZTT","TT"}) - .MergeBinErrors(0.1, 0.5); - } - ch::CombineHarvester cb_tt = move(cb.cp().channel({"tt"})); - for (string era : {"8TeV"}) { - cb_tt.cp().era({era}).bin_id({0, 1, 2}).process({"QCD","W","ZLL","VV","ZTT","TT"}) - .MergeBinErrors(0.1, 0.5); - } - - cout << ">> Generating bbb uncertainties...\n"; - cb_mt.cp().bin_id({0, 1, 2}).process({"ZL","ZJ","W", "QCD","TT", "ZTT","VV"}) - .AddBinByBin(0.1, true, &cb); - - cb_et.cp().bin_id({0, 1, 2}).process({"ZL", "ZJ", "QCD", "W", "TT","ZTT","VV"}) - .AddBinByBin(0.1, true, &cb); - - cb_tt.cp().bin_id({0, 1, 2}).era({"8TeV"}).process({"ZL", "ZJ","W", "TT", "QCD", "ZTT","VV"}) - .AddBinByBin(0.1, true, &cb); - - cout << ">> Setting standardised bin names...\n"; - ch::SetStandardBinNames(cb); - - - RooWorkspace ws("htt", "htt"); - - TFile demo("Hhh_demo.root", "RECREATE"); - - bool do_morphing = true; - string postfix = "_eff_acc"; - map mass_var = { - {"ggHTohhTo2Tau2B", &mH} - }; - if (do_morphing) { - auto bins = cb.bin_set(); - for (auto b : bins) { - auto procs = cb.cp().bin({b}).signals().process_set(); - for (auto p : procs) { - string pdf_name = b + "_" + p + "_morph"; - ch::BuildRooMorphing(ws, cb, b, p, *(mass_var[p]), - "eff_acc", true, true, true, &demo); - std::string prod_name = pdf_name + "_eff_acc"; - RooAbsReal *norm = ws.function(prod_name.c_str()); - RooProduct full_norm((pdf_name + "_norm").c_str(), "", - RooArgList(*norm, *(xs_map[p]))); - ws.import(full_norm, RooFit::RecycleConflictNodes()); - } - } - } - demo.Close(); - cb.AddWorkspace(ws); - cb.cp().signals().ExtractPdfs(cb, "htt", "$BIN_$PROCESS_morph"); - cb.PrintAll(); - - - string folder = "output/hhh_cards"; - boost::filesystem::create_directories(folder); - - TFile output((folder + "/htt_input.Hhh.root").c_str(), "RECREATE"); - cb.cp().mass({"*"}).WriteDatacard(folder + "/htt_cmb_Hhh.txt", output); - auto bins = cb.bin_set(); - for (auto b : bins) { - cb.cp().bin({b}).mass({"*"}).WriteDatacard( - folder + "/" + b + ".txt", output); - } - output.Close(); - - cout << "\n>> Done!\n"; -} diff --git a/CombinePdfs/bin/MorphingMSSM.cpp b/CombinePdfs/bin/MorphingMSSM.cpp deleted file mode 100644 index f3efcb853a5..00000000000 --- a/CombinePdfs/bin/MorphingMSSM.cpp +++ /dev/null @@ -1,348 +0,0 @@ -#include -#include -#include -#include 
"CombineHarvester/CombineTools/interface/CombineHarvester.h" -#include "CombineHarvester/CombineTools/interface/Utilities.h" -#include "CombineHarvester/CombineTools/interface/TFileIO.h" -#include "CombineHarvester/CombineTools/interface/HttSystematics.h" -#include "CombineHarvester/CombinePdfs/interface/MorphFunctions.h" - -#include "RooWorkspace.h" -#include "RooRealVar.h" -#include "TH2.h" -#include "RooDataHist.h" -#include "RooHistFunc.h" -#include "RooFormulaVar.h" -#include "RooProduct.h" - -using namespace std; - -TH2F SantanderMatching(TH2F const& h4f, TH2F const& h5f, TH2F const* mass) { - TH2F res = h4f; - for (int x = 1; x <= h4f.GetNbinsX(); ++x) { - for (int y = 1; y <= h4f.GetNbinsY(); ++y) { - double mh = - mass ? mass->GetBinContent(x, y) : h4f.GetXaxis()->GetBinCenter(x); - double t = log(mh / 4.75) - 2.; - double fourflav = h4f.GetBinContent(x, y); - double fiveflav = h5f.GetBinContent(x, y); - double sigma = (1. / (1. + t)) * (fourflav + t * fiveflav); - res.SetBinContent(x, y, sigma); - } - } - return res; -} - -TH1F RestoreBinning(TH1F const& src, TH1F const& ref) { - TH1F res = ref; - res.Reset(); - for (int x = 1; x <= res.GetNbinsX(); ++x) { - res.SetBinContent(x, src.GetBinContent(x)); - } - return res; -} - -int main() { - typedef vector> Categories; - typedef vector VString; - - // We will need to source some inputs from the "auxiliaries" repo - string auxiliaries = string(getenv("CMSSW_BASE")) + "/src/auxiliaries/"; - string aux_shapes = auxiliaries +"shapes/"; - - // RooFit will be quite noisy if we don't set this - RooMsgService::instance().setGlobalKillBelow(RooFit::WARNING); - - // In the first part of this code we will construct our MSSM signal model. - // This means declaring mA and tanb as our free parameters, and defining all - // other Higgs masses, cross sections and branching ratios as a function of - // these. Note that this example is very verbose and explicit in the - // construction - one could imagine developing a more generic and efficient - // tool to do this. - - // Define mA and tanb as RooRealVars - // Here we fix the valid ranges of these parameters and set their starting - // values - RooRealVar mA("mA", "mA", 344., 90., 1000.); - RooRealVar tanb("tanb", "tanb", 9., 1., 60.); - - // All the other inputs we need to build the model can be found in one of the - // LHCXSWG scans. Here we use the classic mhmax scenario. This file contains a - // large number of 2D histograms, all binned in mA and tanb, that each provide - // a different mass, xsec or BR. - TFile inputs(TString(auxiliaries) + - "models/out.mhmax-mu+200-8TeV-tanbHigh-nnlo.root"); - - // Get the TH2Fs for the masses of the h and H bosons. - TH2F h_mH = ch::OpenFromTFile(&inputs, "h_mH"); - TH2F h_mh = ch::OpenFromTFile(&inputs, "h_mh"); - // Now two more steps are needed: we have to convert each TH2F to a - // RooDataHist, then build a RooHistFunc from the RooDataHist. These steps - // will be repeated for every TH2F we import below. - RooDataHist dh_mH("dh_mH", "dh_mH", RooArgList(mA, tanb), - RooFit::Import(h_mH)); - RooDataHist dh_mh("dh_mh", "dh_mh", RooArgList(mA, tanb), - RooFit::Import(h_mh)); - RooHistFunc mH("mH", "mH", RooArgList(mA, tanb), dh_mH); - RooHistFunc mh("mh", "mh", RooArgList(mA, tanb), dh_mh); - // There is also the possibility to interpolate mass values rather than just - // take the value of the enclosing histogram bin, but we'll leave this off for - // now. 
- // mH.setInterpolationOrder(1); - // mh.setInterpolationOrder(1); - - // Now we'll do the same for the gluon-fusion cross sections (one TH2F for - // each Higgs boson) - TH2F h_ggF_xsec_h = ch::OpenFromTFile(&inputs, "h_ggF_xsec_h"); - TH2F h_ggF_xsec_H = ch::OpenFromTFile(&inputs, "h_ggF_xsec_H"); - TH2F h_ggF_xsec_A = ch::OpenFromTFile(&inputs, "h_ggF_xsec_A"); - RooDataHist dh_ggF_xsec_h("dh_ggF_xsec_h", "dh_ggF_xsec_h", - RooArgList(mA, tanb), RooFit::Import(h_ggF_xsec_h)); - RooDataHist dh_ggF_xsec_H("dh_ggF_xsec_H", "dh_ggF_xsec_H", - RooArgList(mA, tanb), RooFit::Import(h_ggF_xsec_H)); - RooDataHist dh_ggF_xsec_A("dh_ggF_xsec_A", "dh_ggF_xsec_A", - RooArgList(mA, tanb), RooFit::Import(h_ggF_xsec_A)); - RooHistFunc ggF_xsec_h("ggF_xsec_h", "ggF_xsec_h", RooArgList(mA, tanb), - dh_ggF_xsec_h); - RooHistFunc ggF_xsec_H("ggF_xsec_H", "ggF_xsec_H", RooArgList(mA, tanb), - dh_ggF_xsec_H); - RooHistFunc ggF_xsec_A("ggF_xsec_A", "ggF_xsec_A", RooArgList(mA, tanb), - dh_ggF_xsec_A); - - // And the same for the b-associated production - but with the caveat that - // we must first do the Santander-matching of the 4FS and 5FS. - TH2F h_bbH5f_xsec_h = ch::OpenFromTFile(&inputs, "h_bbH_xsec_h"); - TH2F h_bbH5f_xsec_H = ch::OpenFromTFile(&inputs, "h_bbH_xsec_H"); - TH2F h_bbH5f_xsec_A = ch::OpenFromTFile(&inputs, "h_bbH_xsec_A"); - TH2F h_bbH4f_xsec_h = ch::OpenFromTFile(&inputs, "h_bbH4f_xsec_h"); - TH2F h_bbH4f_xsec_H = ch::OpenFromTFile(&inputs, "h_bbH4f_xsec_H"); - TH2F h_bbH4f_xsec_A = ch::OpenFromTFile(&inputs, "h_bbH4f_xsec_A"); - TH2F h_bbH_xsec_h = SantanderMatching(h_bbH4f_xsec_h, h_bbH5f_xsec_h, &h_mh); - TH2F h_bbH_xsec_H = SantanderMatching(h_bbH4f_xsec_H, h_bbH5f_xsec_H, &h_mH); - TH2F h_bbH_xsec_A = SantanderMatching(h_bbH4f_xsec_A, h_bbH5f_xsec_A, nullptr); - RooDataHist dh_bbH_xsec_h("dh_bbH_xsec_h", "dh_bbH_xsec_h", - RooArgList(mA, tanb), RooFit::Import(h_bbH_xsec_h)); - RooDataHist dh_bbH_xsec_H("dh_bbH_xsec_H", "dh_bbH_xsec_H", - RooArgList(mA, tanb), RooFit::Import(h_bbH_xsec_H)); - RooDataHist dh_bbH_xsec_A("dh_bbH_xsec_A", "dh_bbH_xsec_A", - RooArgList(mA, tanb), RooFit::Import(h_bbH_xsec_A)); - RooHistFunc bbH_xsec_h("bbH_xsec_h", "bbH_xsec_h", RooArgList(mA, tanb), - dh_bbH_xsec_h); - RooHistFunc bbH_xsec_H("bbH_xsec_H", "bbH_xsec_H", RooArgList(mA, tanb), - dh_bbH_xsec_H); - RooHistFunc bbH_xsec_A("bbH_xsec_A", "bbH_xsec_A", RooArgList(mA, tanb), - dh_bbH_xsec_A); - - // Now the branching ratios to tau tau - TH2F h_brtautau_h = ch::OpenFromTFile(&inputs, "h_brtautau_h"); - TH2F h_brtautau_H = ch::OpenFromTFile(&inputs, "h_brtautau_H"); - TH2F h_brtautau_A = ch::OpenFromTFile(&inputs, "h_brtautau_A"); - RooDataHist dh_brtautau_h("dh_brtautau_h", "dh_brtautau_h", - RooArgList(mA, tanb), - RooFit::Import(h_brtautau_h)); - RooDataHist dh_brtautau_H("dh_brtautau_H", "dh_brtautau_H", - RooArgList(mA, tanb), - RooFit::Import(h_brtautau_H)); - RooDataHist dh_brtautau_A("dh_brtautau_A", "dh_brtautau_A", - RooArgList(mA, tanb), - RooFit::Import(h_brtautau_A)); - RooHistFunc brtautau_h("brtautau_h", "brtautau_h", RooArgList(mA, tanb), - dh_brtautau_h); - RooHistFunc brtautau_H("brtautau_H", "brtautau_H", RooArgList(mA, tanb), - dh_brtautau_H); - RooHistFunc brtautau_A("brtautau_A", "brtautau_A", RooArgList(mA, tanb), - dh_brtautau_A); - - // We can build the xsec * BR products - RooProduct ggF_xsec_br_h("ggF_xsec_br_h", "", - RooArgList(ggF_xsec_h, brtautau_h)); - RooProduct ggF_xsec_br_H("ggF_xsec_br_H", "", - RooArgList(ggF_xsec_H, brtautau_H)); - RooProduct 
ggF_xsec_br_A("ggF_xsec_br_A", "", - RooArgList(ggF_xsec_A, brtautau_A)); - RooProduct bbH_xsec_br_h("bbH_xsec_br_h", "", - RooArgList(bbH_xsec_h, brtautau_h)); - RooProduct bbH_xsec_br_H("bbH_xsec_br_H", "", - RooArgList(bbH_xsec_H, brtautau_H)); - RooProduct bbH_xsec_br_A("bbH_xsec_br_A", "", - RooArgList(bbH_xsec_A, brtautau_A)); - - // Let's print out the values of all of these objects - cout << "mA: " << mA.getVal() << "\n"; - cout << "tanb: " << tanb.getVal() << "\n\n"; - cout << "mH: " << mH.getVal() << "\n"; - cout << "mh: " << mh.getVal() << "\n\n"; - cout << "ggF_xsec_h: " << ggF_xsec_h.getVal() << "\n"; - cout << "ggF_xsec_H: " << ggF_xsec_H.getVal() << "\n"; - cout << "ggF_xsec_A: " << ggF_xsec_A.getVal() << "\n"; - cout << "bbH_xsec_h: " << bbH_xsec_h.getVal() << "\n"; - cout << "bbH_xsec_H: " << bbH_xsec_H.getVal() << "\n"; - cout << "bbH_xsec_A: " << bbH_xsec_A.getVal() << "\n"; - cout << "brtautau_h: " << brtautau_h.getVal() << "\n"; - cout << "brtautau_H: " << brtautau_H.getVal() << "\n"; - cout << "brtautau_A: " << brtautau_A.getVal() << "\n\n"; - cout << "ggF_xsec_br_h: " << ggF_xsec_br_h.getVal() << "\n"; - cout << "ggF_xsec_br_H: " << ggF_xsec_br_H.getVal() << "\n"; - cout << "ggF_xsec_br_A: " << ggF_xsec_br_A.getVal() << "\n\n"; - cout << "bbH_xsec_br_h: " << bbH_xsec_br_h.getVal() << "\n"; - cout << "bbH_xsec_br_H: " << bbH_xsec_br_H.getVal() << "\n"; - cout << "bbH_xsec_br_A: " << bbH_xsec_br_A.getVal() << "\n\n"; - - - map xs_map; - xs_map["ggh"] = &ggF_xsec_br_h; - xs_map["ggH"] = &ggF_xsec_br_H; - xs_map["ggA"] = &ggF_xsec_br_A; - xs_map["bbh"] = &bbH_xsec_br_h; - xs_map["bbH"] = &bbH_xsec_br_H; - xs_map["bbA"] = &bbH_xsec_br_A; - - TH1::AddDirectory(false); - ch::CombineHarvester cb; - - Categories mt_cats = { - make_pair(8, "muTau_nobtag"), - make_pair(9, "muTau_btag")}; - - auto masses = ch::MassesFromRange( - "90,100,120-140:10,140-200:20,200-500:50,600-1000:100"); - - cout << "Adding observations..."; - cb.AddObservations({"*"}, {"htt"}, {"8TeV"}, {"mt"}, mt_cats); - cout << " done\n"; - - cout << "Adding background processes..."; - cb.AddProcesses({"*"}, {"htt"}, {"8TeV"}, {"mt"}, - {"ZTT", "W", "QCD", "ZL", "ZJ", "TT", "VV"}, mt_cats, false); - cout << " done\n"; - - cout << "Adding signal processes..."; - // Unlike in previous MSSM H->tautau analyses we will create a separate - // process for each Higgs in the datacards - map signal_types = { - {"ggH", {"ggh", "ggH", "ggA"}}, - {"bbH", {"bbh", "bbH", "bbA"}} - }; - cb.AddProcesses(masses, {"htt"}, {"8TeV"}, {"mt"}, - signal_types["ggH"], {mt_cats[0]}, true); - cb.AddProcesses(masses, {"htt"}, {"8TeV"}, {"mt"}, - signal_types["bbH"], {mt_cats[1]}, true); - cb.AddProcesses(masses, {"htt"}, {"8TeV"}, {"mt"}, - signal_types["bbH"], {mt_cats[0]}, true); - cb.AddProcesses(masses, {"htt"}, {"8TeV"}, {"mt"}, - signal_types["ggH"], {mt_cats[1]}, true); - cout << " done\n"; - - cout << "Adding systematic uncertainties..."; - ch::AddMSSMSystematics(cb); - cout << " done\n"; - - string file = aux_shapes + "Imperial/htt_mt.inputs-mssm-8TeV-0.root"; - - cout << "Extracting histograms from input root files..."; - cb.cp().era({"8TeV"}).backgrounds().ExtractShapes( - file, "$CHANNEL/$PROCESS", "$CHANNEL/$PROCESS_$SYSTEMATIC"); - - // We have to map each Higgs signal process to the same histogram, i.e: - // {ggh, ggH, ggA} --> ggH - // {bbh, bbH, bbA} --> bbH - cb.cp().era({"8TeV"}).process(signal_types["ggH"]).ExtractShapes( - file, "$CHANNEL/ggH$MASS", - "$CHANNEL/ggH$MASS_$SYSTEMATIC"); - 
cb.cp().era({"8TeV"}).process(signal_types["bbH"]).ExtractShapes( - file, "$CHANNEL/bbH$MASS", - "$CHANNEL/bbH$MASS_$SYSTEMATIC"); - cout << " done\n"; - - cout << "Scaling signal process rates for acceptance...\n"; - for (string e : {"8TeV"}) { - for (string p : {"ggH", "bbH"}) { - cout << "Scaling for process " << p << " and era " << e << "\n"; - auto gr = ch::TGraphFromTable( - "input/xsecs_brs/mssm_" + p + "_" + e + "_accept.txt", "mPhi", - "accept"); - cb.cp().process(signal_types[p]).era({e}).ForEachProc([&](ch::Process *proc) { - double m = boost::lexical_cast(proc->mass()); - proc->set_rate(proc->rate() * gr.Eval(m)); - }); - } - } - - cout << "Setting standardised bin names..."; - ch::SetStandardBinNames(cb); - cout << " done\n"; - - RooWorkspace ws("htt", "htt"); - - TFile demo("htt_mt_mssm_demo.root", "RECREATE"); - - bool do_morphing = true; - string postfix = "_eff_acc"; - map mass_var = { - {"ggh", &mh}, {"ggH", &mH}, {"ggA", &mA}, - {"bbh", &mh}, {"bbH", &mH}, {"bbA", &mA} - }; - if (do_morphing) { - auto bins = cb.bin_set(); - for (auto b : bins) { - auto procs = cb.cp().bin({b}).signals().process_set(); - for (auto p : procs) { - string pdf_name = b + "_" + p + "_morph"; - ch::BuildRooMorphing(ws, cb, b, p, *(mass_var[p]), - "eff_acc", true, true, false, &demo); - std::string prod_name = pdf_name + "_eff_acc"; - RooAbsReal *norm = ws.function(prod_name.c_str()); - RooProduct full_norm((pdf_name + "_norm").c_str(), "", - RooArgList(*norm, *(xs_map[p]))); - ws.import(full_norm, RooFit::RecycleConflictNodes()); - } - } - } - demo.Close(); - cb.AddWorkspace(ws); - cb.cp().signals().ExtractPdfs(cb, "htt", "$BIN_$PROCESS_morph"); - cb.PrintAll(); - - // cout << "Adding bbb...\n"; - // cb.cp().process({"W", "QCD", "ZTT", "ZL", "ZJ", "TT", "VV"}) - // .MergeBinErrors(0.05, 0.4); - // cb.cp().process({"W", "QCD", "ZTT", "ZL", "ZJ", "TT", "VV"}) - // .AddBinByBin(0.05, true, &cb); - // cout << "...done\n"; - - // ch::CombineHarvester gg_plot = std::move(cb.cp().era({"8TeV"}).bin_id({8})); - // ch::CombineHarvester bb_plot = std::move(cb.cp().era({"8TeV"}).bin_id({9})); - // TH1F gg_dat = gg_plot.GetObservedShape(); - // TH1F bb_dat = bb_plot.GetObservedShape(); - // TH1F ggh = RestoreBinning(gg_plot.cp().process({"ggh"}).GetShape(), gg_dat); - // TH1F ggH = RestoreBinning(gg_plot.cp().process({"ggH"}).GetShape(), gg_dat); - // TH1F ggA = RestoreBinning(gg_plot.cp().process({"ggA"}).GetShape(), gg_dat); - // TH1F ggX = RestoreBinning(gg_plot.cp().process({"ggh", "ggH", "ggA"}).GetShape(), gg_dat); - // TH1F bbh = RestoreBinning(bb_plot.cp().process({"bbh"}).GetShape(), bb_dat); - // TH1F bbH = RestoreBinning(bb_plot.cp().process({"bbH"}).GetShape(), bb_dat); - // TH1F bbA = RestoreBinning(bb_plot.cp().process({"bbA"}).GetShape(), bb_dat); - // TH1F bbX = RestoreBinning(bb_plot.cp().process({"bbh", "bbH", "bbA"}).GetShape(), bb_dat); - - string folder = "output/mssm_test"; - boost::filesystem::create_directories(folder); - - - TFile output((folder + "/htt_mt.input.mssm.root").c_str(), "RECREATE"); - // ggh.Write("ggh"); - // ggH.Write("ggH"); - // ggA.Write("ggA"); - // ggX.Write("ggX"); - // bbh.Write("bbh"); - // bbH.Write("bbH"); - // bbA.Write("bbA"); - // bbX.Write("bbX"); - cb.cp().mass({"*"}).WriteDatacard(folder + "/htt_mt_mssm.txt", output); - auto bins = cb.bin_set(); - for (auto b : bins) { - cb.cp().bin({b}).mass({"*"}).WriteDatacard( - folder + "/" + b + ".txt", output); - } - output.Close(); -} - - diff --git a/CombinePdfs/bin/SMLegacyMorphing.cpp 
diff --git a/CombinePdfs/bin/SMLegacyMorphing.cpp b/CombinePdfs/bin/SMLegacyMorphing.cpp
index 6e70882f1e5..4824ef8f71c 100644
--- a/CombinePdfs/bin/SMLegacyMorphing.cpp
+++ b/CombinePdfs/bin/SMLegacyMorphing.cpp
@@ -238,6 +238,7 @@ int main() {
   RooRealVar mh("MH", "", 125, 90, 145);
 
   // TFile demo("htt_sm_morphing_debug.root", "RECREATE");
+  //! [part1]
   bool do_morphing = true;
   if (do_morphing) {
     // RooMsgService::instance().setGlobalKillBelow(RooFit::WARNING);
@@ -253,6 +254,7 @@ int main() {
     // demo.Close();
   cb.AddWorkspace(ws);
   cb.cp().signals().ExtractPdfs(cb, "htt", "$BIN_$PROCESS_morph");
+  //! [part1]
 
   string folder = "output/sm_cards_morphed";
   boost::filesystem::create_directories(folder);
diff --git a/CombinePdfs/input b/CombinePdfs/input
deleted file mode 120000
index 796d82ff0d5..00000000000
--- a/CombinePdfs/input
+++ /dev/null
@@ -1 +0,0 @@
-../CombineTools/input
\ No newline at end of file
diff --git a/CombinePdfs/python/AZh.py b/CombinePdfs/python/AZh.py
deleted file mode 100644
index bd5ceea436e..00000000000
--- a/CombinePdfs/python/AZh.py
+++ /dev/null
@@ -1,160 +0,0 @@
-from HiggsAnalysis.CombinedLimit.PhysicsModel import *
-import os
-import ROOT
-import math
-import itertools
-
-class MSSMAZhHiggsModel(PhysicsModel):
-    def __init__(self, modelname):
-        PhysicsModel.__init__(self)
-        # Define the known production and decay processes
-        # These are strings we will look for in the process names to
-        # determine the correct normalisation scaling
-        self.ERAS = ['7TeV', '8TeV', '13TeV']
-        # We can't add anything here that we're not actually going to have
-        # loaded from the model files. Should integrate this into the options
-        # somehow.
-        eras = ['8TeV']
-        self.modelname = modelname
-        self.PROC_SETS = []
-        self.PROC_SETS.append(
-            ([ 'ggA' ], [ 'AZhLLtautau' ], eras)
-            )
-
-    def setModelBuilder(self, modelBuilder):
-        """We're not supposed to overload this method, but we have to because
-        this is our only chance to import things into the workspace while it
-        is completely empty. This is primarily so we can define some of our MSSM
-        Higgs boson masses as functions instead of the free variables that would
-        be imported later as dependents of the normalisation terms."""
-        # First call the parent class implementation
-        PhysicsModel.setModelBuilder(self, modelBuilder)
-        if self.modelname == "low-tb-high_8TeV" : filename = 'out.low-tb-high-8TeV-tanbAll-nnlo.root'
-        if self.modelname == "low-tb-high_13TeV" : filename = 'low-tb-high_13TeV.root'
-        if self.modelname == "hMSSM_8TeV" : filename = 'hMSSM_8TeV.root'
-        if self.modelname == "hMSSM_13TeV" : filename = 'hMSSM_13TeV.root'
-        self.buildModel(os.environ['CMSSW_BASE']+'/src/auxiliaries/models/'+filename)
-
-    def doHistFunc(self, name, hist, varlist, interpolate=1):
-        "method to conveniently create a RooHistFunc from a TH1/TH2 input"
-        dh = ROOT.RooDataHist('dh_%s'%name, 'dh_%s'%name, ROOT.RooArgList(*varlist), ROOT.RooFit.Import(hist))
-        hfunc = ROOT.RooHistFunc(name, name, ROOT.RooArgSet(*varlist), dh)
-        hfunc.setInterpolationOrder(interpolate)
-        self.modelBuilder.out._import(hfunc, ROOT.RooFit.RecycleConflictNodes())
-        return self.modelBuilder.out.function(name)
-
-    def buildModel(self, filename):
-        mA = ROOT.RooRealVar('mA', 'mA', 300.)
-        tanb = ROOT.RooRealVar('tanb', 'tanb', 2.)
-        f = ROOT.TFile(filename)
-        #Take care of different histogram names for hMSSM:
-        if 'hMSSM' in self.modelname or '13TeV' in self.modelname or '14TeV' in self.modelname:
-            ggF_xsec_A_str = "xs_gg_A"
-            brtautau_h_str = "br_h_tautau"
-            brZh0_A_str="br_A_Zh"
-        else :
-            ggF_xsec_A_str = "h_ggF_xsec_A"
-            brtautau_h_str = "h_brtautau_h"
-            brZh0_A_str="h_brZh0_A"
-
-        ggF_xsec_A = self.doHistFunc('xsec_ggA_8TeV', f.Get(ggF_xsec_A_str), [mA, tanb])
-        total_br_hist = ROOT.TH2F()
-        total_br_hist = (f.Get(brtautau_h_str)) * (f.Get(brZh0_A_str))
-        total_br_hist *= 0.10099
-        brAZhLLtautau = self.doHistFunc('br_AZhLLtautau', total_br_hist, [mA, tanb])
-
-
-        # Next step: creating theory uncertainties
-        #  1) for each syst source build kappaHi and kappaLo TH1s by doing a *careful* divide
-        #     between nominal and shifted TH2s => "kappa_hi_xsec_ggh_8TeV_scale"
-        #  2) create nuisance parameter (QCD_scale_ggH?) and constraint. Like:
-        #       def preProcessNuisances(self,nuisances):
-        #           if self.add_bbH and not any(row for row in nuisances if row[0] == "QCDscale_bbH"):
-        #               nuisances.append(("QCDscale_bbH",False, "param", [ "0", "1"], [] ) )
-        #     --> probably have to create the param ourself first, preProcessNuisances doesn't
-        #     happen until later (after doParametersOfInterest)
-        #  3) create AsymPow and add to the norm product
-
-    def doParametersOfInterest(self):
-        """Create POI and other parameters, and define the POI set."""
-        # print '>> In doParametersOfInterest, workspace contents:'
-        self.modelBuilder.doVar("r[1,0,20]")
-        self.modelBuilder.out.var('mA').setConstant(True)
-        self.modelBuilder.out.var('tanb').setConstant(True)
-        for proc_set in self.PROC_SETS:
-            for (P, D, E) in itertools.product(*proc_set):
-                print (P, D, E)
-                terms = ['xsec_%s_%s' % (P, E), 'br_%s'%D]
-                terms += ['r']
-                self.modelBuilder.factory_('prod::scaling_%s_%s_%s(%s)' % (P,D,E,','.join(terms)))
-                self.modelBuilder.out.function('scaling_%s_%s_%s' % (P,D,E)).Print('')
-
-        # self.modelBuilder.out.Print()
-        self.modelBuilder.doSet('POI', 'r')
-
-    def getHiggsProdDecMode(self, bin, process):
-        """Return a triple of (production, decay, energy)"""
-        print process
-        P = ''
-        D = ''
-        if "_" in process:
-            (P, D) = process.split("_")
-        else:
-            raise RuntimeError, 'Expected signal process %s to be of the form PROD_DECAY' % process
-        E = None
-        for era in self.ERAS:
-            if era in bin:
-                if E: raise RuntimeError, "Validation Error: bin string %s contains multiple known energies" % bin
-                E = era
-        if not E:
-            raise RuntimeError, 'Did not find a valid energy in bin string %s' % bin
-        return (P, D, E)
-
-    def getYieldScale(self,bin,process):
-        if self.DC.isSignal[process]:
-            (P, D, E) = self.getHiggsProdDecMode(bin, process)
-            scaling = 'scaling_%s_%s_%s' % (P, D, E)
-            print 'Scaling %s/%s as %s' % (bin, process, scaling)
-            return scaling
-        else:
-            return 1
-
-class BRChargedHiggs(PhysicsModel):
-    def __init__(self):
-        PhysicsModel.__init__(self)
-
-    def doParametersOfInterest(self):
-        """Create POI and other parameters, and define the POI set."""
-        self.modelBuilder.doVar('BR[0,0,1]');
-
-        self.modelBuilder.doSet('POI','BR')
-
-        self.modelBuilder.factory_('expr::Scaling_HH("@0*@0", BR)')
-        self.modelBuilder.factory_('expr::Scaling_WH("2 * (1-@0)*@0", BR)')
-        #self.modelBuilder.factory_('expr::Scaling_tt("(1-@0)*(1-@0)", BR)')
-        self.modelBuilder.factory_('expr::Scaling_tt("1 - (@0+@1)", Scaling_HH, Scaling_WH)')
-
-        self.processScaling = { 'tt_ttHchHch':'HH', 'tt_ttHchW':'WH', 'tt':'tt' }
-
-        # self.modelBuilder.out.Print()
-
getYieldScale(self,bin,process): - for prefix, model in self.processScaling.iteritems(): - if process == prefix: - print 'Scaling %s/%s as %s' % (bin, process, 'Scaling_'+model) - return 'Scaling_'+model - return 1 - - - - -#8TeV -hMSSM_8TeV = MSSMAZhHiggsModel("hMSSM_8TeV") -lowtbhigh_8TeV = MSSMAZhHiggsModel("low-tb-high_8TeV") - -#13TeV -hMSSM_13TeV = MSSMAZhHiggsModel("hMSSM_13TeV") -lowtbhigh_13TeV = MSSMAZhHiggsModel("low-tb-high_13TeV") - -brChargedHiggs = BRChargedHiggs() - diff --git a/CombinePdfs/python/Hhh.py b/CombinePdfs/python/Hhh.py deleted file mode 100644 index a49f071f4a3..00000000000 --- a/CombinePdfs/python/Hhh.py +++ /dev/null @@ -1,169 +0,0 @@ -from HiggsAnalysis.CombinedLimit.PhysicsModel import * -import os -import ROOT -import math -import itertools - -class MSSMHhhHiggsModel(PhysicsModel): - def __init__(self, modelname): - PhysicsModel.__init__(self) - # Define the known production and decay processes - # These are strings we will look for in the process names to - # determine the correct normalisation scaling - self.ERAS = ['7TeV', '8TeV', '13TeV'] - # We can't add anything here that we're not actually going to have - # loaded from the model files. Should integrate this into the options - # somehow. - eras = ['8TeV'] - self.modelname = modelname - self.PROC_SETS = [] - self.PROC_SETS.append( - ([ 'ggH' ], [ 'Hhhbbtautau' ], eras) - ) - - def setModelBuilder(self, modelBuilder): - """We're not supposed to overload this method, but we have to because - this is our only chance to import things into the workspace while it - is completely empty. This is primary so we can define some of our MSSM - Higgs boson masses as functions instead of the free variables that would - be imported later as dependents of the normalisation terms.""" - # First call the parent class implementation - PhysicsModel.setModelBuilder(self, modelBuilder) - if self.modelname == "low-tb-high_8TeV" : filename = 'out.low-tb-high-8TeV-tanbAll-nnlo.root' - if self.modelname == "low-tb-high_13TeV" : filename = 'low-tb-high_13TeV.root' - if self.modelname == "hMSSM_8TeV" : filename = 'hMSSM_8TeV.root' - if self.modelname == "hMSSM_13TeV" : filename = 'hMSSM_13TeV.root' - self.buildModel(os.environ['CMSSW_BASE']+'/src/auxiliaries/models/'+filename) - - def doHistFunc(self, name, hist, varlist, interpolate=1): - "method to conveniently create a RooHistFunc from a TH1/TH2 input" - dh = ROOT.RooDataHist('dh_%s'%name, 'dh_%s'%name, ROOT.RooArgList(*varlist), ROOT.RooFit.Import(hist)) - hfunc = ROOT.RooHistFunc(name, name, ROOT.RooArgSet(*varlist), dh) - hfunc.setInterpolationOrder(interpolate) - self.modelBuilder.out._import(hfunc, ROOT.RooFit.RecycleConflictNodes()) - return self.modelBuilder.out.function(name) - - def buildModel(self, filename): - mA = ROOT.RooRealVar('mA', 'mA', 300.) - tanb = ROOT.RooRealVar('tanb', 'tanb', 1.) 
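# A quick numerical check of the BRChargedHiggs scalings defined above: for
# BR(t -> H+b) = B the ttbar sample splits as B^2 (HH), 2B(1-B) (WH) and
# (1-B)^2 (tt), which is exactly what 1 - (HH + WH) evaluates to. The value
# of B below is made up for illustration.
B = 0.1                                  # hypothetical BR(t -> H+b)
scale_HH = B * B
scale_WH = 2. * (1. - B) * B
scale_tt = 1. - (scale_HH + scale_WH)
assert abs(scale_tt - (1. - B) ** 2) < 1e-12
print scale_HH, scale_WH, scale_tt       # 0.01 0.18 0.81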
- f = ROOT.TFile(filename) - #Take care of different histogram names for hMSSM: - if 'hMSSM' in self.modelname or '13TeV' in self.modelname or '14TeV' in self.modelname: - mH_str = "m_H" - mh_str = "m_h" - ggF_xsec_H_str = "xs_gg_H" - brtautau_h_str = "br_h_tautau" - brbb_h_str = "br_h_bb" - brh0h0_H_str="br_H_hh" - else : - mH_str = "h_mH" - mh_str = "h_mh" - ggF_xsec_H_str = "h_ggF_xsec_H" - brtautau_h_str = "h_brtautau_h" - brbb_h_str = "h_brbb_h" - brh0h0_H_str="h_brh0h0_H" - - mH = self.doHistFunc('mH', f.Get(mH_str), [mA, tanb]) - mh = self.doHistFunc('mh', f.Get(mh_str), [mA, tanb]) - ggF_xsec_H = self.doHistFunc('xsec_ggH_8TeV', f.Get(ggF_xsec_H_str), [mA, tanb]) - total_br_hist = ROOT.TH2F() - total_br_hist = (f.Get(brtautau_h_str)) * (f.Get(brbb_h_str)) * (f.Get(brh0h0_H_str)) - total_br_hist *= 2 - brHhhbbtautau = self.doHistFunc('br_Hhhbbtautau', total_br_hist, [mA, tanb]) - - # Next step: creating theory uncertainties - # 1) for each syst source build kappaHi and kappaLo TH1s by doing a *careful* divide - # between nominal and shifted TH2s => "kappa_hi_xsec_ggh_8TeV_scale" - # 2) create nuisance parameter (QCD_scale_ggH?) and constraint. Like: - # def preProcessNuisances(self,nuisances): - # if self.add_bbH and not any(row for row in nuisances if row[0] == "QCDscale_bbH"): - # nuisances.append(("QCDscale_bbH",False, "param", [ "0", "1"], [] ) ) - # --> probably have to create the param ourself first, preProcessNuisances doesn't - # happen until later (after doParametersOfInterest) - # 3) create AsymPow and add to the norm product - - def doParametersOfInterest(self): - """Create POI and other parameters, and define the POI set.""" - # print '>> In doParametersOfInterest, workspace contents:' - self.modelBuilder.doVar("r[1,0,20]") - self.modelBuilder.out.var('mA').setConstant(True) - self.modelBuilder.out.var('tanb').setConstant(True) - for proc_set in self.PROC_SETS: - for (P, D, E) in itertools.product(*proc_set): - print (P, D, E) - terms = ['xsec_%s_%s' % (P, E), 'br_%s'%D] - terms += ['r'] - self.modelBuilder.factory_('prod::scaling_%s_%s_%s(%s)' % (P,D,E,','.join(terms))) - self.modelBuilder.out.function('scaling_%s_%s_%s' % (P,D,E)).Print('') - - # self.modelBuilder.out.Print() - self.modelBuilder.doSet('POI', 'r') - - def getHiggsProdDecMode(self, bin, process): - """Return a triple of (production, decay, energy)""" - print process - P = '' - D = '' - if "_" in process: - (P, D) = process.split("_") - else: - raise RuntimeError, 'Expected signal process %s to be of the form PROD_DECAY' % process - E = None - for era in self.ERAS: - if era in bin: - if E: raise RuntimeError, "Validation Error: bin string %s contains multiple known energies" % bin - E = era - if not E: - raise RuntimeError, 'Did not find a valid energy in bin string %s' % bin - return (P, D, E) - - def getYieldScale(self,bin,process): - if self.DC.isSignal[process]: - print "is signal" - # something going wrong here - (P, D, E) = self.getHiggsProdDecMode(bin, process) - scaling = 'scaling_%s_%s_%s' % (P, D, E) - print 'Scaling %s/%s as %s' % (bin, process, scaling) - return scaling - else: - return 1 - -class BRChargedHiggs(PhysicsModel): - def __init__(self): - PhysicsModel.__init__(self) - - def doParametersOfInterest(self): - """Create POI and other parameters, and define the POI set.""" - self.modelBuilder.doVar('BR[0,0,1]'); - - self.modelBuilder.doSet('POI','BR') - - self.modelBuilder.factory_('expr::Scaling_HH("@0*@0", BR)') - self.modelBuilder.factory_('expr::Scaling_WH("2 * (1-@0)*@0", BR)') - 
#self.modelBuilder.factory_('expr::Scaling_tt("(1-@0)*(1-@0)", BR)') - self.modelBuilder.factory_('expr::Scaling_tt("1 - (@0+@1)", Scaling_HH, Scaling_WH)') - - self.processScaling = { 'tt_ttHchHch':'HH', 'tt_ttHchW':'WH', 'tt':'tt' } - - # self.modelBuilder.out.Print() - - def getYieldScale(self,bin,process): - for prefix, model in self.processScaling.iteritems(): - if process == prefix: - print 'Scaling %s/%s as %s' % (bin, process, 'Scaling_'+model) - return 'Scaling_'+model - return 1 - - - - -#8TeV -hMSSM_8TeV = MSSMHhhHiggsModel("hMSSM_8TeV") -lowtbhigh_8TeV = MSSMHhhHiggsModel("low-tb-high_8TeV") - -#13TeV -hMSSM_13TeV = MSSMHhhHiggsModel("hMSSM_13TeV") -lowtbhigh_13TeV = MSSMHhhHiggsModel("low-tb-high_13TeV") - -brChargedHiggs = BRChargedHiggs() - diff --git a/CombinePdfs/python/MSSM.py b/CombinePdfs/python/MSSM.py index 0b3b4a272bf..c3cedff441d 100644 --- a/CombinePdfs/python/MSSM.py +++ b/CombinePdfs/python/MSSM.py @@ -1,89 +1,148 @@ from HiggsAnalysis.CombinedLimit.PhysicsModel import * +import CombineHarvester.CombineTools.plotting as plot import os import ROOT import math import itertools +import pprint +import sys +from collections import defaultdict class MSSMHiggsModel(PhysicsModel): - def __init__(self, modelname): + def __init__(self): PhysicsModel.__init__(self) + ROOT.gROOT.SetBatch(ROOT.kTRUE) + plot.ModTDRStyle(l=0.13, b=0.10, r=0.19) + ROOT.gStyle.SetNdivisions(510, "Z") + plot.SetBirdPalette() + self.filePrefix = '' + self.modelFiles = {} + self.h_dict = {} + self.h_dict[0] = { + 'mH' : 'h_mH', + 'mh' : 'h_mh', + 'mHp' : 'h_mHp', + 'br_tHpb' : 'h_brHpb_t', + 'br_Hptaunu' : 'h_brtaunu_Hp', + 'br_Hhh' : 'h_brh0h0_H', + 'br_AZh' : 'h_brZh0_A' + } + self.h_dict[1] = { + 'mH' : 'm_H', + 'mh' : 'm_h', + 'mHp' : 'm_Hp', + 'br_tHpb' : 'br_t_Hpb', + 'br_Hptaunu' : 'br_Hp_taunu', + 'br_Hhh' : 'br_H_hh', + 'br_AZh' : 'br_A_Zh' + } + for X in ['h', 'H', 'A']: + self.h_dict[0].update({ + 'xs_gg%s'%X : 'h_ggF_xsec_%s'%X, + 'xs_bb4f%s'%X : 'h_bbH4f_xsec_%s'%X, + 'xs_bb5f%s'%X : 'h_bbH_xsec_%s'%X, + 'br_%stautau'%X : 'h_brtautau_%s'%X, + 'br_%sbb'%X : 'h_brbb_%s'%X, + 'xs_gg%s_scale_lo'%X : 'h_ggF_xsec20_%s'%X, # nominal - uncert + 'xs_gg%s_scale_hi'%X : 'h_ggF_xsec05_%s'%X, # nominal + uncert + 'xs_bb4f%s_scale_lo'%X : 'h_bbH4f_xsec_%s_low'%X, # nominal + uncert + 'xs_bb4f%s_scale_hi'%X : 'h_bbH4f_xsec_%s_high'%X, # nominal - uncert + 'xs_bb5f%s_scale_lo'%X : 'h_bbH_mudown_%s'%X, # nominal + uncert + 'xs_bb5f%s_scale_hi'%X : 'h_bbH_muup_%s'%X, # nominal - uncert + 'xs_gg%s_pdf_hi'%X : 'h_ggF_pdfup_%s'%X, # abs(+uncert) + 'xs_gg%s_pdf_lo'%X : 'h_ggF_pdfdown_%s'%X, # abs(-uncert) + 'xs_gg%s_alphas_hi'%X : 'h_ggF_alphasup_%s'%X, # abs(+uncert) + 'xs_gg%s_alphas_lo'%X : 'h_ggF_alphasdown_%s'%X, # abs(-uncert) + 'xs_bb5f%s_pdf_hi'%X : 'h_bbH_pdf68up_%s'%X, # abs(+uncert) + 'xs_bb5f%s_pdf_lo'%X : 'h_bbH_pdf68down_%s'%X, # abs(-uncert) + 'xs_bb5f%s_alphas_hi'%X : 'h_bbH_pdfalphas68up_%s'%X, # abs(+uncert) + 'xs_bb5f%s_alphas_lo'%X : 'h_bbH_pdfalphas68down_%s'%X, # abs(-uncert) + }) + self.h_dict[1].update({ + 'xs_gg%s'%X : 'xs_gg_%s'%X, + 'xs_bb4f%s'%X : 'xs_bb4F_%s'%X, + 'xs_bb5f%s'%X : 'xs_bb5F_%s'%X, + 'br_%stautau'%X : 'br_%s_tautau'%X, + 'br_%sbb'%X : 'br_%s_bb'%X, + 'xs_gg%s_scale_lo'%X : 'xs_gg_%s_scaleDown'%X, # nominal - uncert + 'xs_gg%s_scale_hi'%X : 'xs_gg_%s_scaleUp'%X, # nominal + uncert + 'xs_bb4f%s_scale_lo'%X : 'xs_bb4F_%s_scaleDown'%X, # nominal + uncert + 'xs_bb4f%s_scale_hi'%X : 'xs_bb4F_%s_scaleUp'%X, # nominal - uncert + 'xs_bb5f%s_scale_lo'%X : 
'xs_bb5F_%s_scaleDown'%X, # nominal + uncert + 'xs_bb5f%s_scale_hi'%X : 'xs_bb5F_%s_scaleUp'%X, # nominal - uncert + 'xs_gg%s_pdf_hi'%X : 'xs_gg_%s_pdfasUp'%X, # abs(+uncert) + 'xs_gg%s_pdf_lo'%X : 'xs_gg_%s_pdfasDown'%X, # abs(-uncert) + 'xs_gg%s_alphas_hi'%X : '', + 'xs_gg%s_alphas_lo'%X : '', + 'xs_bb5f%s_pdf_hi'%X : 'xs_bb5F_%s_pdfasUp'%X, # abs(+uncert) + 'xs_bb5f%s_pdf_lo'%X : 'xs_bb5F_%s_pdfasDown'%X, # abs(-uncert) + 'xs_bb5f%s_alphas_hi'%X : '', + 'xs_bb5f%s_alphas_lo'%X : '' + }) # Define the known production and decay processes # These are strings we will look for in the process names to # determine the correct normalisation scaling self.ERAS = ['7TeV', '8TeV', '13TeV', '14TeV'] - # We can't add anything here that we're not actually going to have - # loaded from the model files. Should integrate this into the options - # somehow. - eras = ['8TeV'] - self.modelname = modelname self.PROC_SETS = [] - self.PROC_SETS.append( - ([ 'ggh', 'bbh' ], [ 'htautau' ], eras) - ) - self.PROC_SETS.append( - ([ 'ggH', 'bbH' ], [ 'Htautau' ], eras) - ) - self.PROC_SETS.append( - ([ 'ggA', 'bbA' ], [ 'Atautau' ], eras) - ) - # self.model = 'out.mhmax-mu+200-8TeV-tanbHigh-nnlo.root' - # self.era = '8TeV' - - - #def setPhysicsOptions(self, physOptions): - # """ - # Options are: model. - # """ - # for po in physOptions: - # if po.startswith("model="): self.model = po.replace("model=", "") - # if po.startswith("era=") : self.era = po.replace("era=", "") - + self.SYST_DICT = defaultdict(list) + self.NUISANCES = set() + self.SMSignal = "SM125" #SM signal + self.dbg_file = None + self.mk_plots = False + + def setPhysicsOptions(self,physOptions): + for po in physOptions: + if po.startswith('filePrefix='): + self.filePrefix = po.replace('filePrefix=', '') + print 'Set file prefix to: %s' % self.filePrefix + if po.startswith('modelFiles='): + cfgList = po.replace('modelFiles=', '').split(':') + for cfg in cfgList: + cfgSplit = cfg.split(',') + if len(cfgSplit) != 3: + raise RuntimeError, 'Model file argument %s should be in the format ERA,FILE,VERSION' % cfg + self.modelFiles[cfgSplit[0]] = (cfgSplit[1], int(cfgSplit[2])) + pprint.pprint(self.modelFiles) + if po.startswith('debugFile='): + self.dbg_file = ROOT.TFile(po.replace('debugFile=', ''), 'RECREATE') + print 'Write debug output to: %s' % self.dbg_file.GetName() + if po.startswith("makePlots"): + self.mk_plots = True + def setModelBuilder(self, modelBuilder): - """We're not supposed to overload this method, but we have to because + """We're not supposed to overload this method, but we have to because this is our only chance to import things into the workspace while it is completely empty. 
This is primary so we can define some of our MSSM Higgs boson masses as functions instead of the free variables that would be imported later as dependents of the normalisation terms.""" # First call the parent class implementation PhysicsModel.setModelBuilder(self, modelBuilder) - if self.modelname == "mhmax_7TeV" : filename = 'out.mhmax-mu+200-7TeV-tanbHigh-nnlo.root' - if self.modelname == "mhmax_8TeV" : filename = 'out.mhmax-mu+200-8TeV-tanbHigh-nnlo.root' - if self.modelname == "mhmax_13TeV" : filename = 'newmhmax_mu200_13TeV.root' - if self.modelname == "mhmax_14TeV" : filename = 'newmhmax_mu200_14TeV.root' - if self.modelname == "mhmodp_7TeV" : filename = 'out.mhmodp-7TeV-tanbHigh-nnlo.root' - if self.modelname == "mhmodp_8TeV" : filename = 'out.mhmodp-8TeV-tanbHigh-nnlo.root' - if self.modelname == "mhmodp_13TeV" : filename = 'mhmodp_mu200_13TeV.root' - if self.modelname == "mhmodp_14TeV" : filename = 'mhmodp_mu200_14TeV.root' - if self.modelname == "mhmodm_7TeV" : filename = 'out.mhmodm-7TeV-tanbHigh-nnlo.root' - if self.modelname == "mhmodm_8TeV" : filename = 'out.mhmodm-8TeV-tanbHigh-nnlo.root' - if self.modelname == "mhmodm_13TeV" : filename = 'mhmodm_13TeV.root' - if self.modelname == "mhmodm_14TeV" : filename = 'mhmodm_14TeV.root' - if self.modelname == "lightstau_7TeV" : filename = 'out.lightstau1-7TeV-tanbHigh-nnlo.root' - if self.modelname == "lightstau_8TeV" : filename = 'out.lightstau1-8TeV-tanbHigh-nnlo.root' - if self.modelname == "lightstau_13TeV" : filename = 'lightstau1_13TeV.root' - if self.modelname == "lightstau_14TeV" : filename = 'lightstau1_14TeV.root' - if self.modelname == "lightstopmod_7TeV" : filename = 'out.lightstopmod-7TeV-tanbHigh-nnlo.root' - if self.modelname == "lightstopmod_8TeV" : filename = 'out.lightstopmod-8TeV-tanbHigh-nnlo.root' - if self.modelname == "lightstopmod_13TeV" : filename = 'lightstopmpd_13TeV.root' - if self.modelname == "lightstopmod_14TeV" : filename = 'lightstopmod_14TeV.root' - if self.modelname == "tauphobic_7TeV" : filename = 'out.tauphobic-7TeV-tanbAll-nnlo.root' - if self.modelname == "tauphobic_8TeV" : filename = 'out.tauphobic-8TeV-tanbAll-nnlo.root' - if self.modelname == "tauphobic_13TeV" : filename = 'tauphobic_13TeV.root' - if self.modelname == "tauphobic_14TeV" : filename = 'tauphobic_14TeV.root' - if self.modelname == "hMSSM_8TeV" : filename = 'hMSSM_8TeV.root' - if self.modelname == "hMSSM_13TeV" : filename = 'hMSSM_13TeV.root' - if self.modelname == "low-tb-high_8TeV" : filename = 'low-tb-high_8TeV.root' - if self.modelname == "low-tb-high_13TeV" : filename = 'low-tb-high_13TeV.root' - self.buildModel(os.environ['CMSSW_BASE']+'/src/auxiliaries/models/'+filename) - #self.buildModel(os.environ['CMSSW_BASE']+'/src/auxiliaries/models/'+self.model) - - def doHistFunc(self, name, hist, varlist, interpolate=1): + self.buildModel() + + #! [part2] + def doHistFunc(self, name, hist, varlist, interpolate=0): "method to conveniently create a RooHistFunc from a TH1/TH2 input" + print 'Doing histFunc %s...' 
% name + if self.dbg_file: + self.dbg_file.WriteTObject(hist, name) + if self.mk_plots: + canv = ROOT.TCanvas(name, name) + pads = plot.OnePad() + hist.GetXaxis().SetTitle(varlist[0].GetTitle()) + hist.GetYaxis().SetTitle(varlist[1].GetTitle()) + hist.Draw('COLZ') + plot.DrawTitle(pads[0], name, 3) + canv.Print('model_'+name+'.pdf') + canv.Print('model_'+name+'.png') + pads[0].SetLogz(True) + canv.Print('model_'+name+'_log.pdf') + canv.Print('model_'+name+'_log.png') dh = ROOT.RooDataHist('dh_%s'%name, 'dh_%s'%name, ROOT.RooArgList(*varlist), ROOT.RooFit.Import(hist)) hfunc = ROOT.RooHistFunc(name, name, ROOT.RooArgSet(*varlist), dh) hfunc.setInterpolationOrder(interpolate) self.modelBuilder.out._import(hfunc, ROOT.RooFit.RecycleConflictNodes()) return self.modelBuilder.out.function(name) + #! [part2] def doAsymPow(self, name, h_kappa_lo, h_kappa_hi, param, varlist): "create AsymPow rate scaler given two TH2 inputs corresponding to kappa_hi and kappa_lo" @@ -91,10 +150,10 @@ def doAsymPow(self, name, h_kappa_lo, h_kappa_hi, param, varlist): if not param_var: self.modelBuilder.doVar('%s[0,-7,7]'%param) param_var = self.modelBuilder.out.var(param) - print param_var + self.NUISANCES.add(param) hi = self.doHistFunc('%s_hi'%name, h_kappa_hi, varlist) lo = self.doHistFunc('%s_lo'%name, h_kappa_lo, varlist) - asym = ROOT.AsymPow('systeff_%s'%name, '', lo, hi, param_var) + asym = ROOT.AsymPow(name, '', lo, hi, param_var) self.modelBuilder.out._import(asym) return self.modelBuilder.out.function('systeff_%s'%name) @@ -103,11 +162,48 @@ def santanderMatching(self, h4f, h5f, mass = None): for x in xrange(1, h4f.GetNbinsX() + 1): for y in xrange(1, h4f.GetNbinsY() +1): mh = h4f.GetXaxis().GetBinCenter(x) if mass is None else mass.GetBinContent(x, y) - t = math.log(mh / 4.92) - 2. - fourflav = h4f.GetBinContent(x, y) - fiveflav = h5f.GetBinContent(x, y) - sigma = (1. / (1. + t)) * (fourflav + t * fiveflav) - res.SetBinContent(x, y, sigma) + if mh <= 0: + print 'santanderMatching: Have mh = %f at (%f,%f), using h4f value' % (mh, h4f.GetXaxis().GetBinCenter(x), h4f.GetYaxis().GetBinCenter(y)) + res.SetBinContent(x, y, h4f.GetBinContent(x, y)) + else: + t = math.log(mh / 4.92) - 2. + fourflav = h4f.GetBinContent(x, y) + fiveflav = h5f.GetBinContent(x, y) + sigma = (1. / (1. + t)) * (fourflav + t * fiveflav) + res.SetBinContent(x, y, sigma) + return res + + def santanderPdfUncert(self, h5f, mass = None): + res = h5f.Clone() + for x in xrange(1, h5f.GetNbinsX() + 1): + for y in xrange(1, h5f.GetNbinsY() +1): + mh = h5f.GetXaxis().GetBinCenter(x) if mass is None else mass.GetBinContent(x, y) + if mh <= 0: + print 'santanderPdfUncert: Have mh = %f at (%f,%f), using h5f value' % (mh, h5f.GetXaxis().GetBinCenter(x), h5f.GetYaxis().GetBinCenter(y)) + res.SetBinContent(x, y, h5f.GetBinContent(x, y)) + else: + t = math.log(mh / 4.92) - 2. + fiveflav = h5f.GetBinContent(x, y) + sigma = (1. / (1. 
+ t)) * (fiveflav) + res.SetBinContent(x, y, sigma) + return res + + def santanderPdfUncert2(self, h5f, h5fnom, mass = None): + """For use with the new-style model files in which the pdf + uncertanties are given as absolute cross sections instead of the + difference with respect to the nominal.""" + res = h5f.Clone() + for x in xrange(1, h5f.GetNbinsX() + 1): + for y in xrange(1, h5f.GetNbinsY() +1): + mh = h5f.GetXaxis().GetBinCenter(x) if mass is None else mass.GetBinContent(x, y) + if mh <= 0: + print 'santanderPdfUncert2: Have mh = %f at (%f,%f), using h5f value' % (mh, h5f.GetXaxis().GetBinCenter(x), h5f.GetYaxis().GetBinCenter(y)) + res.SetBinContent(x, y, h5f.GetBinContent(x, y)) + else: + t = math.log(mh / 4.92) - 2. + fiveflav = abs(h5f.GetBinContent(x, y) - h5fnom.GetBinContent(x, y)) + sigma = (1. / (1. + t)) * (fiveflav) + res.SetBinContent(x, y, sigma) return res def safeTH2DivideForKappa(self, h1, h2): @@ -129,103 +225,219 @@ def safeTH2DivideForKappa(self, h1, h2): new_val = val_h1 / val_h2 res.SetBinContent(x, y, new_val) return res - - def buildModel(self, filename): - mA = ROOT.RooRealVar('mA', 'mA', 344., 90., 1000.) - tanb = ROOT.RooRealVar('tanb', 'tanb', 9., 1., 60.) - f = ROOT.TFile(filename) - #Take care of different histogram names: (May can be reduced if ultimately changed to 13TeV/14TeV) - if 'hMSSM' in self.modelname or 'low-tb-high' in self.modelname or '13TeV' in self.modelname or '14TeV' in self.modelname: - mH_str = "m_H" - mh_str = "m_h" - ggF_xsec_h_str = "xs_gg_h" - ggF_xsec_H_str = "xs_gg_H" - ggF_xsec_A_str = "xs_gg_A" - bbH_4f_xsec_h_str = "xs_bb4F_h" - bbH_4f_xsec_H_str = "xs_bb4F_H" - bbH_4f_xsec_A_str = "xs_bb4F_A" - bbH_xsec_h_str = "xs_bb5F_h" - bbH_xsec_H_str = "xs_bb5F_H" - bbH_xsec_A_str = "xs_bb5F_A" - brtautau_h_str = "br_h_tautau" - brtautau_H_str = "br_H_tautau" - brtautau_A_str = "br_A_tautau" - ggF_xsec_h_scale_hi_str = "xs_gg_h_scaleUp" - ggF_xsec_h_scale_lo_str = "xs_gg_h_scaleDown" - else : - mH_str = "h_mH" - mh_str = "h_mh" - ggF_xsec_h_str = "h_ggF_xsec_h" - ggF_xsec_H_str = "h_ggF_xsec_H" - ggF_xsec_A_str = "h_ggF_xsec_A" - bbH_4f_xsec_h_str = "h_bbH4f_xsec_h" - bbH_4f_xsec_H_str = "h_bbH4f_xsec_H" - bbH_4f_xsec_A_str = "h_bbH4f_xsec_A" - bbH_xsec_h_str = "h_bbH_xsec_h" - bbH_xsec_H_str = "h_bbH_xsec_H" - bbH_xsec_A_str = "h_bbH_xsec_A" - brtautau_h_str = "h_brtautau_h" - brtautau_H_str = "h_brtautau_H" - brtautau_A_str = "h_brtautau_A" - ggF_xsec_h_scale_hi_str = "h_ggF_xsec05_h" - ggF_xsec_h_scale_lo_str = "h_ggF_xsec20_h" - mH = self.doHistFunc('mH', f.Get(mH_str), [mA, tanb]) - mh = self.doHistFunc('mh', f.Get(mh_str), [mA, tanb]) - ggF_xsec_h = self.doHistFunc('xsec_ggh_8TeV', f.Get(ggF_xsec_h_str), [mA, tanb]) - ggF_xsec_H = self.doHistFunc('xsec_ggH_8TeV', f.Get(ggF_xsec_H_str), [mA, tanb]) - ggF_xsec_A = self.doHistFunc('xsec_ggA_8TeV', f.Get(ggF_xsec_A_str), [mA, tanb]) - bbH_xsec_h = self.doHistFunc('xsec_bbh_8TeV', self.santanderMatching(f.Get(bbH_4f_xsec_h_str), f.Get(bbH_xsec_h_str), f.Get(mh_str)), [mA, tanb]) - bbH_xsec_H = self.doHistFunc('xsec_bbH_8TeV', self.santanderMatching(f.Get(bbH_4f_xsec_H_str), f.Get(bbH_xsec_H_str), f.Get(mH_str)), [mA, tanb]) - bbH_xsec_A = self.doHistFunc('xsec_bbA_8TeV', self.santanderMatching(f.Get(bbH_4f_xsec_A_str), f.Get(bbH_xsec_A_str)), [mA, tanb]) - brtautau_h = self.doHistFunc('br_htautau', f.Get(brtautau_h_str), [mA, tanb]) - brtautau_H = self.doHistFunc('br_Htautau', f.Get(brtautau_H_str), [mA, tanb]) - brtautau_A = self.doHistFunc('br_Atautau', f.Get(brtautau_A_str), [mA, 
tanb]) - #print f.Get(brtautau_h_str).GetBinContent(f.Get(brtautau_h_str).GetXaxis().FindBin(mA.getVal()),f.Get(brtautau_h_str).GetYaxis().FindBin(tanb.getVal())), brtautau_h.getVal() - - # h_ggF_xsec_h_scale_hi = self.safeTH2DivideForKappa(f.Get(ggF_xsec_h_scale_hi_str), f.Get(ggF_xsec_h_str)) - # h_ggF_xsec_h_scale_lo = self.safeTH2DivideForKappa(f.Get(ggF_xsec_h_scale_lo_str), f.Get(ggF_xsec_h_str)) - # ggF_xsec_h_scale = self.doAsymPow('systeff_ggF_xsec_h_scale_8TeV', h_ggF_xsec_h_scale_lo, h_ggF_xsec_h_scale_hi, 'ggF_xsec_h_scale_8TeV', [mA, tanb]) - # ftest = ROOT.TFile('model_debug.root', 'RECREATE') - # ftest.WriteTObject(h_ggF_xsec_h_scale_hi, 'h_ggF_xsec_h_scale_hi') - # ftest.WriteTObject(h_ggF_xsec_h_scale_lo, 'h_ggF_xsec_h_scale_lo') - # ftest.Close() - # Next step: creating theory uncertainties - # 1) for each syst source build kappaHi and kappaLo TH2s by doing a *careful* divide - # between nominal and shifted TH2s => "kappa_hi_xsec_ggh_8TeV_scale" - - # 2) create nuisance parameter (QCD_scale_ggH?) and constraint. Like: - # def preProcessNuisances(self,nuisances): - # if self.add_bbH and not any(row for row in nuisances if row[0] == "QCDscale_bbH"): - # nuisances.append(("QCDscale_bbH",False, "param", [ "0", "1"], [] ) ) - # --> probably have to create the param ourself first, preProcessNuisances doesn't - # happen until later (after doParametersOfInterest) - # 3) create AsymPow and add to the norm product - - # def preProcessNuisances(self,nuisances): - # nuisances.append(("ggF_xsec_h_scale_8TeV",False, "param", [ "0", "1"], [] ) ) + + def safeTH2DivideForKappaDelta(self, h1, h2, coeff=1.): + """Divides two TH2s taking care of exceptions like divide by zero + and potentially doing more checks in the future""" + res = h1.Clone() + for x in xrange(1, h1.GetNbinsX() + 1): + for y in xrange(1, h2.GetNbinsY() +1): + val_h1 = h1.GetBinContent(x, y) + val_h2 = h2.GetBinContent(x, y) + if val_h1 == 0. or val_h2 == 0.: + print ('Warning: dividing histograms %s and %s at bin (%i,%i)=(%g, %g) ' + 'with values: %g/%g, will set the kappa to 1.0 here' % ( + h1.GetName(), h2.GetName(), x, y, h1.GetXaxis().GetBinCenter(x), + h1.GetYaxis().GetBinCenter(y), val_h1, val_h2 + )) + new_val = 1. + else: + new_val = (val_h2 + coeff*val_h1) / val_h2 + res.SetBinContent(x, y, new_val) + return res + + def buildModel(self): + # It's best not to set ranges for the model parameters here. + # RooFit will create them automatically from the x- and y-axis + # ranges of the input histograms + mA = ROOT.RooRealVar('mA', 'm_{A} [GeV]', 120.) + tanb = ROOT.RooRealVar('tanb', 'tan#beta', 20.) + pars = [mA, tanb] + doneMasses = False + + for era, (file, version) in self.modelFiles.iteritems(): + hd = self.h_dict[version] + f = ROOT.TFile(self.filePrefix + file) + + # We take the masses from the 1st model file, under the + # assumption that they are the same in all model files + if not doneMasses: + #! [part1] + self.doHistFunc('mH', f.Get(hd['mH']), pars) + self.doHistFunc('mh', f.Get(hd['mh']), pars) + #! [part1] + self.doHistFunc('mHp', f.Get(hd['mHp']), pars) + doneMasses = True + + # Do the xsecs and BRs for the three neutral Higgs bosons + #! [part3] + for X in ['h', 'H', 'A']: + self.doHistFunc('xs_gg%s_%s' % (X, era), f.Get(hd['xs_gg%s'%X]), pars) + #! 
[part3] + # QCD scale uncertainty + self.doAsymPow('systeff_xs_gg%s_scale_%s' % (X,era), + self.safeTH2DivideForKappa(f.Get(hd['xs_gg%s_scale_lo'%X]), f.Get(hd['xs_gg%s'%X])), + self.safeTH2DivideForKappa(f.Get(hd['xs_gg%s_scale_hi'%X]), f.Get(hd['xs_gg%s'%X])), + 'xs_gg%s_scale_%s' % (X,era), pars) + # PDF uncertainty + if version == 0: + # In earlier model files pdf uncertainties are given as + # shifts wrt the nominal... + self.doAsymPow('systeff_xs_gg%s_pdf_%s' % (X,era), + self.safeTH2DivideForKappaDelta(f.Get(hd['xs_gg%s_pdf_lo'%X]), f.Get(hd['xs_gg%s'%X]), -1.), + self.safeTH2DivideForKappaDelta(f.Get(hd['xs_gg%s_pdf_hi'%X]), f.Get(hd['xs_gg%s'%X]), +1.), + 'xs_gg%s_pdf_%s' % (X,era), pars) + else: + # ... whereas in the newer (mostly 13 TeV) versions they + # are absolute cross sections just like the QCD scale + self.doAsymPow('systeff_xs_gg%s_pdf_%s' % (X,era), + self.safeTH2DivideForKappa(f.Get(hd['xs_gg%s_pdf_lo'%X]), f.Get(hd['xs_gg%s'%X])), + self.safeTH2DivideForKappa(f.Get(hd['xs_gg%s_pdf_hi'%X]), f.Get(hd['xs_gg%s'%X])), + 'xs_gg%s_pdf_%s' % (X,era), pars) + # Register that this uncertainty scaling should affect any normalisation term that + # contains the cross section we defined above + self.SYST_DICT['xs_gg%s_%s' % (X, era)].append('systeff_xs_gg%s_scale_%s' % (X,era)) + self.SYST_DICT['xs_gg%s_%s' % (X, era)].append('systeff_xs_gg%s_pdf_%s' % (X,era)) + + # Build the Santander-matched bbX cross section. The matching depends + # on the mass of the Higgs boson in question, so for the h and H we + # pass the mh or mH TH2 as an additional argument + hist_bb_nominal = self.santanderMatching(f.Get(hd['xs_bb4f%s'%X]), f.Get(hd['xs_bb5f%s'%X]), None if X=='A' else f.Get(hd['m%s'%X])) + self.doHistFunc('xs_bb%s_%s' % (X, era), hist_bb_nominal, pars) + + # QCD scale uncertainty + self.doAsymPow('systeff_xs_bb%s_scale_%s' % (X,era), + self.safeTH2DivideForKappa( + self.santanderMatching(f.Get(hd['xs_bb4f%s_scale_lo'%X]), f.Get(hd['xs_bb5f%s_scale_lo'%X]), None if X=='A' else f.Get(hd['m%s'%X])), + hist_bb_nominal + ), + self.safeTH2DivideForKappa( + self.santanderMatching(f.Get(hd['xs_bb4f%s_scale_hi'%X]), f.Get(hd['xs_bb5f%s_scale_hi'%X]), None if X=='A' else f.Get(hd['m%s'%X])), + hist_bb_nominal + ), + 'xs_bb%s_scale_%s' % (X,era), pars) + + # PDF uncertainty + if version == 0: + self.doAsymPow('systeff_xs_bb%s_pdf_%s' % (X,era), + self.safeTH2DivideForKappaDelta( + self.santanderPdfUncert(f.Get(hd['xs_bb5f%s_pdf_lo'%X]), None if X=='A' else f.Get(hd['m%s'%X])), + hist_bb_nominal, -1. + ), + self.safeTH2DivideForKappaDelta( + self.santanderPdfUncert(f.Get(hd['xs_bb5f%s_pdf_hi'%X]), None if X=='A' else f.Get(hd['m%s'%X])), + hist_bb_nominal, +1. + ), + 'xs_bb%s_pdf_%s' % (X,era), pars) + else: + self.doAsymPow('systeff_xs_bb%s_pdf_%s' % (X,era), + self.safeTH2DivideForKappaDelta( + self.santanderPdfUncert2(f.Get(hd['xs_bb5f%s_pdf_lo'%X]), f.Get(hd['xs_bb5f%s'%X]), None if X=='A' else f.Get(hd['m%s'%X])), + hist_bb_nominal, -1. + ), + self.safeTH2DivideForKappaDelta( + self.santanderPdfUncert2(f.Get(hd['xs_bb5f%s_pdf_hi'%X]), f.Get(hd['xs_bb5f%s'%X]), None if X=='A' else f.Get(hd['m%s'%X])), + hist_bb_nominal, +1. 
+ ), + 'xs_bb%s_pdf_%s' % (X,era), pars) + + self.SYST_DICT['xs_bb%s_%s' % (X, era)].append('systeff_xs_bb%s_scale_%s' % (X,era)) + self.SYST_DICT['xs_bb%s_%s' % (X, era)].append('systeff_xs_bb%s_pdf_%s' % (X,era)) + + self.doHistFunc('br_%stautau_%s' % (X, era), f.Get(hd['br_%stautau'%X]), pars) + self.doHistFunc('br_%sbb_%s' % (X, era), f.Get(hd['br_%sbb'%X]), pars) + # Make a note of what we've built, will be used to create scaling expressions later + self.PROC_SETS.append(([ 'gg%s'%X, 'bb%s'%X ], [ '%stautau'%X, '%sbb'%X], [era]) + ) + # Do the BRs for the charged Higgs + self.doHistFunc('br_tHpb_%s'%era, f.Get(hd['br_tHpb']), pars) + + # Add a fixed 5% uncertainty on BR(Hp -> tau nu) + self.doHistFunc('br_Hptaunu_%s_pre'%era, f.Get(hd['br_Hptaunu']), pars) + if not self.modelBuilder.out.var('br_Hptaunu_uncert'): + self.modelBuilder.doVar('br_Hptaunu_uncert[0,-7,7]') + self.NUISANCES.add('br_Hptaunu_uncert') + self.modelBuilder.doObj('syst_eff_br_Hptaunu_uncert_%s' % era, 'AsymPow', '0.95,1.05,br_Hptaunu_uncert') + self.modelBuilder.factory_('expr::br_Hptaunu_%s("@0*@1", br_Hptaunu_%s_pre, syst_eff_br_Hptaunu_uncert_%s)' % (era, era, era)) + + self.PROC_SETS.append((['tt'], ['HptaunubHptaunub', 'HptaunubWb', 'WbWb'], [era])) + # And the extra term we need for H->hh + self.doHistFunc('br_Hhh_%s'%era, f.Get(hd['br_Hhh']), pars) + self.PROC_SETS.append((['ggH'], ['Hhhbbtautau'], [era])) + # And the extra term we need for A->Zh + self.doHistFunc('br_AZh_%s'%era, f.Get(hd['br_AZh']), pars) + self.PROC_SETS.append((['ggA'], ['AZhLLtautau'], [era])) + # And the SM terms + for X in ['ggH', 'qqH', 'VH']: + self.PROC_SETS.append((['%s'%X], ['SM125'], [era])) + + def preProcessNuisances(self,nuisances): + doParams = set() + for bin in self.DC.bins: + for proc in self.DC.exp[bin].keys(): + if self.DC.isSignal[proc]: + (P, D, E) = self.getHiggsProdDecMode(bin, proc) + scaling = 'scaling_%s_%s_%s' % (P, D, E) + params = self.modelBuilder.out.function(scaling).getParameters(ROOT.RooArgSet()).contentsString().split(',') + for param in params: + if param in self.NUISANCES: + doParams.add(param) + for param in doParams: + print 'Add nuisance parameter %s to datacard' % param + nuisances.append((param,False, "param", [ "0", "1"], [] ) ) def doParametersOfInterest(self): """Create POI and other parameters, and define the POI set.""" - # print '>> In doParametersOfInterest, workspace contents:' self.modelBuilder.doVar("r[1,0,20]") + + #MSSMvsSM + self.modelBuilder.doVar("x[1,0,1]") + self.modelBuilder.out.var('x').setConstant(True) + self.modelBuilder.factory_("expr::not_x(\"(1-@0)\", x)") + self.sigNorms = { True:'x', False:'not_x' } + + self.modelBuilder.doSet('POI', 'r') + + # We don't intend on actually floating these in any fits... 
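# Since mA and tanb are held constant, a scan of the (mA, tanb) plane is done
# by re-setting their values for every grid point in the workspace produced by
# text2workspace.py. A sketch, assuming the default workspace name 'w'; the
# file name and grid points are hypothetical:
import ROOT

f_ws = ROOT.TFile('workspace.root')
w = f_ws.Get('w')
w.var('x').setVal(1.)            # 1 = MSSM hypothesis, 0 = SM-only via not_x = 1 - x
for mA_val, tanb_val in [(300., 10.), (500., 20.)]:
    w.var('mA').setVal(mA_val)
    w.var('tanb').setVal(tanb_val)
    # ... run the fit / limit calculation for this model point ...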
self.modelBuilder.out.var('mA').setConstant(True) self.modelBuilder.out.var('tanb').setConstant(True) + + # Build the intermediate terms for charged Higgs scaling + for E in self.modelFiles: + self.modelBuilder.doVar('xs_tt_%s[1.0]' % E) + self.modelBuilder.factory_('expr::br_HptaunubHptaunub_%s("@0*@0*@1*@1", br_tHpb_%s,br_Hptaunu_%s)' % (E,E,E)) + self.modelBuilder.factory_('expr::br_HptaunubWb_%s("2 * (1-@0)*@0*@1", br_tHpb_%s,br_Hptaunu_%s)' % (E,E,E)) + self.modelBuilder.factory_('expr::br_WbWb_%s("(1-@0)*(1-@0)", br_tHpb_%s)' % (E,E)) + + # Build the intermediate terms for H->hh->bbtautau and A->Zh->LLtautau scaling + for E in self.modelFiles: + self.modelBuilder.factory_('expr::br_Hhhbbtautau_%s("2*@0*@1*@2", br_Hhh_%s,br_htautau_%s,br_hbb_%s)' % (E,E,E,E)) + self.modelBuilder.factory_('expr::br_AZhLLtautau_%s("0.10099*@0*@1", br_AZh_%s,br_htautau_%s)' % (E,E,E)) + for proc_set in self.PROC_SETS: for (P, D, E) in itertools.product(*proc_set): # print (P, D, E) - terms = ['xsec_%s_%s' % (P, E), 'br_%s'%D] - terms += ['r'] + if ((self.SMSignal not in D) and ("ww125" not in P) and ("tt125" not in P)): #altenative hypothesis if SMSignal not in process name + terms = ['xs_%s_%s' % (P, E), 'br_%s_%s'% (D, E)] + terms += ['r'] + terms += [self.sigNorms[1]] + else: + terms = [self.sigNorms[0]] + # Now scan terms and add theory uncerts + extra = [] + for term in terms: + if term in self.SYST_DICT: + extra += self.SYST_DICT[term] + terms += extra self.modelBuilder.factory_('prod::scaling_%s_%s_%s(%s)' % (P,D,E,','.join(terms))) self.modelBuilder.out.function('scaling_%s_%s_%s' % (P,D,E)).Print('') - # self.modelBuilder.out.Print() - self.modelBuilder.doSet('POI', 'r') def getHiggsProdDecMode(self, bin, process): """Return a triple of (production, decay, energy)""" P = '' D = '' - if "_" in process: + if "_" in process: (P, D) = process.split("_") else: raise RuntimeError, 'Expected signal process %s to be of the form PROD_DECAY' % process @@ -237,7 +449,7 @@ def getHiggsProdDecMode(self, bin, process): if not E: raise RuntimeError, 'Did not find a valid energy in bin string %s' % bin return (P, D, E) - + def getYieldScale(self,bin,process): if self.DC.isSignal[process]: (P, D, E) = self.getHiggsProdDecMode(bin, process) @@ -247,70 +459,6 @@ def getYieldScale(self,bin,process): else: return 1 -class BRChargedHiggs(PhysicsModel): - def __init__(self): - PhysicsModel.__init__(self) - def doParametersOfInterest(self): - """Create POI and other parameters, and define the POI set.""" - self.modelBuilder.doVar('BR[0,0,1]'); - - self.modelBuilder.doSet('POI','BR') - - self.modelBuilder.factory_('expr::Scaling_HH("@0*@0", BR)') - self.modelBuilder.factory_('expr::Scaling_WH("2 * (1-@0)*@0", BR)') - #self.modelBuilder.factory_('expr::Scaling_tt("(1-@0)*(1-@0)", BR)') - self.modelBuilder.factory_('expr::Scaling_tt("1 - (@0+@1)", Scaling_HH, Scaling_WH)') - - self.processScaling = { 'tt_ttHchHch':'HH', 'tt_ttHchW':'WH', 'tt':'tt' } - - # self.modelBuilder.out.Print() - - def getYieldScale(self,bin,process): - for prefix, model in self.processScaling.iteritems(): - if process == prefix: - print 'Scaling %s/%s as %s' % (bin, process, 'Scaling_'+model) - return 'Scaling_'+model - return 1 - - - -#7TeV -mhmax_7TeV = MSSMHiggsModel("mhmax_7TeV") -mhmodp_7TeV = MSSMHiggsModel("mhmodp_7TeV") -mhmodm_7TeV = MSSMHiggsModel("mhmodm_7TeV") -lightstau_7TeV = MSSMHiggsModel("lightstau_7TeV") -lightstopmod_7TeV = MSSMHiggsModel("lightstopmod_7TeV") -tauphobic_7TeV = MSSMHiggsModel("tauphobic_7TeV") - -#8TeV 
-mhmax_8TeV = MSSMHiggsModel("mhmax_8TeV") -mhmodp_8TeV = MSSMHiggsModel("mhmodp_8TeV") -mhmodm_8TeV = MSSMHiggsModel("mhmodm_8TeV") -lightstau_8TeV = MSSMHiggsModel("lightstau_8TeV") -lightstopmod_8TeV = MSSMHiggsModel("lightstopmod_8TeV") -tauphobic_8TeV = MSSMHiggsModel("tauphobic_8TeV") -hMSSM_8TeV = MSSMHiggsModel("hMSSM_8TeV") -lowtbhigh_8TeV = MSSMHiggsModel("low-tb-high_8TeV") - -#13TeV -mhmax_13TeV = MSSMHiggsModel("mhmax_13TeV") -mhmodp_13TeV = MSSMHiggsModel("mhmodp_13TeV") -mhmodm_13TeV = MSSMHiggsModel("mhmodm_13TeV") -lightstau_13TeV = MSSMHiggsModel("lightstau_13TeV") -lightstopmod_13TeV = MSSMHiggsModel("lightstopmod_13TeV") -tauphobic_13TeV = MSSMHiggsModel("tauphobic_13TeV") -hMSSM_13TeV = MSSMHiggsModel("hMSSM_13TeV") -lowtbhigh_13TeV = MSSMHiggsModel("low-tb-high_13TeV") - -#14TeV -mhmax_14TeV = MSSMHiggsModel("mhmax_14TeV") -mhmodp_14TeV = MSSMHiggsModel("mhmodp_14TeV") -mhmodm_14TeV = MSSMHiggsModel("mhmodm_14TeV") -lightstau_14TeV = MSSMHiggsModel("lightstau_14TeV") -lightstopmod_14TeV = MSSMHiggsModel("lightstopmod_14TeV") -tauphobic_14TeV = MSSMHiggsModel("tauphobic_14TeV") - -#BR Charged Higgs -brChargedHiggs = BRChargedHiggs() +MSSM = MSSMHiggsModel() diff --git a/CombinePdfs/python/ModelIndependent.py b/CombinePdfs/python/ModelIndependent.py index 98ec6df5825..72124537da2 100644 --- a/CombinePdfs/python/ModelIndependent.py +++ b/CombinePdfs/python/ModelIndependent.py @@ -64,92 +64,8 @@ def getYieldScale(self,bin,process): print "Warning: decay string %s does not contain any known energy, assuming %s" % (decaySource, foundEnergy) return self.getHiggsSignalYieldScale(processSource, foundDecay, foundEnergy) -class FloatingMSSMXSHiggs(MSSMLikeHiggsModel): - """ - Trivial model to float ggA and bbA independently. At the moment only ggA and bbh are supported. Extensions to the other - production channels channels are given in comments. Also the principle how to deal with manipulations of the POI's and - other defined roofit variables are shown in comments. - """ - def __init__(self): - MSSMLikeHiggsModel.__init__(self) # not using 'super(x,self).__init__' since I don't understand it - self.modes = [ "ggA", "bbA" ] - self.mARange = [] - self.ggARange = ['0','20'] - self.bbARange = ['0','20'] - def setPhysicsOptions(self,physOptions): - """ - Options are: modes. Examples for options like mARange are given in comments. - """ - for po in physOptions: - if po.startswith("modes="): self.modes = po.replace("modes=","").split(",") - if po.startswith("mARange="): - self.mARange = po.replace("mARange=","").split(":") - if len(self.mARange) != 2: - raise RuntimeError, "Definition of mA range requires two extrema, separated by ':'" - elif float(self.mARange[0]) >= float(self.mARange[1]): - raise RuntimeError, "Extrema for mA range defined with inverterd order. Second element must be larger than first element" - if po.startswith("ggARange="): - self.ggARange = po.replace("ggARange=","").split(":") - if len(self.ggARange) != 2: - raise RuntimeError, "ggA signal strength range requires minimal and maximal value" - elif float(self.ggARange[0]) >= float(self.ggARange[1]): - raise RuntimeError, "minimal and maximal range swapped. Second value must be larger first one" - if po.startswith("bbARange="): - self.bbARange = po.replace("bbARange=","").split(":") - if len(self.bbARange) != 2: - raise RuntimeError, "bbA signal strength range requires minimal and maximal value" - elif float(self.bbARange[0]) >= float(self.bbARange[1]): - raise RuntimeError, "minimal and maximal range swapped. 
Second value must be larger first one" - def doParametersOfInterest(self): - """ - Create POI and other parameters, and define the POI set. E.g. Evaluate cross section for given values of mA and tanb - """ - # Define signal strengths on ggA and bbA as POI, NOTE: the range of the POIs is defined here - self.modelBuilder.doVar("r_ggA[%s,%s,%s]" % (str((float(self.ggARange[0])+float(self.ggARange[1]))/2.), self.ggARange[0], self.ggARange[1])); - self.modelBuilder.doVar("r_bbA[%s,%s,%s]" % (str((float(self.bbARange[0])+float(self.bbARange[1]))/2.), self.bbARange[0], self.bbARange[1])); - self.modelBuilder.doVar("r_ggh[0]" ); - self.modelBuilder.doVar("r_bbh[0]" ); - self.modelBuilder.doVar("r_ggH[0]" ); - self.modelBuilder.doVar("r_bbH[0]" ); - poi = ",".join(["r_"+m for m in self.modes]) - ## Define Higgs boson mass as another parameter. It will be floating if mARange is set otherwise it will be treated - ## as fixed. NOTE: this is only left here as an extended example. It's not useful to have mA floating at the moment. - if self.modelBuilder.out.var("mA"): - if len(self.mARange): - print 'mA will be left floating within', self.mARange[0], 'and', self.mARange[1] - self.modelBuilder.out.var("mA").setRange(float(self.mARange[0]),float(self.mARange[1])) - self.modelBuilder.out.var("mA").setConstant(False) - poi+=',mA' - else: - print 'mA will be assumed to be', self.options.mass - self.modelBuilder.out.var("mA").removeRange() - self.modelBuilder.out.var("mA").setVal(self.options.mass) - else: - if len(self.mARange): - print 'mA will be left floating within', self.mARange[0], 'and', self.mARange[1] - self.modelBuilder.doVar("mA[%s,%s]" % (self.mARange[0],self.mARange[1])) - poi+=',mA' - else: - print 'mA (not there before) will be assumed to be', self.options.mass - self.modelBuilder.doVar("mA[%g]" % self.options.mass) - if self.modelBuilder.out.var("mh"): - self.modelBuilder.out.var("mh").removeRange() - self.modelBuilder.out.var("mh").setVal(340) - if self.modelBuilder.out.var("mH"): - self.modelBuilder.out.var("mH").removeRange() - self.modelBuilder.out.var("mH").setVal(340) - ## define set of POIs - self.modelBuilder.doSet("POI",poi) - def getHiggsSignalYieldScale(self,production,decay, energy): - if production == "ggh" or production == "bbh" or production == "ggA" or production == "bbA" or production == "ggH" or production == "bbH" : - ## This is the trivial model that we follow now. We just pass on the values themselves. Yes this is just a - ## trivial renaming, but we leave it in as an example. Instead also r_ggA and r_bbA could be passed on directly - ## in the return function instead of the newly defined variables ggA_yields or bbA_yield. - self.modelBuilder.factory_('expr::%s_yield("@0", r_%s)' % (production, production)) - return "%s_yield" % production - raise RuntimeError, "Unknown production mode '%s'" % production -class FloatingMSSMXSHiggs2(MSSMLikeHiggsModel): +class FloatingMSSMXSHiggs(MSSMLikeHiggsModel): """ Trivial model to float ggH and bbH independently. At the moment only ggH and bbh are supported. Extensions to the other production channels channels are given in comments. 
Also the principle how to deal with manipulations of the POI's and @@ -253,5 +169,4 @@ def getHiggsSignalYieldScale(self,production,decay, energy): brChargedHiggs = BRChargedHiggs() #Model independent xs*BR limits -floatingMSSMXSHiggs = FloatingMSSMXSHiggs() #with A,H,h -floatingMSSMXSHiggs2 = FloatingMSSMXSHiggs2() #with A +floatingMSSMXSHiggs = FloatingMSSMXSHiggs() diff --git a/CombinePdfs/python/MSSMv2.py b/CombinePdfs/python/THDM.py similarity index 63% rename from CombinePdfs/python/MSSMv2.py rename to CombinePdfs/python/THDM.py index fb7df51c86c..70d9942e1e7 100644 --- a/CombinePdfs/python/MSSMv2.py +++ b/CombinePdfs/python/THDM.py @@ -6,50 +6,31 @@ import pprint import sys -class MSSMHiggsModel(PhysicsModel): +class THDMHiggsModel(PhysicsModel): def __init__(self): PhysicsModel.__init__(self) self.filePrefix = '' self.modelFiles = {} self.h_dict = {} self.h_dict[0] = { - 'mH' : 'h_mH', - 'mh' : 'h_mh', - 'mHp' : 'h_mHp', - 'br_tHpb' : 'h_brHpb_t', - 'br_Hptaunu' : 'h_brtaunu_Hp', - 'br_Hhh' : 'h_brh0h0_H', - 'br_AZh' : 'h_brZh0_A' - } - self.h_dict[1] = { - 'mH' : 'm_H', - 'mh' : 'm_h', - 'mHp' : 'm_Hp', - 'br_tHpb' : 'br_t_Hpb', - 'br_Hptaunu' : 'br_Hp_taunu', - 'br_Hhh' : 'br_H_hh', - 'br_AZh' : 'br_A_Zh' + 'mA' : 'mA', + 'mh' : 'mh', + 'br_Hhh' : 'br_Hhh', + 'br_AZh' : 'br_AZh' } for X in ['h', 'H', 'A']: self.h_dict[0].update({ - 'xs_gg%s'%X : 'h_ggF_xsec_%s'%X, - 'xs_bb4f%s'%X : 'h_bbH4f_xsec_%s'%X, - 'xs_bb5f%s'%X : 'h_bbH_xsec_%s'%X, - 'br_%stautau'%X : 'h_brtautau_%s'%X, - 'br_%sbb'%X : 'h_brbb_%s'%X, - }) - self.h_dict[1].update({ 'xs_gg%s'%X : 'xs_gg_%s'%X, - 'xs_bb4f%s'%X : 'xs_bb4F_%s'%X, - 'xs_bb5f%s'%X : 'xs_bb5F_%s'%X, - 'br_%stautau'%X : 'br_%s_tautau'%X, - 'br_%sbb'%X : 'br_%s_bb'%X + 'xs_bb%s'%X : 'xs_bb_%s'%X, + 'br_%stautau'%X : 'br_%stautau'%X, + 'br_%sbb'%X : 'br_%sbb'%X, }) # Define the known production and decay processes # These are strings we will look for in the process names to # determine the correct normalisation scaling self.ERAS = ['7TeV', '8TeV', '13TeV', '14TeV'] self.PROC_SETS = [] + self.SMSignal = "SM125" #SM signal def setPhysicsOptions(self,physOptions): for po in physOptions: @@ -97,22 +78,6 @@ def doAsymPow(self, name, h_kappa_lo, h_kappa_hi, param, varlist): self.modelBuilder.out._import(asym) return self.modelBuilder.out.function('systeff_%s'%name) - def santanderMatching(self, h4f, h5f, mass = None): - res = h4f.Clone() - for x in xrange(1, h4f.GetNbinsX() + 1): - for y in xrange(1, h4f.GetNbinsY() +1): - mh = h4f.GetXaxis().GetBinCenter(x) if mass is None else mass.GetBinContent(x, y) - if mh <= 0: - print 'santanderMatching: Have mh = %f at (%f,%f), using h4f value' % (mh, h4f.GetXaxis().GetBinCenter(x), h4f.GetYaxis().GetBinCenter(y)) - res.SetBinContent(x, y, h4f.GetBinContent(x, y)) - else: - t = math.log(mh / 4.92) - 2. - fourflav = h4f.GetBinContent(x, y) - fiveflav = h5f.GetBinContent(x, y) - sigma = (1. / (1. + t)) * (fourflav + t * fiveflav) - res.SetBinContent(x, y, sigma) - return res - def safeTH2DivideForKappa(self, h1, h2): """Divides two TH2s taking care of exceptions like divide by zero and potentially doing more checks in the future""" @@ -137,13 +102,9 @@ def buildModel(self): # It's best not to set ranges for the model parameters here. # RooFit will create them automatically from the x- and y-axis # ranges of the input histograms - mA = ROOT.RooRealVar('mA', 'mA', 120.) - tanb = ROOT.RooRealVar('tanb', 'tanb', 20.) 
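# The Santander matching dropped from this 2HDM model above (and kept in
# MSSM.py) combines the 4- and 5-flavour bbH cross sections with a weight
# t = ln(mh / mb) - 2 that grows with the Higgs mass. A minimal standalone
# version of that formula, with hypothetical input cross sections:
import math

def santander_matched(xs_4f, xs_5f, mh, mb=4.92):
    # sigma = (sigma_4f + t * sigma_5f) / (1 + t), with t = ln(mh / mb) - 2
    t = math.log(mh / mb) - 2.
    return (xs_4f + t * xs_5f) / (1. + t)

print santander_matched(1.20, 1.00, 300.)   # ~1.06 for these made-up xsecs (pb)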
- pars = [mA, tanb] - # ggF_xsec_h_scale_hi_str = "xs_gg_h_scaleUp" - # ggF_xsec_h_scale_lo_str = "xs_gg_h_scaleDown" - # ggF_xsec_h_scale_hi_str = "h_ggF_xsec05_h" - # ggF_xsec_h_scale_lo_str = "h_ggF_xsec20_h" + mH = ROOT.RooRealVar('mH', 'mH', 300.) + tanb = ROOT.RooRealVar('tanb', 'tanb', 10.) + pars = [mH, tanb] doneMasses = False for era, (file, version) in self.modelFiles.iteritems(): @@ -153,63 +114,46 @@ def buildModel(self): # We take the masses from the 1st model file, under the # assumption that they are the same in all model files if not doneMasses: - self.doHistFunc('mH', f.Get(hd['mH']), pars) + self.doHistFunc('mA', f.Get(hd['mA']), pars) self.doHistFunc('mh', f.Get(hd['mh']), pars) - self.doHistFunc('mHp', f.Get(hd['mHp']), pars) doneMasses = True # Do the xsecs and BRs for the three neutral Higgs bosons for X in ['h', 'H', 'A']: self.doHistFunc('xs_gg%s_%s' % (X, era), f.Get(hd['xs_gg%s'%X]), pars) # Build the Santander-matched bbX cross section. The matching depends - # on the mass of the Higgs boson in question, so for the h and H we - # pass the mh or mH TH2 as an additional argument - self.doHistFunc('xs_bb%s_%s' % (X, era), self.santanderMatching( - f.Get(hd['xs_bb4f%s'%X]), - f.Get(hd['xs_bb5f%s'%X]), - None if X=='A' else f.Get(hd['m%s'%X])), pars) + # on the mass of the Higgs boson in question, so for the h and A we + # pass the mh or mA TH2 as an additional argument + self.doHistFunc('xs_bb%s_%s' % (X, era), f.Get(hd['xs_bb%s'%X]), pars) self.doHistFunc('br_%stautau_%s' % (X, era), f.Get(hd['br_%stautau'%X]), pars) self.doHistFunc('br_%sbb_%s' % (X, era), f.Get(hd['br_%sbb'%X]), pars) # Make a note of what we've built, will be used to create scaling expressions later self.PROC_SETS.append(([ 'gg%s'%X, 'bb%s'%X ], [ '%stautau'%X, '%sbb'%X], [era]) ) - # Do the BRs for the charged Higgs - self.doHistFunc('br_tHpb_%s'%era, f.Get(hd['br_tHpb']), pars) - self.doHistFunc('br_Hptaunu_%s'%era, f.Get(hd['br_Hptaunu']), pars) - self.PROC_SETS.append((['tt'], ['HptaunubHptaunub', 'HptaunubWb', 'WbWb'], [era])) # And the extra term we need for H->hh self.doHistFunc('br_Hhh_%s'%era, f.Get(hd['br_Hhh']), pars) self.PROC_SETS.append((['ggH'], ['Hhhbbtautau'], [era])) # And the extra term we need for A->Zh self.doHistFunc('br_AZh_%s'%era, f.Get(hd['br_AZh']), pars) self.PROC_SETS.append((['ggA'], ['AZhLLtautau'], [era])) - - # h_ggF_xsec_h_scale_hi = self.safeTH2DivideForKappa(f.Get(ggF_xsec_h_scale_hi_str), f.Get(ggF_xsec_h_str)) - # h_ggF_xsec_h_scale_lo = self.safeTH2DivideForKappa(f.Get(ggF_xsec_h_scale_lo_str), f.Get(ggF_xsec_h_str)) - # ggF_xsec_h_scale = self.doAsymPow('systeff_ggF_xsec_h_scale_8TeV', h_ggF_xsec_h_scale_lo, h_ggF_xsec_h_scale_hi, 'ggF_xsec_h_scale_8TeV', [mA, tanb]) - # ftest = ROOT.TFile('model_debug.root', 'RECREATE') - # ftest.WriteTObject(h_ggF_xsec_h_scale_hi, 'h_ggF_xsec_h_scale_hi') - # ftest.WriteTObject(h_ggF_xsec_h_scale_lo, 'h_ggF_xsec_h_scale_lo') - # ftest.Close() - - # def preProcessNuisances(self,nuisances): - # nuisances.append(("ggF_xsec_h_scale_8TeV",False, "param", [ "0", "1"], [] ) ) + # And the SM terms + for X in ['ggH', 'qqH', 'VH']: + self.PROC_SETS.append((['%s'%X], ['SM125'], [era])) def doParametersOfInterest(self): """Create POI and other parameters, and define the POI set.""" self.modelBuilder.doVar("r[1,0,20]") - self.modelBuilder.doSet('POI', 'r') + + #MSSMvsMS + self.modelBuilder.doVar("x[1,0,1]") + self.modelBuilder.factory_("expr::not_x(\"(1-@0)\", x)") + self.sigNorms = { True:'x', False:'not_x' } + + 
self.modelBuilder.doSet('POI', 'x,r') # We don't intend on actually floating these in any fits... - self.modelBuilder.out.var('mA').setConstant(True) + self.modelBuilder.out.var('mH').setConstant(True) self.modelBuilder.out.var('tanb').setConstant(True) - # Build the intermediate terms for charged Higgs scaling - for E in self.modelFiles: - self.modelBuilder.doVar('xs_tt_%s[1.0]' % E) - self.modelBuilder.factory_('expr::br_HptaunubHptaunub_%s("@0*@0*@1*@1", br_tHpb_%s,br_Hptaunu_%s)' % (E,E,E)) - self.modelBuilder.factory_('expr::br_HptaunubWb_%s("2 * (1-@0)*@0*@1", br_tHpb_%s,br_Hptaunu_%s)' % (E,E,E)) - self.modelBuilder.factory_('expr::br_WbWb_%s("(1-@0)*(1-@0)", br_tHpb_%s)' % (E,E)) - # Build the intermediate terms for H->hh->bbtautau and A->Zh->LLtautau scaling for E in self.modelFiles: self.modelBuilder.factory_('expr::br_Hhhbbtautau_%s("2*@0*@1*@2", br_Hhh_%s,br_htautau_%s,br_hbb_%s)' % (E,E,E,E)) @@ -218,8 +162,12 @@ def doParametersOfInterest(self): for proc_set in self.PROC_SETS: for (P, D, E) in itertools.product(*proc_set): # print (P, D, E) - terms = ['xs_%s_%s' % (P, E), 'br_%s_%s'% (D, E)] - terms += ['r'] + if ((self.SMSignal not in D) and ("ww125" not in P) and ("tt125" not in P)): #altenative hypothesis if SMSignal not in process name + terms = ['xs_%s_%s' % (P, E), 'br_%s_%s'% (D, E)] + terms += ['r'] + terms += [self.sigNorms[1]] + else: + terms = [self.sigNorms[0]] self.modelBuilder.factory_('prod::scaling_%s_%s_%s(%s)' % (P,D,E,','.join(terms))) self.modelBuilder.out.function('scaling_%s_%s_%s' % (P,D,E)).Print('') @@ -251,5 +199,5 @@ def getYieldScale(self,bin,process): return 1 -MSSM = MSSMHiggsModel() +THDM = THDMHiggsModel() diff --git a/CombinePdfs/scripts/mssm_1Dlimit_grid.json b/CombinePdfs/scripts/mssm_1Dlimit_grid.json deleted file mode 100644 index cb279b93e4e..00000000000 --- a/CombinePdfs/scripts/mssm_1Dlimit_grid.json +++ /dev/null @@ -1,8 +0,0 @@ -{ - "opts" : "", - "POIs" : ["mA"], - "grids" : [ - ["90:1000|10"] - ], - "r" : ["r_ggA"] -} diff --git a/CombinePdfs/scripts/mssm_bbh_boundaries.json b/CombinePdfs/scripts/mssm_bbh_boundaries.json new file mode 100644 index 00000000000..57c01973cf8 --- /dev/null +++ b/CombinePdfs/scripts/mssm_bbh_boundaries.json @@ -0,0 +1,96 @@ +{ + "r_bbH": { + "90": 25.00, + "100": 20.00, + "110": 15.00, + "120": 12.00, + "130": 10.00, + "140": 7.00, + "150": 2.00, + "160": 1.85, + "170": 1.50, + "180": 1.25, + "190": 1.00, + "200": 1.00, + "210": 0.80, + "220": 0.70, + "230": 0.60, + "240": 0.60, + "250": 0.60, + "260": 0.60, + "270": 0.60, + "280": 0.60, + "290": 0.60, + "300": 0.60, + "310": 0.50, + "320": 0.50, + "330": 0.50, + "340": 0.50, + "350": 0.50, + "360": 0.50, + "370": 0.50, + "380": 0.50, + "390": 0.50, + "400": 0.40, + "410": 0.40, + "420": 0.40, + "430": 0.40, + "440": 0.40, + "450": 0.40, + "460": 0.40, + "470": 0.40, + "480": 0.40, + "490": 0.40, + "500": 0.40, + "510": 0.40, + "520": 0.40, + "530": 0.40, + "540": 0.40, + "550": 0.40, + "560": 0.40, + "570": 0.40, + "580": 0.40, + "590": 0.40, + "600": 0.40, + "610": 0.40, + "620": 0.40, + "630": 0.40, + "640": 0.40, + "650": 0.40, + "660": 0.40, + "670": 0.40, + "680": 0.40, + "690": 0.40, + "700": 0.30, + "710": 0.30, + "720": 0.30, + "730": 0.30, + "740": 0.30, + "750": 0.30, + "760": 0.30, + "770": 0.30, + "780": 0.30, + "790": 0.30, + "800": 0.30, + "810": 0.30, + "820": 0.30, + "830": 0.30, + "840": 0.30, + "850": 0.30, + "860": 0.30, + "870": 0.30, + "880": 0.30, + "890": 0.30, + "900": 0.20, + "910": 0.20, + "920": 0.20, + "930": 0.20, + "940": 
0.20, + "950": 0.20, + "960": 0.20, + "970": 0.20, + "980": 0.20, + "990": 0.20, + "1000": 0.20 + } +} diff --git a/CombinePdfs/scripts/mssm_ggh_bbh_boundaries.json b/CombinePdfs/scripts/mssm_ggh_bbh_boundaries.json new file mode 100644 index 00000000000..8c4d15665c6 --- /dev/null +++ b/CombinePdfs/scripts/mssm_ggh_bbh_boundaries.json @@ -0,0 +1,190 @@ +{ + "r_ggH": { + "90": 80.00, + "100": 35.00, + "110": 18.00, + "120": 18.00, + "130": 12.00, + "140": 8.00, + "150": 2.00, + "160": 1.80, + "170": 1.20, + "180": 1.00, + "190": 1.00, + "200": 1.00, + "210": 0.80, + "220": 0.80, + "230": 0.80, + "240": 0.60, + "250": 0.60, + "260": 0.60, + "270": 0.60, + "280": 0.60, + "290": 0.60, + "300": 0.60, + "310": 0.50, + "320": 0.50, + "330": 0.50, + "340": 0.50, + "350": 0.50, + "360": 0.50, + "370": 0.50, + "380": 0.50, + "390": 0.50, + "400": 0.40, + "410": 0.40, + "420": 0.40, + "430": 0.40, + "440": 0.40, + "450": 0.40, + "460": 0.40, + "470": 0.40, + "480": 0.40, + "490": 0.40, + "500": 0.40, + "510": 0.40, + "520": 0.40, + "530": 0.40, + "540": 0.40, + "550": 0.40, + "560": 0.40, + "570": 0.40, + "580": 0.40, + "590": 0.40, + "600": 0.40, + "610": 0.40, + "620": 0.40, + "630": 0.40, + "640": 0.40, + "650": 0.40, + "660": 0.40, + "670": 0.40, + "680": 0.40, + "690": 0.40, + "700": 0.30, + "710": 0.30, + "720": 0.30, + "730": 0.30, + "740": 0.30, + "750": 0.30, + "760": 0.30, + "770": 0.30, + "780": 0.30, + "790": 0.30, + "800": 0.30, + "810": 0.30, + "820": 0.30, + "830": 0.30, + "840": 0.30, + "850": 0.30, + "860": 0.30, + "870": 0.30, + "880": 0.30, + "890": 0.30, + "900": 0.20, + "910": 0.20, + "920": 0.20, + "930": 0.20, + "940": 0.20, + "950": 0.20, + "960": 0.20, + "970": 0.20, + "980": 0.20, + "990": 0.20, + "1000": 0.20 + }, + "r_bbH": { + "90": 25.00, + "100": 20.00, + "110": 15.00, + "120": 12.00, + "130": 10.00, + "140": 7.00, + "150": 2.00, + "160": 1.85, + "170": 1.50, + "180": 1.25, + "190": 1.00, + "200": 1.00, + "210": 0.80, + "220": 0.70, + "230": 0.60, + "240": 0.60, + "250": 0.60, + "260": 0.60, + "270": 0.60, + "280": 0.60, + "290": 0.60, + "300": 0.60, + "310": 0.50, + "320": 0.50, + "330": 0.50, + "340": 0.50, + "350": 0.50, + "360": 0.50, + "370": 0.50, + "380": 0.50, + "390": 0.50, + "400": 0.40, + "410": 0.40, + "420": 0.40, + "430": 0.40, + "440": 0.40, + "450": 0.40, + "460": 0.40, + "470": 0.40, + "480": 0.40, + "490": 0.40, + "500": 0.40, + "510": 0.40, + "520": 0.40, + "530": 0.40, + "540": 0.40, + "550": 0.40, + "560": 0.40, + "570": 0.40, + "580": 0.40, + "590": 0.40, + "600": 0.40, + "610": 0.40, + "620": 0.40, + "630": 0.40, + "640": 0.40, + "650": 0.40, + "660": 0.40, + "670": 0.40, + "680": 0.40, + "690": 0.40, + "700": 0.30, + "710": 0.30, + "720": 0.30, + "730": 0.30, + "740": 0.30, + "750": 0.30, + "760": 0.30, + "770": 0.30, + "780": 0.30, + "790": 0.30, + "800": 0.30, + "810": 0.30, + "820": 0.30, + "830": 0.30, + "840": 0.30, + "850": 0.30, + "860": 0.30, + "870": 0.30, + "880": 0.30, + "890": 0.30, + "900": 0.20, + "910": 0.20, + "920": 0.20, + "930": 0.20, + "940": 0.20, + "950": 0.20, + "960": 0.20, + "970": 0.20, + "980": 0.20, + "990": 0.20, + "1000": 0.20 + } +} diff --git a/CombinePdfs/scripts/mssm_ggh_boundaries.json b/CombinePdfs/scripts/mssm_ggh_boundaries.json new file mode 100644 index 00000000000..f9701b2399a --- /dev/null +++ b/CombinePdfs/scripts/mssm_ggh_boundaries.json @@ -0,0 +1,96 @@ +{ + "r_ggH": { + "90": 80.00, + "100": 35.00, + "110": 18.00, + "120": 18.00, + "130": 12.00, + "140": 8.00, + "150": 2.00, + "160": 1.80, + "170": 
1.20, + "180": 1.00, + "190": 1.00, + "200": 1.00, + "210": 0.80, + "220": 0.80, + "230": 0.80, + "240": 0.60, + "250": 0.60, + "260": 0.60, + "270": 0.60, + "280": 0.60, + "290": 0.60, + "300": 0.60, + "310": 0.50, + "320": 0.50, + "330": 0.50, + "340": 0.50, + "350": 0.50, + "360": 0.50, + "370": 0.50, + "380": 0.50, + "390": 0.50, + "400": 0.40, + "410": 0.40, + "420": 0.40, + "430": 0.40, + "440": 0.40, + "450": 0.40, + "460": 0.40, + "470": 0.40, + "480": 0.40, + "490": 0.40, + "500": 0.40, + "510": 0.40, + "520": 0.40, + "530": 0.40, + "540": 0.40, + "550": 0.40, + "560": 0.40, + "570": 0.40, + "580": 0.40, + "590": 0.40, + "600": 0.40, + "610": 0.40, + "620": 0.40, + "630": 0.40, + "640": 0.40, + "650": 0.40, + "660": 0.40, + "670": 0.40, + "680": 0.40, + "690": 0.40, + "700": 0.30, + "710": 0.30, + "720": 0.30, + "730": 0.30, + "740": 0.30, + "750": 0.30, + "760": 0.30, + "770": 0.30, + "780": 0.30, + "790": 0.30, + "800": 0.30, + "810": 0.30, + "820": 0.30, + "830": 0.30, + "840": 0.30, + "850": 0.30, + "860": 0.30, + "870": 0.30, + "880": 0.30, + "890": 0.30, + "900": 0.20, + "910": 0.20, + "920": 0.20, + "930": 0.20, + "940": 0.20, + "950": 0.20, + "960": 0.20, + "970": 0.20, + "980": 0.20, + "990": 0.20, + "1000": 0.20 + } +} diff --git a/CombinePdfs/scripts/mssm_hybrid_grid.json b/CombinePdfs/scripts/mssm_hybrid_grid.json index 24f664f5895..327b9869eab 100644 --- a/CombinePdfs/scripts/mssm_hybrid_grid.json +++ b/CombinePdfs/scripts/mssm_hybrid_grid.json @@ -1,8 +1,27 @@ { - "opts" : "--testStat=TEV --frequentist -d htt_mt_mssm.root --singlePoint 1.0 --saveHybridResult --clsAcc 0 --fullBToys --fork 0", + "verbose" : false, + "opts" : "--testStat=TEV --frequentist --singlePoint 1.0 --saveHybridResult --clsAcc 0 --fullBToys --fork 0", "POIs" : ["mA", "tanb"], "grids" : [ - ["500", "25", ""] + ["580:600:20", "10:30:4", ""] ], - "toys_per_cycle" : 500 + "toys_per_cycle" : 500, + "min_toys" : 500, + "max_toys" : 5000, + "signif" : 3.0, + "CL" : 0.95, + "contours" : ["obs", "exp-2", "exp-1", "exp0", "exp+1", "exp+2"], + "make_plots" : true, + "plot_settings" : { + "one_sided" : false, + "model_label" : "m_{H}^{mod+}", + "poi_labels" : ["m_{A}", "tan#beta"], + "null_label" : "SM", + "alt_label" : "MSSM", + "cms_subtitle" : "Internal", + "formats" : [".pdf", ".png"] + }, + "zipfile" : "collected.zip", + "output" : "HybridNewGridMSSM.root", + "output_incomplete" : true } diff --git a/CombinePdfs/scripts/plotMorphing.py b/CombinePdfs/scripts/plotMorphing.py index 6e7ab8b6042..6ecf7927e96 100755 --- a/CombinePdfs/scripts/plotMorphing.py +++ b/CombinePdfs/scripts/plotMorphing.py @@ -3,7 +3,7 @@ import ROOT import math from functools import partial -import Tools.Plotting.plotting as plot +import CombineHarvester.CombineTools.plotting as plot import os.path import bisect @@ -27,7 +27,7 @@ # folder = 'htt_mt_1_8TeV_ggH_morph' file = 'htt_mt_mssm_demo.root' -folder = 'htt_mt_8_8TeV_bbA_morph' +folder = 'htt_mt_8_8TeV_bbH_Htautau_morph' fin = ROOT.TFile(file) ROOT.gDirectory.cd(folder) @@ -71,4 +71,4 @@ legend.Draw() canv.Print('.pdf') - canv.Print('.png') \ No newline at end of file + canv.Print('.png') diff --git a/CombinePdfs/src/MorphFunctions.cc b/CombinePdfs/src/MorphFunctions.cc index 5487652224e..483a5e319d4 100644 --- a/CombinePdfs/src/MorphFunctions.cc +++ b/CombinePdfs/src/MorphFunctions.cc @@ -15,10 +15,12 @@ namespace ch { + //! 
[part1] void BuildRooMorphing(RooWorkspace& ws, CombineHarvester& cb, std::string const& bin, std::string const& process, RooAbsReal& mass_var, std::string norm_postfix, bool allow_morph, bool verbose, bool force_template_limit, TFile * file) { + //! [part1] // To keep the code concise we'll make some using-declarations here using std::set; using std::vector; @@ -47,6 +49,7 @@ void BuildRooMorphing(RooWorkspace& ws, CombineHarvester& cb, // turn into a RooMorphingPdf CombineHarvester cb_bp = cb.cp().bin({bin}).process({process}); + //! [part2] // Get a vector of the mass values vector<string> m_str_vec = Set2Vec(cb_bp.SetFromProcs( std::mem_fn(&ch::Process::mass))); @@ -67,6 +70,7 @@ void BuildRooMorphing(RooWorkspace& ws, CombineHarvester& cb, } // So, we have m mass points to consider unsigned m = m_vec.size(); + //! [part2] // ss = "shape systematic" // Make a list of the names of shape systematics affecting this process @@ -149,6 +153,7 @@ void BuildRooMorphing(RooWorkspace& ws, CombineHarvester& cb, } } + //! [part3] // We need to build a RooArgList of the vertical morphing parameters for the // vertical-interpolation pdf - this will be the same for each mass point so // we only build it once @@ -196,6 +201,7 @@ void BuildRooMorphing(RooWorkspace& ws, CombineHarvester& cb, } } } + //! [part3] // Summarise the info on the shape systematics and scale factors if (verbose) { @@ -328,6 +334,12 @@ void BuildRooMorphing(RooWorkspace& ws, CombineHarvester& cb, // Store the uncertainty ("kappa") values for the shape systematics ss_k_hi_arr[ssi][mi] = ss_arr[ssi][mi]->value_u(); ss_k_lo_arr[ssi][mi] = ss_arr[ssi][mi]->value_d(); + // For the normalisation we scale the kappa instead of putting the scaling + // parameter as the variable + if (std::fabs(ss_scale_arr[ssi] - 1.0) > 1E-6) { + ss_k_hi_arr[ssi][mi] = std::pow(ss_arr[ssi][mi]->value_u(), ss_scale_arr[ssi]); + ss_k_lo_arr[ssi][mi] = std::pow(ss_arr[ssi][mi]->value_d(), ss_scale_arr[ssi]); + } } // And now the uncertainty values for the lnN systematics that vary with mass // We'll force these to be asymmetric even if they're not @@ -404,15 +416,17 @@ void BuildRooMorphing(RooWorkspace& ws, CombineHarvester& cb, std::cout << "\n"; } // Create the 1D spline directly from the rate array + //! [part4] RooSpline1D rate_spline("interp_rate_"+key, "", mass_var, force_template_limit ? m+2 : m, force_template_limit ? new_m_vec.data() : m_vec.data(), force_template_limit ? new_rate_arr.data() : rate_arr.data(), interp); + //! [part4] if (file) { TGraph tmp(m, m_vec.data(), rate_arr.data()); - gDirectory->WriteTObject(&tmp, "interp_rate_"+key); + file->WriteTObject(&tmp, "interp_rate_"+key); } // Collect all terms that will go into the total normalisation: // nominal * systeff_1 * systeff_2 * ... * systeff_N
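// (A sketch, not part of the patch: the normalisation product assembled
// below evaluates, as a function of the mass variable m, to
//   norm(m) = rate_spline(m) * prod_i kappa_i(m)^theta_i
// where each AsymPow factor picks kappa_hi_i(m) or kappa_lo_i(m) depending
// on the sign of the corresponding nuisance parameter theta_i.)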
@@ -427,16 +441,16 @@ void BuildRooMorphing(RooWorkspace& ws, CombineHarvester& cb, ss_k_lo_arr[ssi].origin(), interp); if (file) { TGraph tmp_hi(m, m_vec.data(), ss_k_hi_arr[ssi].origin()); - gDirectory->WriteTObject(&tmp_hi, "spline_hi_" + key + "_" + ss_vec[ssi]); + file->WriteTObject(&tmp_hi, "spline_hi_" + key + "_" + ss_vec[ssi]); TGraph tmp_lo(m, m_vec.data(), ss_k_lo_arr[ssi].origin()); - gDirectory->WriteTObject(&tmp_lo, "spline_lo_" + key + "_" + ss_vec[ssi]); + file->WriteTObject(&tmp_lo, "spline_lo_" + key + "_" + ss_vec[ssi]); } // Then build the AsymPow object for each systematic as a function of the // kappas and the nuisance parameter ss_asy_arr[ssi] = std::make_shared<AsymPow>("systeff_" + key + "_" + ss_vec[ssi], "", *(ss_spl_lo_arr[ssi]), *(ss_spl_hi_arr[ssi]), - *(reinterpret_cast<RooRealVar *>(ss_list.at(ssi)))); + *(ss_scale_var_arr[ssi])); rate_prod.add(*(ss_asy_arr[ssi])); } // Same procedure for the lms normalisation systematics: build the splines @@ -491,12 +505,19 @@ void BuildRooMorphing(RooWorkspace& ws, CombineHarvester& cb, TString vert_name = key + "_"; + // Follow what ShapeTools.py does and set the smoothing region + // to the minimum of all of the shape scales + double qrange = 1.; + for (unsigned ssi = 0; ssi < ss; ++ssi) { + if (ss_scale_arr[ssi] < qrange) qrange = ss_scale_arr[ssi]; + } + for (unsigned mi = 0; mi < m; ++mi) { // Construct it with the right binning, the right histograms and the right // scaling parameters vpdf_arr[mi] = std::make_shared<FastVerticalInterpHistPdf2>( vert_name + m_str_vec[mi] + "_vmorph", "", morph_xvar, *(list_arr[mi]), - ss_list, 1, 0); + ss_list, qrange, 0); // Add it to a list that we'll supply to the RooMorphingPdf vpdf_list.add(*(vpdf_arr[mi])); } @@ -509,16 +530,20 @@ void BuildRooMorphing(RooWorkspace& ws, CombineHarvester& cb, // allow_morph: if false will just evaluate to the closest pdf in mass // data_hist.GetXaxis(): The original (non-uniform) target binning // proc_hist.GetXaxis(): The original (non-uniform) morphing binning + //! [part5] RooMorphingPdf morph_pdf(morph_name, "", xvar, mass_var, vpdf_list, m_vec, allow_morph, *(data_hist.GetXaxis()), *(proc_hist.GetXaxis())); + //! [part5] // And we can make the final normalisation product // The value of norm_postfix is very important. text2workspace will only look // for a term with the pdf name + "_norm". But it might be that the user wants // to add even more terms to this total normalisation, so we give them the option // of using some other suffix. + //! [part6] RooProduct morph_rate(morph_name + "_" + TString(norm_postfix), "", rate_prod); + //! [part6]
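// (An illustrative note: with the default norm_postfix of "norm", a morphing
// pdf named e.g. "htt_mt_8_8TeV_bbH_Htautau_morph" is automatically paired
// by text2workspace.py with the "htt_mt_8_8TeV_bbH_Htautau_morph_norm"
// product built here.)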
// Dump even more plots if (file) MakeMorphDebugPlots(&morph_pdf, &mass_var, m_vec, file, &data_hist); diff --git a/CombineTools/bin/BuildFile.xml b/CombineTools/bin/BuildFile.xml index 55c54525124..81d58c34932 100644 --- a/CombineTools/bin/BuildFile.xml +++ b/CombineTools/bin/BuildFile.xml @@ -1,20 +1,20 @@ - - + + - + @@ -22,7 +22,6 @@ - diff --git a/CombineTools/bin/PostFitShapesFromWorkspace.cpp b/CombineTools/bin/PostFitShapesFromWorkspace.cpp new file mode 100644 index 00000000000..1f3360c1277 --- /dev/null +++ b/CombineTools/bin/PostFitShapesFromWorkspace.cpp @@ -0,0 +1,280 @@ +#include <map> +#include "boost/program_options.hpp" +#include "boost/format.hpp" +#include "TSystem.h" +#include "CombineHarvester/CombineTools/interface/CombineHarvester.h" +#include "CombineHarvester/CombineTools/interface/ParseCombineWorkspace.h" +#include "CombineHarvester/CombineTools/interface/TFileIO.h" +#include "CombineHarvester/CombineTools/interface/Logging.h" + +namespace po = boost::program_options; + +using namespace std; + +int main(int argc, char* argv[]) { + // Need this to read combine workspaces + gSystem->Load("libHiggsAnalysisCombinedLimit"); + + string datacard = ""; + string workspace = ""; + string fitresult = ""; + string mass = ""; + bool postfit = false; + bool sampling = false; + string output = ""; + bool factors = false; + unsigned samples = 500; + std::string freeze_arg = ""; + + po::options_description help_config("Help"); + help_config.add_options() + ("help,h", "produce help message"); + + po::options_description config("Configuration"); + config.add_options() + ("workspace,w", + po::value<string>(&workspace)->required(), + "The input workspace-containing file [REQUIRED]") + ("datacard,d", + po::value<string>(&datacard), + "The input datacard, only used for rebinning") + ("output,o", + po::value<string>(&output)->required(), + "Name of the output root file to create [REQUIRED]") + ("fitresult,f", + po::value<string>(&fitresult)->default_value(fitresult), + "Path to a RooFitResult, only needed for postfit") + ("mass,m", + po::value<string>(&mass)->default_value(""), + "Signal mass point of the input datacard") + ("postfit", + po::value<bool>(&postfit) + ->default_value(postfit)->implicit_value(true), + "Create post-fit histograms in addition to pre-fit") + ("sampling", + po::value<bool>(&sampling)->default_value(sampling)->implicit_value(true), + "Use the cov. matrix sampling method for the post-fit uncertainty") + ("samples", + po::value<unsigned>(&samples)->default_value(samples), + "Number of samples to make in each evaluate call") + ("print", + po::value<bool>(&factors)->default_value(factors)->implicit_value(true), + "Print tables of background shifts and relative uncertainties") + ("freeze", + po::value<string>(&freeze_arg)->default_value(freeze_arg), + "Format PARAM1,PARAM2=X,PARAM3=Y where the values X and Y are optional"); + + po::variables_map vm; + + // First check if the user has set the "--help" or "-h" option, and if so + // just print the usage information and quit + po::store(po::command_line_parser(argc, argv) .options(help_config).allow_unregistered().run(), vm); + po::notify(vm); + if (vm.count("help")) { + cout << config << "\nExample usage:\n"; + cout << "PostFitShapesFromWorkspace -d htt_mt_125.txt -w htt_mt_125.root -o htt_mt_125_shapes.root -m 125 " "-f mlfit.root:fit_s --postfit --sampling --print\n"; + return 1; + } + + // Parse the main config options + po::store(po::command_line_parser(argc, argv).options(config).run(), vm); + po::notify(vm); + + // This sanity check has to come after the parsing above - before that point + // the two flags are still at their default values + if (sampling && !postfit) { + throw logic_error( "Can't sample the fit covariance matrix for pre-fit!"); + } + + TFile infile(workspace.c_str()); + + RooWorkspace *ws = dynamic_cast<RooWorkspace *>(gDirectory->Get("w")); + + if (!ws) { + throw std::runtime_error( FNERROR("Could not locate workspace in input file")); + } + + // Create CH instance and parse the workspace + ch::CombineHarvester cmb; + cmb.SetFlag("workspaces-use-clone", true); + ch::ParseCombineWorkspace(cmb, *ws, "ModelConfig", "data_obs", false); + + vector<string> freeze_vec; + boost::split(freeze_vec, freeze_arg, boost::is_any_of(",")); + for (auto const& item : freeze_vec) { + vector<string> parts; + boost::split(parts, item, boost::is_any_of("=")); + if (parts.size() == 1) { + ch::Parameter *par = cmb.GetParameter(parts[0]); + if (par) par->set_frozen(true); + } else { + if (parts.size() == 2) { + ch::Parameter *par = cmb.GetParameter(parts[0]); + if (par) { + par->set_val(boost::lexical_cast<double>(parts[1])); + par->set_frozen(true); + } + } + } + } + // cmb.GetParameter("r")->set_frozen(true); + + ch::CombineHarvester cmb_card; + if (datacard != "") { + cmb_card.ParseDatacard(datacard, "", "", "", 0, mass); + } + + // Drop any process that has no hist/data/pdf + cmb.FilterProcs([&](ch::Process * proc) { + bool no_shape = !proc->shape() && !proc->data() && !proc->pdf(); + if (no_shape) { + cout << "Filtering process with no shape:\n"; + cout << ch::Process::PrintHeader << *proc << "\n"; + } + return no_shape; + }); + + auto bins = cmb.bin_set(); + + TFile outfile(output.c_str(), "RECREATE"); + TH1::AddDirectory(false); + + // Create a map of maps for storing histograms in the form: + // pre_shapes[bin][process] + map<string, map<string, TH1F>> pre_shapes; + // We can always do the prefit version, + // Loop through the bins writing the shapes to the output file + for (auto bin : bins) { + ch::CombineHarvester cmb_bin = cmb.cp().bin({bin}); + // This next line is a temporary fix for models with parametric RooFit pdfs + // - we try and set the number of bins to evaluate the pdf to be the same as + // the number of bins in data + cmb_bin.SetPdfBins(cmb_bin.GetObservedShape().GetNbinsX()); + + // Fill the data and process histograms + pre_shapes[bin]["data_obs"] = cmb_bin.GetObservedShape(); + for (auto proc : cmb_bin.process_set()) { + pre_shapes[bin][proc] = + cmb_bin.cp().process({proc}).GetShapeWithUncertainty(); + } + + // Then fill the total signal and total bkg hists
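// (Illustrative layout, not from the patch: after this loop the output file
// contains one directory per datacard bin, e.g.
//   <bin>_prefit/data_obs, <bin>_prefit/<process>,
//   <bin>_prefit/TotalBkg, <bin>_prefit/TotalSig, <bin>_prefit/TotalProcs
// and the post-fit loop further down mirrors this in <bin>_postfit.)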
pre_shapes[bin]["TotalBkg"] = + cmb_bin.cp().backgrounds().GetShapeWithUncertainty(); + pre_shapes[bin]["TotalSig"] = + cmb_bin.cp().signals().GetShapeWithUncertainty(); + pre_shapes[bin]["TotalProcs"] = + cmb_bin.cp().GetShapeWithUncertainty(); + + if (datacard != "") { + TH1F ref = cmb_card.cp().bin({bin}).GetObservedShape(); + for (auto & it : pre_shapes[bin]) { + it.second = ch::RestoreBinning(it.second, ref); + } + } + + // Can write these straight into the output file + outfile.cd(); + for (auto& iter : pre_shapes[bin]) { + ch::WriteToTFile(&(iter.second), &outfile, bin + "_prefit/" + iter.first); + } + } + + // Print out the relative uncert. on the bkg + if (factors) { + cout << boost::format("%-25s %-32s\n") % "Bin" % + "Total relative bkg uncert. (prefit)"; + cout << string(58, '-') << "\n"; + for (auto bin : bins) { + ch::CombineHarvester cmb_bin = cmb.cp().bin({bin}); + double rate = cmb_bin.cp().backgrounds().GetRate(); + double err = cmb_bin.cp().backgrounds().GetUncertainty(); + cout << boost::format("%-25s %-10.5f\n") % bin % + (rate > 0. ? (err / rate) : 0.); + } + } + + // Now we can do the same again but for the post-fit model + if (postfit) { + // Get the fit result and update the parameters to the post-fit model + RooFitResult res = ch::OpenFromTFile(fitresult); + cmb.UpdateParameters(res); + + // Calculate the post-fit fractional background uncertainty in each bin + if (factors) { + cout << boost::format("\n%-25s %-32s\n") % "Bin" % + "Total relative bkg uncert. (postfit)"; + cout << string(58, '-') << "\n"; + for (auto bin : bins) { + ch::CombineHarvester cmb_bkgs = cmb.cp().bin({bin}).backgrounds(); + double rate = cmb_bkgs.GetRate(); + double err = sampling ? cmb_bkgs.GetUncertainty(res, samples) + : cmb_bkgs.GetUncertainty(); + cout << boost::format("%-25s %-10.5f\n") % bin % + (rate > 0. ? (err / rate) : 0.); + } + } + + map> post_shapes; + // As we calculate the post-fit yields can also print out the post/pre scale + // factors + if (factors) { + cout << boost::format("\n%-25s %-20s %-10s\n") % "Bin" % "Process" % + "Scale factor"; + cout << string(58, '-') << "\n"; + } + + for (auto bin : bins) { + ch::CombineHarvester cmb_bin = cmb.cp().bin({bin}); + post_shapes[bin]["data_obs"] = cmb_bin.GetObservedShape(); + for (auto proc : cmb_bin.process_set()) { + auto cmb_proc = cmb_bin.cp().process({proc}); + // Method to get the shape uncertainty depends on whether we are using + // the sampling method or the "wrong" method (assumes no correlations) + post_shapes[bin][proc] = + sampling ? cmb_proc.GetShapeWithUncertainty(res, samples) + : cmb_proc.GetShapeWithUncertainty(); + // Print out the post/pre scale factors + if (factors) { + TH1 const& pre = pre_shapes[bin][proc]; + TH1 const& post = post_shapes[bin][proc]; + cout << boost::format("%-25s %-20s %-10.5f\n") % bin % proc % + (pre.Integral() > 0. ? (post.Integral() / pre.Integral()) + : 1.0); + } + } + // Fill the total sig. and total bkg. hists + auto cmb_bkgs = cmb_bin.cp().backgrounds(); + auto cmb_sigs = cmb_bin.cp().signals(); + post_shapes[bin]["TotalBkg"] = + sampling ? cmb_bkgs.GetShapeWithUncertainty(res, samples) + : cmb_bkgs.GetShapeWithUncertainty(); + post_shapes[bin]["TotalSig"] = + sampling ? cmb_sigs.GetShapeWithUncertainty(res, samples) + : cmb_sigs.GetShapeWithUncertainty(); + post_shapes[bin]["TotalProcs"] = + sampling ? 
+ + if (datacard != "") { + TH1F ref = cmb_card.cp().bin({bin}).GetObservedShape(); + for (auto & it : post_shapes[bin]) { + it.second = ch::RestoreBinning(it.second, ref); + } + } + + outfile.cd(); + // Write the post-fit histograms + for (auto & iter : post_shapes[bin]) { + ch::WriteToTFile(&(iter.second), &outfile, + bin + "_postfit/" + iter.first); + } + } + } + // And we're done! + outfile.Close(); + return 0; +} + diff --git a/CombineTools/bin/SMLegacyExample.cpp b/CombineTools/bin/SMLegacyExample.cpp index 7828a025f82..00dbc9d45ed 100644 --- a/CombineTools/bin/SMLegacyExample.cpp +++ b/CombineTools/bin/SMLegacyExample.cpp @@ -134,6 +134,7 @@ int main() { } } + //! [part1] cout << ">> Scaling signal process rates...\n"; map<string, TGraph> xs; // Get the table of H->tau tau BRs vs mass @@ -149,6 +150,7 @@ int main() { }); } } + //! [part1] xs["hww_over_htt"] = ch::TGraphFromTable(input_dir+"/xsecs_brs/hww_over_htt.txt", "mH", "ratio"); for (string const& e : {"7TeV", "8TeV"}) { for (string const& p : {"ggH", "qqH"}) { @@ -162,10 +164,12 @@ int main() { cout << ">> Merging bin errors and generating bbb uncertainties...\n"; + //! [part2] auto bbb = ch::BinByBinFactory() .SetAddThreshold(0.1) .SetMergeThreshold(0.5) .SetFixNorm(true); + //! [part2] ch::CombineHarvester cb_et = cb.cp().channel({"et"}); bbb.MergeAndAdd(cb_et.cp().era({"7TeV"}).bin_id({1, 2}).process({"ZL", "ZJ", "QCD", "W"}), cb); @@ -173,7 +177,9 @@ bbb.MergeAndAdd(cb_et.cp().era({"8TeV"}).bin_id({1, 2}).process({"ZL", "ZJ", "QCD", "W"}), cb); bbb.MergeAndAdd(cb_et.cp().era({"8TeV"}).bin_id({3, 5}).process({"W"}), cb); bbb.MergeAndAdd(cb_et.cp().era({"7TeV"}).bin_id({6}).process({"ZL", "ZJ", "W", "ZTT"}), cb); + //! [part3] bbb.MergeAndAdd(cb_et.cp().era({"8TeV"}).bin_id({6}).process({"ZL", "ZJ", "W"}), cb); + //! [part3] bbb.MergeAndAdd(cb_et.cp().era({"8TeV"}).bin_id({7}).process({"ZL", "ZJ", "W", "ZTT"}), cb); ch::CombineHarvester cb_mt = cb.cp().channel({"mt"}); @@ -201,6 +207,7 @@ int main() { cout << ">> Setting standardised bin names...\n"; ch::SetStandardBinNames(cb); + //! [part4] VString droplist = ch::ParseFileLines( aux_pruning + "uncertainty-pruning-drop-131128-sm.txt"); cout << ">> Droplist contains " << droplist.size() << " entries\n"; @@ -213,6 +220,7 @@ auto post_drop = cb.syst_name_set(); cout << ">> Systematics dropped: " << pre_drop.size() - post_drop.size() << "\n"; + //! [part4] // The following is an example of duplicating existing objects and modifying // them in the process. Here we clone all mH=125 signals, adding "_SM125" to
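A minimal usage sketch of the SetFlag/GetFlag interface declared in the next diff; the flag name is the one set by PostFitShapesFromWorkspace.cpp above, everything else is assumed for illustration:

    ch::CombineHarvester cmb;
    cmb.SetFlag("workspaces-use-clone", true);             // creates the flag if missing
    bool use_clone = cmb.GetFlag("workspaces-use-clone");  // read it back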
diff --git a/CombineTools/interface/CombineHarvester.h b/CombineTools/interface/CombineHarvester.h index 133ab4504ad..a926d4b5658 100644 --- a/CombineTools/interface/CombineHarvester.h +++ b/CombineTools/interface/CombineHarvester.h @@ -5,6 +5,7 @@ #include #include #include +#include <unordered_map> #include #include #include @@ -59,6 +60,14 @@ class CombineHarvester { CombineHarvester(CombineHarvester&& other); CombineHarvester& operator=(CombineHarvester other); + /** + * Set a named flag value + * + * Creates the flag if it doesn't already exist + */ + void SetFlag(std::string const& flag, bool const& value); + bool GetFlag(std::string const& flag) const; + /** * Creates and returns a shallow copy of the CombineHarvester instance */ @@ -404,7 +413,7 @@ class CombineHarvester { std::map<std::string, std::shared_ptr<Parameter>> params_; std::map<std::string, std::shared_ptr<RooWorkspace>> wspaces_; - std::map<std::string, bool> flags_; + std::unordered_map<std::string, bool> flags_; // --------------------------------------------------------------- // typedefs diff --git a/CombineTools/interface/HttSystematics.h b/CombineTools/interface/HttSystematics.h index 37c1301bee8..69d871b14f1 100644 --- a/CombineTools/interface/HttSystematics.h +++ b/CombineTools/interface/HttSystematics.h @@ -28,6 +28,10 @@ void AddMSSMUpdateSystematics_mm(CombineHarvester& cb); void AddMSSMUpdateSystematics_tt(CombineHarvester& cb, CombineHarvester src); void AddMSSMUpdateSystematics_tt(CombineHarvester& cb); +// Run2 MSSM analysis systematics +// Implemented in src/HttSystematics_MSSMRun2.cc +void AddMSSMRun2Systematics(CombineHarvester& cb); + // Hhh systematics // Implemented in src/HhhSystematics.cc void AddSystematics_hhh_et_mt(CombineHarvester& cb, CombineHarvester src); diff --git a/CombineTools/interface/Logging.h b/CombineTools/interface/Logging.h index c3cb6aaa6c6..056afd42ffe 100644 --- a/CombineTools/interface/Logging.h +++ b/CombineTools/interface/Logging.h @@ -6,7 +6,7 @@ namespace ch { -#define FNERROR(x) FnError(x, __FILE__, __LINE__, __PRETTY_FUNCTION__) +#define FNERROR(x) ch::FnError(x, __FILE__, __LINE__, __PRETTY_FUNCTION__) #define LOGLINE(x, y) LogLine(x, __func__, y) diff --git a/CombineTools/interface/Parameter.h b/CombineTools/interface/Parameter.h index b6243c44e73..f083912df0a 100644 --- a/CombineTools/interface/Parameter.h +++ b/CombineTools/interface/Parameter.h @@ -21,6 +21,7 @@ class Parameter { std::string const& name() const { return name_; } void set_val(double const& val) { + if (frozen_) return; val_ = val; for (unsigned i = 0; i < vars_.size(); ++i) { vars_[i]->setVal(val); } @@ -40,7 +41,10 @@ void set_range_d(double const& range_d) { range_d_ = range_d; } double range_d() const { return range_d_; } - + + void set_frozen(bool const& frozen) { frozen_ = frozen; } + bool frozen() const { return frozen_; } + std::vector<RooRealVar *> & vars() { return vars_; } friend std::ostream& operator<< (std::ostream &out, Parameter &val); @@ -53,6 +57,7 @@ double err_d_; double range_u_; double range_d_; + bool frozen_; std::vector<RooRealVar *> vars_; friend void swap(Parameter& first, Parameter& second); }; diff --git a/CombineTools/python/combine/CombineToolBase.py b/CombineTools/python/combine/CombineToolBase.py index 106b68f084e..2c7fc56ddf3 100755 --- a/CombineTools/python/combine/CombineToolBase.py +++ b/CombineTools/python/combine/CombineToolBase.py @@ -151,12 +151,26 @@ def put_back_arg(self, arg_name, target_name): self.passthru.extend([target_name, getattr(self.args, arg_name)]) delattr(self.args, arg_name)
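# Usage sketch for the extract_arg helper added below (the option string is
# illustrative, not from the patch):
#   val, rest = self.extract_arg('--freezeNuisances',
#                                '--freezeNuisances mA,tanb --fork 0')
#   # val == 'mA,tanb', rest == '--fork 0'
# Note that '--opt=value' is first normalised to '--opt value'.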
+ def extract_arg(self, arg, args_str): + args_str = args_str.replace(arg+'=', arg+' ') + args = args_str.split() + if arg in args: + idx = args.index(arg) + assert idx < len(args) - 1 + val = args[idx+1] + del args[idx:idx+2] + return val, (' '.join(args)) + else: + return None, args_str + def create_job_script(self, commands, script_filename, do_log = False): fname = script_filename logname = script_filename.replace('.sh', '.log') with open(fname, "w") as text_file: - if self.job_mode=='lxbatch': text_file.write(JOB_PREFIX) - elif self.job_mode=='NAF': text_file.write(JOB_NAF_PREFIX) + if self.job_mode == 'NAF': + text_file.write(JOB_NAF_PREFIX) + else: + text_file.write(JOB_PREFIX) for i, command in enumerate(commands): tee = 'tee' if i == 0 else 'tee -a' log_part = '\n' @@ -223,14 +237,15 @@ def flush_queue(self): outscript.write(CRAB_PREFIX) jobs = 0 wsp_files = set() - for line in self.job_queue: + for i, j in enumerate(range(0, len(self.job_queue), self.merge)): jobs += 1 - newline = line - if line.startswith('combine'): newline = line.replace('combine', './combine', 1) - wsp = self.extract_workspace_arg(newline.split()) - wsp_files.add(wsp) outscript.write('\nif [ $1 -eq %i ]; then\n' % jobs) - outscript.write(' ' + newline.replace(wsp, os.path.basename(wsp)) + '\n') + for line in self.job_queue[j:j + self.merge]: + newline = line + if line.startswith('combine'): newline = line.replace('combine', './combine', 1) + wsp = str(self.extract_workspace_arg(newline.split())) + wsp_files.add(wsp) + outscript.write(' ' + newline.replace(wsp, os.path.basename(wsp)) + '\n') outscript.write('fi') outscript.write(CRAB_POSTFIX) outscript.close() diff --git a/CombineTools/python/combine/EnhancedCombine.py b/CombineTools/python/combine/EnhancedCombine.py index 90b7a1d9054..42f8ad30446 100755 --- a/CombineTools/python/combine/EnhancedCombine.py +++ b/CombineTools/python/combine/EnhancedCombine.py @@ -1,9 +1,10 @@ import itertools import CombineHarvester.CombineTools.combine.utils as utils +import json +#import os from CombineHarvester.CombineTools.combine.opts import OPTS from CombineHarvester.CombineTools.combine.CombineToolBase import CombineToolBase -from CombineHarvester.CombineTools.mssm_multidim_fit_boundaries import mssm_multidim_fit_boundaries as bounds class EnhancedCombine(CombineToolBase): @@ -22,7 +23,7 @@ def attach_intercept_args(self, group): group.add_argument( '--singlePoint', help='Supports range strings for multiple points to test, uses the same format as the --mass argument') group.add_argument( - '--Pmodel', help='Set range of physical parameters depending on the given mass and given physics model') + '--boundlist', help='Name of a JSON file defining the ranges of the physical parameters for each mass and physics model') group.add_argument('--name', '-n', default='.Test', help='Name used to label the combine output file, can be modified by other options') @@ -56,15 +57,24 @@ def run_method(self): subbed_vars[('SINGLEPOINT',)] = [(pval,) for pval in single_points] self.passthru.extend(['--singlePoint', '%(SINGLEPOINT)s']) self.args.name += '.POINT.%(SINGLEPOINT)s' - - if self.args.Pmodel is not None: - subbed_vars = {} - mass_vals = utils.split_vals(self.args.mass) - if self.args.Pmodel == 'bbH' : t=1 - elif self.args.Pmodel == 'ggH' : t=0 - else : exit("Physic model '%s' not recognized" % self.args.Pmodel) - subbed_vars[('MASS', 'MODEL', 'BOUND')] = [(mval, self.args.Pmodel, bounds["ggH-bbH", mval][t]) for mval in mass_vals] - self.passthru.extend(['--setPhysicsModelParameters', 'r_%(MODEL)s=r_%(MODEL)s', '--setPhysicsModelParameterRanges', 'r_%(MODEL)s=0,%(BOUND)s', '--freezeNuisances MH'])
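# A sketch of how the new --boundlist option is consumed (using the
# mssm_ggh_boundaries.json file added earlier in this patch): given
#   {"r_ggH": {"90": 80.00, "100": 35.00, ...}}
# and -m 90, the loop below produces the extra combine arguments
#   --setPhysicsModelParameterRanges r_ggH=0,80.0
#   --setPhysicsModelParameters r_ggH=0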
+ + if self.args.boundlist is not None: + subbed_vars = {} + mass_vals = utils.split_vals(self.args.mass) + with open(self.args.boundlist) as json_file: + bnd = json.load(json_file) + command1=['' for i in mass_vals] + command2=['' for i in mass_vals] + i=0 + for mval in mass_vals: + for model in bnd: + if not (command1[i]==''): command1[i]=command1[i]+':' + if not (command2[i]==''): command2[i]=command2[i]+',' + command1[i]=command1[i]+model+'=0,'+str(bnd[model][mval]) + command2[i]=command2[i]+model+'=0' #'='+str(float(bnd[model][mval])/2.0) + i+=1 + subbed_vars[('MASS', 'MODELBOUNDONE', 'MODELBOUNDTWO')] = [(mass_vals[i], command1[i], command2[i]) for i in range(len(mass_vals))] + self.passthru.extend(['--setPhysicsModelParameterRanges', '%(MODELBOUNDONE)s']) + self.passthru.extend(['--setPhysicsModelParameters', '%(MODELBOUNDTWO)s']) if self.args.points is not None: self.passthru.extend(['--points', self.args.points]) @@ -76,10 +86,19 @@ def run_method(self): start = 0 ranges = [] while (start + (split - 1)) <= points: + # filename = "higgsCombine"+self.args.name+".POINTS."+str(start)+"."+str(start+(split-1))+".MultiDimFit.mH"+str(self.args.mass)+".root" + # if (not os.path.isfile(filename)) or (os.path.getsize(filename)<1024): + # # Send job, if the file it's supposed to create doesn't exist yet + # # or if the file is empty because the previous job didn't finish ranges.append((start, start + (split - 1))) start += split if start < points: + # filename = "higgsCombine"+self.args.name+".POINTS."+str(start)+"."+str(points - 1)+".MultiDimFit.mH"+str(self.args.mass)+".root" + # if (not os.path.isfile(filename)) or (os.path.getsize(filename)<1024): ranges.append((start, points - 1)) + #if (ranges == []): + # print "No jobs were created; All files already exist" + # exit() subbed_vars[('P_START', 'P_END')] = [(r[0], r[1]) for r in ranges] self.passthru.extend( ['--firstPoint %(P_START)s --lastPoint %(P_END)s']) diff --git a/CombineTools/python/combine/ImpactsFromScans.py b/CombineTools/python/combine/ImpactsFromScans.py index 0c7e16c1970..ba92cef5666 100755 --- a/CombineTools/python/combine/ImpactsFromScans.py +++ b/CombineTools/python/combine/ImpactsFromScans.py @@ -12,6 +12,8 @@ import ROOT from array import array from multiprocessing import Pool +from numpy import matrix +from numpy.linalg import solve import CombineHarvester.CombineTools.combine.utils as utils from CombineHarvester.CombineTools.combine.opts import OPTS @@ -54,6 +56,10 @@ def attach_args(self, group): help=('json file and dictionary containing the fit values, of form file:key1:key2..')) group.add_argument('--do-fits', action='store_true', help=('Actually do the fits')) + group.add_argument('--cov-method', choices=['full', 'asymm'], default='full') + group.add_argument('--cor-method', choices=['full', 'asymm', 'average'], default='full') + group.add_argument('--asymm-vals', default='') + group.add_argument('--do-generic', action='store_true') def run_method(self): mass = self.args.mass self.put_back_arg('mass', '-m') @@ -62,13 +68,13 @@ def run_method(self): js = json.load(jsonfile) for key in in_json[1:]: js = js[key] - POIs = [str(x) for x in js.keys()] + POIs = sorted([str(x) for x in js.keys()]) print POIs for POI in POIs: if not self.args.do_fits: break arg_str = '-M MultiDimFit --algo fixed --saveInactivePOI 1 --floatOtherPOIs 1 -P %s' % POI - cmd_hi = arg_str + ' -n %s --setPhysicsModelParametersForGrid %s=%f' % (self.args.name+'.%s.Hi'%POI, POI,
js[POI]["Val"] + js[POI]["ErrorHi"]) - cmd_lo = arg_str + ' -n %s --setPhysicsModelParametersForGrid %s=%f' % (self.args.name+'.%s.Lo'%POI, POI, js[POI]["Val"] + js[POI]["ErrorLo"]) + cmd_hi = arg_str + ' -n %s --fixedPointPOIs %s=%f' % (self.args.name+'.%s.Hi'%POI, POI, js[POI]["Val"] + js[POI]["ErrorHi"]) + cmd_lo = arg_str + ' -n %s --fixedPointPOIs %s=%f' % (self.args.name+'.%s.Lo'%POI, POI, js[POI]["Val"] + js[POI]["ErrorLo"]) self.job_queue.append('combine %s %s' % (cmd_hi, ' '.join(self.passthru))) self.job_queue.append('combine %s %s' % (cmd_lo, ' '.join(self.passthru))) self.flush_queue() @@ -84,25 +90,185 @@ def run_method(self): res_lo = self.get_fixed_results(name_lo, POIs) for fPOI in POIs: res[POI][fPOI] = [res_lo[fPOI], js[fPOI]["Val"], res_hi[fPOI]] - print res + #print res cor = ROOT.TMatrixDSym(len(POIs)) cov = ROOT.TMatrixDSym(len(POIs)) + bf_vals = {x.split('=')[0]: float(x.split('=')[1]) for x in self.args.asymm_vals.split(',') if x != ''} + + xvars = [] + muvars = [] + covvars = [] + xvec = ROOT.RooArgList() + mu = ROOT.RooArgList() + for POI in POIs: + xvars.append(ROOT.RooRealVar(POI, '', js[POI]["Val"], -100, 100)) + muvars.append(ROOT.RooRealVar(POI+'_In', '', js[POI]["Val"], -100, 100)) + muvars[-1].setConstant(True) + xvec.add(xvars[-1]) + mu.add(muvars[-1]) + + print '-----------------------------------------------------------' + print 'Diagonal Covariance' + print '-----------------------------------------------------------' + print '%-30s %-7s %-7s %-7s %-7s %-7s' % ('POI', 'Val', 'Sym', 'Hi', 'Lo', '(Hi-Lo)/(Hi+Lo)') for i,p in enumerate(POIs): cor[i][i] = ROOT.Double(1.) # diagonal correlation is 1 - cov[i][i] = ROOT.Double(pow((res[p][p][2] - res[p][p][0])/2.,2.)) # symmetrized error + d1 = res[p][p][1] + d21 = res[p][p][2] - res[p][p][1] + d10 = res[p][p][1] - res[p][p][0] + d20 = (res[p][p][2] - res[p][p][0])/2. + vlo = js[p]["ValidErrorLo"] + vhi = js[p]["ValidErrorHi"] + print '%-30s %+.3f %+.3f %+.3f %+.3f %+.3f' % (p, d1, d20, d21, d10, (d21-d10)/(d21+d10)) + covv = 0. + if self.args.cov_method == 'full': + covv = d20 + elif self.args.cov_method == 'asymm': + bf_val = 1. + for x in bf_vals: + if x in p: + bf_val = bf_vals[x] + print 'Using %s=%g' % (x, bf_vals[x]) + covv = d21 if bf_val >= d1 else d10 + #if p == 'mu_XS_ZH_BR_WW': covv = covv * 0.90 + #if p == 'mu_XS_ttHtH_BR_tautau': covv = 6.3 + if not vlo: + print 'No ValidErrorLo, using d21' + covv = d21 + print 'Chosen: %+.3f' % covv + cov[i][i] = ROOT.Double(pow(covv,2.)) + + x1 = -1. * d10 + x2 = 0. + x3 = d21 + x4 = js[p]['2sig_ErrorHi'] + y1 = d10 * d10 + y2 = d20 * d20 + y3 = d21 * d21 + y4 = (x4 / 2.) * (x4 / 2.) + if not vlo and abs(d10) < 1E-4: + x1 = -1. 
* d21 + y1 = d21*d21 + # print (x1, y1) + # print (x2, y2) + # print (x3, y3) + # print (x4, y4) + + mtx = matrix([[x1*x1, x1, 1], [x3*x3, x3, 1], [x4*x4, x4, 1]]) + yvec = matrix([[y1], [y3], [y4]]) + # print mtx + # print yvec + xres = solve(mtx, yvec) + # print xres + covvars.append(ROOT.RooFormulaVar('cov%i'%i,'', '%g*(@0-%g)*(@0-%g)+%g*(@0-%g)+%g' % (xres[0],d1,d1,xres[1],d1,xres[2]), ROOT.RooArgList(xvars[i]))) + covvars[-1].Print() + + print '-----------------------------------------------------------' + print 'Correlation' + print '-----------------------------------------------------------' + print '%-30s %-30s %-7s %-7s %-7s %-7s %-7s %-7s %-7s %-7s %-7s' % ('i', 'j', 'Val_i', 'Val_j', 'ij_Sym', 'ij_Hi', 'ij_Lo', 'ji_Sym', 'ji_Hi', 'ji_Lo', 'Sym_Asym') + cors = [] + mvals = ROOT.RooArgList() + mvals_store = [] for i,ip in enumerate(POIs): for j,jp in enumerate(POIs): - if i == j: continue - val_i = ((res[ip][jp][2] - res[ip][jp][0])/2.)/math.sqrt(cov[j][j]) - val_j = ((res[jp][ip][2] - res[jp][ip][0])/2.)/math.sqrt(cov[i][i]) + if i == j: + mvals_store.append(ROOT.RooFormulaVar('ele_%i_%i'%(i,j),'@0',ROOT.RooArgList(covvars[i]))) + mvals.add(mvals_store[-1]) + continue + # Check the scans + di_1 = res[ip][ip][1] + di_21 = res[ip][ip][2] - res[ip][ip][1] + di_10 = res[ip][ip][1] - res[ip][ip][0] + di_20 = (res[ip][ip][2] - res[ip][ip][0])/2. + cj_21 = res[ip][jp][2] - res[ip][jp][1] + cj_10 = res[ip][jp][1] - res[ip][jp][0] + cj_20 = (res[ip][jp][2] - res[ip][jp][0])/2. + vi_lo = js[ip]["ValidErrorLo"] + vi_hi = js[ip]["ValidErrorHi"] + dj_1 = res[jp][jp][1] + dj_21 = res[jp][jp][2] - res[jp][jp][1] + dj_10 = res[jp][jp][1] - res[jp][jp][0] + dj_20 = (res[jp][jp][2] - res[jp][jp][0])/2. + ci_21 = res[jp][ip][2] - res[jp][ip][1] + ci_10 = res[jp][ip][1] - res[jp][ip][0] + ci_20 = (res[jp][ip][2] - res[jp][ip][0])/2. + vj_lo = js[jp]["ValidErrorLo"] + vj_hi = js[jp]["ValidErrorHi"] + + cij_20 = ci_20/di_20 + cij_21 = ci_21/(di_21 if (ci_21 >= 0 or not vi_lo) else di_10) + cij_10 = ci_10/(di_21 if (ci_21 < 0 or not vi_lo) else di_10) + #cij_21 = ci_21/di_21 + #cij_10 = ci_10/di_21 + + cji_20 = cj_20/dj_20 + cji_21 = cj_21/(dj_21 if (cj_21 >= 0 or not vj_lo) else dj_10) + cji_10 = cj_10/(dj_21 if (cj_21 < 0 or not vj_lo) else dj_10) + #cji_21 = cj_21/dj_21 + #cji_10 = cj_10/dj_21 + + a_20 = (cij_20-cji_20)/(cij_20+cji_20) + a_21 = (cij_21-cji_21)/(cij_21+cji_21) + a_10 = (cij_10-cji_10)/(cij_10+cji_10) + + a_i = (cij_21-cij_10)/(cij_21+cij_10) + a_j = (cji_21-cji_10)/(cji_21+cji_10) + + max_c = max([abs(x) for x in [cij_20, cij_21, cij_10, cji_20, cji_21, cji_10]]) + + + line = '%-30s %-30s %+.3f %+.3f | %+.3f %+.3f %+.3f %+.3f | %+.3f %+.3f %+.3f %+.3f | %+.3f' % (ip,jp,di_1,dj_1,cij_20,cij_21,cij_10,a_i,cji_20,cji_21,cji_10,a_j,a_20) + print line + + cors.append((line, max_c)) + + val_i = 0. + val_j = 0. + if self.args.cor_method == 'full': + val_i = cij_20 + val_j = cji_20 + elif self.args.cor_method == 'average': + val_i = (cij_21+cij_10)/2. + val_j = (cji_21+cji_10)/2. + elif self.args.cor_method == 'asymm': + bf_val_i = 1. + bf_val_j = 1. 
+ for x in bf_vals: + if x in ip: + bf_val_i = bf_vals[x] + print 'Using %s=%g for POI i' % (x, bf_vals[x]) + if x in jp: + bf_val_j = bf_vals[x] + print 'Using %s=%g for POI j' % (x, bf_vals[x]) + + val_i = cji_21 if bf_val_i >= di_1 else cji_10 + val_j = cij_21 if bf_val_j >= dj_1 else cij_10 + if not vi_lo: + print 'No ValidErrorLo for POI i, using d21' + val_i = cji_21 + if not vj_lo: + print 'No ValidErrorLo for POI j, using d21' + val_j = cij_21 + print 'Chosen: %+.3f for val_i' % val_i + print 'Chosen: %+.3f for val_j' % val_j + correlation = (val_i+val_j)/2. # take average correlation? + #if ip == 'mu_XS_ttHtH_BR_WW' and jp == 'mu_XS_ttHtH_BR_tautau': correlation = correlation * 1.15 + #if jp == 'mu_XS_ttHtH_BR_WW' and ip == 'mu_XS_ttHtH_BR_tautau': correlation = correlation * 1.15 + #correlation = min(sorted([val_i, val_j],key=lambda x: abs(x), reverse=True)) #correlation = min(val_i,val_j, key=abs) # take the max? cor[i][j] = correlation cor[j][i] = correlation covariance = correlation * math.sqrt(cov[i][i]) * math.sqrt(cov[j][j]) cov[i][j] = covariance cov[j][i] = covariance - cor.Print() + mvals_store.append(ROOT.RooFormulaVar('ele_%i_%i'%(i,j),'%g*sqrt(@0)*sqrt(@1)'%(correlation),ROOT.RooArgList(covvars[i], covvars[j]))) + mvals.add(mvals_store[-1]) + cors.sort(key=lambda tup: tup[1], reverse=True) + for tup in cors: + print tup[0] + #cor.Print() fout = ROOT.TFile('covariance_%s.root' % self.args.name, 'RECREATE') fout.WriteTObject(cor, 'cor') h_cor = self.fix_TH2(ROOT.TH2D(cor), POIs) @@ -110,19 +276,13 @@ def run_method(self): fout.WriteTObject(cov, 'cov') h_cov = self.fix_TH2(ROOT.TH2D(cov), POIs) fout.WriteTObject(h_cov, 'h_cov') - xvars = [] - muvars = [] - xvec = ROOT.RooArgList() - mu = ROOT.RooArgList() - for POI in POIs: - xvars.append(ROOT.RooRealVar(POI, '', js[POI]["Val"], -10, 10)) - muvars.append(ROOT.RooRealVar(POI+'_In', '', js[POI]["Val"], -10, 10)) - muvars[-1].setConstant(True) - xvec.add(xvars[-1]) - mu.add(muvars[-1]) + xvec.Print('v') mu.Print('v') - pdf = ROOT.RooMultiVarGaussian('pdf', '', xvec, mu, cov) + if self.args.do_generic: + pdf = ROOT.RooGenericMultiVarGaussian('pdf', '', xvec, mu, mvals) + else: + pdf = ROOT.RooMultiVarGaussian('pdf', '', xvec, mu, cov) dat = ROOT.RooDataSet('global_obs', '', ROOT.RooArgSet(mu)) dat.add(ROOT.RooArgSet(mu)) pdf.Print() diff --git a/CombineTools/python/combine/LimitGrids.py b/CombineTools/python/combine/LimitGrids.py index ba3d9726c55..d3f1d2f54bd 100755 --- a/CombineTools/python/combine/LimitGrids.py +++ b/CombineTools/python/combine/LimitGrids.py @@ -4,12 +4,13 @@ import glob import sys import re +import zipfile +import os from math import floor from array import array import CombineHarvester.CombineTools.combine.utils as utils from CombineHarvester.CombineTools.combine.CombineToolBase import CombineToolBase -from CombineHarvester.CombineTools.mssm_multidim_fit_boundaries import mssm_multidim_fit_boundaries as bounds import CombineHarvester.CombineTools.plotting as plot class AsymptoticGrid(CombineToolBase): @@ -21,12 +22,17 @@ def __init__(self): def attach_intercept_args(self, group): CombineToolBase.attach_intercept_args(self, group) + group.add_argument('--setPhysicsModelParameters', default=None) + group.add_argument('--freezeNuisances', default=None) def attach_args(self, group): CombineToolBase.attach_args(self, group) group.add_argument('config', help='json config file') def run_method(self): + ROOT.PyConfig.IgnoreCommandLineOptions = True + ROOT.gROOT.SetBatch(ROOT.kTRUE) + # This is what the logic 
should be: # - get the list of model points # - figure out which files are: @@ -48,6 +54,19 @@ def run_method(self): if igrid[2]=='' : points.extend(itertools.product(utils.split_vals(igrid[0]), utils.split_vals(igrid[1]))) else : blacklisted_points.extend(itertools.product(utils.split_vals(igrid[0]), utils.split_vals(igrid[1]), utils.split_vals(igrid[2]))) POIs = cfg['POIs'] + opts = cfg['opts'] + + # Have to merge some arguments from both the command line and the "opts" in the json file + to_freeze = [] + to_set = [] + set_opt, opts = self.extract_arg('--setPhysicsModelParameters', opts) + if set_opt is not None: to_set.append(set_opt) + freeze_opt, opts = self.extract_arg('--freezeNuisances', opts) + if freeze_opt is not None: to_freeze.append(freeze_opt) + if hasattr(self.args, 'setPhysicsModelParameters') and self.args.setPhysicsModelParameters is not None: + to_set.append(self.args.setPhysicsModelParameters) + if hasattr(self.args, 'freezeNuisances') and self.args.freezeNuisances is not None: + to_freeze.append(self.args.freezeNuisances) file_dict = { } for p in points: @@ -66,15 +85,17 @@ def run_method(self): print '>> Point %s' % name if len(val) == 0: print 'Going to run limit for point %s' % (key,) - point_args = '-n .%s --setPhysicsModelParameters %s=%s,%s=%s --freezeNuisances %s,%s' % (name, POIs[0], key[0], POIs[1], key[1], POIs[0], POIs[1]) - cmd = ' '.join(['combine -M Asymptotic', cfg['opts'], point_args] + self.passthru) + set_arg = ','.join(['%s=%s,%s=%s' % (POIs[0], key[0], POIs[1], key[1])] + to_set) + freeze_arg = ','.join(['%s,%s' % (POIs[0], POIs[1])] + to_freeze) + point_args = '-n .%s --setPhysicsModelParameters %s --freezeNuisances %s' % (name, set_arg, freeze_arg) + cmd = ' '.join(['combine -M Asymptotic', opts, point_args] + self.passthru) self.job_queue.append(cmd) bail_out = len(self.job_queue) > 0 self.flush_queue() if bail_out: - print ">> New jobs were created / run in this cycle, run the script again to collect the output" + print '>> New jobs were created / run in this cycle, run the script again to collect the output' sys.exit(0) xvals = [] @@ -127,370 +148,395 @@ def run_method(self): # for j in xrange(1, hist.GetNbinsY()+1): # hist.SetBinContent(i, j, graph.Interpolate(hist.GetXaxis().GetBinCenter(i), hist.GetYaxis().GetBinCenter(j))) fout = ROOT.TFile('asymptotic_grid.root', 'RECREATE') - fout.WriteTObject(graph_m2s, 'minus2sigma') - fout.WriteTObject(graph_m1s, 'minus1sigma') - fout.WriteTObject(graph_exp, 'expected') - fout.WriteTObject(graph_p1s, 'plus1sigma') - fout.WriteTObject(graph_p2s, 'plus2sigma') - fout.WriteTObject(graph_obs, 'observed') + fout.WriteTObject(graph_m2s, 'exp-2') + fout.WriteTObject(graph_m1s, 'exp-1') + fout.WriteTObject(graph_exp, 'exp0') + fout.WriteTObject(graph_p1s, 'exp+1') + fout.WriteTObject(graph_p2s, 'exp+2') + fout.WriteTObject(graph_obs, 'obs') #fout.WriteTObject(hist) fout.Close() # Next step: open output files # Fill TGraph2D with CLs, CLs+b class HybridNewGrid(CombineToolBase): - description = 'Calculate toy-based limits on parameter grids' - requires_root = True - - def __init__(self): - CombineToolBase.__init__(self) - - def attach_intercept_args(self, group): - CombineToolBase.attach_intercept_args(self, group) - - def attach_args(self, group): - CombineToolBase.attach_args(self, group) - group.add_argument('config', help='json config file') - group.add_argument('--cycles', default=0, type=int, help='Number of job cycles to create per point') - - def GetCombinedHypoTest(self, files): - if len(files) == 0: 
return None - results = [] - for file in files: - f = ROOT.TFile(file) - ROOT.gDirectory.cd('toys') - for key in ROOT.gDirectory.GetListOfKeys(): - if ROOT.gROOT.GetClass(key.GetClassName()).InheritsFrom(ROOT.RooStats.HypoTestResult.Class()): - results.append(ROOT.gDirectory.Get(key.GetName())) - f.Close() - if (len(results)) > 1: - for r in results[1:]: - results[0].Append(r) - return results[0] - - def ValidateHypoTest(self, result, min_toys=500, max_toys=5000, contours=['obs', 'exp0', 'exp+1', 'exp-1', 'exp+2', 'exp-2'], signif=5.0, cl=0.95): - # 1st test - do we have any HypoTestResult at all? - if result is None: - print '>> No toys completed!' - return False - # 2nd test - have we thrown the minimum number of toys? - ntoys = min(result.GetNullDistribution().GetSize(), result.GetAltDistribution().GetSize()) - if ntoys < min_toys: - print '>> Only %i/%i toys completed!' % (ntoys, min_toys) - return False - # 3rd test - have we thrown the maximum (or more) toys? - if ntoys >= max_toys: - print '>> More than max toys %i/%i toys completed!' % (ntoys, min_toys) - return True - # 3rd test - are we > X sigma away from the exclusion CLs? This must be true for all the - # contours we're interested in - btoys = [x for x in result.GetNullDistribution().GetSamplingDistribution()] - btoys.sort() - crossing = 1 - cl - signif_results = {} - # save the real observed test stat, we'll change it in this - # loop to get the expected but we'll want to restore it at the end - testStatObs = result.GetTestStatisticData() - allGood = True - for contour in contours: - signif_results[contour] = True - if 'exp' in contour: - quantile = ROOT.Math.normal_cdf(float(contour.replace('exp', ''))) - print '>> Checking the %s contour at quantile=%f' % (contour, quantile) - testStat = btoys[int(min(floor(quantile * len(btoys) +0.5), len(btoys)))] - print '>> Test statistic for %f quantile: %f' % (quantile, testStat) - result.SetTestStatisticData(testStat) - result.Print("v") - CLs = result.CLs() - CLsErr = result.CLsError() - if CLsErr == 0.: - print '>> CLsErr is zero' - else: - dist = abs(CLs - crossing) / CLsErr - if dist < signif: - print '>> [%s] Only reached %.1f sigma signifance from chosen CL (target %.1f)' % (contour, dist, signif) - signif_results[contour] = False - result.SetTestStatisticData(testStatObs) - print signif_results - for (key, val) in signif_results.iteritems(): - if val == False: return False - print 'POINT IS OK AFTER %i TOYS' % ntoys - return True - - def PlotTest(self, result, name, one_sided=False, model_desc=''): - plot.ModTDRStyle() - canv = ROOT.TCanvas(name, name) - pad = ROOT.TPad('pad', 'pad', 0., 0., 1., 1.) - pad.Draw() - pad.cd() - null_vals = [x * 2. for x in result.GetNullDistribution().GetSamplingDistribution()] - alt_vals = [x * 2. for x in result.GetAltDistribution().GetSamplingDistribution()] - min_val = min(min(alt_vals), min(null_vals)) - max_val = max(max(alt_vals), max(null_vals)) - min_plot_range = min_val - 0.05 * (max_val - min_val) - if one_sided: - min_plot_range = 0. 
- pad.SetLogy(True) - max_plot_range = max_val + 0.05 * (max_val - min_val) - hist_null = ROOT.TH1F('null', 'null', 40, min_plot_range, max_plot_range) - hist_alt = ROOT.TH1F('alt', 'alt', 40, min_plot_range, max_plot_range) - for val in null_vals: hist_null.Fill(val) - for val in alt_vals: hist_alt.Fill(val) - hist_alt.SetLineColor(ROOT.TColor.GetColor(4, 4, 255)) - hist_alt.SetFillColorAlpha(ROOT.TColor.GetColor(4, 4, 255), 0.4) - hist_alt.GetXaxis().SetTitle('-2 #times ln(Q_{TEV})') - hist_alt.GetYaxis().SetTitle('Pseudo-experiments') - hist_alt.Draw() - hist_alt.SetMaximum(hist_alt.GetMaximum() * 1.5) - hist_null.SetLineColor(ROOT.TColor.GetColor(252, 86, 11)) - hist_null.SetFillColorAlpha(ROOT.TColor.GetColor(254, 195, 40), 0.4) - hist_null.Draw('SAME') - val_obs = result.GetTestStatisticData() * 2. - obs = ROOT.TArrow(val_obs, 0, val_obs, hist_alt.GetMaximum() * 0.3, 0.05 , '<-|') - obs.SetLineColor(ROOT.kRed) - obs.SetLineWidth(3) - obs.Draw() - leg = plot.PositionedLegend(0.22, 0.2, 3, 0.02) - leg.AddEntry(hist_null, "B", "F") - leg.AddEntry(hist_alt, "S+B", "F") - leg.AddEntry(obs, "Observed", "L") - leg.Draw() - plot.DrawCMSLogo(pad, "CMS", "Internal", 0, 0.15, 0.035, 1.2) - pt_l = ROOT.TPaveText(0.23, 0.75, 0.33, 0.9, 'NDCNB') - if model_desc: pt_l.AddText('Model:') - pt_l.AddText('Toys:') - pt_l.AddText('CLs+b:') - pt_l.AddText('CLb:') - pt_l.AddText('CLs:') - pt_l.SetTextAlign(11) - pt_l.SetTextFont(62) - pt_l.Draw() - pt_r = ROOT.TPaveText(0.33, 0.75, 0.63, 0.9, 'NDCNB') - if model_desc: pt_r.AddText(model_desc) - pt_r.AddText('%i (B) + %i (S+B)' % (result.GetNullDistribution().GetSize(), result.GetAltDistribution().GetSize())) - pt_r.AddText('%.3f #pm %.3f' % (result.CLsplusb(), result.CLsplusbError())) - pt_r.AddText('%.3f #pm %.3f' % (result.CLb(), result.CLbError())) - pt_r.AddText('%.3f #pm %.3f' % (result.CLs(), result.CLsError())) - pt_r.SetTextAlign(11) - pt_r.SetTextFont(42) - pt_r.Draw() - canv.SaveAs('.pdf') - - def run_method(self): - # Step 1 - open the json config file - with open(self.args.config) as json_file: - cfg = json.load(json_file) - # to do - have to handle the case where it doesn't exist - points = []; blacklisted_points = [] - for igrid in cfg['grids']: - assert(len(igrid) == 3) - if igrid[2]=='' : points.extend(itertools.product(utils.split_vals(igrid[0]), utils.split_vals(igrid[1]))) - else : blacklisted_points.extend(itertools.product(utils.split_vals(igrid[0]), utils.split_vals(igrid[1]), utils.split_vals(igrid[2]))) - POIs = cfg['POIs'] - - file_dict = { } - for p in points: - file_dict[p] = [] - - for f in glob.glob('higgsCombine.%s.*.%s.*.HybridNew.mH*.root' % (POIs[0], POIs[1])): - # print f - rgx = re.compile('higgsCombine\.%s\.(?P.*)\.%s\.(?P.*)\.HybridNew\.mH.*\.(?P.*)\.root' % (POIs[0], POIs[1])) - matches = rgx.search(f) - p = (matches.group('p1'), matches.group('p2')) - if p in file_dict: - file_dict[p].append((f, int(matches.group('toy')))) - - for key,val in file_dict.iteritems(): - name = '%s.%s.%s.%s' % (POIs[0], key[0], POIs[1], key[1]) - print '>> Point %s' % name - res = self.GetCombinedHypoTest([x[0] for x in val]) - ok = self.ValidateHypoTest(res) - if res is not None and True: self.PlotTest(res, 'plot_'+name, False, 'm_{H}^{mod+} [m_{A} = %.1f, tan#beta = %.1f]' % (float(key[0]), float(key[1]))) - - if not ok: - print 'Going to generate %i job(s) for point %s' % (self.args.cycles, key) - done_cycles = [x[1] for x in val] - print 'Done cycles: ' + ','.join(str(x) for x in done_cycles) - new_idx = max(done_cycles)+1 if 
len(done_cycles) > 0 else 1 - new_cycles = range(new_idx, new_idx+self.args.cycles) - print 'New cycles: ' + ','.join(str(x) for x in new_cycles) - point_args = '-n .%s --setPhysicsModelParameters %s=%s,%s=%s --freezeNuisances %s,%s' % (name, POIs[0], key[0], POIs[1], key[1], POIs[0], POIs[1]) - for idx in new_cycles: - cmd = ' '.join(['combine -M HybridNew', cfg['opts'], point_args, '-T %i' % cfg['toys_per_cycle'], '-s %i' % idx]) - self.job_queue.append(cmd) - - # bail_out = len(self.job_queue) > 0 - self.flush_queue() - - # if bail_out: - # print ">> New jobs were created / run in this cycle, run the script again to collect the output" - # sys.exit(0) - - # xvals = [] - # yvals = [] - # zvals_m2s = []; zvals_m1s = []; zvals_exp = []; zvals_p1s = []; zvals_p2s = []; zvals_obs = [] - # for key,val in file_dict.iteritems(): - # for filename in val: - # fin = ROOT.TFile(filename) - # if fin.IsZombie(): continue - # tree = fin.Get('limit') - # for evt in tree: - # if abs(evt.quantileExpected+1)<0.01: - # xvals.append(float(key[0])) - # yvals.append(float(key[1])) - # #print 'At point %s have observed CLs = %f' % (key, evt.limit) - # zvals_obs.append(float(evt.limit)) - # if abs(evt.quantileExpected-0.025)<0.01: - # #print 'At point %s have -2sigma CLs = %f' % (key, evt.limit) - # zvals_m2s.append(float(evt.limit)) - # if abs(evt.quantileExpected-0.16)<0.01: - # #print 'At point %s have -1sigma CLs = %f' % (key, evt.limit) - # zvals_m1s.append(float(evt.limit)) - # if abs(evt.quantileExpected-0.5)<0.01: - # #print 'At point %s have expected CLs = %f' % (key, evt.limit) - # zvals_exp.append(float(evt.limit)) - # if abs(evt.quantileExpected-0.84)<0.01: - # #print 'At point %s have +1sigma CLs = %f' % (key, evt.limit) - # zvals_p1s.append(float(evt.limit)) - # if abs(evt.quantileExpected-0.975)<0.01: - # #print 'At point %s have +2sigma CLs = %f' % (key, evt.limit) - # zvals_p2s.append(float(evt.limit)) - # for POI1, POI2, CLs in blacklisted_points: - # xvals.append(float(POI1)) - # yvals.append(float(POI2)) - # zvals_m2s.append(float(CLs)) - # zvals_m1s.append(float(CLs)) - # zvals_exp.append(float(CLs)) - # zvals_p1s.append(float(CLs)) - # zvals_p2s.append(float(CLs)) - # zvals_obs.append(float(CLs)) - # graph_m2s = ROOT.TGraph2D(len(zvals_m2s), array('d', xvals), array('d', yvals), array('d', zvals_m2s)) - # graph_m1s = ROOT.TGraph2D(len(zvals_m1s), array('d', xvals), array('d', yvals), array('d', zvals_m1s)) - # graph_exp = ROOT.TGraph2D(len(zvals_exp), array('d', xvals), array('d', yvals), array('d', zvals_exp)) - # graph_p1s = ROOT.TGraph2D(len(zvals_p1s), array('d', xvals), array('d', yvals), array('d', zvals_p1s)) - # graph_p2s = ROOT.TGraph2D(len(zvals_p2s), array('d', xvals), array('d', yvals), array('d', zvals_p2s)) - # graph_obs = ROOT.TGraph2D(len(zvals_obs), array('d', xvals), array('d', yvals), array('d', zvals_obs)) - # #h_bins = cfg['hist_binning'] - # #hist = ROOT.TH2F('h_observed', '', h_bins[0], h_bins[1], h_bins[2], h_bins[3], h_bins[4], h_bins[5]) - # #for i in xrange(1, hist.GetNbinsX()+1): - # # for j in xrange(1, hist.GetNbinsY()+1): - # # hist.SetBinContent(i, j, graph.Interpolate(hist.GetXaxis().GetBinCenter(i), hist.GetYaxis().GetBinCenter(j))) - # fout = ROOT.TFile('asymptotic_grid.root', 'RECREATE') - # fout.WriteTObject(graph_m2s, 'minus2sigma') - # fout.WriteTObject(graph_m1s, 'minus1sigma') - # fout.WriteTObject(graph_exp, 'expected') - # fout.WriteTObject(graph_p1s, 'plus1sigma') - # fout.WriteTObject(graph_p2s, 'plus2sigma') - # fout.WriteTObject(graph_obs, 
'observed') - # #fout.WriteTObject(hist) - # fout.Close() - # # Next step: open output files - # # Fill TGraph2D with CLs, CLs+b - - -class Limit1D(CombineToolBase): - description = 'Calculate asymptotic limits on parameter grids' - requires_root = True - - def __init__(self): - CombineToolBase.__init__(self) - - def attach_intercept_args(self, group): - CombineToolBase.attach_intercept_args(self, group) - - def attach_args(self, group): - CombineToolBase.attach_args(self, group) - group.add_argument('config', help='json config file') - - def run_method(self): - # Step 1 - open the json config file - with open(self.args.config) as json_file: - cfg = json.load(json_file) - # to do - have to handle the case where it doesn't exist - points = []; blacklisted_points = [] - for igrid in cfg['grids']: - assert(len(igrid) == 1) - points.extend(itertools.product(utils.split_vals(igrid[0]))) - POIs = cfg['POIs'] - - file_dict = { } - for p in points: - file_dict[p] = [] - - for f in glob.glob('higgsCombine.%s.*.Asymptotic.mH*.root' % (POIs[0])): - rgx = re.compile('higgsCombine\.%s\.(?P.*)\.Asymptotic\.mH.*\.root' % (POIs[0])) - matches = rgx.search(f) - p = (matches.group('p1'),) - if p in file_dict: - file_dict[p].append(f) - - for key,val in file_dict.iteritems(): - name = '%s.%s' % (POIs[0], key[0]) - print '>> Point %s' % name - if len(val) == 0: - print 'Going to run limit for point %s' % (key) - r_process = cfg['r'] - point_args = '' - if r_process[0] == "r_ggA" : - point_args = '-n .%s --setPhysicsModelParameters %s=%s --setPhysicsModelParameterRanges %s=0,%s --freezeNuisances %s' % (name, POIs[0], key[0], r_process[0], str(bounds["ggH-bbH",key[0]][0]), POIs[0]) - elif r_process[0] == "r_bbA" : - point_args = '-n .%s --setPhysicsModelParameters %s=%s --setPhysicsModelParameterRanges %s=0,%s --freezeNuisances %s' % (name, POIs[0], key[0], r_process[0], str(bounds["ggH-bbH",key[0]][1]), POIs[0]) - cmd = ' '.join(['combine -M Asymptotic', cfg['opts'], point_args] + self.passthru) - self.job_queue.append(cmd) - - bail_out = len(self.job_queue) > 0 - self.flush_queue() - - if bail_out: - print ">> New jobs were created / run in this cycle, run the script again to collect the output" - sys.exit(0) - - xvals = [] - zvals_m2s = []; zvals_m1s = []; zvals_exp = []; zvals_p1s = []; zvals_p2s = []; zvals_obs = [] - for key,val in file_dict.iteritems(): - for filename in val: - fin = ROOT.TFile(filename) - if fin.IsZombie(): continue - tree = fin.Get('limit') - for evt in tree: - if abs(evt.quantileExpected+1)<0.01: - xvals.append(float(key[0])) - #print 'At point %s have observed CLs = %f' % (key, evt.limit) - zvals_obs.append(float(evt.limit)) - if abs(evt.quantileExpected-0.025)<0.01: - #print 'At point %s have -2sigma CLs = %f' % (key, evt.limit) - zvals_m2s.append(float(evt.limit)) - if abs(evt.quantileExpected-0.16)<0.01: - #print 'At point %s have -1sigma CLs = %f' % (key, evt.limit) - zvals_m1s.append(float(evt.limit)) - if abs(evt.quantileExpected-0.5)<0.01: - #print 'At point %s have expected CLs = %f' % (key, evt.limit) - zvals_exp.append(float(evt.limit)) - if abs(evt.quantileExpected-0.84)<0.01: - #print 'At point %s have +1sigma CLs = %f' % (key, evt.limit) - zvals_p1s.append(float(evt.limit)) - if abs(evt.quantileExpected-0.975)<0.01: - #print 'At point %s have +2sigma CLs = %f' % (key, evt.limit) - zvals_p2s.append(float(evt.limit)) - graph_m2s = ROOT.TGraph(len(zvals_m2s), array('d', xvals), array('d', zvals_m2s)) - graph_m1s = ROOT.TGraph(len(zvals_m1s), array('d', xvals), array('d', 
zvals_m1s)) - graph_exp = ROOT.TGraph(len(zvals_exp), array('d', xvals), array('d', zvals_exp)) - graph_p1s = ROOT.TGraph(len(zvals_p1s), array('d', xvals), array('d', zvals_p1s)) - graph_p2s = ROOT.TGraph(len(zvals_p2s), array('d', xvals), array('d', zvals_p2s)) - graph_obs = ROOT.TGraph(len(zvals_obs), array('d', xvals), array('d', zvals_obs)) - #h_bins = cfg['hist_binning'] - #hist = ROOT.TH2F('h_observed', '', h_bins[0], h_bins[1], h_bins[2], h_bins[3], h_bins[4], h_bins[5]) - #for i in xrange(1, hist.GetNbinsX()+1): - # for j in xrange(1, hist.GetNbinsY()+1): - # hist.SetBinContent(i, j, graph.Interpolate(hist.GetXaxis().GetBinCenter(i), hist.GetYaxis().GetBinCenter(j))) - fout = ROOT.TFile('asymptotic_1Dlimit.root', 'RECREATE') - fout.WriteTObject(graph_m2s, 'minus2sigma') - fout.WriteTObject(graph_m1s, 'minus1sigma') - fout.WriteTObject(graph_exp, 'expected') - fout.WriteTObject(graph_p1s, 'plus1sigma') - fout.WriteTObject(graph_p2s, 'plus2sigma') - fout.WriteTObject(graph_obs, 'observed') - #fout.WriteTObject(hist) - fout.Close() - # Next step: open output files - # Fill TGraph2D with CLs, CLs+b + description = 'Calculate toy-based limits on parameter grids' + requires_root = True + + def __init__(self): + CombineToolBase.__init__(self) + + def attach_intercept_args(self, group): + CombineToolBase.attach_intercept_args(self, group) + group.add_argument('--setPhysicsModelParameters', default=None) + group.add_argument('--freezeNuisances', default=None) + def attach_args(self, group): + CombineToolBase.attach_args(self, group) + group.add_argument('config', help='json config file') + group.add_argument('--cycles', default=0, type=int, help='Number of job cycles to create per point') + group.add_argument('--output', action='store_true', help='Write CLs grids into an output file') + + def GetCombinedHypoTest(self, files): + if len(files) == 0: return None + results = [] + for file in files: + found_res = False + f = ROOT.TFile(file) + ROOT.gDirectory.cd('toys') + for key in ROOT.gDirectory.GetListOfKeys(): + if ROOT.gROOT.GetClass(key.GetClassName()).InheritsFrom(ROOT.RooStats.HypoTestResult.Class()): + results.append(ROOT.gDirectory.Get(key.GetName())) + found_res = True + f.Close() + if not found_res: + print '>> Warning, did not find a HypoTestResult object in file %s' % file + if (len(results)) > 1: + for r in results[1:]: + results[0].Append(r) + ntoys = min(results[0].GetNullDistribution().GetSize(), results[0].GetAltDistribution().GetSize()) + if ntoys == 0: + print '>> Warning, HypoTestResult from file(s) %s does not contain any toy results, did something go wrong in your fits?' % '+'.join(files) + return results[0] + + def ValidateHypoTest(self, hyp_res, min_toys, max_toys, contours, signif, cl, output=False, verbose=False): + results = {} + + # We will take the number of toys thrown as the minimum of the number of b-only or s+b toys + ntoys = min(hyp_res.GetNullDistribution().GetSize(), hyp_res.GetAltDistribution().GetSize()) + results['ntoys'] = ntoys + + if verbose: + print '>>> Toys completed: %i [min=%i, max=%i]' % (ntoys, min_toys, max_toys) + + # If we're not going to write the CLs grids out and we fail the ntoys criteria then we + # don't need to waste time getting all the CLs values. Can just return the results dict as-is. + # 1st test - have we thrown at least the minimum number of toys? + if ntoys < min_toys and not output: + return (False, results) + # 2nd test - have we thrown the maximum (or more) toys? 
+ if ntoys >= max_toys and not output: + return (True, results) + + # 3rd test - are we > X sigma away from the exclusion CLs? This must be true for all the + # contours we're interested in + btoys = sorted([x for x in hyp_res.GetNullDistribution().GetSamplingDistribution()]) + crossing = 1 - cl + signif_results = {} + + # save the real observed test stat, we'll change it in this + # loop to get the expected but we'll want to restore it at the end + q_obs = hyp_res.GetTestStatisticData() + + if verbose: + print '>>> CLs target is a significance of %.1f standard deviations from %.3f' % (signif, crossing) + + for contour in contours: + # Start by assuming this contour passes, we'll set it to False if it fails + signif_results[contour] = True + + # If this is an expected contour we will extract the quantile from the name + if 'exp' in contour: + quantile = ROOT.Math.normal_cdf(float(contour.replace('exp', ''))) + if verbose: + print '>>> Checking the %s contour at quantile=%f' % (contour, quantile) + # Get the test statistic value at this quantile by rounding to the nearest b-only toy + testStat = btoys[int(min(floor(quantile * len(btoys) + 0.5), len(btoys) - 1))] + hyp_res.SetTestStatisticData(testStat) + elif contour == 'obs': + if verbose: print '>>> Checking the %s contour' % contour + else: + raise RuntimeError('Contour %s not recognised' % contour) + + # Currently assume we always want to use CLs, should provide option + # for CLs+b at some point + CLs = hyp_res.CLs() + CLsErr = hyp_res.CLsError() + if ntoys == 0: CLsErr = 0 # If there were no toys then CLsError() will return inf + dist = 0. + if CLsErr == 0.: + if verbose: + print '>>>> CLs = %g +/- %g (infinite significance), will treat as passing' % (CLs, CLsErr) + dist = 999. + else: + dist = abs(CLs - crossing) / CLsErr + if verbose: + print '>>>> CLs = %g +/- %g, reached %.1f sigma significance' % (CLs, CLsErr, dist) + if dist < signif: + signif_results[contour] = False + results[contour] = (CLs, CLsErr, dist) + # Set the observed test statistic back to the real data value + hyp_res.SetTestStatisticData(q_obs) + + # Now do the full logic of the validation and return + all_ok = (ntoys >= min_toys) # OK if min toys passes + for (key, val) in signif_results.iteritems(): + all_ok = all_ok and val # still OK if all contour significances pass + all_ok = all_ok or (ntoys >= max_toys) # Always OK if we've reached the maximum + results['ok'] = all_ok + return (all_ok, results) + + + def run_method(self): + ROOT.PyConfig.IgnoreCommandLineOptions = True + ROOT.gROOT.SetBatch(ROOT.kTRUE) + + # Open the json config file + with open(self.args.config) as json_file: + cfg = json.load(json_file) + + # Set all the parameter values locally using defaults if necessary + grids = cfg['grids'] + POIs = cfg['POIs'] + opts = cfg['opts'] + toys_per_cycle = cfg['toys_per_cycle'] + zipname = cfg.get('zipfile', None) + contours = cfg.get('contours', ['obs', 'exp-2', 'exp-1', 'exp0', 'exp+1', 'exp+2']) + min_toys = cfg.get('min_toys', 500) + max_toys = cfg.get('max_toys', 5000) + signif = cfg.get('signif', 3.0) + cl = cfg.get('CL', 0.95) + verbose = cfg.get('verbose', False) + make_plots = cfg.get('make_plots', False) + # Write CLs values into the output even if current toys do not pass validation + incomplete = cfg.get('output_incomplete', False) + outfile = cfg.get('output','hybrid_grid.root') + # NB: blacklisting not yet implemented for this method + + # Have to merge some arguments from both the command line and the "opts" in the json file + to_freeze = [] +
to_set = [] + set_opt, opts = self.extract_arg('--setPhysicsModelParameters', opts) + if set_opt is not None: to_set.append(set_opt) + freeze_opt, opts = self.extract_arg('--freezeNuisances', opts) + if freeze_opt is not None: to_freeze.append(freeze_opt) + if hasattr(self.args, 'setPhysicsModelParameters') and self.args.setPhysicsModelParameters is not None: + to_set.append(self.args.setPhysicsModelParameters) + if hasattr(self.args, 'freezeNuisances') and self.args.freezeNuisances is not None: + to_freeze.append(self.args.freezeNuisances) + + points = []; blacklisted_points = [] + for igrid in grids: + assert(len(igrid) == 3) + if igrid[2] == '': + points.extend(itertools.product(utils.split_vals(igrid[0]), utils.split_vals(igrid[1]))) + else: + blacklisted_points.extend(itertools.product(utils.split_vals(igrid[0]), utils.split_vals(igrid[1]), utils.split_vals(igrid[2]))) + + # This dictionary will keep track of the combine output files for each model point + file_dict = { } + for p in points: + file_dict[p] = {} + + # The regex we will use to identify output files and extract POI values + rgx = re.compile('higgsCombine\.%s\.(?P.*)\.%s\.(?P.*)\.HybridNew\.mH.*\.(?P.*)\.root' % (POIs[0], POIs[1])) + + # Can optionally copy output root files into a zip archive + # If the user has specified a zipfile we will first + # look for output files in this archive before scanning the + # current directory + if zipname: + # Open the zip file in append mode, this should also + # create it if it doesn't exist + zipf = zipfile.ZipFile(zipname, 'a') + for f in zipf.namelist(): + matches = rgx.search(f) + p = (matches.group('p1'), matches.group('p2')) + seed = int(matches.group('toy')) + if p in file_dict: + if seed not in file_dict[p]: + # For each model point have a dictionary keyed on the seed number + # with a value pointing to the file in the archive in the format + # ROOT expects: "zipfile.zip#higgsCombine.blah.root" + file_dict[p][seed] = zipname+'#'+f + + # Now look for files in the local directory + for f in glob.glob('higgsCombine.%s.*.%s.*.HybridNew.mH*.root' % (POIs[0], POIs[1])): + matches = rgx.search(f) + p = (matches.group('p1'), matches.group('p2')) + seed = int(matches.group('toy')) + if p in file_dict: + # Don't add this file to the list if its seed number is already + # a value in the dict. 
+ if seed not in file_dict[p]: + # If we're using the zipfile we'll add this now and + # then delete it from the local directory + # But: only if the file is good, since we don't want to pollute the zip + # file with incomplete or failed jobs + if zipname and plot.TFileIsGood(f): + zipf.write(f) # assume this throws if it fails + print 'Adding %s to %s' % (f, zipname) + file_dict[p][seed] = zipname+'#'+f + os.remove(f) + else: # otherwise just add the file to the dict in the normal way + file_dict[p][seed] = f + + if zipname: + zipf.close() + + # These lists will keep track of the CLs values which we will use + # to create the output TGraph2Ds + output_x = [] + output_y = [] + output_data = {} + output_ntoys = [] + output_clserr = {} + output_signif = {} + # One list of Z-values per contour + for contour in contours: + output_data[contour] = [] + output_clserr[contour] = [] + output_signif[contour] = [] + + + # Also keep track of the number of model points which have met the + # CLs criteria + total_points = 0 + complete_points = 0 + + for key,val in file_dict.iteritems(): + total_points += 1 + name = '%s.%s.%s.%s' % (POIs[0], key[0], POIs[1], key[1]) + files = [x for x in val.values() if plot.TFileIsGood(x)] + # Merge the HypoTestResult objects from each file into one + res = self.GetCombinedHypoTest(files) + + # Do the validation of this model point + # + ok, point_res = self.ValidateHypoTest(res, + min_toys = min_toys, + max_toys = max_toys, + contours = contours, + signif = signif, + cl = cl, + output = self.args.output, + verbose = verbose) if res is not None else (False, {"ntoys" : 0}) + + print '>> Point %s [%i toys, %s]' % (name, point_res['ntoys'], 'DONE' if ok else 'INCOMPLETE') + + if ok: + complete_points += 1 + + # Make plots of the test statistic distributions if requested + if res is not None and make_plots: + self.PlotTestStat(res, 'plot_'+name, opts = cfg['plot_settings'], poi_vals = (float(key[0]), float(key[1]))) + + # Add the resulting CLs values to the output arrays. Normally just + # for the model points that passed the validation criteria, but if "output_incomplete" + # has been set to true then we'll write all model points where at least one HypoTestResult + # is present + if res is not None and (ok or incomplete) and self.args.output: + output_x.append(float(key[0])) + output_y.append(float(key[1])) + output_ntoys.append(point_res['ntoys']) + for contour in contours: + output_data[contour].append(point_res[contour][0]) + output_clserr[contour].append(point_res[contour][1]) + output_signif[contour].append(point_res[contour][2]) + + # Do the job cycle generation if requested + if not ok and self.args.cycles > 0: + print '>>> Going to generate %i job(s) for point %s' % (self.args.cycles, key) + # Figure out the next seed numbers we need to run by finding the maximum seed number + # so far + done_cycles = val.keys() + new_idx = max(done_cycles)+1 if len(done_cycles) > 0 else 1 + new_cycles = range(new_idx, new_idx+self.args.cycles) + + print '>>> Done cycles: ' + ','.join(str(x) for x in done_cycles) + print '>>> New cycles: ' + ','.join(str(x) for x in new_cycles) + + # Build the combine command.
Here we'll take responsibility for setting the name and the + # model parameters, making sure the latter are frozen + set_arg = ','.join(['%s=%s,%s=%s' % (POIs[0], key[0], POIs[1], key[1])] + to_set) + freeze_arg = ','.join(['%s,%s' % (POIs[0], POIs[1])] + to_freeze) + point_args = '-n .%s --setPhysicsModelParameters %s --freezeNuisances %s' % (name, set_arg, freeze_arg) + # Build a command for each job cycle setting the number of toys and random seed and passing through any other + # user options from the config file or the command line + for idx in new_cycles: + cmd = ' '.join(['combine -M HybridNew', opts, point_args, '-T %i' % toys_per_cycle, '-s %i' % idx] + self.passthru) + self.job_queue.append(cmd) + + print ">> %i/%i points have completed and require no further toys" % (complete_points, total_points) + self.flush_queue() + + # Create and write output CLs TGraph2Ds here + # TODO: add graphs with the CLs errors, the numbers of toys and whether or not the point passes + if self.args.output: + fout = ROOT.TFile(outfile, 'RECREATE') + for c in contours: + graph = ROOT.TGraph2D(len(output_data[c]), array('d', output_x), array('d', output_y), array('d', output_data[c])) + graph.SetName(c) + fout.WriteTObject(graph, c) + # Also write a Graph with the CLsErr + graph = ROOT.TGraph2D(len(output_clserr[c]), array('d', output_x), array('d', output_y), array('d', output_clserr[c])) + graph.SetName('clsErr_'+c) + fout.WriteTObject(graph, 'clsErr_'+c) + # And a Graph with the significance + graph = ROOT.TGraph2D(len(output_signif[c]), array('d', output_x), array('d', output_y), array('d', output_signif[c])) + graph.SetName('signif_'+c) + fout.WriteTObject(graph, 'signif_'+c) + graph = ROOT.TGraph2D(len(output_ntoys), array('d', output_x), array('d', output_y), array('d', output_ntoys)) + graph.SetName('ntoys') + fout.WriteTObject(graph, 'ntoys') + fout.Close() + + def PlotTestStat(self, result, name, opts, poi_vals): + null_vals = [x * -2. for x in result.GetNullDistribution().GetSamplingDistribution()] + alt_vals = [x * -2. for x in result.GetAltDistribution().GetSamplingDistribution()] + if len(null_vals) == 0 or len(alt_vals) == 0: + print '>> Error in PlotTestStat for %s, null and/or alt distributions are empty' % name + return + plot.ModTDRStyle() + canv = ROOT.TCanvas(name, name) + pad = plot.OnePad()[0] + min_val = min(min(alt_vals), min(null_vals)) + max_val = max(max(alt_vals), max(null_vals)) + min_plot_range = min_val - 0.05 * (max_val - min_val) + if opts['one_sided']: + min_plot_range = 0. + pad.SetLogy(True) + max_plot_range = max_val + 0.05 * (max_val - min_val) + hist_null = ROOT.TH1F('null', 'null', 40, min_plot_range, max_plot_range) + hist_alt = ROOT.TH1F('alt', 'alt', 40, min_plot_range, max_plot_range) + for val in null_vals: hist_null.Fill(val) + for val in alt_vals: hist_alt.Fill(val) + hist_alt.SetLineColor(ROOT.TColor.GetColor(4, 4, 255)) + hist_alt.SetFillColor(plot.CreateTransparentColor(ROOT.TColor.GetColor(4, 4, 255), 0.4)) + hist_alt.GetXaxis().SetTitle('-2 #times ln(^{}L_{%s}/^{}L_{%s})' % (opts['alt_label'], opts['null_label'])) + hist_alt.GetYaxis().SetTitle('Pseudo-experiments') + hist_alt.Draw() + hist_null.SetLineColor(ROOT.TColor.GetColor(252, 86, 11)) + hist_null.SetFillColor(plot.CreateTransparentColor(ROOT.TColor.GetColor(254, 195, 40), 0.4)) + hist_null.Draw('SAME') + val_obs = result.GetTestStatisticData() * -2.
+ obs = ROOT.TArrow(val_obs, 0, val_obs, hist_alt.GetMaximum() * 0.3, 0.05 , '<-|') + obs.SetLineColor(ROOT.kRed) + obs.SetLineWidth(3) + obs.Draw() + plot.FixTopRange(pad, plot.GetPadYMax(pad), 0.25) + leg = plot.PositionedLegend(0.22, 0.2, 3, 0.02) + leg.AddEntry(hist_alt, opts['alt_label'], 'F') + leg.AddEntry(hist_null, opts['null_label'], 'F') + leg.AddEntry(obs, 'Observed', 'L') + leg.Draw() + plot.DrawCMSLogo(pad, 'CMS', opts['cms_subtitle'], 0, 0.15, 0.035, 1.2) + pt_l = ROOT.TPaveText(0.23, 0.75, 0.33, 0.9, 'NDCNB') + pt_l.AddText('Model:') + pt_l.AddText('Toys:') + pt_l.AddText('CLs+b:') + pt_l.AddText('CLb:') + pt_l.AddText('CLs:') + plot.Set(pt_l, TextAlign=11, TextFont=62, BorderSize=0) + pt_l.Draw() + pt_r = ROOT.TPaveText(0.33, 0.75, 0.63, 0.9, 'NDCNB') + pt_r.AddText('%s [%s = %.1f, %s = %.1f]' % (opts['model_label'], opts['poi_labels'][0], poi_vals[0], opts['poi_labels'][1], poi_vals[1])) + pt_r.AddText('%i (%s) + %i (%s)' % (result.GetNullDistribution().GetSize(), opts['null_label'], result.GetAltDistribution().GetSize(), opts['alt_label'])) + pt_r.AddText('%.3f #pm %.3f' % (result.CLsplusb(), result.CLsplusbError())) + pt_r.AddText('%.3f #pm %.3f' % (result.CLb(), result.CLbError())) + pt_r.AddText('%.3f #pm %.3f' % (result.CLs(), result.CLsError())) + plot.Set(pt_r, TextAlign=11, TextFont=42, BorderSize=0) + pt_r.Draw() + pad.GetFrame().Draw() + pad.RedrawAxis() + for fmt in opts['formats']: + canv.SaveAs(fmt) diff --git a/CombineTools/python/combine/Output.py b/CombineTools/python/combine/Output.py index 53a3d0ce2ab..95eb78931f3 100755 --- a/CombineTools/python/combine/Output.py +++ b/CombineTools/python/combine/Output.py @@ -2,6 +2,7 @@ import ROOT import json +import os import CombineHarvester.CombineTools.combine.utils as utils from CombineHarvester.CombineTools.combine.opts import OPTS @@ -9,8 +10,8 @@ from CombineHarvester.CombineTools.combine.CombineToolBase import CombineToolBase -class PrintSingles(CombineToolBase): - description = 'Print the output of MultimDitFit --algo singles' +class PrintFit(CombineToolBase): + description = 'Print the output of MultiDimFit' requires_root = True def __init__(self): @@ -19,15 +20,39 @@ def __init__(self): def attach_args(self, group): CombineToolBase.attach_args(self, group) group.add_argument('input', help='The input file') + group.add_argument( + '--algo', help='The algo used in MultiDimFit', default='none') group.add_argument( + '-P', '--POIs', help='The params that were scanned (in scan order)') + group.add_argument( + '--json', help='Write json output (format file.json:key1:key2...)') def run_method(self): - POIs = args.POIs.split(',') - res = get_singles_results(args.input, POIs, POIs) - for p in POIs: - val = res[p][p] - print '%s = %.3f -%.3f/+%.3f' % (p, val[1], val[1] - val[0], val[2] - val[1]) + if self.args.json is not None: + json_structure = self.args.json.split(':') + assert(len(json_structure) >= 1) + if os.path.isfile(json_structure[0]): + with open(json_structure[0]) as jsonfile: js = json.load(jsonfile) + else: + js = {} + js_target = js + if (len(json_structure) >= 2): + for key in json_structure[1:]: + js_target[key] = {} + js_target = js_target[key] + POIs = self.args.POIs.split(',') + if self.args.algo == 'none': + res = utils.get_none_results(self.args.input, POIs) + if self.args.json is not None: + for key,val in res.iteritems(): + js_target[key] = { 'Val' : val } + with open(json_structure[0], 'w') as outfile: + json.dump(js, outfile, sort_keys=True, indent=4, separators=(',', ': ')) + elif
self.args.algo == 'singles': + res = utils.get_singles_results(self.args.input, POIs, POIs) + for p in POIs: + val = res[p][p] + print '%s = %.3f -%.3f/+%.3f' % (p, val[1], val[1] - val[0], val[2] - val[1]) class CollectLimits(CombineToolBase): diff --git a/CombineTools/python/combine/utils.py b/CombineTools/python/combine/utils.py index a036e138627..f424af1048c 100644 --- a/CombineTools/python/combine/utils.py +++ b/CombineTools/python/combine/utils.py @@ -63,3 +63,15 @@ def get_singles_results(file, scanned, columns): res[param][col] = [ allvals[i * 2 + 2], allvals[0], allvals[i * 2 + 1]] return res + +def get_none_results(file, params): + """Extracts the output from the MultiDimFit none (just fit) mode""" + res = {} + f = ROOT.TFile(file) + if f is None or f.IsZombie(): + return None + t = f.Get("limit") + t.GetEntry(0) + for param in params: + res[param] = getattr(t, param) + return res diff --git a/CombineTools/python/maketable.py b/CombineTools/python/maketable.py index a2f102c2eb8..27876b94b91 100644 --- a/CombineTools/python/maketable.py +++ b/CombineTools/python/maketable.py @@ -42,14 +42,14 @@ def TablefromJson(jsonfile, filename): names = ["mass", "minus2sigma", "minus1sigma", "expected", "plus1sigma", "plus2sigma", "observed"] # Get list of masses - maxpoints = [0 for i in range(7)] + maxpoints = 0 for key in js: x.append(float(key)) - maxpoints[0]+=1 + maxpoints+=1 # Sort list of masses (Bubblesort-Algorithm. Not very fast when there are many points to be sorted.) i=0 - while (i < maxpoints[0]-1): + while (i < maxpoints-1): if (x[i+1] def_w) else 1. - scale_w = (def_h / def_w) if (def_w > def_h) else 1. - - def_min = def_h if (def_h < def_w) else def_w - - R.gStyle.SetPadTopMargin(t * scale_h); # default 0.05 - R.gStyle.SetPadBottomMargin(b * scale_h); # default 0.13 - R.gStyle.SetPadLeftMargin(l * scale_w); # default 0.16 - R.gStyle.SetPadRightMargin(r * scale_w); # default 0.02 - # But note the new CMS style sets these: - # 0.08, 0.12, 0.12, 0.04 - - # Set number of axis tick divisions - R.gStyle.SetNdivisions(506, 'XYZ') # default 510 - - # Some marker properties not set in the default tdr style - R.gStyle.SetMarkerColor(R.kBlack) - R.gStyle.SetMarkerSize(1.0) - - - R.gStyle.SetLabelOffset(0.007, 'YZ') - # This is an adhoc adjustment to scale the x-axis label - # offset when we strect plot vertically - # Will also need to increase if first x-axis label has more than one digit - R.gStyle.SetLabelOffset(0.005 * (3. - 2. / scale_h), 'X') - - # In this next part we do a slightly involved calculation to set the axis - # title offsets, depending on the values of the TPad dimensions and margins. - # This is to try and ensure that regardless of how these pad values are set, - # the axis titles will be located towards the edges of the canvas and not get - # pushed off the edge - which can often happen if a fixed value is used. 
- title_size = 0.05 - title_px = title_size * def_min - label_size = 0.04 - R.gStyle.SetTitleSize(title_size, 'XYZ') - R.gStyle.SetLabelSize(label_size, 'XYZ') - - R.gStyle.SetTitleXOffset(0.5 * scale_h * - (1.2 * (def_h * b * scale_h - 0.6 * title_px)) / - title_px) - R.gStyle.SetTitleYOffset(0.5 * scale_w * - (1.2 * (def_w * l * scale_w - 0.6 * title_px)) / - title_px) - - # Only draw ticks where we have an axis - R.gStyle.SetPadTickX(0) - R.gStyle.SetPadTickY(0) - R.gStyle.SetTickLength(0.02, 'XYZ') - - R.gStyle.SetLegendBorderSize(0) - R.gStyle.SetLegendFont(42) - R.gStyle.SetLegendFillColor(0) - R.gStyle.SetFillColor(0) - - R.gROOT.ForceStyle() - -def DrawCMSLogo(pad, cmsText, extraText, iPosX, relPosX, relPosY, relExtraDY): - pad.cd() - cmsTextFont = 62 # default is helvetic-bold - - writeExtraText = len(extraText) > 0 - extraTextFont = 52 - - # text sizes and text offsets with respect to the top frame - # in unit of the top margin size - lumiTextOffset = 0.2 - cmsTextSize = 0.8 - # float cmsTextOffset = 0.1; // only used in outOfFrame version - - # ratio of 'CMS' and extra text size - extraOverCmsTextSize = 0.76 - - outOfFrame = False - if iPosX / 10 == 0: - outOfFrame = True - - alignY_ = 3 - alignX_ = 2 - if (iPosX / 10 == 0): alignX_ = 1 - if (iPosX == 0): alignX_ = 1 - if (iPosX == 0): alignY_ = 1 - if (iPosX / 10 == 1): alignX_ = 1 - if (iPosX / 10 == 2): alignX_ = 2 - if (iPosX / 10 == 3): alignX_ = 3 - # if (iPosX == 0): relPosX = 0.14 - align_ = 10 * alignX_ + alignY_ - - l = pad.GetLeftMargin() - t = pad.GetTopMargin() - r = pad.GetRightMargin() - b = pad.GetBottomMargin() - - latex = R.TLatex() - latex.SetNDC() - latex.SetTextAngle(0) - latex.SetTextColor(R.kBlack) - - extraTextSize = extraOverCmsTextSize * cmsTextSize; - pad_ratio = (float(pad.GetWh()) * pad.GetAbsHNDC()) / (float(pad.GetWw()) * pad.GetAbsWNDC()) - if (pad_ratio < 1.): pad_ratio = 1. - - - if outOfFrame: - latex.SetTextFont(cmsTextFont) - latex.SetTextAlign(11) - latex.SetTextSize(cmsTextSize * t * pad_ratio) - latex.DrawLatex(l, 1 - t + lumiTextOffset * t, cmsText) - - - posX_ = 0; - if iPosX % 10 <= 1: - posX_ = l + relPosX * (1 - l - r) - elif (iPosX % 10 == 2): - posX_ = l + 0.5 * (1 - l - r) - elif (iPosX % 10 == 3): - posX_ = 1 - r - relPosX * (1 - l - r) - - posY_ = 1 - t - relPosY * (1 - t - b) - if not outOfFrame: - latex.SetTextFont(cmsTextFont) - latex.SetTextSize(cmsTextSize * t * pad_ratio) - latex.SetTextAlign(align_) - latex.DrawLatex(posX_, posY_, cmsText) - if writeExtraText: - latex.SetTextFont(extraTextFont) - latex.SetTextAlign(align_) - latex.SetTextSize(extraTextSize * t * pad_ratio) - latex.DrawLatex(posX_, posY_ - relExtraDY * cmsTextSize * t, extraText) - elif writeExtraText: - if iPosX == 0: - posX_ = l + relPosX * (1 - l - r) - posY_ = 1 - t + lumiTextOffset * t - latex.SetTextFont(extraTextFont) - latex.SetTextSize(extraTextSize * t * pad_ratio) - latex.SetTextAlign(align_) - latex.DrawLatex(posX_, posY_, extraText) + """Modified version of the tdrStyle + + Args: + width (int): Canvas width in pixels + height (int): Canvas height in pixels + t (float): Pad top margin [0-1] + b (float): Pad bottom margin [0-1] + l (float): Pad left margin [0-1] + r (float): Pad right margin [0-1] + """ + SetTDRStyle() + + # Set the default canvas width and height in pixels + R.gStyle.SetCanvasDefW(width) + R.gStyle.SetCanvasDefH(height) + + # Set the default margins. These are given as fractions of the pad height + # for `Top` and `Bottom` and the pad width for `Left` and `Right`. 
But we + # want to specify all of these as fractions of the shortest length. + def_w = float(R.gStyle.GetCanvasDefW()) + def_h = float(R.gStyle.GetCanvasDefH()) + + scale_h = (def_w / def_h) if (def_h > def_w) else 1. + scale_w = (def_h / def_w) if (def_w > def_h) else 1. + + def_min = def_h if (def_h < def_w) else def_w + + R.gStyle.SetPadTopMargin(t * scale_h) + # default 0.05 + R.gStyle.SetPadBottomMargin(b * scale_h) + # default 0.13 + R.gStyle.SetPadLeftMargin(l * scale_w) + # default 0.16 + R.gStyle.SetPadRightMargin(r * scale_w) + # default 0.02 + # But note the new CMS style sets these: + # 0.08, 0.12, 0.12, 0.04 + + # Set number of axis tick divisions + R.gStyle.SetNdivisions(506, 'XYZ') # default 510 + + # Some marker properties not set in the default tdr style + R.gStyle.SetMarkerColor(R.kBlack) + R.gStyle.SetMarkerSize(1.0) + + R.gStyle.SetLabelOffset(0.007, 'YZ') + # This is an adhoc adjustment to scale the x-axis label + # offset when we stretch plot vertically + # Will also need to increase if first x-axis label has more than one digit + R.gStyle.SetLabelOffset(0.005 * (3. - 2. / scale_h), 'X') + + # In this next part we do a slightly involved calculation to set the axis + # title offsets, depending on the values of the TPad dimensions and + # margins. This is to try and ensure that regardless of how these pad + # values are set, the axis titles will be located towards the edges of the + # canvas and not get pushed off the edge - which can often happen if a + # fixed value is used. + title_size = 0.05 + title_px = title_size * def_min + label_size = 0.04 + R.gStyle.SetTitleSize(title_size, 'XYZ') + R.gStyle.SetLabelSize(label_size, 'XYZ') + + R.gStyle.SetTitleXOffset(0.5 * scale_h * + (1.2 * (def_h * b * scale_h - 0.6 * title_px)) / + title_px) + R.gStyle.SetTitleYOffset(0.5 * scale_w * + (1.2 * (def_w * l * scale_w - 0.6 * title_px)) / + title_px) + + # Only draw ticks where we have an axis + R.gStyle.SetPadTickX(0) + R.gStyle.SetPadTickY(0) + R.gStyle.SetTickLength(0.02, 'XYZ') + + R.gStyle.SetLegendBorderSize(0) + R.gStyle.SetLegendFont(42) + R.gStyle.SetLegendFillColor(0) + R.gStyle.SetFillColor(0) + + R.gROOT.ForceStyle() + + +def DrawCMSLogo(pad, cmsText, extraText, iPosX, relPosX, relPosY, relExtraDY, extraText2='', cmsTextSize=0.8): + pad.cd() + cmsTextFont = 62 # default is helvetic-bold + + writeExtraText = len(extraText) > 0 + writeExtraText2 = len(extraText2) > 0 + extraTextFont = 52 + + # text sizes and text offsets with respect to the top frame + # in unit of the top margin size + lumiTextOffset = 0.2 + # cmsTextSize = 0.8 + # float cmsTextOffset = 0.1; // only used in outOfFrame version + + # ratio of 'CMS' and extra text size + extraOverCmsTextSize = 0.76 + + outOfFrame = False + if iPosX / 10 == 0: + outOfFrame = True + + alignY_ = 3 + alignX_ = 2 + if (iPosX / 10 == 0): + alignX_ = 1 + if (iPosX == 0): + alignX_ = 1 + if (iPosX == 0): + alignY_ = 1 + if (iPosX / 10 == 1): + alignX_ = 1 + if (iPosX / 10 == 2): + alignX_ = 2 + if (iPosX / 10 == 3): + alignX_ = 3 + # if (iPosX == 0): relPosX = 0.14 + align_ = 10 * alignX_ + alignY_ + + l = pad.GetLeftMargin() + t = pad.GetTopMargin() + r = pad.GetRightMargin() + b = pad.GetBottomMargin() + + latex = R.TLatex() + latex.SetNDC() + latex.SetTextAngle(0) + latex.SetTextColor(R.kBlack) + + extraTextSize = extraOverCmsTextSize * cmsTextSize + pad_ratio = (float(pad.GetWh()) * pad.GetAbsHNDC()) / \ + (float(pad.GetWw()) * pad.GetAbsWNDC()) + if (pad_ratio < 1.): + pad_ratio = 1. 
+ + if outOfFrame: + latex.SetTextFont(cmsTextFont) + latex.SetTextAlign(11) + latex.SetTextSize(cmsTextSize * t * pad_ratio) + latex.DrawLatex(l, 1 - t + lumiTextOffset * t, cmsText) + + posX_ = 0 + if iPosX % 10 <= 1: + posX_ = l + relPosX * (1 - l - r) + elif (iPosX % 10 == 2): + posX_ = l + 0.5 * (1 - l - r) + elif (iPosX % 10 == 3): + posX_ = 1 - r - relPosX * (1 - l - r) + + posY_ = 1 - t - relPosY * (1 - t - b) + if not outOfFrame: + latex.SetTextFont(cmsTextFont) + latex.SetTextSize(cmsTextSize * t * pad_ratio) + latex.SetTextAlign(align_) + latex.DrawLatex(posX_, posY_, cmsText) + if writeExtraText: + latex.SetTextFont(extraTextFont) + latex.SetTextAlign(align_) + latex.SetTextSize(extraTextSize * t * pad_ratio) + latex.DrawLatex( + posX_, posY_ - relExtraDY * cmsTextSize * t, extraText) + if writeExtraText2: + latex.DrawLatex( + posX_, posY_ - 1.8 * relExtraDY * cmsTextSize * t, extraText2) + elif writeExtraText: + if iPosX == 0: + posX_ = l + relPosX * (1 - l - r) + posY_ = 1 - t + lumiTextOffset * t + latex.SetTextFont(extraTextFont) + latex.SetTextSize(extraTextSize * t * pad_ratio) + latex.SetTextAlign(align_) + latex.DrawLatex(posX_, posY_, extraText) + def PositionedLegend(width, height, pos, offset): - o = offset - w = width - h = height - l = R.gPad.GetLeftMargin() - t = R.gPad.GetTopMargin() - b = R.gPad.GetBottomMargin() - r = R.gPad.GetRightMargin() - if pos == 1: - return R.TLegend(l + o, 1 - t - o - h, l + o + w, 1 - t - o, '', 'NBNDC') - if pos == 2: - c = l + 0.5 * (1 - l - r) - return R.TLegend(c - 0.5 * w, 1 - t - o - h, c + 0.5 * w, 1 - t - o, '', 'NBNDC') - if pos == 3 : - return R.TLegend(1 - r - o - w, 1 - t - o - h, 1 - r - o, 1 - t - o, '', 'NBNDC') - if pos == 4: - return R.TLegend(l + o, b + o, l + o + w, b + o + h, '', 'NBNDC') - if pos == 5: - c = l + 0.5 * (1 - l - r) - return R.TLegend(c - 0.5 * w, b + o, c + 0.5 * w, b + o + h, '', 'NBNDC') - if pos == 6: - return R.TLegend(1 - r - o - w, b + o, 1 - r - o, b + o + h, '','NBNDC'); + o = offset + w = width + h = height + l = R.gPad.GetLeftMargin() + t = R.gPad.GetTopMargin() + b = R.gPad.GetBottomMargin() + r = R.gPad.GetRightMargin() + if pos == 1: + return R.TLegend(l + o, 1 - t - o - h, l + o + w, 1 - t - o, '', 'NBNDC') + if pos == 2: + c = l + 0.5 * (1 - l - r) + return R.TLegend(c - 0.5 * w, 1 - t - o - h, c + 0.5 * w, 1 - t - o, '', 'NBNDC') + if pos == 3: + return R.TLegend(1 - r - o - w, 1 - t - o - h, 1 - r - o, 1 - t - o, '', 'NBNDC') + if pos == 4: + return R.TLegend(l + o, b + o, l + o + w, b + o + h, '', 'NBNDC') + if pos == 5: + c = l + 0.5 * (1 - l - r) + return R.TLegend(c - 0.5 * w, b + o, c + 0.5 * w, b + o + h, '', 'NBNDC') + if pos == 6: + return R.TLegend(1 - r - o - w, b + o, 1 - r - o, b + o + h, '', 'NBNDC') + def Get(file, obj): - R.TH1.AddDirectory(False) - f_in = R.TFile(file) - res = R.gDirectory.Get(obj) - f_in.Close() - return res + R.TH1.AddDirectory(False) + f_in = R.TFile(file) + res = R.gDirectory.Get(obj) + f_in.Close() + return res + def RocCurveFrom1DHists(h_x, h_y, cut_is_greater_than): - backup = R.TH1.AddDirectoryStatus() - R.TH1.AddDirectory(False) - x_den = h_x.Clone() - x_num = h_x.Clone() - x_err = R.Double(0.) - x_int = h_x.IntegralAndError(0, h_x.GetNbinsX() + 1, x_err) - for i in range(1, h_x.GetNbinsX() + 1): - x_part_err = R.Double(0.) 
- x_part_int = h_x.IntegralAndError(i, h_x.GetNbinsX() + 1, x_part_err) if cut_is_greater_than else h_x.IntegralAndError(0, i, x_part_err) - x_den.SetBinContent(i, x_int) - x_den.SetBinError(i, x_err) - x_num.SetBinContent(i, x_part_int) - x_num.SetBinError(i, x_part_err) - y_den = h_y.Clone() - y_num = h_y.Clone() - y_err = R.Double(0.) - y_int = h_y.IntegralAndError(0, h_y.GetNbinsX() + 1, y_err) - for i in range(1, h_y.GetNbinsX() + 1): - y_part_err = R.Double(0.) - y_part_int = h_y.IntegralAndError(i, h_y.GetNbinsX() + 1, y_part_err) if cut_is_greater_than else h_y.IntegralAndError(0, i, y_part_err) - y_den.SetBinContent(i, y_int) - y_den.SetBinError(i, y_err) - y_num.SetBinContent(i, y_part_int) - y_num.SetBinError(i, y_part_err) - # x_den.Print('all') - # x_num.Print('all') - # y_den.Print('all') - # y_num.Print('all') - x_gr = R.TGraphAsymmErrors(x_num, x_den) - y_gr = R.TGraphAsymmErrors(y_num, y_den) - - res = y_gr.Clone() - for i in range (0, res.GetN()): - res.GetX()[i] = x_gr.GetY()[i] - res.GetEXlow()[i] = x_gr.GetEYlow()[i] - res.GetEXhigh()[i] = x_gr.GetEYhigh()[i] - res.Sort() - R.TH1.AddDirectory(backup) - return res + backup = R.TH1.AddDirectoryStatus() + R.TH1.AddDirectory(False) + x_den = h_x.Clone() + x_num = h_x.Clone() + x_err = R.Double(0.) + x_int = h_x.IntegralAndError(0, h_x.GetNbinsX() + 1, x_err) + for i in range(1, h_x.GetNbinsX() + 1): + x_part_err = R.Double(0.) + x_part_int = h_x.IntegralAndError(i, h_x.GetNbinsX( + ) + 1, x_part_err) if cut_is_greater_than else h_x.IntegralAndError(0, i, x_part_err) + x_den.SetBinContent(i, x_int) + x_den.SetBinError(i, x_err) + x_num.SetBinContent(i, x_part_int) + x_num.SetBinError(i, x_part_err) + y_den = h_y.Clone() + y_num = h_y.Clone() + y_err = R.Double(0.) + y_int = h_y.IntegralAndError(0, h_y.GetNbinsX() + 1, y_err) + for i in range(1, h_y.GetNbinsX() + 1): + y_part_err = R.Double(0.) + y_part_int = h_y.IntegralAndError(i, h_y.GetNbinsX( + ) + 1, y_part_err) if cut_is_greater_than else h_y.IntegralAndError(0, i, y_part_err) + y_den.SetBinContent(i, y_int) + y_den.SetBinError(i, y_err) + y_num.SetBinContent(i, y_part_int) + y_num.SetBinError(i, y_part_err) + # x_den.Print('all') + # x_num.Print('all') + # y_den.Print('all') + # y_num.Print('all') + x_gr = R.TGraphAsymmErrors(x_num, x_den) + y_gr = R.TGraphAsymmErrors(y_num, y_den) + + res = y_gr.Clone() + for i in range(0, res.GetN()): + res.GetX()[i] = x_gr.GetY()[i] + res.GetEXlow()[i] = x_gr.GetEYlow()[i] + res.GetEXhigh()[i] = x_gr.GetEYhigh()[i] + res.Sort() + R.TH1.AddDirectory(backup) + return res + def CreateAxisHist(src, at_limits): - backup = R.gPad - tmp = R.TCanvas() - tmp.cd() - src.Draw('AP') - result = src.GetHistogram().Clone() - if (at_limits): - min = 0. - max = 0. - x = R.Double(0.) - y = R.Double(0.) - src.GetPoint(0, x, y) - min = float(x) - max = float(x) - for i in range(1, src.GetN()): - src.GetPoint(i, x, y) - if x < min: min = float(x) - if x > max: max = float(x) - result.GetXaxis().SetLimits(min, max) - R.gPad = backup - return result - -class MultiGraphPlot: - def __init__(self, **args): - self.styler = args.get('styler', ModTDRStyle) - self.draw = args.get('draw') - self.outputFileName = args.get('outputFileName','test.pdf') - self.legend = args.get('legend', None) - self.pad = args.get('pad', None) - self.logy = args.get('logy', None) - - def Create(self): - self.styler() - if not self.pad: - self.canv = R.TCanvas('canv', 'canv') - self.pad = R.TPad('pad', 'pad', 0., 0., 1., 1.) 
- self.pad.SetGridx(True) - self.pad.SetGridy(True) - self.pad.Draw() - self.pad.cd() - self.axis = CreateAxisHist(self.draw[0].src, True) - self.pad.cd() - self.axis.GetXaxis().SetLimits(0., 1.) - self.axis.GetYaxis().SetRangeUser(1E-4, 0.015) - self.axis.GetXaxis().SetTitle('#tau_{h} Efficiency') - self.axis.GetYaxis().SetTitle('Jet#rightarrow#tau_{h} Fake Rate') - self.axis.Draw() - for d in self.draw: - ele = d.get() - ele.Draw(d.drawOpts + 'SAME') - if self.legend: - self.leg = self.legend() - for d in self.draw: - self.leg.AddEntry(d.src, d.src.GetTitle() if not d.legend else d.legend, 'L') - self.leg.Draw() - if self.logy is not None: self.pad.SetLogy(self.logy) - DrawCMSLogo(self.pad, 'CMS', 'Simulation', 0, 0.045, 0.035, 1.2) - self.canv.SaveAs(self.outputFileName) - -def StyleGraph(obj, **args): - obj.SetLineColor(args.get('lineColor', 1)) - obj.SetFillColor(args.get('fillColor', 0)) - obj.SetMarkerColor(args.get('markerColor', 1)) - obj.SetMarkerStyle(args.get('markerStyle', 20)) - obj.SetLineWidth(2) - -class Graph: - def __init__(self, **args): - self.src = args.get('src') - self.style = args.get('style') - self.drawOpts = args.get('drawOpts', 'CP') - self.legend = args.get('legend', None) - - def get(self): - self.style(self.src) - return self.src + backup = R.gPad + tmp = R.TCanvas() + tmp.cd() + src.Draw('AP') + result = src.GetHistogram().Clone() + if (at_limits): + min = 0. + max = 0. + x = R.Double(0.) + y = R.Double(0.) + src.GetPoint(0, x, y) + min = float(x) + max = float(x) + for i in range(1, src.GetN()): + src.GetPoint(i, x, y) + if x < min: + min = float(x) + if x > max: + max = float(x) + result.GetXaxis().SetLimits(min, max) + R.gPad = backup + return result + def CreateTransparentColor(color, alpha): - adapt = R.gROOT.GetColor(color) - new_idx = R.gROOT.GetListOfColors().GetSize() + 1 - trans = R.TColor(new_idx, adapt.GetRed(), adapt.GetGreen(), adapt.GetBlue(), '', alpha) + adapt = R.gROOT.GetColor(color) + new_idx = R.gROOT.GetListOfColors().GetLast() + 1 + trans = R.TColor( + new_idx, adapt.GetRed(), adapt.GetGreen(), adapt.GetBlue(), '', alpha) + COL_STORE.append(trans) trans.SetName('userColor%i' % new_idx) return new_idx + def TFileIsGood(filename): - if not os.path.exists(filename): return False - fin = R.TFile.Open(filename) - if fin: R.TFile.Close(fin) - if not fin: return False - return True + fin = R.TFile(filename) + if not fin: + return False + if fin and not fin.IsOpen(): + return False + elif fin and fin.IsOpen() and fin.IsZombie(): + fin.Close() + return False + elif fin and fin.IsOpen() and fin.TestBit(R.TFile.kRecovered): + fin.Close() + # don't consider a recovered file to be OK + return False + else: + fin.Close() + return True + def MakeTChain(files, tree): chain = R.TChain(tree) @@ -464,34 +464,42 @@ def MakeTChain(files, tree): chain.Add(f) return chain + def TGraphFromTree(tree, xvar, yvar, selection): tree.Draw(xvar + ':' + yvar, selection, 'goff') gr = R.TGraph(tree.GetSelectedRows(), tree.GetV1(), tree.GetV2()) return gr + def TGraph2DFromTree(tree, xvar, yvar, zvar, selection): - tree.Draw(xvar+':'+yvar+':'+zvar, selection, 'goff') - gr = R.TGraph2D(tree.GetSelectedRows(), tree.GetV1(), tree.GetV2(), tree.GetV3()) + tree.Draw(xvar + ':' + yvar + ':' + zvar, selection, 'goff') + gr = R.TGraph2D( + tree.GetSelectedRows(), tree.GetV1(), tree.GetV2(), tree.GetV3()) return gr + def ParamFromFilename(filename, param): - if len(re.findall(param+'\.\d+\.\d+', filename)) : - num1 = re.findall(param+'\.\d+\.\d+', 
filename)[0].replace(param+'.','') + if len(re.findall(param + '\.\d+\.\d+', filename)): + num1 = re.findall( + param + '\.\d+\.\d+', filename)[0].replace(param + '.', '') return float(num1) - elif len(re.findall(param+'\.\d+', filename)) : - num1 = re.findall(param + '\.\d+', filename)[0].replace(param+'.','') + elif len(re.findall(param + '\.\d+', filename)): + num1 = re.findall(param + '\.\d+', filename)[0].replace(param + '.', '') return int(num1) - else : + else: print "Error: parameter " + param + " not found in filename" + def RemoveGraphXDuplicates(graph): for i in xrange(graph.GetN() - 1): - if graph.GetX()[i+1] == graph.GetX()[i]: - # print 'Removing duplicate point (%f, %f)' % (graph.GetX()[i+1], graph.GetY()[i+1]) - graph.RemovePoint(i+1) + if graph.GetX()[i + 1] == graph.GetX()[i]: + # print 'Removing duplicate point (%f, %f)' % (graph.GetX()[i+1], + # graph.GetY()[i+1]) + graph.RemovePoint(i + 1) RemoveGraphXDuplicates(graph) break + def RemoveGraphYAll(graph, val): for i in xrange(graph.GetN()): if graph.GetY()[i] == val: @@ -500,14 +508,27 @@ def RemoveGraphYAll(graph, val): RemoveGraphYAll(graph, val) break + +def RemoveSmallDelta(graph, val): + for i in xrange(graph.GetN()): + diff = abs(graph.GetY()[i]) + if diff < val: + print '[RemoveSmallDelta] Removing point (%f, %f)' % (graph.GetX()[i], graph.GetY()[i]) + graph.RemovePoint(i) + RemoveSmallDelta(graph, val) + break + + def RemoveGraphYAbove(graph, val): for i in xrange(graph.GetN()): if graph.GetY()[i] > val: - # print 'Removing point (%f, %f)' % (graph.GetX()[i], graph.GetY()[i]) + # print 'Removing point (%f, %f)' % (graph.GetX()[i], + # graph.GetY()[i]) graph.RemovePoint(i) RemoveGraphYAbove(graph, val) break + def OnePad(): pad = R.TPad('pad', 'pad', 0., 0., 1., 1.) pad.Draw() @@ -515,7 +536,8 @@ def OnePad(): result = [pad] return result -def TwoPadSplit(split_point, gap_low, gap_high) : + +def TwoPadSplit(split_point, gap_low, gap_high): upper = R.TPad('upper', 'upper', 0., 0., 1., 1.) upper.SetBottomMargin(split_point + gap_high) upper.SetFillStyle(4000) @@ -525,10 +547,11 @@ def TwoPadSplit(split_point, gap_low, gap_high) : lower.SetFillStyle(4000) lower.Draw() upper.cd() - result = [upper,lower] + result = [upper, lower] return result -def TwoPadSplitColumns(split_point, gap_left, gap_right) : + +def TwoPadSplitColumns(split_point, gap_left, gap_right): left = R.TPad('left', 'left', 0., 0., 1., 1.) left.SetRightMargin(1 - split_point + gap_right) left.SetFillStyle(4000) @@ -538,24 +561,45 @@ def TwoPadSplitColumns(split_point, gap_left, gap_right) : right.SetFillStyle(4000) right.Draw() left.cd() - result = [left,right] + result = [left, right] return result -def ImproveMinimum(graph, func): + +def ImproveMinimum(graph, func, doIt=False): fit_x = 0. - fit_y = 0. + fit_y = 999. 
fit_i = 0 for i in xrange(graph.GetN()): - if graph.GetY()[i] == 0.: - fit_i= i + if graph.GetY()[i] < fit_y: + fit_i = i fit_x = graph.GetX()[i] fit_y = graph.GetY()[i] - break - if fit_i == 0 or fit_i == (graph.GetN() - 1): return - min_x = func.GetMinimumX(graph.GetX()[fit_i-1], graph.GetX()[fit_i+1]) + if fit_i == 0 or fit_i == (graph.GetN() - 1): + if doIt: + min_x = graph.GetX()[fit_i] + min_y = graph.GetY()[fit_i] + for i in xrange(graph.GetN()): + before = graph.GetY()[i] + graph.GetY()[i] -= min_y + after = graph.GetY()[i] + print 'Point %i, before=%f, after=%f' % (i, before, after) + return (fit_x, fit_y) + search_min = fit_i - 2 if fit_i >= 2 else fit_i - 1 + search_max = fit_i + 2 if fit_i + 2 < graph.GetN() else fit_i + 1 + min_x = func.GetMinimumX(graph.GetX()[search_min], graph.GetX()[search_max]) min_y = func.Eval(min_x) print '[ImproveMinimum] Fit minimum was (%f, %f)' % (fit_x, fit_y) print '[ImproveMinimum] Better minimum was (%f, %f)' % (min_x, min_y) + if doIt: + for i in xrange(graph.GetN()): + before = graph.GetY()[i] + graph.GetY()[i] -= min_y + after = graph.GetY()[i] + print 'Point %i, before=%f, after=%f' % (i, before, after) + graph.Set(graph.GetN() + 1) + graph.SetPoint(graph.GetN() - 1, min_x, 0) + graph.Sort() + return (min_x, min_y) def FindCrossingsWithSpline(graph, func, yval): @@ -563,21 +607,21 @@ def FindCrossingsWithSpline(graph, func, yval): intervals = [] current = None for i in xrange(graph.GetN() - 1): - if (graph.GetY()[i] - yval) * (graph.GetY()[i+1] - yval) < 0.: - cross = func.GetX(yval, graph.GetX()[i], graph.GetX()[i+1]) + if (graph.GetY()[i] - yval) * (graph.GetY()[i + 1] - yval) < 0.: + cross = func.GetX(yval, graph.GetX()[i], graph.GetX()[i + 1]) if (graph.GetY()[i] - yval) > 0. and current is None: current = { - 'lo' : cross, - 'hi' : graph.GetX()[graph.GetN() - 1], - 'valid_lo' : True, - 'valid_hi' : False + 'lo': cross, + 'hi': graph.GetX()[graph.GetN() - 1], + 'valid_lo': True, + 'valid_hi': False } if (graph.GetY()[i] - yval) < 0. 
and current is None: current = { - 'lo' : graph.GetX()[0], - 'hi' : cross, - 'valid_lo' : False, - 'valid_hi' : True + 'lo': graph.GetX()[0], + 'hi': cross, + 'valid_lo': False, + 'valid_hi': True } intervals.append(current) current = None @@ -586,42 +630,99 @@ def FindCrossingsWithSpline(graph, func, yval): current['valid_hi'] = True intervals.append(current) current = None - # print 'Crossing between: (%f, %f) -> (%f, %f) at %f' % (graph.GetX()[i], graph.GetY()[i], graph.GetX()[i+1], graph.GetY()[i+1], cross) + # print 'Crossing between: (%f, %f) -> (%f, %f) at %f' % + # (graph.GetX()[i], graph.GetY()[i], graph.GetX()[i+1], + # graph.GetY()[i+1], cross) crossings.append(cross) - if current is not None: intervals.append(current) + if current is not None: + intervals.append(current) if len(intervals) == 0: - current = { - 'lo' : graph.GetX()[0], - 'hi' : graph.GetX()[graph.GetN() - 1], - 'valid_lo' : False, - 'valid_hi' : False - } - intervals.append(current) + current = { + 'lo': graph.GetX()[0], + 'hi': graph.GetX()[graph.GetN() - 1], + 'valid_lo': False, + 'valid_hi': False + } + intervals.append(current) print intervals return intervals # return crossings + def GetAxisHist(pad): - pad_obs = pad.GetListOfPrimitives() - if pad_obs is None: return None - nextit = R.TIter(pad_obs) - obj = None - for obj in pad_obs: - if obj.InheritsFrom(R.TH1.Class()): - return obj - if obj.InheritsFrom(R.TMultiGraph.Class()): - return obj.GetHistogram() - if obj.InheritsFrom(R.TGraph.Class()): - return obj.GetHistogram() - if obj.InheritsFrom(R.THStack.Class()): - return hs.GetHistogram() - return None + pad_obs = pad.GetListOfPrimitives() + if pad_obs is None: + return None + obj = None + for obj in pad_obs: + if obj.InheritsFrom(R.TH1.Class()): + return obj + if obj.InheritsFrom(R.TMultiGraph.Class()): + return obj.GetHistogram() + if obj.InheritsFrom(R.TGraph.Class()): + return obj.GetHistogram() + if obj.InheritsFrom(R.THStack.Class()): + return obj.GetHistogram() + return None + + +def FixTopRange(pad, fix_y, fraction): + hobj = GetAxisHist(pad) + ymin = hobj.GetMinimum() + hobj.SetMaximum((fix_y - fraction * ymin) / (1. - fraction)) + if R.gPad.GetLogy(): + if ymin == 0.: + print 'Cannot adjust log-scale y-axis range if the minimum is zero!' + return + maxval = (math.log10(fix_y) - fraction * math.log10(ymin)) / \ + (1 - fraction) + maxval = math.pow(10, maxval) + hobj.SetMaximum(maxval) + + +def GetPadYMaxInRange(pad, x_min, x_max): + pad_obs = pad.GetListOfPrimitives() + if pad_obs is None: + return 0. + h_max = -99999. + for obj in pad_obs: + if obj.InheritsFrom(R.TH1.Class()): + hobj = obj + for j in xrange(1, hobj.GetNbinsX()): + if (hobj.GetBinLowEdge(j) + hobj.GetBinWidth(j) < x_min or + hobj.GetBinLowEdge(j) > x_max): + continue + if (hobj.GetBinContent(j) + hobj.GetBinError(j) > h_max): + h_max = hobj.GetBinContent(j) + hobj.GetBinError(j) + + if obj.InheritsFrom(R.TGraph.Class()): + gobj = obj + n = gobj.GetN() + for k in xrange(0, n): + x = gobj.GetX()[k] + y = gobj.GetY()[k] + if x < x_min or x > x_max: + continue + if y > h_max: + h_max = y + return h_max + + +def GetPadYMax(pad): + pad_obs = pad.GetListOfPrimitives() + if pad_obs is None: + return 0.
+ xmin = GetAxisHist(pad).GetXaxis().GetXmin() + xmax = GetAxisHist(pad).GetXaxis().GetXmax() + return GetPadYMaxInRange(pad, xmin, xmax) + def DrawHorizontalLine(pad, line, yval): - axis = GetAxisHist(pad) - xmin = axis.GetXaxis().GetXmin() - xmax = axis.GetXaxis().GetXmax() - line.DrawLine(xmin, yval, xmax, yval) + axis = GetAxisHist(pad) + xmin = axis.GetXaxis().GetXmin() + xmax = axis.GetXaxis().GetXmax() + line.DrawLine(xmin, yval, xmax, yval) + def DrawTitle(pad, text, align): pad_backup = R.gPad @@ -629,29 +730,38 @@ def DrawTitle(pad, text, align): t = pad.GetTopMargin() l = pad.GetLeftMargin() r = pad.GetRightMargin() - - pad_ratio = (float(pad.GetWh()) * pad.GetAbsHNDC()) / (float(pad.GetWw()) * pad.GetAbsWNDC()) - if pad_ratio < 1.: pad_ratio = 1. - + + pad_ratio = (float(pad.GetWh()) * pad.GetAbsHNDC()) / \ + (float(pad.GetWw()) * pad.GetAbsWNDC()) + if pad_ratio < 1.: + pad_ratio = 1. + textSize = 0.6 textOffset = 0.2 - + latex = R.TLatex() latex.SetNDC() latex.SetTextAngle(0) latex.SetTextColor(R.kBlack) latex.SetTextFont(42) latex.SetTextSize(textSize * t * pad_ratio) - + y_off = 1 - t + textOffset * t - if align == 1: latex.SetTextAlign(11) - if align == 1: latex.DrawLatex(l, y_off, text) - if align == 2: latex.SetTextAlign(21) - if align == 2: latex.DrawLatex(l + (1 - l - r) * 0.5, y_off, text) - if align == 3: latex.SetTextAlign(31) - if align == 3: latex.DrawLatex(1 - r, y_off, text) + if align == 1: + latex.SetTextAlign(11) + if align == 1: + latex.DrawLatex(l, y_off, text) + if align == 2: + latex.SetTextAlign(21) + if align == 2: + latex.DrawLatex(l + (1 - l - r) * 0.5, y_off, text) + if align == 3: + latex.SetTextAlign(31) + if align == 3: + latex.DrawLatex(1 - r, y_off, text) pad_backup.cd() + def ReZeroTGraph(gr, doIt=False): fit_x = 0. fit_y = 0. @@ -664,25 +774,28 @@ def ReZeroTGraph(gr, doIt=False): min_y = 0. 
for i in xrange(gr.GetN()): if gr.GetY()[i] < min_y: - min_y = gr.GetY()[i] - min_x = gr.GetX()[i] + min_y = gr.GetY()[i] + min_x = gr.GetX()[i] if min_y < fit_y: - print '[ReZeroTGraph] Fit minimum was (%f, %f)' % (fit_x, fit_y) - print '[ReZeroTGraph] Better minimum was (%f, %f)' % (min_x, min_y) - if doIt: - for i in xrange(gr.GetN()): - before = gr.GetY()[i] - gr.GetY()[i] -= min_y - after = gr.GetY()[i] - print 'Point %i, before=%f, after=%f' % (i, before, after) - -def RemoveNearMin(graph, val, spacing = None): + print '[ReZeroTGraph] Fit minimum was (%f, %f)' % (fit_x, fit_y) + print '[ReZeroTGraph] Better minimum was (%f, %f)' % (min_x, min_y) + if doIt: + for i in xrange(gr.GetN()): + before = gr.GetY()[i] + gr.GetY()[i] -= min_y + after = gr.GetY()[i] + print 'Point %i, before=%f, after=%f' % (i, before, after) + return min_y + + +def RemoveNearMin(graph, val, spacing=None): # assume graph is sorted: n = graph.GetN() - if n < 5: return + if n < 5: + return if spacing is None: - spacing = (graph.GetX()[n-1] - graph.GetX()[0]) / float(n - 2) - # print '[RemoveNearMin] Graph has spacing of %.3f' % spacing + spacing = (graph.GetX()[n - 1] - graph.GetX()[0]) / float(n - 2) + # print '[RemoveNearMin] Graph has spacing of %.3f' % spacing for i in xrange(graph.GetN()): if graph.GetY()[i] == 0.: bf = graph.GetX()[i] @@ -690,195 +803,307 @@ def RemoveNearMin(graph, val, spacing = None): # print '[RemoveNearMin] Found best-fit at %.3f' % bf break for i in xrange(graph.GetN()): - if i == bf_i: continue + if i == bf_i: + continue if abs(graph.GetX()[i] - bf) < (val * spacing): print '[RemoveNearMin] Removing point (%f, %f) close to minimum at %f' % (graph.GetX()[i], graph.GetY()[i], bf) graph.RemovePoint(i) RemoveNearMin(graph, val, spacing) break -def contourFromTH2(h2in, threshold, minPoints=10, mult = 1.0): - # std::cout << "Getting contour at threshold " << threshold << " from " - # << h2in->GetName() << std::endl; - # // http://root.cern.ch/root/html/tutorials/hist/ContourList.C.html - contoursList = [threshold] - contours = array('d', contoursList) - if (h2in.GetNbinsX() * h2in.GetNbinsY()) > 10000: minPoints = 50 - if (h2in.GetNbinsX() * h2in.GetNbinsY()) <= 100: minPoints = 10 - # h2 = h2in.Clone() - h2 = frameTH2D(h2in, threshold, mult) +def TH2FromTGraph2D(graph, method='BinEdgeAligned', + force_x_width=None, + force_y_width=None): + """Build an empty TH2 from the set of points in a TGraph2D + + There is no unique way to define a TH2 binning given an arbitrary + TGraph2D, therefore this function supports multiple named methods: + + - `BinEdgeAligned` simply takes the sets of x- and y- values in the + TGraph2D and uses these as the bin edge arrays in the TH2. The + implication of this is that when filling the bin contents interpolation + will be required when evaluating the TGraph2D at the bin centres. + - `BinCenterAligned` will try to have the TGraph2D points at the bin + centers, but this will only work completely correctly when the input + point spacing is regular. The algorithm first identifies the bin width + as the smallest interval between points on each axis. The start + position of the TH2 axis is then defined as the lowest value in the + TGraph2D minus half this width, and the axis continues with regular + bins until the graph maximum is passed. 
+ + Args: + graph (TGraph2D): Should have at least two unique x and y values, + otherwise we can't define any bins + method (str): The binning algorithm to use + force_x_width (float): Override the derived x-axis bin width in the + CenterAligned method + force_y_width (float): Override the derived y-axis bin width in the + CenterAligned method + + Raises: + RuntimeError: If the method name is not recognised + + Returns: + TH2F: The exact binning of the TH2F depends on the chosen method + """ + x_vals = set() + y_vals = set() - h2.SetContour(1, contours) - - # Draw contours as filled regions, and Save points - backup = R.gPad - canv = R.TCanvas('tmp', 'tmp') - canv.cd() - h2.Draw('CONT Z LIST') - R.gPad.Update() # Needed to force the plotting and retrieve the contours in + for i in xrange(graph.GetN()): + x_vals.add(graph.GetX()[i]) + y_vals.add(graph.GetY()[i]) + + x_vals = sorted(x_vals) + y_vals = sorted(y_vals) + if method == 'BinEdgeAligned': + h_proto = R.TH2F('prototype', '', + len(x_vals) - 1, array('d', x_vals), + len(y_vals) - 1, array('d', y_vals)) + elif method == 'BinCenterAligned': + x_widths = [] + y_widths = [] + for i in xrange(1, len(x_vals)): + x_widths.append(x_vals[i] - x_vals[i - 1]) + for i in xrange(1, len(y_vals)): + y_widths.append(y_vals[i] - y_vals[i - 1]) + x_min = min(x_widths) if force_x_width is None else force_x_width + y_min = min(y_widths) if force_y_width is None else force_y_width + x_bins = int(((x_vals[-1] - (x_vals[0] - 0.5 * x_min)) / x_min) + 0.5) + y_bins = int(((y_vals[-1] - (y_vals[0] - 0.5 * y_min)) / y_min) + 0.5) + print '[TH2FromTGraph2D] x-axis binning: (%i, %g, %g)' % (x_bins, x_vals[0] - 0.5 * x_min, x_vals[0] - 0.5 * x_min + x_bins * x_min) + print '[TH2FromTGraph2D] y-axis binning: (%i, %g, %g)' % (y_bins, y_vals[0] - 0.5 * y_min, y_vals[0] - 0.5 * y_min + y_bins * y_min) + # Use a number slightly smaller than 0.5 because the TGraph2D interpolation + # is fussy about evaluating on the boundary + h_proto = R.TH2F('prototype', '', + x_bins, x_vals[ + 0] - 0.49999 * x_min, x_vals[0] - 0.50001 * x_min + x_bins * x_min, + y_bins, y_vals[0] - 0.49999 * y_min, y_vals[0] - 0.50001 * y_min + y_bins * y_min) + else: + raise RuntimeError( + '[TH2FromTGraph2D] Method %s not supported' % method) + h_proto.SetDirectory(0) + return h_proto + + +def contourFromTH2(h2in, threshold, minPoints=10, frameValue=1000.): + # // http://root.cern.ch/root/html/tutorials/hist/ContourList.C.html + contoursList = [threshold] + contours = array('d', contoursList) + # if (h2in.GetNbinsX() * h2in.GetNbinsY()) > 10000: minPoints = 50 + # if (h2in.GetNbinsX() * h2in.GetNbinsY()) <= 100: minPoints = 10 + + h2 = frameTH2D(h2in, threshold, frameValue) + + h2.SetContour(1, contours) + + # Draw contours as filled regions, and Save points + # backup = R.gPad # doesn't work in pyroot, backup behaves like a ref to gPad + canv = R.TCanvas('tmp', 'tmp') + canv.cd() + h2.Draw('CONT Z LIST') + R.gPad.Update() # Needed to force the plotting and retrieve the contours in + + conts = R.gROOT.GetListOfSpecials().FindObject('contours') + contLevel = None + + if conts is None or conts.GetSize() == 0: + print '*** No Contours Were Extracted!'
+ return None + ret = R.TList() + for i in xrange(conts.GetSize()): + contLevel = conts.At(i) + print '>> Contour %d has %d Graphs' % (i, contLevel.GetSize()) + for j in xrange(contLevel.GetSize()): + gr1 = contLevel.At(j) + print '\t Graph %d has %d points' % (j, gr1.GetN()) + if gr1.GetN() > minPoints: + ret.Add(gr1.Clone()) + # // break; + # backup.cd() + canv.Close() + return ret + + +def frameTH2D(hist, threshold, frameValue=1000): + # Now supports variable-binned histograms. First adds a narrow frame (1% + # of bin widths) around the outside with the same values as the real edge. + # Then adds another frame around this one filled with some chosen + # value that will make the contours close + + # Get lists of the bin edges + x_bins = [hist.GetXaxis().GetBinLowEdge(x) + for x in xrange(1, hist.GetNbinsX() + 2)] + y_bins = [hist.GetYaxis().GetBinLowEdge(y) + for y in xrange(1, hist.GetNbinsY() + 2)] + + # New bin edge arrays will need an extra four values + x_new = [0.] * (len(x_bins) + 4) + y_new = [0.] * (len(y_bins) + 4) + + # Calculate bin widths at the edges + xw1 = x_bins[1] - x_bins[0] + xw2 = x_bins[-1] - x_bins[-2] + yw1 = y_bins[1] - y_bins[0] + yw2 = y_bins[-1] - y_bins[-2] + + # Set the edges of the outer framing bins and the adjusted + # edge of the real edge bins + x_new[0] = x_bins[0] - 2 * xw1 * 0.01 + x_new[1] = x_bins[0] - 1 * xw1 * 0.01 + x_new[-1] = x_bins[-1] + 2 * xw2 * 0.01 + x_new[-2] = x_bins[-1] + 1 * xw2 * 0.01 + y_new[0] = y_bins[0] - 2 * yw1 * 0.01 + y_new[1] = y_bins[0] - 1 * yw1 * 0.01 + y_new[-1] = y_bins[-1] + 2 * yw2 * 0.01 + y_new[-2] = y_bins[-1] + 1 * yw2 * 0.01 + + # Copy the remaining bin edges from the hist + for i in xrange(0, len(x_bins)): + x_new[i + 2] = x_bins[i] + for i in xrange(0, len(y_bins)): + y_new[i + 2] = y_bins[i] + + # print x_new + # print y_new + + framed = R.TH2D('%s framed' % hist.GetName(), '%s framed' % hist.GetTitle(), len( + x_new) - 1, array('d', x_new), len(y_new) - 1, array('d', y_new)) + framed.SetDirectory(0) + + for x in xrange(1, framed.GetNbinsX() + 1): + for y in xrange(1, framed.GetNbinsY() + 1): + if x == 1 or x == framed.GetNbinsX() or y == 1 or y == framed.GetNbinsY(): + # This is a frame bin + framed.SetBinContent(x, y, frameValue) + else: + # adjust x and y if we're in the first frame so as to copy the output + # values from the real TH2 + ux = x + uy = y + if x == 2: + ux += 1 + elif x == (len(x_new) - 2): + ux -= 1 + if y == 2: + uy += 1 + elif y == (len(y_new) - 2): + uy -= 1 + framed.SetBinContent(x, y, hist.GetBinContent(ux - 2, uy - 2)) + return framed - conts = R.gROOT.GetListOfSpecials().FindObject('contours') - contLevel = None - - if conts is None or conts.GetSize() == 0: - print '*** No Contours Were Extracted!'
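A minimal usage sketch of the TH2FromTGraph2D / fillTH2 / contourFromTH2 pipeline above, assuming the plotting module is importable as CombineHarvester.CombineTools.plotting and aliased to plot (as in the HybridNewGrid code); the grid of CLs values and all names here are invented for illustration, and the 0.05 threshold corresponds to a 95% CL exclusion contour:

import ROOT
import CombineHarvester.CombineTools.plotting as plot

# Toy CLs surface on a regular 3x3 (x, y) grid; values are purely illustrative
scan = ROOT.TGraph2D()
points = [(100, 1, 0.20), (100, 2, 0.04), (100, 3, 0.01),
          (200, 1, 0.30), (200, 2, 0.06), (200, 3, 0.02),
          (300, 1, 0.50), (300, 2, 0.10), (300, 3, 0.03)]
for i, (x, y, cls) in enumerate(points):
    scan.SetPoint(i, x, y, cls)

# Regular point spacing, so BinCenterAligned puts each point at a bin centre
hist = plot.TH2FromTGraph2D(scan, method='BinCenterAligned')
plot.fillTH2(hist, scan)  # interpolate the graph at every bin centre

# frameTH2D (called internally) makes the contours close at the grid edges;
# minPoints=1 keeps even the short contour graphs of this tiny toy grid
conts = plot.contourFromTH2(hist, 0.05, minPoints=1)
if conts is not None:
    for i in xrange(conts.GetSize()):
        print 'Contour %i has %i points' % (i, conts.At(i).GetN())
        # each entry is a TGraph that could be drawn with the 'L SAME' option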
- return None - ret = R.TList() - for i in xrange(conts.GetSize()): - contLevel = conts.At(i) - print 'Contour %d has %d Graphs\n' % (i, contLevel.GetSize()) - for j in xrange(contLevel.GetSize()): - gr1 = contLevel.At(j) - print'\t Graph %d has %d points' % (j, gr1.GetN()) - if gr1.GetN() > minPoints: ret.Add(gr1.Clone()) - # // break; - backup.cd() - return ret - - -def frameTH2D(hist, threshold, mult = 1.0): - # NEW LOGIC: - # - pretend that the center of the last bin is on the border if the frame - # - add one tiny frame with huge values - frameValue = 1000 - # if (TString(in->GetName()).Contains("bayes")) frameValue = -1000; - - xw = hist.GetXaxis().GetBinWidth(1) - yw = hist.GetYaxis().GetBinWidth(1) - - nx = hist.GetNbinsX() - ny = hist.GetNbinsY() - - x0 = hist.GetXaxis().GetXmin() - x1 = hist.GetXaxis().GetXmax() - - y0 = hist.GetYaxis().GetXmin() - y1 = hist.GetYaxis().GetXmax() - xbins = array('d', [0]*999) - ybins = array('d', [0]*999) - eps = 0.1 - # mult = 1.0 - - xbins[0] = x0 - eps * xw - xw * mult - xbins[1] = x0 + eps * xw - xw * mult - for ix in xrange(2, nx+1): xbins[ix] = x0 + (ix - 1) * xw - xbins[nx + 1] = x1 - eps * xw + 0.5 * xw * mult - xbins[nx + 2] = x1 + eps * xw + xw * mult - - ybins[0] = y0 - eps * yw - yw * mult - ybins[1] = y0 + eps * yw - yw * mult - for iy in xrange(2, ny+1): ybins[iy] = y0 + (iy - 1) * yw - ybins[ny + 1] = y1 - eps * yw + yw * mult - ybins[ny + 2] = y1 + eps * yw + yw * mult - - framed = R.TH2D('%s framed' % hist.GetName(), '%s framed' % hist.GetTitle(), nx + 2, xbins, ny + 2, ybins) - - # Copy over the contents - for ix in xrange(1, nx+1): - for iy in xrange(1, ny+1): - framed.SetBinContent(1 + ix, 1 + iy, hist.GetBinContent(ix, iy)) - - # Frame with huge values - nx = framed.GetNbinsX() - ny = framed.GetNbinsY() - for ix in xrange(1, nx+1): - framed.SetBinContent(ix, 1, frameValue) - framed.SetBinContent(ix, ny, frameValue) - - for iy in xrange(2, ny): - framed.SetBinContent(1, iy, frameValue) - framed.SetBinContent(nx, iy, frameValue) - - return framed def FixOverlay(): - R.gPad.GetFrame().Draw() - R.gPad.RedrawAxis() + R.gPad.GetFrame().Draw() + R.gPad.RedrawAxis() + def makeHist1D(name, xbins, graph): - len_x = graph.GetX()[graph.GetN()-1] - graph.GetX()[0] + len_x = graph.GetX()[graph.GetN() - 1] - graph.GetX()[0] binw_x = (len_x * 0.5 / (float(xbins) - 1.)) - 1E-5 - hist = R.TH1F(name, '', xbins, graph.GetX()[0], graph.GetX()[graph.GetN()-1]+binw_x) + hist = R.TH1F( + name, '', xbins, graph.GetX()[0], graph.GetX()[graph.GetN() - 1] + binw_x) return hist + def makeHist2D(name, xbins, ybins, graph2d): len_x = graph2d.GetXmax() - graph2d.GetXmin() binw_x = (len_x * 0.5 / (float(xbins) - 1.)) - 1E-5 len_y = graph2d.GetYmax() - graph2d.GetYmin() binw_y = (len_y * 0.5 / (float(ybins) - 1.)) - 1E-5 - hist = R.TH2F(name, '', xbins, graph2d.GetXmin()-binw_x, graph2d.GetXmax()+binw_x, ybins, graph2d.GetYmin()-binw_y, graph2d.GetYmax()+binw_y) + hist = R.TH2F(name, '', xbins, graph2d.GetXmin() - binw_x, graph2d.GetXmax() + + binw_x, ybins, graph2d.GetYmin() - binw_y, graph2d.GetYmax() + binw_y) return hist + def makeVarBinHist2D(name, xbins, ybins): - #create new arrays in which bin low edge is adjusted to make measured points at the bin centres - xbins_new=[None]*(len(xbins)+1) - for i in xrange(len(xbins)-1): - if i == 0 or i == 1: xbins_new[i] = xbins[i] - ((xbins[i+1]-xbins[i])/2) + 1E-5 - else: xbins_new[i] = xbins[i] - ((xbins[i+1]-xbins[i])/2) - xbins_new[len(xbins)-1] = xbins[len(xbins)-2] + 
((xbins[len(xbins)-2]-xbins[len(xbins)-3])/2) - xbins_new[len(xbins)] = xbins[len(xbins)-1] + ((xbins[len(xbins)-1]-xbins[len(xbins)-2])/2) - 1E-5 - - ybins_new=[None]*(len(ybins)+1) - for i in xrange(len(ybins)-1): - if i == 0 or i == 1: ybins_new[i] = ybins[i] - ((ybins[i+1]-ybins[i])/2) + 1E-5 - else: ybins_new[i] = ybins[i] - ((ybins[i+1]-ybins[i])/2) - ybins_new[len(ybins)-1] = ybins[len(ybins)-2] + ((ybins[len(ybins)-2]-ybins[len(ybins)-3])/2) - ybins_new[len(ybins)] = ybins[len(ybins)-1] + ((ybins[len(ybins)-1]-ybins[len(ybins)-2])/2) - 1E-5 - hist = R.TH2F(name, '', len(xbins_new)-1, array('d',xbins_new), len(ybins_new)-1, array('d',ybins_new)) + # create new arrays in which bin low edge is adjusted to make measured + # points at the bin centres + xbins_new = [None] * (len(xbins) + 1) + for i in xrange(len(xbins) - 1): + if i == 0 or i == 1: + xbins_new[i] = xbins[i] - ((xbins[i + 1] - xbins[i]) / 2) + 1E-5 + else: + xbins_new[i] = xbins[i] - ((xbins[i + 1] - xbins[i]) / 2) + xbins_new[len(xbins) - 1] = xbins[len(xbins) - 2] + \ + ((xbins[len(xbins) - 2] - xbins[len(xbins) - 3]) / 2) + xbins_new[len(xbins)] = xbins[len(xbins) - 1] + \ + ((xbins[len(xbins) - 1] - xbins[len(xbins) - 2]) / 2) - 1E-5 + + ybins_new = [None] * (len(ybins) + 1) + for i in xrange(len(ybins) - 1): + if i == 0 or i == 1: + ybins_new[i] = ybins[i] - ((ybins[i + 1] - ybins[i]) / 2) + 1E-5 + else: + ybins_new[i] = ybins[i] - ((ybins[i + 1] - ybins[i]) / 2) + ybins_new[len(ybins) - 1] = ybins[len(ybins) - 2] + \ + ((ybins[len(ybins) - 2] - ybins[len(ybins) - 3]) / 2) + ybins_new[len(ybins)] = ybins[len(ybins) - 1] + \ + ((ybins[len(ybins) - 1] - ybins[len(ybins) - 2]) / 2) - 1E-5 + hist = R.TH2F(name, '', len( + xbins_new) - 1, array('d', xbins_new), len(ybins_new) - 1, array('d', ybins_new)) return hist + def fillTH2(hist2d, graph): - for x in xrange(1, hist2d.GetNbinsX()+1): - for y in xrange(1, hist2d.GetNbinsY()+1): + for x in xrange(1, hist2d.GetNbinsX() + 1): + for y in xrange(1, hist2d.GetNbinsY() + 1): xc = hist2d.GetXaxis().GetBinCenter(x) yc = hist2d.GetYaxis().GetBinCenter(y) val = graph.Interpolate(xc, yc) hist2d.SetBinContent(x, y, val) -def higgsConstraint(model, higgstype) : - higgsBand=R.TGraph2D() + +def higgsConstraint(model, higgstype): + higgsBand = R.TGraph2D() masslow = 150 masshigh = 500 massstep = 10 - n=0 - for mass in range (masslow, masshigh, massstep): - myfile = open("../../HiggsAnalysis/HiggsToTauTau/data/Higgs125/"+model+"/higgs_"+str(mass)+".dat", 'r') + n = 0 + for mass in range(masslow, masshigh, massstep): + myfile = open("../../HiggsAnalysis/HiggsToTauTau/data/Higgs125/" + + model + "/higgs_" + str(mass) + ".dat", 'r') for line in myfile: tanb = (line.split())[0] mh = float((line.split())[1]) mH = float((line.split())[3]) - if higgstype=="h" : - higgsBand.SetPoint(n, mass, float(tanb), mh) - elif higgstype=="H" : - higgsBand.SetPoint(n, mass, float(tanb), mH) - n=n+1 - myfile.close() + if higgstype == "h": + higgsBand.SetPoint(n, mass, float(tanb), mh) + elif higgstype == "H": + higgsBand.SetPoint(n, mass, float(tanb), mH) + n = n + 1 + myfile.close() return higgsBand -def MakeErrorBand(LowerGraph, UpperGraph) : - errorBand = R.TGraphAsymmErrors() - lower_list=[]; upper_list=[] - for i in range(LowerGraph.GetN()): - lower_list.append((float(LowerGraph.GetX()[i]), float(LowerGraph.GetY()[i]))) - upper_list.append((float(UpperGraph.GetX()[i]), float(UpperGraph.GetY()[i]))) - lower_list=sorted(set(lower_list)) - upper_list=sorted(set(upper_list)) - for i in 
range(LowerGraph.GetN()): - errorBand.SetPoint(i, lower_list[i][0], lower_list[i][1]) - errorBand.SetPointEYlow (i, lower_list[i][1]-lower_list[i][1]) - errorBand.SetPointEYhigh(i, upper_list[i][1]-lower_list[i][1]) - return errorBand - -def SortGraph(Graph) : - sortedGraph = R.TGraph() - graph_list=[] - for i in range(Graph.GetN()): - graph_list.append((float(Graph.GetX()[i]), float(Graph.GetY()[i]))) - graph_list=sorted(set(graph_list)) - for i in range(Graph.GetN()): - sortedGraph.SetPoint(i, graph_list[i][0], graph_list[i][1]) - return sortedGraph + +def MakeErrorBand(LowerGraph, UpperGraph): + errorBand = R.TGraphAsymmErrors() + lower_list = [] + upper_list = [] + for i in range(LowerGraph.GetN()): + lower_list.append( + (float(LowerGraph.GetX()[i]), float(LowerGraph.GetY()[i]))) + upper_list.append( + (float(UpperGraph.GetX()[i]), float(UpperGraph.GetY()[i]))) + lower_list = sorted(set(lower_list)) + upper_list = sorted(set(upper_list)) + for i in range(LowerGraph.GetN()): + errorBand.SetPoint(i, lower_list[i][0], lower_list[i][1]) + errorBand.SetPointEYlow(i, lower_list[i][1] - lower_list[i][1]) + errorBand.SetPointEYhigh(i, upper_list[i][1] - lower_list[i][1]) + return errorBand + + +def SortGraph(Graph): + sortedGraph = R.TGraph() + graph_list = [] + for i in range(Graph.GetN()): + graph_list.append((float(Graph.GetX()[i]), float(Graph.GetY()[i]))) + graph_list = sorted(set(graph_list)) + for i in range(Graph.GetN()): + sortedGraph.SetPoint(i, graph_list[i][0], graph_list[i][1]) + return sortedGraph + def LimitTGraphFromJSON(js, label): xvals = [] @@ -890,6 +1115,7 @@ def LimitTGraphFromJSON(js, label): graph.Sort() return graph + def LimitBandTGraphFromJSON(js, central, lo, hi): xvals = [] yvals = [] @@ -900,10 +1126,192 @@ def LimitBandTGraphFromJSON(js, central, lo, hi): yvals.append(js[key][central]) yvals_lo.append(js[key][central] - js[key][lo]) yvals_hi.append(js[key][hi] - js[key][central]) - graph = R.TGraphAsymmErrors(len(xvals), array('d', xvals), array('d', yvals), array('d', [0]), array('d', [0]), array('d', yvals_lo), array('d', yvals_hi)) + graph = R.TGraphAsymmErrors(len(xvals), array('d', xvals), array('d', yvals), array( + 'd', [0]), array('d', [0]), array('d', yvals_lo), array('d', yvals_hi)) graph.Sort() return graph + +def GraphDifference(graph1, graph2): + xvals = [] + yvals = [] + if graph1.GetN() != graph2.GetN(): + return graph1 + for i in range(graph1.GetN()): + xvals.append(graph1.GetX()[i]) + yvals.append( + 2 * abs(graph1.GetY()[i] - graph2.GetY()[i]) / (graph1.GetY()[i] + graph2.GetY()[i])) + diff_graph = R.TGraph(len(xvals), array('d', xvals), array('d', yvals)) + diff_graph.Sort() + return diff_graph + + def Set(obj, **kwargs): - for key, value in kwargs.iteritems(): - getattr(obj, 'Set'+key)(value) + for key, value in kwargs.iteritems(): + getattr(obj, 'Set' + key)(value) + + +def SetBirdPalette(): + nRGBs = 9 + stops = array( + 'd', [0.0000, 0.1250, 0.2500, 0.3750, 0.5000, 0.6250, 0.7500, 0.8750, 1.0000]) + red = array( + 'd', [0.2082, 0.0592, 0.0780, 0.0232, 0.1802, 0.5301, 0.8186, 0.9956, 0.9764]) + green = array( + 'd', [0.1664, 0.3599, 0.5041, 0.6419, 0.7178, 0.7492, 0.7328, 0.7862, 0.9832]) + blue = array( + 'd', [0.5293, 0.8684, 0.8385, 0.7914, 0.6425, 0.4662, 0.3499, 0.1968, 0.0539]) + R.TColor.CreateGradientColorTable(nRGBs, stops, red, green, blue, 255, 1) + + +def bestFit(tree, x, y, cut): + nfind = tree.Draw(y + ":" + x, cut + "deltaNLL == 0") + gr0 = R.TGraph(1) + if (nfind == 0): + gr0.SetPoint(0, -999, -999) + else: + grc = 
R.gROOT.FindObject("Graph").Clone() + if (grc.GetN() > 1): + grc.Set(1) + gr0.SetPoint(0, grc.GetXmax(), grc.GetYmax()) + gr0.SetMarkerStyle(34) + gr0.SetMarkerSize(2.0) + return gr0 + + +def treeToHist2D(t, x, y, name, cut, xmin, xmax, ymin, ymax, xbins, ybins): + t.Draw("2*deltaNLL:%s:%s>>%s_prof(%d,%10g,%10g,%d,%10g,%10g)" % + (y, x, name, xbins, xmin, xmax, ybins, ymin, ymax), cut + "deltaNLL != 0", "PROF") + prof = R.gROOT.FindObject(name + "_prof") + h2d = R.TH2D(name, name, xbins, xmin, xmax, ybins, ymin, ymax) + for ix in xrange(1, xbins + 1): + for iy in xrange(1, ybins + 1): + z = prof.GetBinContent(ix, iy) + if (z != z) or (z > 4294967295): # protect against NANs + z = 0 + h2d.SetBinContent(ix, iy, z) + h2d.GetXaxis().SetTitle(x) + h2d.GetYaxis().SetTitle(y) + h2d.SetDirectory(0) + h2d = NewInterpolate(h2d) + return h2d + +# Functions 'NewInterpolate' and 'rebin' are taken, translated and modified into python from: +# https://indico.cern.ch/event/256523/contribution/2/attachments/450198/624259/07JUN2013_cawest.pdf +# http://hep.ucsb.edu/people/cawest/interpolation/interpolate.h + + +def NewInterpolate(hist): + histCopy = hist.Clone() + + # make temporary histograms to store the results of both steps + hist_step1 = histCopy.Clone() + hist_step1.Reset() + hist_step2 = histCopy.Clone() + hist_step2.Reset() + + nBinsX = histCopy.GetNbinsX() + nBinsY = histCopy.GetNbinsY() + + xMin = 1 + yMin = 1 + xMax = histCopy.GetNbinsX() + 1 + yMax = histCopy.GetNbinsY() + 1 + + for i in xrange(1, nBinsX + 1): + for j in xrange(1, nBinsY + 1): + # do not extrapolate outside the scan + if (i < xMin) or (i > xMax) or (j < yMin) or (j > yMax): + continue + binContent = histCopy.GetBinContent(i, j) + binContentNW = histCopy.GetBinContent(i + 1, j + 1) + binContentSE = histCopy.GetBinContent(i - 1, j - 1) + binContentNE = histCopy.GetBinContent(i + 1, j - 1) + binContentSW = histCopy.GetBinContent(i - 1, j + 1) + binContentUp = histCopy.GetBinContent(i, j + 1) + binContentDown = histCopy.GetBinContent(i, j - 1) + binContentLeft = histCopy.GetBinContent(i - 1, j) + binContentRight = histCopy.GetBinContent(i + 1, j) + nFilled = 0 + if(binContentNW > 0): + nFilled += 1 + if(binContentSE > 0): + nFilled += 1 + if(binContentNE > 0): + nFilled += 1 + if(binContentSW > 0): + nFilled += 1 + if(binContentUp > 0): + nFilled += 1 + if(binContentDown > 0): + nFilled += 1 + if(binContentRight > 0): + nFilled += 1 + if(binContentLeft > 0): + nFilled += 1 + # if we are at an empty bin and there are neighbors + # in specified direction with non-zero entries + if(binContent == 0) and (nFilled > 1): + # average over non-zero entries + binContent = (binContentNW + binContentSE + binContentNE + binContentSW + + binContentUp + binContentDown + binContentRight + binContentLeft) / nFilled + hist_step1.SetBinContent(i, j, binContent) + + # add result of interpolation + histCopy.Add(hist_step1) + + for i in xrange(1, nBinsX): + for j in xrange(1, nBinsY): + if(i < xMin) or (i > xMax) or (j < yMin) or (j > yMax): + continue + binContent = histCopy.GetBinContent(i, j) + # get entries for "Swiss Cross" average + binContentUp = histCopy.GetBinContent(i, j + 1) + binContentDown = histCopy.GetBinContent(i, j - 1) + binContentLeft = histCopy.GetBinContent(i - 1, j) + binContentRight = histCopy.GetBinContent(i + 1, j) + nFilled = 0 + if(binContentUp > 0): + nFilled += 1 + if(binContentDown > 0): + nFilled += 1 + if(binContentRight > 0): + nFilled += 1 + if(binContentLeft > 0): + nFilled += 1 + if(binContent == 0) and (nFilled > 0): 
+ # only average over non-zero entries + binContent = ( + binContentUp + binContentDown + binContentRight + binContentLeft) / nFilled + hist_step2.SetBinContent(i, j, binContent) + # add "Swiss Cross" average + histCopy.Add(hist_step2) + + return histCopy + + +def rebin(hist): + histName = hist.GetName() + histName += "_rebin" + +# bin widths are needed so as to not shift histogram by half a bin with each rebinning +# assume constant binning +# binWidthX = hist.GetXaxis().GetBinWidth(1) +# binWidthY = hist.GetYaxis().GetBinWidth(1) + +# histRebinned = R.TH2F(histName, histName, 2*hist.GetNbinsX(), hist.GetXaxis().GetXmin()+binWidthX/4, hist.GetXaxis().GetXmax()+binWidthX/4, 2*hist.GetNbinsY(), hist.GetYaxis().GetXmin()+binWidthY/4, hist.GetYaxis().GetXmax()+binWidthY/4) + histRebinned = R.TH2F(histName, histName, 2 * hist.GetNbinsX() - 1, hist.GetXaxis().GetXmin(), + hist.GetXaxis().GetXmax(), 2 * hist.GetNbinsY() - 1, hist.GetYaxis().GetXmin(), hist.GetYaxis().GetXmax()) + + # copy results from previous histogram + for iX in xrange(1, hist.GetNbinsX() + 1): + for iY in xrange(1, hist.GetNbinsY() + 1): + binContent = hist.GetBinContent(iX, iY) + histRebinned.SetBinContent(2 * iX - 1, 2 * iY - 1, binContent) + histRebinned.SetMaximum(hist.GetMaximum()) + histRebinned.SetMinimum(hist.GetMinimum()) + + # use interpolation to re-fill histogram + histRebinnedInterpolated = NewInterpolate(histRebinned) + + return histRebinnedInterpolated diff --git a/CombineTools/scripts/MSSMtanbPlot.py b/CombineTools/scripts/MSSMtanbPlot.py index 7d8d586d178..8adcb510910 100644 --- a/CombineTools/scripts/MSSMtanbPlot.py +++ b/CombineTools/scripts/MSSMtanbPlot.py @@ -25,6 +25,9 @@ def CreateTransparentColor(color, alpha): parser.add_argument('--x_axis_min', help='Fix x axis minimum', default=90.0) parser.add_argument('--x_axis_max', help='Fix x axis maximum', default=1000.0) parser.add_argument('--verbosity', '-v', help='verbosity', default=0) +parser.add_argument('--expected_only', help='Do not plot observed', default=False) +parser.add_argument('--lumi_label', '-l', help='lumi label e.g. 19.7 fb^{-1} (8TeV)', default='19.7 fb^{-1} (8TeV)') +parser.add_argument('--ana_label', '-a', help='analysis label (for now just for filename but can also include label on plot) e.g. 
Hhh, AZh, mssm', default='mssm') args = parser.parse_args() @@ -72,12 +75,18 @@ def CreateTransparentColor(color, alpha): #Note the binning of the TH2D for the interpolation should ~ match the initial input grid #Could in future implement variable binning here -h_exp = plot.makeVarBinHist2D("h_exp", mA_list, tanb_list) -h_obs = plot.makeVarBinHist2D("h_obs", mA_list, tanb_list) -h_minus1sigma = plot.makeVarBinHist2D("h_minus1sigma", mA_list, tanb_list) -h_plus1sigma = plot.makeVarBinHist2D("h_plus1sigma", mA_list, tanb_list) -h_minus2sigma = plot.makeVarBinHist2D("h_minus2sigma", mA_list, tanb_list) -h_plus2sigma = plot.makeVarBinHist2D("h_plus2sigma", mA_list, tanb_list) +#h_exp = plot.makeVarBinHist2D("h_exp", mA_list, tanb_list) +#h_obs = plot.makeVarBinHist2D("h_obs", mA_list, tanb_list) +#h_minus1sigma = plot.makeVarBinHist2D("h_minus1sigma", mA_list, tanb_list) +#h_plus1sigma = plot.makeVarBinHist2D("h_plus1sigma", mA_list, tanb_list) +#h_minus2sigma = plot.makeVarBinHist2D("h_minus2sigma", mA_list, tanb_list) +#h_plus2sigma = plot.makeVarBinHist2D("h_plus2sigma", mA_list, tanb_list) +h_exp = plot.makeHist2D("h_exp", mA_bins, tanb_bins, graph_exp) +h_obs = plot.makeHist2D("h_obs", mA_bins, tanb_bins, graph_obs) +h_minus1sigma = plot.makeHist2D("h_minus1sigma", mA_bins, tanb_bins, graph_minus1sigma) +h_plus1sigma = plot.makeHist2D("h_plus1sigma", mA_bins, tanb_bins, graph_plus1sigma) +h_minus2sigma = plot.makeHist2D("h_minus2sigma", mA_bins, tanb_bins, graph_minus2sigma) +h_plus2sigma = plot.makeHist2D("h_plus2sigma", mA_bins, tanb_bins, graph_plus2sigma) plot.fillTH2(h_exp, graph_exp) plot.fillTH2(h_obs, graph_obs) plot.fillTH2(h_minus1sigma, graph_minus1sigma) @@ -163,8 +172,9 @@ def CreateTransparentColor(color, alpha): p.SetFillStyle(1001) p.SetFillColor(CreateTransparentColor(ROOT.kAzure+6,0.5)) pads[1].cd() - p.Draw("F SAME") - p.Draw("L SAME") + if not args.expected_only: + p.Draw("F SAME") + p.Draw("L SAME") if int(args.verbosity) > 0 : outf.WriteTObject(p, 'graph_obs_%i'%i) if int(args.verbosity) > 0 : outf.Close() @@ -219,10 +229,10 @@ def CreateTransparentColor(color, alpha): legend.SetTextFont(62) legend.SetHeader("95% CL Excluded:") # Stupid hack to get legend entry looking correct -if cont_obs[0] : - alt_cont_obs = cont_obs[0].Clone() - alt_cont_obs.SetLineColor(ROOT.kWhite) - legend.AddEntry(alt_cont_obs,"Observed", "F") +if cont_obs[0] and not args.expected_only: + alt_cont_obs = cont_obs[0].Clone() + alt_cont_obs.SetLineColor(ROOT.kWhite) + legend.AddEntry(alt_cont_obs,"Observed", "F") if cont_minus1sigma[0] : legend.AddEntry(cont_minus1sigma[0], "#pm 1#sigma Expected", "F") if cont_exp[0] : legend.AddEntry(cont_exp[0],"Expected", "L") if cont_minus2sigma[0] : legend.AddEntry(cont_minus2sigma[0], "#pm 2#sigma Expected", "F") @@ -239,8 +249,9 @@ def CreateTransparentColor(color, alpha): axis.GetXaxis().SetTitle("m_{A} (GeV)") if args.custom_x_range : axis.GetXaxis().SetRangeUser(float(args.x_axis_min), float(args.x_axis_max)) plot.DrawCMSLogo(pads[1], 'Combine Harvester', scenario_label, 11, 0.045, 0.035, 1.2) -plot.DrawTitle(pads[1], '19.7 fb^{-1} (8 TeV)', 3); +plot.DrawTitle(pads[1], args.lumi_label, 3); plot.FixOverlay() -c1.SaveAs("mssm_"+args.scenario+".pdf") -c1.SaveAs("mssm_"+args.scenario+".png") +c1.SaveAs(args.ana_label+"_"+args.scenario+".pdf") +c1.SaveAs(args.ana_label+"_"+args.scenario+".png") +c1.SaveAs(args.ana_label+"_"+args.scenario+".C") diff --git a/CombineTools/scripts/combineTool.py b/CombineTools/scripts/combineTool.py index 
de87956c17c..ab611e34dcb 100755 --- a/CombineTools/scripts/combineTool.py +++ b/CombineTools/scripts/combineTool.py @@ -5,10 +5,11 @@ from CombineHarvester.CombineTools.combine.CombineToolBase import CombineToolBase from CombineHarvester.CombineTools.combine.EnhancedCombine import EnhancedCombine from CombineHarvester.CombineTools.combine.Impacts import Impacts +from CombineHarvester.CombineTools.combine.ImpactsFromScans import ImpactsFromScans from CombineHarvester.CombineTools.combine.Workspace import PrintWorkspace, ModifyDataSet from CombineHarvester.CombineTools.combine.CovMatrix import CovMatrix -from CombineHarvester.CombineTools.combine.LimitGrids import AsymptoticGrid, HybridNewGrid, Limit1D -from CombineHarvester.CombineTools.combine.Output import PrintSingles, CollectLimits +from CombineHarvester.CombineTools.combine.LimitGrids import AsymptoticGrid, HybridNewGrid +from CombineHarvester.CombineTools.combine.Output import PrintFit, CollectLimits ROOT.PyConfig.IgnoreCommandLineOptions = True @@ -30,12 +31,12 @@ def register_method(parser, method_dict, method_class): register_method(parser, methods, PrintWorkspace) register_method(parser, methods, ModifyDataSet) register_method(parser, methods, Impacts) +register_method(parser, methods, ImpactsFromScans) register_method(parser, methods, CollectLimits) register_method(parser, methods, CovMatrix) -register_method(parser, methods, PrintSingles) +register_method(parser, methods, PrintFit) register_method(parser, methods, AsymptoticGrid) register_method(parser, methods, HybridNewGrid) -register_method(parser, methods, Limit1D) parser.add_argument('-M', '--method') diff --git a/CombineTools/scripts/limitCompare.py b/CombineTools/scripts/limitCompare.py new file mode 100644 index 00000000000..ce5fe8c6cfe --- /dev/null +++ b/CombineTools/scripts/limitCompare.py @@ -0,0 +1,166 @@ +import CombineHarvester.CombineTools.plotting as plot +import CombineHarvester.CombineTools.maketable as maketable +import ROOT +import math +import argparse +import json +import sys + +ROOT.gROOT.SetBatch(ROOT.kTRUE) +parser = argparse.ArgumentParser() +parser.add_argument('--file', '-f', help='named input file') +parser.add_argument('--labels', help='Labels for legend') +parser.add_argument('--process', help='The process on which a limit has been calculated. 
[gg#phi, bb#phi]', default="gg#phi")
+parser.add_argument('--custom_y_range', help='Fix y axis range', action='store_true', default=False)
+parser.add_argument('--y_axis_min', help='Fix y axis minimum', default=0.001)
+parser.add_argument('--y_axis_max', help='Fix y axis maximum', default=100.0)
+parser.add_argument('--custom_x_range', help='Fix x axis range', action='store_true', default=False)
+parser.add_argument('--x_axis_min', help='Fix x axis minimum', default=90.0)
+parser.add_argument('--x_axis_max', help='Fix x axis maximum', default=1000.0)
+parser.add_argument('--verbosity', '-v', help='verbosity', default=0)
+parser.add_argument('--log', help='Set log range for x and y axis', action='store_true', default=False)
+parser.add_argument('--expected_only', help='Plot expected limit difference only', action='store_true', default=False)
+parser.add_argument('--outname', '-o', help='Name of output plot', default='limit_comparison')
+parser.add_argument('--relative', help='Relative difference in limit', action='store_true', default=False)
+#parser.add_argument('--table_vals', help='Amount of values to be written in a table for different masses', default=10)
+args = parser.parse_args()
+
+
+colourlist=[ROOT.kGreen+3,ROOT.kRed,ROOT.kBlue,ROOT.kBlack,ROOT.kYellow+2,ROOT.kOrange+10,ROOT.kCyan+3,ROOT.kMagenta+2,ROOT.kViolet-5,ROOT.kGray]
+files = (args.file).split(',')
+if args.labels:
+    labels = (args.labels).split(',')
+else:
+    labels = []
+outname = args.outname
+
+if args.relative and len(files)!=2:
+    sys.exit('Provide exactly 2 input files to plot the relative difference in limit')
+
+if len(colourlist) < len(files):
+    print 'Too many input files! Maximum supported is %d'%len(colourlist)
+    sys.exit(1)
+
+if len(labels) < len(files) and not args.relative:
+    print 'Provide at least as many legend labels as files'
+    sys.exit(1)
+
+
+exp_graph_list = [ROOT.TGraph() for i in range(len(files))]
+if args.relative:
+    obs_graph_list = [ROOT.TGraph() for i in range(len(files))]
+for i in range(len(files)):
+    if ".root" in files[i]:
+        exp_graph_list[i] = plot.SortGraph((ROOT.TFile(files[i],'r')).Get('expected'))
+        if args.relative:
+            obs_graph_list[i] = plot.SortGraph((ROOT.TFile(files[i],'r')).Get('observed'))
+    else:
+        data = {}
+        with open(files[i]) as jsonfile:
+            data = json.load(jsonfile)
+        exp_graph_list[i] = plot.LimitTGraphFromJSON(data, 'expected')
+        if args.relative:
+            obs_graph_list[i] = plot.LimitTGraphFromJSON(data, 'observed')
+
+max_vals = []
+for i in range(len(exp_graph_list)):
+    max_vals.append(ROOT.TMath.MaxElement(exp_graph_list[i].GetN(),exp_graph_list[i].GetY()))
+
+if args.relative:
+    relative_exp_graph = plot.GraphDifference(exp_graph_list[0],exp_graph_list[1])
+    relative_obs_graph = plot.GraphDifference(obs_graph_list[0],obs_graph_list[1])
+    del max_vals[:]
+    max_vals.append(ROOT.TMath.MaxElement(relative_exp_graph.GetN(),relative_exp_graph.GetY()))
+    if not args.expected_only:
+        max_vals.append(ROOT.TMath.MaxElement(relative_obs_graph.GetN(),relative_obs_graph.GetY()))
+
+process_label=args.process
+
+mass_list=[]
+for i in range(exp_graph_list[0].GetN()) :
+    mass_list.append(float(exp_graph_list[0].GetX()[i]))
+mass_list = sorted(set(mass_list))
+mass_bins=len(mass_list)
+if int(args.verbosity) > 0 :
+    print "mass_list: ", mass_list, "Total number: ", mass_bins
+
+#Create canvas and TH1D
+plot.ModTDRStyle(width=600, l=0.12)
+ROOT.gStyle.SetFrameLineWidth(2)
+c1=ROOT.TCanvas()
+axis = plot.makeHist1D('hist1d', mass_bins, exp_graph_list[0])
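
Note: the --relative mode above relies on plot.GraphDifference, added in the plotting.py hunk earlier in this patch. For two curves with the same mass points it returns the symmetric relative difference 2|y1 - y2| / (y1 + y2) at each point, and it falls back to returning the first graph unchanged if the point counts differ. A minimal standalone sketch of the quantity being plotted, with made-up limit values:

    import ROOT as R
    from array import array

    # Two hypothetical expected-limit curves, evaluated at the same masses
    g1 = R.TGraph(3, array('d', [100., 200., 300.]), array('d', [1.0, 0.5, 0.2]))
    g2 = R.TGraph(3, array('d', [100., 200., 300.]), array('d', [1.2, 0.4, 0.2]))

    for i in range(g1.GetN()):
        y1, y2 = g1.GetY()[i], g2.GetY()[i]
        print g1.GetX()[i], 2 * abs(y1 - y2) / (y1 + y2)
    # prints 0.18..., 0.22..., 0.0: the fractional spread between the curves
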
+axis.GetYaxis().SetRangeUser(0,1.2*float(max(max_vals))) +if args.log: + axis.GetYaxis().SetRangeUser(0.001,1.2*float(max(max_vals))) +if process_label == "gg#phi" : + axis.GetYaxis().SetTitle("95% CL limit on #sigma#font[42]{(gg#phi)}#upoint#font[52]{B}#font[42]{(#phi#rightarrow#tau#tau)} [pb]") +elif process_label == "bb#phi" : + axis.GetYaxis().SetTitle("95% CL limit on #sigma#font[42]{(bb#phi)}#upoint#font[52]{B}#font[42]{(#phi#rightarrow#tau#tau)} [pb]") +else: + exit("Currently process is not supported") +if args.custom_y_range : axis.GetYaxis().SetRangeUser(float(args.y_axis_min), float(args.y_axis_max)) +axis.GetXaxis().SetTitle("m_{#phi} [GeV]") +if args.custom_x_range : axis.GetXaxis().SetRangeUser(float(args.x_axis_min), float(args.x_axis_max)) +#Create two pads, one is just for the Legend +pad_leg = ROOT.TPad("pad_leg","pad_leg",0,0.82,1,1) +pad_leg.SetFillStyle(4000) +pad_leg.Draw() +pad_plot = ROOT.TPad("pad_plot","pad_plot",0,0,1,0.82) +pad_plot.SetFillStyle(4000) +pad_plot.Draw() +pads=[pad_leg,pad_plot] +pads[1].cd() +if args.log : + pad_plot.SetLogx(1); + pad_plot.SetLogy(1); + axis.SetNdivisions(50005, "X"); + axis.GetXaxis().SetMoreLogLabels(); + axis.GetXaxis().SetNoExponent(); + axis.GetXaxis().SetLabelSize(0.040); +axis.Draw() + +if not args.relative: + for i in range(len(files)): + exp_graph_list[i].SetLineColor(colourlist[i]) + exp_graph_list[i].SetLineWidth(3) + exp_graph_list[i].SetMarkerStyle(20) + exp_graph_list[i].SetMarkerColor(colourlist[i]) + exp_graph_list[i].SetLineStyle(2) + exp_graph_list[i].Draw("PL") +else: + relative_exp_graph.SetLineColor(ROOT.kRed) + relative_exp_graph.SetLineWidth(3) + relative_exp_graph.SetMarkerStyle(20) + relative_exp_graph.SetMarkerColor(ROOT.kRed) + relative_exp_graph.SetLineStyle(1) + relative_exp_graph.Draw("PL") + if not args.expected_only: + relative_obs_graph.SetLineColor(ROOT.kBlue) + relative_obs_graph.SetMarkerColor(ROOT.kBlue) + relative_obs_graph.SetLineWidth(3) + relative_obs_graph.SetLineStyle(2) + relative_obs_graph.SetMarkerStyle(20) + relative_obs_graph.Draw("PL") + +pads[0].cd() +legend = plot.PositionedLegend(0.5,0.9,2,0.03) +legend.SetNColumns(2) +legend.SetFillStyle(1001) +legend.SetTextSize(0.15) +legend.SetTextFont(62) +legend.SetHeader("95% CL Excluded:") +if not args.relative: + for i in range(len(files)): + legend.AddEntry(exp_graph_list[i],labels[i],"PL") +else: + legend.AddEntry(relative_exp_graph,"Expected","PL") + if not args.expected_only: + legend.AddEntry(relative_obs_graph,"Observed","PL") +legend.Draw("same") + +plot.DrawCMSLogo(pads[1], '', '', 11, 0.045, 0.035, 1.2) +plot.DrawTitle(pads[1], '2.1 fb^{-1} (13 TeV)', 3); +plot.FixOverlay() +c1.SaveAs("%s.pdf"%outname) +c1.SaveAs("%s.png"%outname) + diff --git a/CombineTools/scripts/plotBSMxsBRLimit.py b/CombineTools/scripts/plotBSMxsBRLimit.py index 9cbaa75243c..6ebd8083bcf 100644 --- a/CombineTools/scripts/plotBSMxsBRLimit.py +++ b/CombineTools/scripts/plotBSMxsBRLimit.py @@ -17,6 +17,7 @@ parser.add_argument('--x_axis_max', help='Fix x axis maximum', default=1000.0) parser.add_argument('--verbosity', '-v', help='verbosity', default=0) parser.add_argument('--log', help='Set log range for x and y axis', default=False) +parser.add_argument('--expected_only', help='Plot expected only', action='store_true', default=False) #parser.add_argument('--table_vals', help='Amount of values to be written in a table for different masses', default=10) args = parser.parse_args() @@ -118,11 +119,12 @@ # } graph_exp.Draw("L"); 
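
A note on the two --expected_only definitions in this patch: this script declares the option with action='store_true' (see the argparse hunk above), while the MSSMtanbPlot.py hunk declares it with only default=False. The distinction matters when the flag is tested with "if not args.expected_only": without store_true the option consumes a value, and any non-empty string, including 'false', is truthy. An illustrative standalone sketch:

    import argparse

    p = argparse.ArgumentParser()
    p.add_argument('--expected_only', action='store_true', default=False)
    print p.parse_args([]).expected_only                   # False
    print p.parse_args(['--expected_only']).expected_only  # True

    q = argparse.ArgumentParser()
    q.add_argument('--expected_only', default=False)  # no action='store_true'
    # The option now requires a value, and any non-empty string is truthy:
    print bool(q.parse_args(['--expected_only', 'false']).expected_only)  # True
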
-graph_obs.SetMarkerColor(ROOT.kBlack); -graph_obs.SetMarkerSize(1.0); -graph_obs.SetMarkerStyle(20); -graph_obs.SetLineWidth(3); -graph_obs.Draw("PLsame"); +if not args.expected_only: + graph_obs.SetMarkerColor(ROOT.kBlack); + graph_obs.SetMarkerSize(1.0); + graph_obs.SetMarkerStyle(20); + graph_obs.SetLineWidth(3); + graph_obs.Draw("PLsame"); pads[0].cd() legend = plot.PositionedLegend(0.5,0.9,2,0.03) @@ -131,7 +133,8 @@ legend.SetTextSize(0.15) legend.SetTextFont(62) legend.SetHeader("95% CL Excluded:") -legend.AddEntry(graph_obs,"Observed", "L") +if not args.expected_only: + legend.AddEntry(graph_obs,"Observed", "L") legend.AddEntry(innerBand, "#pm 1#sigma Expected", "F") legend.AddEntry(graph_exp,"Expected", "L") legend.AddEntry(outerBand, "#pm 2#sigma Expected", "F") diff --git a/CombineTools/scripts/plotLimitGrid.py b/CombineTools/scripts/plotLimitGrid.py new file mode 100755 index 00000000000..2b4352f5d00 --- /dev/null +++ b/CombineTools/scripts/plotLimitGrid.py @@ -0,0 +1,227 @@ +#!/usr/bin/env python +import CombineHarvester.CombineTools.plotting as plot +import ROOT +import argparse + +ROOT.PyConfig.IgnoreCommandLineOptions = True +ROOT.gROOT.SetBatch(ROOT.kTRUE) + +parser = argparse.ArgumentParser( + formatter_class=argparse.ArgumentDefaultsHelpFormatter) +parser.add_argument( + 'input', help="""ROOT file containing the output of the + combineTool.py AsymptoticGrid or HybridNewGrid methods""") +parser.add_argument( + '--output', '-o', default='limit_grid_output', help="""Name of the output + plot without file extension""") +parser.add_argument( + '--contours', default='exp-2,exp-1,exp0,exp+1,exp+2,obs', help="""List of + contours to plot. These must correspond to the names of the TGraph2D + objects in the input file""") +parser.add_argument( + '--bin-method', default='BinEdgeAligned', help="""One of BinEdgeAligned or + BinCenterAligned. 
See plotting.py documentation for details.""") +parser.add_argument( + '--debug-output', '-d', help="""If specified, write the contour TH2s and + TGraphs into this output ROOT file""") +parser.add_argument( + '--CL', default=0.95, help="""Confidence level for contours""") +parser.add_argument( + '--x-title', default='m_{A} (GeV)', help="""Title for the x-axis""") +parser.add_argument( + '--y-title', default='tan#beta', help="""Title for the y-axis""") +parser.add_argument( + '--cms-sub', default='Internal', help="""Text below the CMS logo""") +parser.add_argument( + '--scenario-label', default='', help="""Scenario name to be drawn in top + left of plot""") +parser.add_argument( + '--title-right', default='', help="""Right header text above the frame""") +parser.add_argument( + '--title-left', default='', help="""Left header text above the frame""") +parser.add_argument( + '--logy', action='store_true', help="""Draw y-axis in log scale""") +parser.add_argument( + '--logx', action='store_true', help="""Draw x-axis in log scale""") +parser.add_argument( + '--force-x-width', type=float, default=None, help="""Use this x bin width in + BinCenterAligned mode""") +parser.add_argument( + '--force-y-width', type=float, default=None, help="""Use this y bin width in + BinCenterAligned mode""") +parser.add_argument( + '--hist', default=None, help="""Draw this TGraph2D as a histogram with + COLZ""") +parser.add_argument( + '--z-range', default=None, type=str, help="""z-axis range of the COLZ + hist""") +parser.add_argument( + '--z-title', default=None, help="""z-axis title of the COLZ hist""") +args = parser.parse_args() + + +plot.ModTDRStyle(r=0.06 if args.hist is None else 0.17, l=0.12) +ROOT.gStyle.SetNdivisions(510, 'XYZ') +plot.SetBirdPalette() + +file = ROOT.TFile(args.input) +types = args.contours.split(',') +CL = 1 - args.CL + +# Object storage +graphs = {c: file.Get(c) for c in types} +hists = {} +contours = {} + +h_proto = plot.TH2FromTGraph2D(graphs[types[0]], method=args.bin_method, + force_x_width=args.force_x_width, + force_y_width=args.force_y_width) +h_axis = h_proto +h_axis = plot.TH2FromTGraph2D(graphs[types[0]]) +# Create the debug output file if requested +if args.debug_output is not None: + debug = ROOT.TFile(args.debug_output, 'RECREATE') +else: + debug = None + +# Fill TH2s by interpolating the TGraph2Ds, then extract contours +for c in types: + print 'Filling histo for %s' % c + hists[c] = h_proto.Clone(c) + plot.fillTH2(hists[c], graphs[c]) + contours[c] = plot.contourFromTH2(hists[c], CL, 5, frameValue=1) + if debug is not None: + debug.WriteTObject(hists[c], 'hist_%s' % c) + for i, cont in enumerate(contours[c]): + debug.WriteTObject(cont, 'cont_%s_%i' % (c, i)) + +# Setup the canvas: we'll use a two pad split, with a small top pad to contain +# the CMS logo and the legend +canv = ROOT.TCanvas(args.output, args.output) +pads = plot.TwoPadSplit(0.8, 0, 0) +pads[1].cd() +h_axis.GetXaxis().SetTitle(args.x_title) +h_axis.GetYaxis().SetTitle(args.y_title) +h_axis.Draw() + +if args.hist is not None: + colzhist = h_proto.Clone(c) + plot.fillTH2(colzhist, file.Get(args.hist)) + colzhist.SetContour(255) + colzhist.Draw('COLZSAME') + colzhist.GetZaxis().SetLabelSize(0.03) + if args.z_range is not None: + colzhist.SetMinimum(float(args.z_range.split(',')[0])) + colzhist.SetMaximum(float(args.z_range.split(',')[1])) + if args.z_title is not None: + colzhist.GetZaxis().SetTitle(args.z_title) + +pads[1].SetLogy(args.logy) +pads[1].SetLogx(args.logx) +pads[1].SetTickx() +pads[1].SetTicky() 
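
For reference, each plot.contourFromTH2 call above works by drawing the (framed) histogram with ROOT's 'CONT Z LIST' option and then harvesting the generated TGraphs from gROOT's list of specials; the frameTH2D step applied first pads the histogram edges with a large frameValue so that contours which would otherwise run off the scanned grid are forced to close. A reduced sketch of just the extraction step (the helper name is illustrative, and the framing is skipped here):

    import ROOT as R
    from array import array

    R.gROOT.SetBatch(True)

    def contours_at(h2, level, min_points=5):
        # Ask ROOT to compute contour graphs for a single level
        h2.SetContour(1, array('d', [level]))
        canv = R.TCanvas('tmp_contour', 'tmp_contour')
        h2.Draw('CONT Z LIST')
        canv.Update()  # painting triggers the contour computation
        conts = R.gROOT.GetListOfSpecials().FindObject('contours')
        graphs = []
        if conts and conts.GetSize() > 0:
            level_list = conts.At(0)  # the graphs for our one level
            for j in xrange(level_list.GetSize()):
                gr = level_list.At(j)
                if gr.GetN() > min_points:  # drop tiny fragments
                    graphs.append(gr.Clone())
        canv.Close()
        return graphs

Without the framing step, a limit surface that never crosses the contour level inside the scanned grid would yield open curves; the frame bins guarantee a crossing at the boundary.
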
+# h_proto.GetXaxis().SetRangeUser(130,400) +# h_proto.GetYaxis().SetRangeUser(1,20) + +fillstyle = 'FSAME' +if args.hist is not None: + fillstyle = 'LSAME' + +# Now we draw the actual contours +if 'exp-2' in contours and 'exp+2' in contours: + for i, gr in enumerate(contours['exp-2']): + plot.Set(gr, LineColor=0, FillColor=ROOT.kGray + 0, FillStyle=1001) + if args.hist is not None: + plot.Set(gr, LineColor=ROOT.kGray + 0, LineWidth=2) + gr.Draw(fillstyle) +if 'exp-1' in contours and 'exp+1' in contours: + for i, gr in enumerate(contours['exp-1']): + plot.Set(gr, LineColor=0, FillColor=ROOT.kGray + 1, FillStyle=1001) + if args.hist is not None: + plot.Set(gr, LineColor=ROOT.kGray + 1, LineWidth=2) + gr.Draw(fillstyle) + fill_col = ROOT.kGray+0 + # If we're only drawing the 1 sigma contours then we should fill with + # white here instead + if 'exp-2' not in contours and 'exp+2' not in contours: + fill_col = ROOT.kWhite + for i, gr in enumerate(contours['exp+1']): + plot.Set(gr, LineColor=0, FillColor=fill_col, FillStyle=1001) + if args.hist is not None: + plot.Set(gr, LineColor=ROOT.kGray + 1, LineWidth=2) + gr.Draw(fillstyle) +if 'exp-2' in contours and 'exp+2' in contours: + for i, gr in enumerate(contours['exp+2']): + plot.Set(gr, LineColor=0, FillColor=ROOT.kWhite, FillStyle=1001) + if args.hist is not None: + plot.Set(gr, LineColor=ROOT.kGray + 0, LineWidth=2) + gr.Draw(fillstyle) +if 'exp0' in contours: + for i, gr in enumerate(contours['exp0']): + if args.hist is not None: + plot.Set(gr, LineWidth=2) + if 'obs' in contours: + plot.Set(gr, LineColor=ROOT.kBlack, LineStyle=2) + gr.Draw('LSAME') + else: + plot.Set(gr, LineStyle=2, FillStyle=1001, + FillColor=plot.CreateTransparentColor( + ROOT.kSpring + 6, 0.5)) + gr.Draw(fillstyle) + gr.Draw('LSAME') +if 'obs' in contours: + for i, gr in enumerate(contours['obs']): + plot.Set(gr, FillStyle=1001, FillColor=plot.CreateTransparentColor( + ROOT.kAzure + 6, 0.5)) + if args.hist is not None: + plot.Set(gr, LineWidth=2) + gr.Draw(fillstyle) + gr.Draw('LSAME') + +# We just want the top pad to look like a box, so set all the text and tick +# sizes to zero +pads[0].cd() +h_top = h_axis.Clone() +plot.Set(h_top.GetXaxis(), LabelSize=0, TitleSize=0, TickLength=0) +plot.Set(h_top.GetYaxis(), LabelSize=0, TitleSize=0, TickLength=0) +h_top.Draw() + +# Draw the legend in the top TPad +legend = plot.PositionedLegend(0.4, 0.11, 3, 0.015) +plot.Set(legend, NColumns=2, Header='#bf{%.0f%% CL Excluded:}' % (args.CL*100.)) +if 'obs' in contours: + legend.AddEntry(contours['obs'][0], "Observed", "F") +if 'exp-1' in contours and 'exp+1' in contours: + legend.AddEntry(contours['exp-1'][0], "#pm 1#sigma Expected", "F") +if 'exp0' in contours: + if 'obs' in contours: + legend.AddEntry(contours['exp0'][0], "Expected", "L") + else: + legend.AddEntry(contours['exp0'][0], "Expected", "F") +if 'exp-2' in contours and 'exp+2' in contours: + legend.AddEntry(contours['exp-2'][0], "#pm 2#sigma Expected", "F") +legend.Draw() + +# Draw logos and titles +plot.DrawCMSLogo(pads[0], 'CMS', args.cms_sub, 11, 0.045, 0.15, 1.0, '', 1.0) +plot.DrawTitle(pads[0], args.title_right, 3) +plot.DrawTitle(pads[0], args.title_left, 1) + + +# Redraw the frame because it usually gets covered by the filled areas +pads[1].cd() +pads[1].GetFrame().Draw() +pads[1].RedrawAxis() + +# Draw the scenario label +latex = ROOT.TLatex() +latex.SetNDC() +latex.SetTextSize(0.04) +latex.DrawLatex(0.155, 0.75, args.scenario_label) + +canv.Print('.pdf') +canv.Print('.png') +canv.Close() + +if debug is not 
None: + debug.Close() diff --git a/CombineTools/scripts/plotMultiDimFit.py b/CombineTools/scripts/plotMultiDimFit.py new file mode 100644 index 00000000000..d4220b01f78 --- /dev/null +++ b/CombineTools/scripts/plotMultiDimFit.py @@ -0,0 +1,140 @@ +import CombineHarvester.CombineTools.plotting as plot +import ROOT +import math +import argparse +from array import array + +ROOT.gROOT.SetBatch(ROOT.kTRUE) +parser = argparse.ArgumentParser() +parser.add_argument('--files', '-f', help='named input files') +args = parser.parse_args() + +ROOT.gROOT.ProcessLine( + "struct staff_t {\ + Float_t quantileExpected;\ + Float_t mh;\ + Double_t limit;\ + }" ) + +graph = ROOT.TGraph2D() +xvals = [] #ggH +yvals = [] #bbH +zvals = [] + +file = ROOT.TFile(args.files, 'r') +tree = file.Get('limit') +for evt in tree: +# skip=0 +# if (float(evt.deltaNLL) in zvals): # Avoid inserting the same point twice +# if (float(evt.r_ggH) == xvals[zvals.index(float(evt.deltaNLL))]) and (float(evt.r_bbH) == yvals[zvals.index(float(evt.deltaNLL))]): +# print "Found duplicate point at (ggH,bbH) = ("+str(xvals[zvals.index(float(evt.deltaNLL))])+","+str(yvals[zvals.index(float(evt.deltaNLL))])+")" +# continue +# else: +# for i in xrange(len(zvals)): +# if (float(evt.r_ggH) == xvals[i]) and (float(evt.r_bbH) == yvals[i]): +# print "Found duplicate point at (ggH,bbH) = ("+str(xvals[i])+","+str(yvals[i])+")" +# skip=1 +# break +# if (skip==1): continue + xvals.append(float(evt.r_ggH)) + yvals.append(float(evt.r_bbH)) + zvals.append(float(evt.deltaNLL)) +graph = ROOT.TGraph2D(len(zvals), array('d', xvals), array('d', yvals), array('d', zvals)) +fout = ROOT.TFile('multidimfit.root', 'RECREATE') +fout.WriteTObject(graph, 'Graph') +#graph.SaveAs("graph.root") + +ggH_list=[] +bbH_list=[] +for i in xrange(graph.GetN()) : + ggH_list.append(float(graph.GetX()[i])) + bbH_list.append(float(graph.GetY()[i])) +ggH_list = sorted(set(ggH_list)) +bbH_list = sorted(set(bbH_list)) +ggH_bins=len(ggH_list) +bbH_bins=len(bbH_list) +print "r_ggH_list: ", ggH_list, "Total number: ", ggH_bins +print "r_bbH_list: ", bbH_list, "Total number: ", bbH_bins + +#Create canvas and TH2D for each component +plot.ModTDRStyle(width=600, l=0.12) +#Slightly thicker frame to ensure contour edges dont overlay the axis +ROOT.gStyle.SetFrameLineWidth(3) +c1=ROOT.TCanvas() +axis = plot.makeVarBinHist2D('hist2d', ggH_list, bbH_list) +axis.GetXaxis().SetTitle("#sigma#font[42]{(gg#phi)}#upoint#font[52]{B}#font[42]{(#phi#rightarrow#tau#tau)} [pb]") +axis.GetYaxis().SetTitle("#sigma#font[42]{(bb#phi)}#upoint#font[52]{B}#font[42]{(#phi#rightarrow#tau#tau)} [pb]") +#Create two pads, one is just for the Legend +pad1 = ROOT.TPad("pad1","pad1",0,0.5,1,1) +pad1.SetFillStyle(4000) +pad1.Draw() +pad2 = ROOT.TPad("pad2","pad2",0,0,1,1) +pad2.SetFillStyle(4000) +pad2.Draw() +pads=[pad1,pad2] +pads[1].cd() + +bfit = plot.bestFit(tree, "r_ggH", "r_bbH", "") +#Note the binning of the TH2D for the interpolation should ~ match the initial input grid +#Could in future implement variable binning here +h2d = plot.treeToHist2D(tree, "r_ggH", "r_bbH", "h2d", "", ggH_list[0], ggH_list[-1], bbH_list[0], bbH_list[-1], ggH_bins-1, bbH_bins-1) +#h2d = plot.makeVarBinHist2D("h2d", ggH_list, bbH_list) +#plot.fillTH2(h2d, graph) +#h2d.SaveAs("heatmap.root") +for i in xrange(1): + h2d = plot.rebin(h2d) # Additional smoothing of the curves +#h2d.SaveAs("heatmap2.root") +axis.Draw() + +cont_1sigma = plot.contourFromTH2(h2d, 2.30, 20) #0.68 +cont_2sigma = plot.contourFromTH2(h2d, 5.99, 20) #0.95 + +for i, p in 
enumerate(cont_2sigma): + p.SetLineStyle(1) + p.SetLineWidth(2) + p.SetLineColor(ROOT.kBlack) + p.SetFillColor(ROOT.kBlue-10) + p.SetFillStyle(1001) + pads[1].cd() + p.Draw("F SAME") + p.Draw("CONT SAME") + +for i, p in enumerate(cont_1sigma): + p.SetLineStyle(1) + p.SetLineWidth(2) + p.SetLineColor(ROOT.kBlack) + p.SetFillColor(ROOT.kBlue-8) + p.SetFillStyle(1001) + pads[1].cd() + p.Draw("F SAME") + p.Draw("CONT SAME") + +bfit.Draw("P SAME") + +scenario_label="Multi-dimensional Fit" + +pads[0].cd() +legend = ROOT.TLegend(0.65, 0.35, 1.05, 0.85, '', 'NBNDC') +legend.SetNColumns(1) +legend.SetFillStyle(1001) +legend.SetTextSize(0.08) +legend.SetTextFont(62) + +hmass="" +if (args.files.find(".mH") != -1): + hmass = args.files[args.files.find(".mH")+3:args.files.find(".root")] + legend.SetHeader("m_{#phi} = "+hmass+" GeV") +if cont_1sigma[0] : legend.AddEntry(cont_1sigma[0], "68% CL", "F") +if cont_2sigma[0] : legend.AddEntry(cont_2sigma[0], "95% CL", "F") +legend.AddEntry(bfit, "Best fit", "P") +legend.Draw("same") + +plot.DrawCMSLogo(pads[1], 'Combine Harvester', scenario_label, 11, 0.045, 0.035, 1.2) +plot.DrawTitle(pads[1], '19.7 fb^{-1} (8 TeV)', 3); +plot.FixOverlay() +if (hmass == ""): + c1.SaveAs("mssm_multidimfit.pdf") + c1.SaveAs("mssm_multidimfit.png") +else: + c1.SaveAs("mssm_multidimfit_mH"+hmass+".pdf") + c1.SaveAs("mssm_multidimfit_mH"+hmass+".png") diff --git a/CombineTools/src/AZhSystematics.cc b/CombineTools/src/AZhSystematics.cc index 01bebf1d5c8..91ee4da1b7e 100644 --- a/CombineTools/src/AZhSystematics.cc +++ b/CombineTools/src/AZhSystematics.cc @@ -71,7 +71,7 @@ void AddSystematics_AZh(CombineHarvester & cb, CombineHarvester src) { src.cp().process({"ZZ"}) .AddSyst(cb, "pdf_qqbar", "lnN", SystMap<>::init - (1.06)); + (1.05)); src.cp().process({"ZZ_tt125"}) .AddSyst(cb, "CMS_htt_SM125_mu", "lnN", SystMap<>::init diff --git a/CombineTools/src/CombineHarvester.cc b/CombineTools/src/CombineHarvester.cc index 082570834c1..a23ed301a58 100644 --- a/CombineTools/src/CombineHarvester.cc +++ b/CombineTools/src/CombineHarvester.cc @@ -19,6 +19,7 @@ CombineHarvester::CombineHarvester() : verbosity_(0), log_(&(std::cout)) { // } flags_["zero-negative-bins-on-import"] = true; flags_["allow-missing-shapes"] = true; + flags_["workspaces-use-clone"] = false; // std::cout << "[CombineHarvester] Constructor called for " << this << "\n"; } @@ -53,6 +54,27 @@ CombineHarvester::CombineHarvester(CombineHarvester const& other) // << " -> " << this << "\n"; } + +void CombineHarvester::SetFlag(std::string const& flag, bool const& value) { + auto it = flags_.find(flag); + if (it != flags_.end()) { + FNLOG(std::cout) << " Changing value of flag \"" << it->first << "\" from " << it->second << " to " << value << "\n"; + it->second = value; + } else { + FNLOG(std::cout) << " Created new flag \"" << flag << "\" with value " << value << "\n"; + flags_[flag] = value; + } +} + +bool CombineHarvester::GetFlag(std::string const& flag) const { + auto it = flags_.find(flag); + if (it != flags_.end()) { + return it->second; + } else { + throw std::runtime_error(FNERROR("Flag " + flag + " is not defined")); + } +} + CombineHarvester CombineHarvester::deep() { CombineHarvester cpy; // std::cout << "[CombineHarvester] Deep copy called " << this @@ -606,10 +628,13 @@ std::shared_ptr CombineHarvester::SetupWorkspace( // - No: clone with same name and return // IMPORTANT: Don't used RooWorkspace::Clone(), it seems to introduce // bugs - // wspaces_[std::string(ws.GetName())] = std::shared_ptr( - // 
reinterpret_cast(ws.Clone())); - wspaces_[std::string(ws.GetName())] = - std::make_shared(RooWorkspace(ws)); + if (GetFlag("workspaces-use-clone")) { + wspaces_[std::string(ws.GetName())] = std::shared_ptr( + reinterpret_cast(ws.Clone())); + } else { + wspaces_[std::string(ws.GetName())] = + std::make_shared(RooWorkspace(ws)); + } return wspaces_.at(ws.GetName()); } @@ -641,10 +666,13 @@ std::shared_ptr CombineHarvester::SetupWorkspace( << " already defined, renaming to " << new_name << "\n"; - // wspaces_[new_name] = std::shared_ptr( - // reinterpret_cast(ws.Clone(new_name.c_str()))); - std::shared_ptr new_wsp = - std::make_shared(RooWorkspace(ws)); + std::shared_ptr new_wsp; + if (GetFlag("workspaces-use-clone")) { + new_wsp = std::shared_ptr( + reinterpret_cast(ws.Clone(new_name.c_str()))); + } else { + new_wsp = std::make_shared(RooWorkspace(ws)); + } new_wsp->SetName(new_name.c_str()); wspaces_[new_name] = new_wsp; return wspaces_.at(new_name); @@ -664,8 +692,13 @@ void CombineHarvester::ImportParameters(RooArgSet *vars) { Parameter par; par.set_name(y->GetName()); par.set_val(y->getVal()); - par.set_err_d(0.0); - par.set_err_u(0.0); + if (y->hasError() || y->hasAsymError()) { + par.set_err_d(y->getErrorLo()); + par.set_err_u(y->getErrorHi()); + } else { + par.set_err_d(0.); + par.set_err_u(0.); + } params_[y->GetName()] = std::make_shared(par); } else { if (verbosity_ >= 1) diff --git a/CombineTools/src/CombineHarvester_Python.cc b/CombineTools/src/CombineHarvester_Python.cc index 0df060cd35d..fbd24d56d84 100644 --- a/CombineTools/src/CombineHarvester_Python.cc +++ b/CombineTools/src/CombineHarvester_Python.cc @@ -201,6 +201,8 @@ BOOST_PYTHON_MODULE(libCombineHarvesterCombineTools) // Constructors, destructors and copying .def("cp", &CombineHarvester::cp) .def("deep", &CombineHarvester::deep) + .def("SetFlag", &CombineHarvester::SetFlag) + .def("GetFlag", &CombineHarvester::GetFlag) // Logging and printing .def("PrintAll", &CombineHarvester::PrintAll, py::return_internal_reference<>()) diff --git a/CombineTools/src/HttSystematics_MSSMRun2.cc b/CombineTools/src/HttSystematics_MSSMRun2.cc new file mode 100644 index 00000000000..e50dd59ee3a --- /dev/null +++ b/CombineTools/src/HttSystematics_MSSMRun2.cc @@ -0,0 +1,88 @@ +#include "CombineHarvester/CombineTools/interface/HttSystematics.h" +#include +#include +#include "CombineHarvester/CombineTools/interface/Systematics.h" +#include "CombineHarvester/CombineTools/interface/Process.h" +#include "CombineHarvester/CombineTools/interface/Utilities.h" + +namespace ch { + +using ch::syst::SystMap; +using ch::syst::SystMapAsymm; +using ch::syst::era; +using ch::syst::channel; +using ch::syst::bin_id; +using ch::syst::process; +using ch::JoinStr; + + +void AddMSSMRun2Systematics(CombineHarvester & cb) { + CombineHarvester src = cb.cp(); + + auto signal = Set2Vec(src.cp().signals().SetFromProcs( + std::mem_fn(&Process::process))); + + //src.cp().process({"ggH","bbH"}) + // .AddSyst(cb, "pdf_gg", "lnN", SystMap<>::init(1.097)); + + src.cp().process(ch::JoinStr({signal, {"ZTT", "TT"}})) + .AddSyst(cb, "CMS_eff_m", "lnN", SystMap<>::init(1.02)); + + src.cp().process(ch::JoinStr({signal, {"TT","VV","ZLL","ZTT","W"}})).channel({"em"}) + .AddSyst(cb, "CMS_eff_e", "lnN", SystMap<>::init(1.02)); + +// src.cp().channel({"em"}).process(ch::JoinStr({signal, {"TT","VV","ZLL","ZTT","W"}})).AddSyst(cb, "CMS_eff_m", "lnN", SystMap<>::init(1.02)); + +// src.cp().channel({"em"}).AddSyst(cb, "CMS_scale_j_13TeV", "lnN", SystMap::init + // ({"13TeV"}, {"TT"}, 0.91) 
+ // ({"13TeV"}, {"VV"}, 0.98)); + + + // src.cp().process(ch::JoinStr({signal, {"TT","VV","ZL","W","ZJ","ZTT"}})).channel({"et"}) + // .AddSyst(cb, "CMS_eff_e", "lnN", SystMap<>::init(1.02)); + + src.cp().process({"TT","VV","ZL","W","ZJ","ZTT","ZLL"}).AddSyst(cb, "lumi_13TeV", "lnN", SystMap<>::init(1.026)); + + src.cp().process({"ZTT"}) + .AddSyst(cb, "CMS_htt_zttNorm_13TeV", "lnN", SystMap<>::init(1.03)); + + src.cp().process(signal).AddSyst(cb, "lumi_13TeV", "lnN", SystMap<>::init(1.026)); + +// src.cp().channel({"em"}).process({"ZLL"}).AddSyst(cb, "CMS_htt_zttNorm_13TeV", "lnN", SystMap<>::init(1.03)); +// src.cp().channel({"et"}).process({"ZL","ZJ"}).AddSyst(cb, "CMS_htt_zttNorm_13TeV", "lnN", SystMap<>::init(1.03)); + src.cp().channel({"mt"}).process({"ZL","ZJ"}).AddSyst(cb, "CMS_htt_zttNorm_13TeV", "lnN", SystMap<>::init(1.03)); + + src.cp().process({"TT"}) + .AddSyst(cb, "CMS_htt_ttbarNorm_13TeV", "lnN", SystMap::init + ({"13TeV"}, 1.10)); + +/* src.cp().process({"QCD"}).channel({"em"}).AddSyst(cb, + "CMS_htt_em_QCD_13TeV","lnN",SystMap<>::init(1.3)); + + src.cp().process({"QCD"}).channel({"et"}).AddSyst(cb, + "CMS_htt_et_QCD_13TeV","lnN",SystMap<>::init(1.3));*/ + + src.cp().process({"QCD"}).channel({"mt"}).AddSyst(cb, + "CMS_htt_mt_QCD_13TeV","lnN",SystMap<>::init(1.3)); + + src.cp().process({"VV"}).AddSyst(cb, + "CMS_htt_VVNorm_13TeV", "lnN", SystMap<>::init(1.15)); + + src.cp().process({"W"}).channel({"mt"}).AddSyst(cb, + "CMS_htt_WNorm_13TeV","lnN",SystMap<>::init(1.2)); + +/* src.cp() + .AddSyst(cb, + "CMS_scale_j_$ERA", "lnN", SystMap::init + ({"8TeV"}, {1}, {"ggH"}, 1.04) + ({"8TeV"}, {1}, {"qqH"}, 0.99) + ({"8TeV"}, {2}, {"ggH"}, 1.10) + ({"8TeV"}, {2}, {"qqH"}, 1.04) + ({"8TeV"}, {2}, {"TT"}, 1.05)); +*/ + +// src.cp().process(ch::JoinStr({signal, {"ZTT"}})) + // .AddSyst(cb, "CMS_scale_t_mutau_$ERA", "shape", SystMap<>::init(1.00)); + //! [part6] +} +} diff --git a/CombineTools/src/HttSystematics_MSSMUpdate.cc b/CombineTools/src/HttSystematics_MSSMUpdate.cc index 6b9b31e2b67..c80c47b9404 100644 --- a/CombineTools/src/HttSystematics_MSSMUpdate.cc +++ b/CombineTools/src/HttSystematics_MSSMUpdate.cc @@ -68,8 +68,10 @@ void AddMSSMUpdateSystematics_et_mt(CombineHarvester & cb, CombineHarvester src) // src.cp().process(JoinStr({signal,{"ZTT", "ggH_SM125", "qqH_SM125", "VH_SM125"}})) // .AddSyst(cb, "CMS_scale_t_$CHANNEL_$ERA", "shape", SystMap::init(1)); + //! [part1] src.cp().process({"W"}) .AddSyst(cb, "CMS_shift1_muTau_nobtag_low_$ERA_W_fine_binning", "shape", SystMap::init({"mt"}, {10}, 1.000)); + //! 
[part1] src.cp().process({"W"}) .AddSyst(cb, "CMS_shift2_muTau_nobtag_low_$ERA_W_fine_binning", "shape", SystMap::init({"mt"}, {10}, 1.000)); src.cp().process({"QCD"}) diff --git a/CombineTools/src/Parameter.cc b/CombineTools/src/Parameter.cc index 908db47c8cd..9095f01c941 100644 --- a/CombineTools/src/Parameter.cc +++ b/CombineTools/src/Parameter.cc @@ -12,7 +12,8 @@ Parameter::Parameter() err_u_(1.0), err_d_(-1.0), range_u_(std::numeric_limits::max()), - range_d_(std::numeric_limits::lowest()) { + range_d_(std::numeric_limits::lowest()), + frozen_(false) { } Parameter::~Parameter() { } @@ -25,6 +26,7 @@ void swap(Parameter& first, Parameter& second) { swap(first.err_d_, second.err_d_); swap(first.range_u_, second.range_u_); swap(first.range_d_, second.range_d_); + swap(first.frozen_, second.frozen_); swap(first.vars_, second.vars_); } @@ -35,6 +37,7 @@ Parameter::Parameter(Parameter const& other) err_d_(other.err_d_), range_u_(other.range_u_), range_d_(other.range_d_), + frozen_(other.frozen_), vars_(other.vars_) { } @@ -44,7 +47,8 @@ Parameter::Parameter(Parameter&& other) err_u_(1.0), err_d_(-1.0), range_u_(std::numeric_limits::max()), - range_d_(std::numeric_limits::lowest()) { + range_d_(std::numeric_limits::lowest()), + frozen_(false) { swap(*this, other); } @@ -55,8 +59,8 @@ Parameter& Parameter::operator=(Parameter other) { std::ostream& Parameter::PrintHeader(std::ostream &out) { std::string line = - (boost::format("%-70s %-10.4f %-9.4f %-7.4f") - % "name" % "value" % "error_d" % "error_u").str(); + (boost::format("%-70s %-10.4f %-9.4f %-7.4f %-6s") + % "name" % "value" % "error_d" % "error_u" % "frozen").str(); std::string div(line.length(), '-'); out << div << std::endl; out << line << std::endl; @@ -65,11 +69,12 @@ std::ostream& Parameter::PrintHeader(std::ostream &out) { } std::ostream& operator<< (std::ostream &out, Parameter &val) { - out << boost::format("%-70s %-10.4f %-9.4f %-7.4f") + out << boost::format("%-70s %-10.4f %-9.4f %-7.4f %-6i") % val.name() % val.val() % val.err_d() - % val.err_u(); + % val.err_u() + % val.frozen(); return out; } } diff --git a/Doxyfile b/Doxyfile index 1d963e32c63..bd43d7d1cd7 100644 --- a/Doxyfile +++ b/Doxyfile @@ -1,4 +1,4 @@ -# Doxyfile 1.8.8 +# Doxyfile 1.8.10 # This file describes the settings to be used by the documentation system # doxygen (www.doxygen.org) for a project. @@ -46,10 +46,10 @@ PROJECT_NUMBER = PROJECT_BRIEF = -# With the PROJECT_LOGO tag one can specify an logo or icon that is included in -# the documentation. The maximum height of the logo should not exceed 55 pixels -# and the maximum width should not exceed 200 pixels. Doxygen will copy the logo -# to the output directory. +# With the PROJECT_LOGO tag one can specify a logo or an icon that is included +# in the documentation. The maximum height of the logo should not exceed 55 +# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy +# the logo to the output directory. PROJECT_LOGO = @@ -60,7 +60,7 @@ PROJECT_LOGO = OUTPUT_DIRECTORY = docs/ -# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 4096 sub- +# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub- # directories (in 2 levels) under the output directory of each output format and # will distribute the generated files over these directories. 
Enabling this # option can be useful when feeding doxygen a huge amount of source files, where @@ -93,14 +93,14 @@ ALLOW_UNICODE_NAMES = NO OUTPUT_LANGUAGE = English -# If the BRIEF_MEMBER_DESC tag is set to YES doxygen will include brief member +# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member # descriptions after the members that are listed in the file and class # documentation (similar to Javadoc). Set to NO to disable this. # The default value is: YES. BRIEF_MEMBER_DESC = YES -# If the REPEAT_BRIEF tag is set to YES doxygen will prepend the brief +# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief # description of a member or function before the detailed description # # Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the @@ -135,7 +135,7 @@ ALWAYS_DETAILED_SEC = NO INLINE_INHERITED_MEMB = NO -# If the FULL_PATH_NAMES tag is set to YES doxygen will prepend the full path +# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path # before files name in the file list and in the header files. If set to NO the # shortest path that makes the file name unique will be used # The default value is: YES. @@ -205,9 +205,9 @@ MULTILINE_CPP_IS_BRIEF = YES INHERIT_DOCS = YES -# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce a -# new page for each member. If set to NO, the documentation of a member will be -# part of the file/class/namespace that contains it. +# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new +# page for each member. If set to NO, the documentation of a member will be part +# of the file/class/namespace that contains it. # The default value is: NO. SEPARATE_MEMBER_PAGES = NO @@ -276,7 +276,7 @@ OPTIMIZE_OUTPUT_VHDL = NO # instance to make doxygen treat .inc files as Fortran files (default is PHP), # and .f files as C (default is Fortran), use: inc=Fortran f=C. # -# Note For files without extension you can use no_extension as a placeholder. +# Note: For files without extension you can use no_extension as a placeholder. # # Note that for custom extensions you also need to set FILE_PATTERNS otherwise # the files are not read by doxygen. @@ -295,8 +295,8 @@ MARKDOWN_SUPPORT = YES # When enabled doxygen tries to link words that correspond to documented # classes, or namespaces to their corresponding documentation. Such a link can -# be prevented in individual cases by by putting a % sign in front of the word -# or globally by setting AUTOLINK_SUPPORT to NO. +# be prevented in individual cases by putting a % sign in front of the word or +# globally by setting AUTOLINK_SUPPORT to NO. # The default value is: YES. AUTOLINK_SUPPORT = YES @@ -336,13 +336,20 @@ SIP_SUPPORT = NO IDL_PROPERTY_SUPPORT = YES # If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC -# tag is set to YES, then doxygen will reuse the documentation of the first +# tag is set to YES then doxygen will reuse the documentation of the first # member in the group (if any) for the other members of the group. By default # all members of a group must be documented explicitly. # The default value is: NO. DISTRIBUTE_GROUP_DOC = NO +# If one adds a struct or class to a group and this option is enabled, then also +# any nested class or struct is added to the same group. By default this option +# is disabled and one has to add nested compounds explicitly via \ingroup. +# The default value is: NO. 
+ +GROUP_NESTED_COMPOUNDS = NO + # Set the SUBGROUPING tag to YES to allow class member groups of the same type # (for instance a group of public functions) to be put as a subgroup of that # type (e.g. under the Public Functions section). Set it to NO to prevent @@ -401,7 +408,7 @@ LOOKUP_CACHE_SIZE = 0 # Build related configuration options #--------------------------------------------------------------------------- -# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in +# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in # documentation are documented, even if no documentation was available. Private # class members and static file members will be hidden unless the # EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES. @@ -411,35 +418,35 @@ LOOKUP_CACHE_SIZE = 0 EXTRACT_ALL = YES -# If the EXTRACT_PRIVATE tag is set to YES all private members of a class will +# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will # be included in the documentation. # The default value is: NO. EXTRACT_PRIVATE = NO -# If the EXTRACT_PACKAGE tag is set to YES all members with package or internal +# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal # scope will be included in the documentation. # The default value is: NO. EXTRACT_PACKAGE = NO -# If the EXTRACT_STATIC tag is set to YES all static members of a file will be +# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be # included in the documentation. # The default value is: NO. EXTRACT_STATIC = NO -# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) defined -# locally in source files will be included in the documentation. If set to NO +# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined +# locally in source files will be included in the documentation. If set to NO, # only classes defined in header files are included. Does not have any effect # for Java sources. # The default value is: YES. EXTRACT_LOCAL_CLASSES = YES -# This flag is only useful for Objective-C code. When set to YES local methods, +# This flag is only useful for Objective-C code. If set to YES, local methods, # which are defined in the implementation section but not in the interface are -# included in the documentation. If set to NO only methods in the interface are +# included in the documentation. If set to NO, only methods in the interface are # included. # The default value is: NO. @@ -464,21 +471,21 @@ HIDE_UNDOC_MEMBERS = NO # If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all # undocumented classes that are normally visible in the class hierarchy. If set -# to NO these classes will be included in the various overviews. This option has -# no effect if EXTRACT_ALL is enabled. +# to NO, these classes will be included in the various overviews. This option +# has no effect if EXTRACT_ALL is enabled. # The default value is: NO. HIDE_UNDOC_CLASSES = NO # If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend -# (class|struct|union) declarations. If set to NO these declarations will be +# (class|struct|union) declarations. If set to NO, these declarations will be # included in the documentation. # The default value is: NO. HIDE_FRIEND_COMPOUNDS = NO # If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any -# documentation blocks found inside the body of a function. If set to NO these +# documentation blocks found inside the body of a function. 
If set to NO, these # blocks will be appended to the function's detailed documentation block. # The default value is: NO. @@ -492,7 +499,7 @@ HIDE_IN_BODY_DOCS = NO INTERNAL_DOCS = NO # If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file -# names in lower-case letters. If set to YES upper-case letters are also +# names in lower-case letters. If set to YES, upper-case letters are also # allowed. This is useful if you have classes or files whose names only differ # in case and if your file system supports case sensitive file names. Windows # and Mac users are advised to set this option to NO. @@ -501,12 +508,19 @@ INTERNAL_DOCS = NO CASE_SENSE_NAMES = NO # If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with -# their full class and namespace scopes in the documentation. If set to YES the +# their full class and namespace scopes in the documentation. If set to YES, the # scope will be hidden. # The default value is: NO. HIDE_SCOPE_NAMES = NO +# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will +# append additional text to a page's title, such as Class Reference. If set to +# YES the compound reference will be hidden. +# The default value is: NO. + +HIDE_COMPOUND_REFERENCE= NO + # If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of # the files that are included by a file in the documentation of that file. # The default value is: YES. @@ -534,14 +548,14 @@ INLINE_INFO = YES # If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the # (detailed) documentation of file and class members alphabetically by member -# name. If set to NO the members will appear in declaration order. +# name. If set to NO, the members will appear in declaration order. # The default value is: YES. SORT_MEMBER_DOCS = NO # If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief # descriptions of file, namespace and class members alphabetically by member -# name. If set to NO the members will appear in declaration order. Note that +# name. If set to NO, the members will appear in declaration order. Note that # this will also influence the order of the classes in the class list. # The default value is: NO. @@ -586,27 +600,25 @@ SORT_BY_SCOPE_NAME = NO STRICT_PROTO_MATCHING = NO -# The GENERATE_TODOLIST tag can be used to enable ( YES) or disable ( NO) the -# todo list. This list is created by putting \todo commands in the -# documentation. +# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo +# list. This list is created by putting \todo commands in the documentation. # The default value is: YES. -GENERATE_TODOLIST = YES +GENERATE_TODOLIST = NO -# The GENERATE_TESTLIST tag can be used to enable ( YES) or disable ( NO) the -# test list. This list is created by putting \test commands in the -# documentation. +# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test +# list. This list is created by putting \test commands in the documentation. # The default value is: YES. GENERATE_TESTLIST = YES -# The GENERATE_BUGLIST tag can be used to enable ( YES) or disable ( NO) the bug +# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug # list. This list is created by putting \bug commands in the documentation. # The default value is: YES. 
GENERATE_BUGLIST = YES -# The GENERATE_DEPRECATEDLIST tag can be used to enable ( YES) or disable ( NO) +# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO) # the deprecated list. This list is created by putting \deprecated commands in # the documentation. # The default value is: YES. @@ -631,8 +643,8 @@ ENABLED_SECTIONS = MAX_INITIALIZER_LINES = 30 # Set the SHOW_USED_FILES tag to NO to disable the list of files generated at -# the bottom of the documentation of classes and structs. If set to YES the list -# will mention the files that were used to generate the documentation. +# the bottom of the documentation of classes and structs. If set to YES, the +# list will mention the files that were used to generate the documentation. # The default value is: YES. SHOW_USED_FILES = YES @@ -696,7 +708,7 @@ CITE_BIB_FILES = QUIET = YES # The WARNINGS tag can be used to turn on/off the warning messages that are -# generated to standard error ( stderr) by doxygen. If WARNINGS is set to YES +# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES # this implies that the warnings are on. # # Tip: Turn warnings on while writing the documentation. @@ -704,7 +716,7 @@ QUIET = YES WARNINGS = YES -# If the WARN_IF_UNDOCUMENTED tag is set to YES, then doxygen will generate +# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate # warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag # will automatically be disabled. # The default value is: YES. @@ -721,8 +733,8 @@ WARN_IF_DOC_ERROR = YES # This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that # are documented, but have no documentation for their parameters or return -# value. If set to NO doxygen will only warn about wrong or incomplete parameter -# documentation, but not about the absence of documentation. +# value. If set to NO, doxygen will only warn about wrong or incomplete +# parameter documentation, but not about the absence of documentation. # The default value is: NO. WARN_NO_PARAMDOC = NO @@ -750,13 +762,14 @@ WARN_LOGFILE = # The INPUT tag is used to specify the files and/or directories that contain # documented source files. You may enter file names like myfile.cpp or # directories like /usr/src/myproject. Separate the files or directories with -# spaces. +# spaces. See also FILE_PATTERNS and EXTENSION_MAPPING # Note: If this tag is empty the current directory is searched. INPUT = CombineTools/interface \ CombineTools/src \ CombinePdfs/interface \ CombinePdfs/src \ + CombineTools/python/plotting.py \ CombineTools/bin/SMLegacyExample.cpp \ CombineTools/bin/MSSMYieldTable.cpp \ CombineTools/bin/PostFitShapes.cpp \ @@ -765,6 +778,13 @@ INPUT = CombineTools/interface \ docs/Example2.md \ docs/PythonInterface.md \ docs/PostFitShapes.md \ + docs/RooMorphingPdf.md \ + docs/ReproduceRun1HTTDatacards.md \ + docs/ChargedHiggs.md \ + docs/BSM-ModelIndependent-Limits-MSSMHTT.md \ + docs/BSM-ModelIndependent-Limits-HhhAZh.md \ + docs/mA-tanb-Limits.md \ + docs/HybridNewGrid.md \ # docs/BuildSystem.md \ # docs/MSSMWithMorphing.md @@ -779,12 +799,17 @@ INPUT_ENCODING = UTF-8 # If the value of the INPUT tag contains directories, you can use the # FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and -# *.h) to filter out the source-files in the directories. 
If left blank the -# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii, -# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp, -# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown, -# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf, -# *.qsf, *.as and *.js. +# *.h) to filter out the source-files in the directories. +# +# Note that for custom extensions or not directly supported extensions you also +# need to set EXTENSION_MAPPING for the extension otherwise the files are not +# read by doxygen. +# +# If left blank the following patterns are tested:*.c, *.cc, *.cxx, *.cpp, +# *.c++, *.java, *.ii, *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, +# *.hh, *.hxx, *.hpp, *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, +# *.m, *.markdown, *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, +# *.vhdl, *.ucf, *.qsf, *.as and *.js. FILE_PATTERNS = *.cc \ *.cpp \ @@ -842,6 +867,11 @@ EXCLUDE_SYMBOLS = # command). EXAMPLE_PATH = CombineTools/bin \ + CombinePdfs/bin \ + CombinePdfs/src \ + CombinePdfs/python \ + Run1BSMComb/bin \ + CombineTools/src \ CombineTools/input/examples # If the value of the EXAMPLE_PATH tag contains directories, you can use the @@ -888,10 +918,10 @@ INPUT_FILTER = # filters are used. If the FILTER_PATTERNS tag is empty or if none of the # patterns match the file name, INPUT_FILTER is applied. -FILTER_PATTERNS = +FILTER_PATTERNS = *.py=./docs/py_filter # If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using -# INPUT_FILTER ) will also be used to filter the input files that are used for +# INPUT_FILTER) will also be used to filter the input files that are used for # producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES). # The default value is: NO. @@ -951,7 +981,7 @@ REFERENCED_BY_RELATION = NO REFERENCES_RELATION = NO # If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set -# to YES, then the hyperlinks from functions in REFERENCES_RELATION and +# to YES then the hyperlinks from functions in REFERENCES_RELATION and # REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will # link to the documentation. # The default value is: YES. @@ -1028,7 +1058,7 @@ IGNORE_PREFIX = # Configuration options related to the HTML output #--------------------------------------------------------------------------- -# If the GENERATE_HTML tag is set to YES doxygen will generate HTML output +# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output # The default value is: YES. GENERATE_HTML = YES @@ -1094,10 +1124,10 @@ HTML_STYLESHEET = # cascading style sheets that are included after the standard style sheets # created by doxygen. Using this option one can overrule certain style aspects. # This is preferred over using HTML_STYLESHEET since it does not replace the -# standard style sheet and is therefor more robust against future updates. +# standard style sheet and is therefore more robust against future updates. # Doxygen will copy the style sheet files to the output directory. -# Note: The order of the extra stylesheet files is of importance (e.g. the last -# stylesheet in the list overrules the setting of the previous ones in the +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the # list). For an example see the documentation. 
# This tag requires that the tag GENERATE_HTML is set to YES. @@ -1114,7 +1144,7 @@ HTML_EXTRA_STYLESHEET = docs/customdoxygen.css HTML_EXTRA_FILES = # The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen -# will adjust the colors in the stylesheet and background images according to +# will adjust the colors in the style sheet and background images according to # this color. Hue is specified as an angle on a colorwheel, see # http://en.wikipedia.org/wiki/Hue for more information. For instance the value # 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300 @@ -1145,8 +1175,9 @@ HTML_COLORSTYLE_GAMMA = 100 # If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML # page will contain the date and time when the page was generated. Setting this -# to NO can help when comparing the output of multiple runs. -# The default value is: YES. +# to YES can help to show when doxygen was last run and thus if the +# documentation is up to date. +# The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. HTML_TIMESTAMP = YES @@ -1242,28 +1273,28 @@ GENERATE_HTMLHELP = NO CHM_FILE = # The HHC_LOCATION tag can be used to specify the location (absolute path -# including file name) of the HTML help compiler ( hhc.exe). If non-empty +# including file name) of the HTML help compiler (hhc.exe). If non-empty, # doxygen will try to run the HTML help compiler on the generated index.hhp. # The file has to be specified with full path. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. HHC_LOCATION = -# The GENERATE_CHI flag controls if a separate .chi index file is generated ( -# YES) or that it should be included in the master .chm file ( NO). +# The GENERATE_CHI flag controls if a separate .chi index file is generated +# (YES) or that it should be included in the master .chm file (NO). # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. GENERATE_CHI = NO -# The CHM_INDEX_ENCODING is used to encode HtmlHelp index ( hhk), content ( hhc) +# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc) # and project file content. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. CHM_INDEX_ENCODING = -# The BINARY_TOC flag controls whether a binary table of contents is generated ( -# YES) or a normal table of contents ( NO) in the .chm file. Furthermore it +# The BINARY_TOC flag controls whether a binary table of contents is generated +# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it # enables the Previous and Next buttons. # The default value is: NO. # This tag requires that the tag GENERATE_HTMLHELP is set to YES. @@ -1377,7 +1408,7 @@ DISABLE_INDEX = YES # index structure (just like the one that is generated for HTML Help). For this # to work a browser that supports JavaScript, DHTML, CSS and frames is required # (i.e. any modern browser). Windows users are probably better off using the -# HTML help feature. Via custom stylesheets (see HTML_EXTRA_STYLESHEET) one can +# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can # further fine-tune the look of the index. As an example, the default style # sheet generated by doxygen has an example that shows how to put an image at # the root of the tree instead of the PROJECT_NAME. 
Since the tree basically has @@ -1405,7 +1436,7 @@ ENUM_VALUES_PER_LINE = 4 TREEVIEW_WIDTH = 270 -# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open links to +# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to # external symbols imported via tag files in a separate window. # The default value is: NO. # This tag requires that the tag GENERATE_HTML is set to YES. @@ -1434,7 +1465,7 @@ FORMULA_TRANSPARENT = YES # Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see # http://www.mathjax.org) which uses client side Javascript for the rendering -# instead of using prerendered bitmaps. Use this if you do not have LaTeX +# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX # installed or if you want to formulas look prettier in the HTML output. When # enabled you may also need to install MathJax separately and configure the path # to it using the MATHJAX_RELPATH option. @@ -1520,7 +1551,7 @@ SERVER_BASED_SEARCH = NO # external search engine pointed to by the SEARCHENGINE_URL option to obtain the # search results. # -# Doxygen ships with an example indexer ( doxyindexer) and search engine +# Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library # Xapian (see: http://xapian.org/). # @@ -1533,7 +1564,7 @@ EXTERNAL_SEARCH = NO # The SEARCHENGINE_URL should point to a search engine hosted by a web server # which will return the search results when EXTERNAL_SEARCH is enabled. # -# Doxygen ships with an example indexer ( doxyindexer) and search engine +# Doxygen ships with an example indexer (doxyindexer) and search engine # (doxysearch.cgi) which are based on the open source search engine library # Xapian (see: http://xapian.org/). See the section "External Indexing and # Searching" for details. @@ -1571,7 +1602,7 @@ EXTRA_SEARCH_MAPPINGS = # Configuration options related to the LaTeX output #--------------------------------------------------------------------------- -# If the GENERATE_LATEX tag is set to YES doxygen will generate LaTeX output. +# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output. # The default value is: YES. GENERATE_LATEX = NO @@ -1602,7 +1633,7 @@ LATEX_CMD_NAME = latex MAKEINDEX_CMD_NAME = makeindex -# If the COMPACT_LATEX tag is set to YES doxygen generates more compact LaTeX +# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX # documents. This may be useful for small projects and may help to save some # trees in general. # The default value is: NO. @@ -1620,9 +1651,12 @@ COMPACT_LATEX = NO PAPER_TYPE = a4 # The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names -# that should be included in the LaTeX output. To get the times font for -# instance you can specify -# EXTRA_PACKAGES=times +# that should be included in the LaTeX output. The package can be specified just +# by its name or with the correct syntax as to be used with the LaTeX +# \usepackage command. To get the times font for instance you can specify : +# EXTRA_PACKAGES=times or EXTRA_PACKAGES={times} +# To use the option intlimits with the amsmath package you can specify: +# EXTRA_PACKAGES=[intlimits]{amsmath} # If left blank no extra packages will be included. # This tag requires that the tag GENERATE_LATEX is set to YES. @@ -1637,9 +1671,9 @@ EXTRA_PACKAGES = # Note: Only use a user-defined header if you know what you are doing! 
The # following commands have a special meaning inside the header: $title, # $datetime, $date, $doxygenversion, $projectname, $projectnumber, -# $projectbrief, $projectlogo. Doxygen will replace $title with the empy string, -# for the replacement values of the other commands the user is refered to -# HTML_HEADER. +# $projectbrief, $projectlogo. Doxygen will replace $title with the empty +# string, for the replacement values of the other commands the user is referred +# to HTML_HEADER. # This tag requires that the tag GENERATE_LATEX is set to YES. LATEX_HEADER = @@ -1655,6 +1689,17 @@ LATEX_HEADER = LATEX_FOOTER = +# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined +# LaTeX style sheets that are included after the standard style sheets created +# by doxygen. Using this option one can overrule certain style aspects. Doxygen +# will copy the style sheet files to the output directory. +# Note: The order of the extra style sheet files is of importance (e.g. the last +# style sheet in the list overrules the setting of the previous ones in the +# list). +# This tag requires that the tag GENERATE_LATEX is set to YES. + +LATEX_EXTRA_STYLESHEET = + # The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or # other source files which should be copied to the LATEX_OUTPUT output # directory. Note that the files will be copied as-is; there are no commands or @@ -1673,7 +1718,7 @@ LATEX_EXTRA_FILES = PDF_HYPERLINKS = YES # If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate -# the PDF file directly from the LaTeX files. Set this option to YES to get a +# the PDF file directly from the LaTeX files. Set this option to YES, to get a # higher quality PDF documentation. # The default value is: YES. # This tag requires that the tag GENERATE_LATEX is set to YES. @@ -1718,7 +1763,7 @@ LATEX_BIB_STYLE = plain # Configuration options related to the RTF output #--------------------------------------------------------------------------- -# If the GENERATE_RTF tag is set to YES doxygen will generate RTF output. The +# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The # RTF output is optimized for Word 97 and may not look too pretty with other RTF # readers/editors. # The default value is: NO. @@ -1733,7 +1778,7 @@ GENERATE_RTF = NO RTF_OUTPUT = rtf -# If the COMPACT_RTF tag is set to YES doxygen generates more compact RTF +# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF # documents. This may be useful for small projects and may help to save some # trees in general. # The default value is: NO. @@ -1770,11 +1815,21 @@ RTF_STYLESHEET_FILE = RTF_EXTENSIONS_FILE = +# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code +# with syntax highlighting in the RTF output. +# +# Note that which sources are shown also depends on other settings such as +# SOURCE_BROWSER. +# The default value is: NO. +# This tag requires that the tag GENERATE_RTF is set to YES. + +RTF_SOURCE_CODE = NO + #--------------------------------------------------------------------------- # Configuration options related to the man page output #--------------------------------------------------------------------------- -# If the GENERATE_MAN tag is set to YES doxygen will generate man pages for +# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for # classes and files. # The default value is: NO. 
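
The FILTER_PATTERNS = *.py=./docs/py_filter change earlier in this diff ties the newly listed CombineTools/python/plotting.py input to a doxygen source filter. The filter script itself is not part of this patch; for reference, doxygen invokes a filter as "<filter> <input-file>" and parses whatever the filter prints to stdout, so a minimal stand-in (illustrative only, not the actual docs/py_filter) would be:

    #!/usr/bin/env python
    # Illustrative stand-in for docs/py_filter; the real filter is not shown
    # in this patch. Doxygen executes "py_filter <input-file>" and reads the
    # filtered source from the filter's stdout.
    import sys

    with open(sys.argv[1]) as src:
        for line in src:
            # A real filter would rewrite Python docstrings into doxygen
            # comment blocks here; this sketch passes the source through
            # unchanged.
            sys.stdout.write(line)
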
@@ -1818,7 +1873,7 @@ MAN_LINKS = NO # Configuration options related to the XML output #--------------------------------------------------------------------------- -# If the GENERATE_XML tag is set to YES doxygen will generate an XML file that +# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that # captures the structure of the code including all documentation. # The default value is: NO. @@ -1832,7 +1887,7 @@ GENERATE_XML = NO XML_OUTPUT = xml -# If the XML_PROGRAMLISTING tag is set to YES doxygen will dump the program +# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program # listings (including syntax highlighting and cross-referencing information) to # the XML output. Note that enabling this will significantly increase the size # of the XML output. @@ -1845,7 +1900,7 @@ XML_PROGRAMLISTING = YES # Configuration options related to the DOCBOOK output #--------------------------------------------------------------------------- -# If the GENERATE_DOCBOOK tag is set to YES doxygen will generate Docbook files +# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files # that can be used to generate PDF. # The default value is: NO. @@ -1859,7 +1914,7 @@ GENERATE_DOCBOOK = NO DOCBOOK_OUTPUT = docbook -# If the DOCBOOK_PROGRAMLISTING tag is set to YES doxygen will include the +# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the # program listings (including syntax highlighting and cross-referencing # information) to the DOCBOOK output. Note that enabling this will significantly # increase the size of the DOCBOOK output. @@ -1872,10 +1927,10 @@ DOCBOOK_PROGRAMLISTING = NO # Configuration options for the AutoGen Definitions output #--------------------------------------------------------------------------- -# If the GENERATE_AUTOGEN_DEF tag is set to YES doxygen will generate an AutoGen -# Definitions (see http://autogen.sf.net) file that captures the structure of -# the code including all documentation. Note that this feature is still -# experimental and incomplete at the moment. +# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an +# AutoGen Definitions (see http://autogen.sf.net) file that captures the +# structure of the code including all documentation. Note that this feature is +# still experimental and incomplete at the moment. # The default value is: NO. GENERATE_AUTOGEN_DEF = NO @@ -1884,7 +1939,7 @@ GENERATE_AUTOGEN_DEF = NO # Configuration options related to the Perl module output #--------------------------------------------------------------------------- -# If the GENERATE_PERLMOD tag is set to YES doxygen will generate a Perl module +# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module # file that captures the structure of the code including all documentation. # # Note that this feature is still experimental and incomplete at the moment. @@ -1892,7 +1947,7 @@ GENERATE_AUTOGEN_DEF = NO GENERATE_PERLMOD = NO -# If the PERLMOD_LATEX tag is set to YES doxygen will generate the necessary +# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary # Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI # output from the Perl module output. # The default value is: NO. 
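
GENERATE_XML above is left at NO, but for completeness: enabling it makes doxygen write an xml/ tree (XML_OUTPUT = xml) whose index.xml lists every documented compound, which is convenient for tooling. A minimal reader, assuming doxygen's standard index.xml layout, might look like:

    # Minimal sketch of a doxygen XML consumer; only applies if GENERATE_XML
    # is switched to YES. Assumes the standard layout
    # <doxygenindex><compound kind="..."><name>...</name></compound>.
    import xml.etree.ElementTree as ET

    root = ET.parse('xml/index.xml').getroot()
    for compound in root.findall('compound'):
        print '%-12s %s' % (compound.get('kind'), compound.findtext('name'))
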
@@ -1900,9 +1955,9 @@ GENERATE_PERLMOD = NO PERLMOD_LATEX = NO -# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be nicely +# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely # formatted so it can be parsed by a human reader. This is useful if you want to -# understand what is going on. On the other hand, if this tag is set to NO the +# understand what is going on. On the other hand, if this tag is set to NO, the # size of the Perl module output will be much smaller and Perl will parse it # just the same. # The default value is: YES. @@ -1922,14 +1977,14 @@ PERLMOD_MAKEVAR_PREFIX = # Configuration options related to the preprocessor #--------------------------------------------------------------------------- -# If the ENABLE_PREPROCESSING tag is set to YES doxygen will evaluate all +# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all # C-preprocessor directives found in the sources and include files. # The default value is: YES. ENABLE_PREPROCESSING = YES -# If the MACRO_EXPANSION tag is set to YES doxygen will expand all macro names -# in the source code. If set to NO only conditional compilation will be +# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names +# in the source code. If set to NO, only conditional compilation will be # performed. Macro expansion can be done in a controlled way by setting # EXPAND_ONLY_PREDEF to YES. # The default value is: NO. @@ -1945,7 +2000,7 @@ MACRO_EXPANSION = NO EXPAND_ONLY_PREDEF = NO -# If the SEARCH_INCLUDES tag is set to YES the includes files in the +# If the SEARCH_INCLUDES tag is set to YES, the include files in the # INCLUDE_PATH will be searched if a #include is found. # The default value is: YES. # This tag requires that the tag ENABLE_PREPROCESSING is set to YES. @@ -2021,20 +2076,21 @@ TAGFILES = GENERATE_TAGFILE = -# If the ALLEXTERNALS tag is set to YES all external class will be listed in the -# class index. If set to NO only the inherited external classes will be listed. +# If the ALLEXTERNALS tag is set to YES, all external class will be listed in +# the class index. If set to NO, only the inherited external classes will be +# listed. # The default value is: NO. ALLEXTERNALS = NO -# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed in -# the modules index. If set to NO, only the current project's groups will be +# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed +# in the modules index. If set to NO, only the current project's groups will be # listed. # The default value is: YES. EXTERNAL_GROUPS = YES -# If the EXTERNAL_PAGES tag is set to YES all external pages will be listed in +# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in # the related pages index. If set to NO, only the current project's pages will # be listed. # The default value is: YES. @@ -2051,7 +2107,7 @@ PERL_PATH = /usr/bin/perl # Configuration options related to the dot tool #--------------------------------------------------------------------------- -# If the CLASS_DIAGRAMS tag is set to YES doxygen will generate a class diagram +# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram # (in HTML and LaTeX) for classes with base or super classes. Setting the tag to # NO turns the diagrams off. 
Note that this option also works with HAVE_DOT # disabled, but it is recommended to install and use dot, since it yields more @@ -2076,7 +2132,7 @@ MSCGEN_PATH = DIA_PATH = -# If set to YES, the inheritance and collaboration graphs will hide inheritance +# If set to YES the inheritance and collaboration graphs will hide inheritance # and usage relations if the target is undocumented or is not a class. # The default value is: YES. @@ -2149,7 +2205,7 @@ COLLABORATION_GRAPH = YES GROUP_GRAPHS = YES -# If the UML_LOOK tag is set to YES doxygen will generate inheritance and +# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and # collaboration diagrams in a style similar to the OMG's Unified Modeling # Language. # The default value is: NO. @@ -2201,7 +2257,8 @@ INCLUDED_BY_GRAPH = YES # # Note that enabling this option will significantly increase the time of a run. # So in most cases it will be better to enable call graphs for selected -# functions only using the \callgraph command. +# functions only using the \callgraph command. Disabling a call graph can be +# accomplished by means of the command \hidecallgraph. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. @@ -2212,7 +2269,8 @@ CALL_GRAPH = NO # # Note that enabling this option will significantly increase the time of a run. # So in most cases it will be better to enable caller graphs for selected -# functions only using the \callergraph command. +# functions only using the \callergraph command. Disabling a caller graph can be +# accomplished by means of the command \hidecallergraph. # The default value is: NO. # This tag requires that the tag HAVE_DOT is set to YES. @@ -2235,11 +2293,15 @@ GRAPHICAL_HIERARCHY = YES DIRECTORY_GRAPH = YES # The DOT_IMAGE_FORMAT tag can be used to set the image format of the images -# generated by dot. +# generated by dot. For an explanation of the image formats see the section +# output formats in the documentation of the dot tool (Graphviz (see: +# http://www.graphviz.org/)). # Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order # to make the SVG files visible in IE 9+ (other browsers do not have this # requirement). -# Possible values are: png, jpg, gif and svg. +# Possible values are: png, jpg, gif, svg, png:gd, png:gd:gd, png:cairo, +# png:cairo:gd, png:cairo:cairo, png:cairo:gdiplus, png:gdiplus and +# png:gdiplus:gdiplus. # The default value is: png. # This tag requires that the tag HAVE_DOT is set to YES. @@ -2287,10 +2349,14 @@ DIAFILE_DIRS = # PlantUML is not used or called during a preprocessing step. Doxygen will # generate a warning when it encounters a \startuml command in this case and # will not generate output for the diagram. -# This tag requires that the tag HAVE_DOT is set to YES. PLANTUML_JAR_PATH = +# When using plantuml, the specified paths are searched for files specified by +# the !include statement in a plantuml block. + +PLANTUML_INCLUDE_PATH = + # The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes # that will be shown in the graph. If the number of nodes in a graph becomes # larger than this value, doxygen will truncate the graph, which is visualized @@ -2327,7 +2393,7 @@ MAX_DOT_GRAPH_DEPTH = 0 DOT_TRANSPARENT = NO -# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output +# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output # files in one run (i.e. multiple -o and -T options on the command line). 
This # makes dot run faster, but since only newer versions of dot (>1.8.10) support # this, this feature is disabled by default. @@ -2344,7 +2410,7 @@ DOT_MULTI_TARGETS = NO GENERATE_LEGEND = YES -# If the DOT_CLEANUP tag is set to YES doxygen will remove the intermediate dot +# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot # files that are used to generate the various graphs. # The default value is: YES. # This tag requires that the tag HAVE_DOT is set to YES. diff --git a/HIG15002/.gitignore b/HIG15002/.gitignore new file mode 100644 index 00000000000..2cf1da096ca --- /dev/null +++ b/HIG15002/.gitignore @@ -0,0 +1,5 @@ +*.tar +*.tar.gz +cov-matrix +lhchcg +*.json diff --git a/HIG15002/scripts/D1_ranges.py b/HIG15002/scripts/D1_ranges.py new file mode 100755 index 00000000000..bf47e185872 --- /dev/null +++ b/HIG15002/scripts/D1_ranges.py @@ -0,0 +1,62 @@ +#!/usr/bin/env python +import sys + +RANGES = { + 'D1_general': { + 'mu_WW' : [0,5], + 'mu_ZZ' : [0,5], + 'mu_gamgam' : [0,5], + 'mu_tautau' : [0,5], + 'l_VBF_gamgam' : [0,5], + 'l_VBF_WW' : [0,5], + 'l_VBF_ZZ' : [0,10], + 'l_VBF_tautau' : [0,5], + 'l_WH_gamgam' : [0,30], + 'l_WH_WW' : [0,10], + 'l_WH_ZZ' : [0,30], + 'l_WH_tautau' : [0,10], + 'l_WH_bb' : [0,10], + 'l_ZH_gamgam' : [0,10], + 'l_ZH_WW' : [0,20], + 'l_ZH_ZZ' : [0,50], + 'l_ZH_tautau' : [0,20], + 'l_ZH_bb' : [0,5], + 'l_ttH_gamgam' : [0,20], + 'l_ttH_WW' : [0,10], + 'l_ttH_ZZ' : [0,50], + 'l_ttH_tautau' : [0,30], + 'l_ttH_bb' : [0,10] + }, + 'D1_rank1' : { + 'mu_WW' : [0,5], + 'mu_ZZ' : [0,5], + 'mu_gamgam' : [0,5], + 'mu_tautau' : [0,5], + 'mu_bb' : [0,5], + 'l_VBF' : [0,5], + 'l_WH' : [0,5], + 'l_ZH' : [0,5], + 'l_ttH' : [0,10] + } +} + + + + +args = sys.argv[1:] +assert(len(args) >= 2) +expr = '--setPhysicsModelParameterRanges=' +expr_list = [] + +for p, vals in RANGES[args[0]].iteritems(): + if p == args[1]: + vmin = vals[1]*-1 + vmax = vals[1] + else: + vmin = vals[0] + vmax = vals[1] + expr_list.append('%s=%g,%g' % (p, vmin, vmax)) + + +sys.stdout.write(expr + ':'.join(expr_list)) + diff --git a/HIG15002/scripts/checkValid.py b/HIG15002/scripts/checkValid.py new file mode 100755 index 00000000000..a76ac18ef2a --- /dev/null +++ b/HIG15002/scripts/checkValid.py @@ -0,0 +1,25 @@ +#!/usr/bin/env python +import sys +import json +import os + +check = ['ValidErrorHi', 'ValidErrorLo'] + +open_list = [] +prefix = '/Users/Andrew/Dropbox/Apps/PlotDrop/plots/22.08.2015' + +for arg in sys.argv[1:]: + print '> %s' % arg + exp = arg.replace('.json', '') + with open(arg) as jsonfile: + js = json.load(jsonfile) + for m, p in js.iteritems(): + for par, vals in p.iteritems(): + print '%-15s %-10.2f %-+10.2f %-+10.2f %i %i' % (par, vals['Val'], vals['ErrorHi'], vals['ErrorLo'], vals['ValidErrorHi'], vals['ValidErrorLo']) + for ch in check: + if ch in vals and not vals[ch]: + #print '%-15s %-20s %-20s False' % (m, par, ch) + open_list.append('scan_obs_exp_%s_%s_%s.pdf' % (exp, m, par)) + +#os.system('open ' + ' '.join(open_list)) +print 'open ' + ' '.join(open_list) diff --git a/HIG15002/scripts/compare_results.py b/HIG15002/scripts/compare_results.py new file mode 100755 index 00000000000..8b0de8d2ed0 --- /dev/null +++ b/HIG15002/scripts/compare_results.py @@ -0,0 +1,28 @@ +#!/usr/bin/env python +import sys +import json +import os + +args = sys.argv[1:] + +old = args[0] +new = args[1] + + +with open(old) as jsonfile: + js_old = json.load(jsonfile) +with open(new) as jsonfile: + js_new = json.load(jsonfile) + +for M in sorted(js_old.keys()): + print '\n%-40s 
| %-15s | %-15s | %-7s | %-10s | %-10s' % (M, 'PAS', 'New', 'New-PAS', '(New-PAS)/Err', 'NewErr/PASErr') + print '%s | %s | %s | %s | %s | %s | %s' % ('-'*40, '-'*15, '-'*5, '-'*7, '-'*7, '-'*13, '-'*13) + if M in js_new.keys(): + for POI in sorted(js_old[M].keys()): + val_old = js_old[M][POI]["Val"] + val_new = js_new[M][POI]["Val"] + err_old = (js_old[M][POI]["ErrorHi"] - js_old[M][POI]["ErrorLo"]) / 2. + err_new = (js_new[M][POI]["ErrorHi"] - js_new[M][POI]["ErrorLo"]) / 2. + frac_shift = (val_new - val_old) / err_old + print '%-40s | %.3f +/- %.3f | %.3f +/- %.3f | %+.3f | %+12.0f%% | %.3f' % (POI, val_old, err_old, val_new, err_new, val_new - val_old, frac_shift*100., err_new/err_old) + diff --git a/HIG15002/scripts/covFit.py b/HIG15002/scripts/covFit.py new file mode 100644 index 00000000000..8bac86a0f19 --- /dev/null +++ b/HIG15002/scripts/covFit.py @@ -0,0 +1,376 @@ +import ROOT +from array import array +import argparse + +parser = argparse.ArgumentParser() +parser.add_argument('-i', help='input file') +parser.add_argument('-l', help='output label') +parser.add_argument('--POI', help='POI to scan') +parser.add_argument('--range', type=float, help='scan range') +parser.add_argument( + '--saveInactivePOI', action='store_true', + help='save other param values in tree') +parser.add_argument('-p', type=int, help='scan points') +parser.add_argument('-t', help='type', default=None) +args = parser.parse_args() + +ROOT.gSystem.Load('libHiggsAnalysisCombinedLimit') + +fin = ROOT.TFile(args.i) +POI = args.POI +type = args.t +if type is None: + type = 'Default' + +w = fin.Get('w') +# w.Print() + +pdf = w.pdf('pdf') +data = w.data('global_obs') + +wfinal = w + + +def doKappa_g(wsp, + k_t='kappa_t', + k_b='kappa_b'): + kappa_g = '1.06*@0*@0 + 0.01*@1*@1 - 0.07*@0*@1' + wsp.factory('expr::kappa_g("sqrt(%s)",%s)' % (kappa_g, ','.join([k_t, k_b]))) + + +def doKappa_gam(wsp, + k_W='kappa_W', + k_t='kappa_t'): + kappa_gam = '1.59*@0*@0 + 0.07*@1*@1 - 0.66*@0*@1' + wsp.factory('expr::kappa_gam("sqrt(%s)",%s)' % + (kappa_gam, ','.join([k_W, k_t]))) + + +def doKappaH(wsp, + k_b='kappa_b', + k_W='kappa_W', + k_tau='kappa_tau', + k_Z='kappa_Z', + k_t='kappa_t', + k_g='kappa_g', + k_gam='kappa_gam'): + kappaHscale = ('0.57*@0*@0 + 0.22*@1*@1 + 0.06*@2*@2 + 0.03*@3*@3 + ' + '0.03*@4*@4 + 0.0016 + 0.0001*@5*@5 + 0.00022*@6*@6 + ' + '0.09*@7*@7 + 0.0023*@8*@8') + wsp.factory('expr::kappa_H("sqrt(%s)",%s)' % ( + kappaHscale, + ','.join([k_b, k_W, k_tau, k_Z, k_t, k_b, k_tau, k_g, k_gam]))) + + +# K1 --> K3 +if type == 'K1toK3': + kV = 'kappa_V' + kF = 'kappa_F' + wnew = ROOT.RooWorkspace() + wnew.factory('%s[1,0.0,2.0]' % kV) + wnew.factory('%s[1,0.0,2.0]' % kF) + wnew.factory('expr::kappa_t("@0",kappa_F)') + wnew.factory('expr::kappa_W("@0",kappa_V)') + wnew.factory('expr::kappa_Z("@0",kappa_V)') + wnew.factory('expr::kappa_b("@0",kappa_F)') + wnew.factory('expr::kappa_tau("@0",kappa_F)') + getattr(wnew, 'import')(pdf, ROOT.RooFit.RecycleConflictNodes()) + pdf = wnew.pdf('pdf') + pdf.Print('tree') + wfinal = wnew + +# K2 --> K3 +if type == 'K2toK3': + kV = 'kappa_V' + kF = 'kappa_F' + wnew = ROOT.RooWorkspace() + wnew.factory('%s[1,0.0,2.0]' % kV) + wnew.factory('%s[1,0.0,2.0]' % kF) + wnew.factory('expr::kappa_t("@0",kappa_F)') + wnew.factory('expr::kappa_W("@0",kappa_V)') + wnew.factory('expr::kappa_Z("@0",kappa_V)') + wnew.factory('expr::kappa_b("@0",kappa_F)') + wnew.factory('expr::kappa_tau("@0",kappa_F)') + doKappa_g(wnew, k_t=kF, k_b=kF) + doKappa_gam(wnew, k_W=kV, k_t=kF) + getattr(wnew, 
'import')(pdf, ROOT.RooFit.RecycleConflictNodes()) + pdf = wnew.pdf('pdf') + pdf.Print('tree') + wfinal = wnew + +# L1 --> K1 +if type == 'L1toK1': + k_t = 'kappa_t' + k_b = 'kappa_b' + k_W = 'kappa_W' + k_Z = 'kappa_Z' + k_tau = 'kappa_tau' + kappas = [k_t, k_b, k_W, k_Z, k_tau] + wnew = ROOT.RooWorkspace() + for k in kappas: + wnew.factory('%s[1,0.0,2.0]' % k) + doKappa_g(wnew) + doKappa_gam(wnew) + doKappaH(wnew) + wnew.function('kappa_H').Print() + wnew.factory('expr::kappa_gZ("@0*@1/@2",kappa_g, kappa_Z, kappa_H)') + wnew.factory('expr::lambda_WZ("@0/@1",kappa_W, kappa_Z)') + wnew.factory('expr::lambda_Zg("@0/@1",kappa_Z, kappa_g)') + wnew.factory('expr::lambda_bZ("@0/@1",kappa_b, kappa_Z)') + wnew.factory('expr::lambda_gamZ("@0/@1",kappa_gam, kappa_Z)') + wnew.factory('expr::lambda_tauZ("@0/@1",kappa_tau, kappa_Z)') + wnew.factory('expr::lambda_tg("@0/@1",kappa_t, kappa_g)') + getattr(wnew, 'import')(pdf, ROOT.RooFit.RecycleConflictNodes()) + pdf = wnew.pdf('pdf') + pdf.Print('tree') + wfinal = wnew + +# L1 --> K2 +if type == 'L1toK2': + k_t = 'kappa_t' + k_b = 'kappa_b' + k_W = 'kappa_W' + k_Z = 'kappa_Z' + k_tau = 'kappa_tau' + k_g = 'kappa_g' + k_gam = 'kappa_gam' + kappas = [k_t, k_b, k_W, k_Z, k_tau, k_g, k_gam] + wnew = ROOT.RooWorkspace() + for k in kappas: + wnew.factory('%s[1,0.0,3.0]' % k) + doKappaH(wnew) + wnew.function('kappa_H').Print() + wnew.factory('expr::kappa_gZ("@0*@1/@2",kappa_g, kappa_Z, kappa_H)') + wnew.factory('expr::lambda_WZ("@0/@1",kappa_W, kappa_Z)') + wnew.factory('expr::lambda_Zg("@0/@1",kappa_Z, kappa_g)') + wnew.factory('expr::lambda_bZ("@0/@1",kappa_b, kappa_Z)') + wnew.factory('expr::lambda_gamZ("@0/@1",kappa_gam, kappa_Z)') + wnew.factory('expr::lambda_tauZ("@0/@1",kappa_tau, kappa_Z)') + wnew.factory('expr::lambda_tg("@0/@1",kappa_t, kappa_g)') + getattr(wnew, 'import')(pdf, ROOT.RooFit.RecycleConflictNodes()) + pdf = wnew.pdf('pdf') + pdf.Print('tree') + wfinal = wnew + +# L1 --> K3 +if type == 'L1toK3': + kV = 'kappa_V' + kF = 'kappa_F' + wnew = ROOT.RooWorkspace() + wnew.factory('%s[1,0.0,2.0]' % kV) + wnew.factory('%s[1,0.0,2.0]' % kF) + doKappa_g(wnew, k_t=kF, k_b=kF) + doKappa_gam(wnew, k_W=kV, k_t=kF) + doKappaH(wnew, k_b=kF, k_W=kV, k_tau=kF, k_Z=kV, k_t=kF) + wnew.function('kappa_H').Print() + wnew.factory('expr::kappa_gZ("@0*@1/@2",kappa_g, kappa_V, kappa_H)') + wnew.factory('expr::lambda_WZ("@0/@1",kappa_V, kappa_V)') + wnew.factory('expr::lambda_Zg("@0/@1",kappa_V, kappa_g)') + wnew.factory('expr::lambda_bZ("@0/@1",kappa_F, kappa_V)') + wnew.factory('expr::lambda_gamZ("@0/@1",kappa_gam, kappa_V)') + wnew.factory('expr::lambda_tauZ("@0/@1",kappa_F, kappa_V)') + wnew.factory('expr::lambda_tg("@0/@1",kappa_F, kappa_g)') + getattr(wnew, 'import')(pdf, ROOT.RooFit.RecycleConflictNodes()) + pdf = wnew.pdf('pdf') + pdf.Print('tree') + wfinal = wnew + +# B1ZZ_TH --> A1_mu +if type == 'B1ZZ_THtoA1_mu': + wnew = ROOT.RooWorkspace() + wnew.factory('mu[1,0.0,2.0]') + wnew.factory('expr::mu_XS_ggF_x_BR_ZZ("@0", mu)') + wnew.factory('mu_XS_VBF_r_XS_ggF[1]') + wnew.factory('mu_XS_WH_r_XS_ggF[1]') + wnew.factory('mu_XS_ZH_r_XS_ggF[1]') + wnew.factory('mu_XS_ttH_r_XS_ggF[1]') + wnew.factory('mu_BR_WW_r_BR_ZZ[1]') + wnew.factory('mu_BR_bb_r_BR_ZZ[1]') + wnew.factory('mu_BR_gamgam_r_BR_ZZ[1]') + wnew.factory('mu_BR_tautau_r_BR_ZZ[1]') + getattr(wnew, 'import')(pdf, ROOT.RooFit.RecycleConflictNodes()) + pdf = wnew.pdf('pdf') + pdf.Print('tree') + wfinal = wnew + +# B1ZZ_TH --> A1_5D +if type == 'B1ZZ_THtoA1_5D': + wnew = ROOT.RooWorkspace() + for P in 
['mu_BR_WW', 'mu_BR_ZZ', 'mu_BR_bb', 'mu_BR_gamgam', 'mu_BR_tautau']: + wnew.factory('%s[1,-10.0,10.0]' % P) + wnew.factory('expr::mu_XS_ggF_x_BR_ZZ("@0", mu_BR_ZZ)') + wnew.factory('mu_XS_VBF_r_XS_ggF[1]') + wnew.factory('mu_XS_WH_r_XS_ggF[1]') + wnew.factory('mu_XS_ZH_r_XS_ggF[1]') + wnew.factory('mu_XS_ttH_r_XS_ggF[1]') + wnew.factory('expr::mu_BR_WW_r_BR_ZZ("@0/@1", mu_BR_WW, mu_BR_ZZ)') + wnew.factory('expr::mu_BR_bb_r_BR_ZZ("@0/@1", mu_BR_bb, mu_BR_ZZ)') + wnew.factory('expr::mu_BR_gamgam_r_BR_ZZ("@0/@1", mu_BR_gamgam, mu_BR_ZZ)') + wnew.factory('expr::mu_BR_tautau_r_BR_ZZ("@0/@1", mu_BR_tautau, mu_BR_ZZ)') + getattr(wnew, 'import')(pdf, ROOT.RooFit.RecycleConflictNodes()) + pdf = wnew.pdf('pdf') + pdf.Print('tree') + wfinal = wnew + +# B1ZZ_TH --> A1_5P +if type == 'B1ZZ_THtoA1_5P': + wnew = ROOT.RooWorkspace() + for P in ['mu_XS_ggFbbH', 'mu_XS_VBF', 'mu_XS_WH', 'mu_XS_ZH', 'mu_XS_ttHtH']: + wnew.factory('%s[1,-10.0,10.0]' % P) + wnew.factory('expr::mu_XS_ggF_x_BR_ZZ("@0", mu_XS_ggFbbH)') + wnew.factory('expr::mu_XS_VBF_r_XS_ggF("@0/@1", mu_XS_VBF, mu_XS_ggFbbH)') + wnew.factory('expr::mu_XS_WH_r_XS_ggF("@0/@1", mu_XS_WH, mu_XS_ggFbbH)') + wnew.factory('expr::mu_XS_ZH_r_XS_ggF("@0/@1", mu_XS_ZH, mu_XS_ggFbbH)') + wnew.factory('expr::mu_XS_ttH_r_XS_ggF("@0/@1", mu_XS_ttHtH, mu_XS_ggFbbH)') + wnew.factory('mu_BR_WW_r_BR_ZZ[1]') + wnew.factory('mu_BR_bb_r_BR_ZZ[1]') + wnew.factory('mu_BR_gamgam_r_BR_ZZ[1]') + wnew.factory('mu_BR_tautau_r_BR_ZZ[1]') + getattr(wnew, 'import')(pdf, ROOT.RooFit.RecycleConflictNodes()) + pdf = wnew.pdf('pdf') + pdf.Print('tree') + wfinal = wnew + +# B1ZZ_TH --> B1WW_TH +if type == 'B1ZZ_THtoB1WW_TH': + wnew = ROOT.RooWorkspace() + for P in ['mu_XS_ggF_x_BR_WW', 'mu_XS_VBF_r_XS_ggF', 'mu_XS_ttH_r_XS_ggF', 'mu_XS_WH_r_XS_ggF', 'mu_XS_ZH_r_XS_ggF', 'mu_BR_tautau_r_BR_WW', 'mu_BR_bb_r_BR_WW', 'mu_BR_gamgam_r_BR_WW', 'mu_BR_ZZ_r_BR_WW']: + wnew.factory('%s[1,-10.0,10.0]' % P) + wnew.factory( + 'expr::mu_XS_ggF_x_BR_ZZ("@0*@1", mu_XS_ggF_x_BR_WW, mu_BR_ZZ_r_BR_WW)') + wnew.factory('expr::mu_BR_WW_r_BR_ZZ("1/@0", mu_BR_ZZ_r_BR_WW)') + wnew.factory( + 'expr::mu_BR_bb_r_BR_ZZ("@0/@1", mu_BR_bb_r_BR_WW, mu_BR_ZZ_r_BR_WW)') + wnew.factory( + 'expr::mu_BR_gamgam_r_BR_ZZ("@0/@1", mu_BR_gamgam_r_BR_WW, mu_BR_ZZ_r_BR_WW)') + wnew.factory( + 'expr::mu_BR_tautau_r_BR_ZZ("@0/@1", mu_BR_tautau_r_BR_WW, mu_BR_ZZ_r_BR_WW)') + getattr(wnew, 'import')(pdf, ROOT.RooFit.RecycleConflictNodes()) + pdf = wnew.pdf('pdf') + pdf.Print('tree') + wfinal = wnew + +# A1_5PD --> A1_mu +if type == 'A1_5PDtoA1_mu': + wnew = ROOT.RooWorkspace() + wnew.factory('mu[1,0.0,2.0]') + for P in ['ggFbbH', 'VBF', 'WH', 'ZH', 'ttHtH']: + for D in ['WW', 'ZZ', 'gamgam', 'tautau', 'bb']: + wnew.factory('expr::mu_XS_%s_BR_%s("@0", mu)' % (P, D)) + getattr(wnew, 'import')(pdf, ROOT.RooFit.RecycleConflictNodes()) + pdf = wnew.pdf('pdf') + pdf.Print('tree') + wfinal = wnew + +# A1_5PD --> A1_5P +if type == 'A1_5PDtoA1_5P': + wnew = ROOT.RooWorkspace() + for P in ['mu_XS_ggFbbH', 'mu_XS_VBF', 'mu_XS_WH', 'mu_XS_ZH', 'mu_XS_ttHtH']: + wnew.factory('%s[1,-10.0,10.0]' % P) + for P in ['ggFbbH', 'VBF', 'WH', 'ZH', 'ttHtH']: + for D in ['WW', 'ZZ', 'gamgam', 'tautau', 'bb']: + wnew.factory('expr::mu_XS_%s_BR_%s("@0", mu_XS_%s)' % (P, D, P)) + getattr(wnew, 'import')(pdf, ROOT.RooFit.RecycleConflictNodes()) + pdf = wnew.pdf('pdf') + pdf.Print('tree') + wfinal = wnew + +if type == 'A1_5PDtoA1_4P': + wnew = ROOT.RooWorkspace() + for P in ['mu_XS_ggFbbH', 'mu_XS_VBF', 'mu_XS_VH', 'mu_XS_ttHtH']: + 
wnew.factory('%s[1,-10.0,10.0]' % P) + for P in ['ggFbbH', 'VBF', 'WH', 'ZH', 'ttHtH']: + for D in ['WW', 'ZZ', 'gamgam', 'tautau', 'bb']: + PX = P + if P in ['WH', 'ZH']: + PX = 'VH' + wnew.factory('expr::mu_XS_%s_BR_%s("@0", mu_XS_%s)' % (P, D, PX)) + getattr(wnew, 'import')(pdf, ROOT.RooFit.RecycleConflictNodes()) + pdf = wnew.pdf('pdf') + pdf.Print('tree') + wfinal = wnew + +# A1_5PD --> A1_5D +if type == 'A1_5PDtoA1_5D': + wnew = ROOT.RooWorkspace() + for D in ['mu_BR_WW', 'mu_BR_ZZ', 'mu_BR_bb', 'mu_BR_gamgam', 'mu_BR_tautau']: + wnew.factory('%s[1,-10.0,10.0]' % D) + for P in ['ggFbbH', 'VBF', 'WH', 'ZH', 'ttHtH']: + for D in ['WW', 'tautau', 'ZZ', 'bb', 'gamgam']: + wnew.factory('expr::mu_XS_%s_BR_%s("@0", mu_BR_%s)' % (P, D, D)) + getattr(wnew, 'import')(pdf, ROOT.RooFit.RecycleConflictNodes()) + pdf = wnew.pdf('pdf') + pdf.Print('tree') + wfinal = wnew + +nll = pdf.createNLL(data) + +minim = ROOT.RooMinimizer(nll) +minim.setVerbose(False) +minim.minimize('Minuit2', 'migrad') +minim.setPrintLevel(-1) +wfinal.allFunctions().Print('v') + +nll0 = nll.getVal() + +param = wfinal.var(POI) +param.setConstant() + +# Figure out the other floating parameters +params = pdf.getParameters(data) +param_it = params.createIterator() +var = param_it.Next() +float_params = set() +snapshot = {} +while var: + if not var.isConstant(): + float_params.add(var.GetName()) + snapshot[var.GetName()] = var.getVal() + var = param_it.Next() + +fit_range = args.range +points = args.p + +r_min = max(param.getVal() - fit_range, param.getMin()) +r_max = min(param.getVal() + fit_range, param.getMax()) +width = (r_max - r_min) / points + +r = r_min + 0.5 * width + +fout = ROOT.TFile('scan_covariance_%s_%s_%s.root' % + (args.l, type, POI), 'RECREATE') +tout = ROOT.TTree('limit', 'limit') + +a_r = array('f', [param.getVal()]) +a_deltaNLL = array('f', [0]) +a_quantileExpected = array('f', [1]) + +float_arrs = [] +for var in float_params: + float_arrs.append(array('f', [0.])) + tout.Branch(var, float_arrs[-1], '%s/f' % var) + +tout.Branch(POI, a_r, '%s/f' % POI) +tout.Branch('deltaNLL', a_deltaNLL, 'deltaNLL/f') +tout.Branch('quantileExpected', a_quantileExpected, 'quantileExpected/f') + +for i, var in enumerate(float_params): + float_arrs[i][0] = wfinal.var(var).getVal() +tout.Fill() + +for p in xrange(points): + for key,val in snapshot.iteritems(): + wfinal.var(key).setVal(val) + param.setVal(r) + a_r[0] = r + minim.minimize('Minuit2', 'migrad') + for i, var in enumerate(float_params): + float_arrs[i][0] = wfinal.var(var).getVal() + nllf = nll.getVal() + print '%s = %f; nll0 = %f; nll = %f, deltaNLL = %f' % (POI, r, nll0, nllf, nllf - nll0) + a_deltaNLL[0] = nllf - nll0 + tout.Fill() + r += width + +tout.Write() +fout.Close() diff --git a/HIG15002/scripts/fit_ranges.py b/HIG15002/scripts/fit_ranges.py new file mode 100755 index 00000000000..0efa461eb72 --- /dev/null +++ b/HIG15002/scripts/fit_ranges.py @@ -0,0 +1,321 @@ +#!/usr/bin/env python +import sys + +RANGES = { + 'A1_mu' : { + 'mu' : 0.111 + }, + 'A1_4P' : { + 'mu_XS_ggFbbH' : 0.163, + 'mu_XS_VBF' : 0.252, + 'mu_XS_VH' : 0.396, + 'mu_XS_ttHtH' : 0.678 + }, + 'A1_5P' : { + 'mu_XS_ggFbbH' : 0.163, + 'mu_XS_VBF' : 0.252, + 'mu_XS_WH' : 0.396, + 'mu_XS_ZH' : 0.387, + 'mu_XS_ttHtH' : 0.678 + }, + 'A1_5D' : { + 'mu_BR_gamgam' :0.197, + 'mu_BR_ZZ' :0.271, + 'mu_BR_WW' :0.187, + 'mu_BR_tautau' :0.244, + 'mu_BR_bb' :0.301 + }, + 'A1_6D' : { + 'mu_BR_gamgam' :0.197, + 'mu_BR_ZZ' :0.271, + 'mu_BR_WW' :0.187, + 'mu_BR_tautau' :0.244, + 'mu_BR_bb' :0.301, + 'mu_BR_mumu' 
:2.000 + }, + 'A1_5PD_cms' : { + 'mu_XS_ggFbbH_BR_WW': 0.5, + 'mu_XS_VBF_BR_WW': 1.0, + 'mu_XS_WH_BR_WW': 2.5, + 'mu_XS_ZH_BR_WW': 5.0, + 'mu_XS_ttHtH_BR_WW': 5.0, + 'mu_XS_ggFbbH_BR_ZZ': 0.5, + 'mu_XS_VBF_BR_ZZ': 1.0, + 'mu_XS_WH_BR_ZZ': 5.0, + 'mu_XS_ZH_BR_ZZ': 50.0, + 'mu_XS_ttHtH_BR_ZZ': 50.0, + 'mu_XS_WH_BR_bb': 1.0, + 'mu_XS_ZH_BR_bb': 1.0, + 'mu_XS_ttHtH_BR_bb': 2.5, + 'mu_XS_ggFbbH_BR_gamgam': 0.5, + 'mu_XS_VBF_BR_gamgam': 1.0, + 'mu_XS_WH_BR_gamgam': 2.5, + 'mu_XS_ZH_BR_gamgam': 5.0, + 'mu_XS_ttHtH_BR_gamgam': 10.0, + 'mu_XS_ggFbbH_BR_tautau': 1.0, + 'mu_XS_VBF_BR_tautau': 1.0, + 'mu_XS_WH_BR_tautau': 2.5, + 'mu_XS_ZH_BR_tautau': 2.5, + 'mu_XS_ttHtH_BR_tautau': 10.0 + }, + 'A1_5PD_lhc' : { + 'mu_XS_ggFbbH_BR_WW': 0.5, + 'mu_XS_VBF_BR_WW': 1.0, + 'mu_XS_WH_BR_WW': 2.5, + 'mu_XS_ZH_BR_WW': 5.0, + 'mu_XS_ttHtH_BR_WW': 5.0, + 'mu_XS_ggFbbH_BR_ZZ': 0.5, + 'mu_XS_VBF_BR_ZZ': 1.0, + 'mu_XS_WH_BR_ZZ': 5.0, + 'mu_XS_ZH_BR_ZZ': 10.0, + 'mu_XS_ttHtH_BR_ZZ': 10.0, + 'mu_XS_WH_BR_bb': 1.0, + 'mu_XS_ZH_BR_bb': 1.0, + 'mu_XS_ttHtH_BR_bb': 2.5, + 'mu_XS_ggFbbH_BR_gamgam': 0.5, + 'mu_XS_VBF_BR_gamgam': 1.0, + 'mu_XS_WH_BR_gamgam': 2.5, + 'mu_XS_ZH_BR_gamgam': 5.0, + 'mu_XS_ttHtH_BR_gamgam': 1.5, + 'mu_XS_ggFbbH_BR_tautau': 1.0, + 'mu_XS_VBF_BR_tautau': 1.0, + 'mu_XS_WH_BR_tautau': 2.5, + 'mu_XS_ZH_BR_tautau': 2.5, + 'mu_XS_ttHtH_BR_tautau': 10.0 + }, + 'A1_muVmuF' : { + 'mu_V_gamgam' :0.443, + 'mu_V_ZZ' :1.367, + 'mu_V_WW' :0.412, + 'mu_V_tautau' :0.375, + 'mu_V_bb' :0.300, + 'mu_F_gamgam' :0.277, + 'mu_F_ZZ' :0.380, + 'mu_F_WW' :0.232, + 'mu_F_tautau' :0.575, + 'mu_F_bb' :0.937 + }, + 'A2' : { + 'mu_V_r_F' :0.356, + 'mu_F_gamgam' :0.222, + 'mu_F_ZZ' :0.269, + 'mu_F_WW' :0.197, + 'mu_F_tautau' :0.317, + 'mu_F_bb' :0.370 + }, + 'B1WW' : { + 'mu_XS_ggF_x_BR_WW' :0.194, + 'mu_XS_VBF_r_XS_ggF' :0.600, + 'mu_XS_WH_r_XS_ggF' :0.822, + 'mu_XS_ZH_r_XS_ggF' :2.200, + 'mu_XS_ttH_r_XS_ggF' :1.800, + 'mu_BR_ZZ_r_BR_WW' :0.297, + 'mu_BR_gamgam_r_BR_WW' :0.218, + 'mu_BR_tautau_r_BR_WW' :0.273, + 'mu_BR_bb_r_BR_WW' :0.300 + }, + 'B1ZZ' : { + 'mu_XS_ggF_x_BR_ZZ' :0.255, + 'mu_BR_gamgam_r_BR_ZZ' :0.222, + 'mu_BR_WW_r_BR_ZZ' :0.208, + 'mu_BR_tautau_r_BR_ZZ' :0.262, + 'mu_BR_bb_r_BR_ZZ' :0.300, + 'mu_XS_VBF_r_XS_ggF' :0.600, + 'mu_XS_WH_r_XS_ggF' :0.799, + 'mu_XS_ZH_r_XS_ggF' :2.200, + 'mu_XS_ttH_r_XS_ggF' :1.800 + }, + 'B1WW_P78' : { + 'mu_XS_ggF_x_BR_WW' :0.194, + 'mu_XS_VBF_r_XS_ggF' :0.600, + 'mu_XS_WH_r_XS_ggF' :0.822, + 'mu_XS_ZH_r_XS_ggF' :2.200, + 'mu_XS_ttH_r_XS_ggF' :1.800, + 'mu_BR_ZZ_r_BR_WW' :0.297, + 'mu_BR_gamgam_r_BR_WW' :0.218, + 'mu_BR_tautau_r_BR_WW' :0.273, + 'mu_BR_bb_r_BR_WW' :0.248, + 'mu_XS7_r_XS8_VBF' :1.000, + 'mu_XS7_r_XS8_WH' :2.000, + 'mu_XS7_r_XS8_ZH' :2.000, + 'mu_XS7_r_XS8_ggF' :1.000, + 'mu_XS7_r_XS8_ttH' :2.000 + }, + 'B1ZZ_P78' : { + 'mu_XS_ggF_x_BR_ZZ' :0.255, + 'mu_BR_gamgam_r_BR_ZZ' :0.222, + 'mu_BR_WW_r_BR_ZZ' :0.208, + 'mu_BR_tautau_r_BR_ZZ' :0.262, + 'mu_BR_bb_r_BR_ZZ' :0.210, + 'mu_XS_VBF_r_XS_ggF' :0.600, + 'mu_XS_WH_r_XS_ggF' :0.799, + 'mu_XS_ZH_r_XS_ggF' :2.200, + 'mu_XS_ttH_r_XS_ggF' :1.800, + 'mu_XS7_r_XS8_VBF' :1.000, + 'mu_XS7_r_XS8_WH' :2.000, + 'mu_XS7_r_XS8_ZH' :2.000, + 'mu_XS7_r_XS8_ggF' :1.000, + 'mu_XS7_r_XS8_ttH' :2.000 + }, + 'B1WW_TH' : { + 'mu_XS_ggF_x_BR_WW' :0.194, + 'mu_XS_VBF_r_XS_ggF' :0.600, + 'mu_XS_WH_r_XS_ggF' :0.822, + 'mu_XS_ZH_r_XS_ggF' :2.200, + 'mu_XS_ttH_r_XS_ggF' :1.800, + 'mu_BR_ZZ_r_BR_WW' :0.297, + 'mu_BR_gamgam_r_BR_WW' :0.218, + 'mu_BR_tautau_r_BR_WW' :0.273, + 'mu_BR_bb_r_BR_WW' :0.300 + }, + 'B1ZZ_TH' : { + 'mu_XS_ggF_x_BR_ZZ' :0.255, 
+ 'mu_BR_gamgam_r_BR_ZZ' :0.222, + 'mu_BR_WW_r_BR_ZZ' :0.208, + 'mu_BR_tautau_r_BR_ZZ' :0.262, + 'mu_BR_bb_r_BR_ZZ' :0.300, + 'mu_XS_VBF_r_XS_ggF' :0.600, + 'mu_XS_WH_r_XS_ggF' :0.799, + 'mu_XS_ZH_r_XS_ggF' :2.200, + 'mu_XS_ttH_r_XS_ggF' :1.800 + }, + 'B2' : { + 'mu_XS_ggF_x_BR_WW' :0.155, + 'mu_XS_VBF_x_BR_tautau' :0.261, + 'mu_XS_WH_r_XS_VBF' :0.600, + 'mu_XS_ZH_r_XS_WH' :8.000, + 'mu_XS_ttH_r_XS_ggF' :1.800, + 'mu_BR_ZZ_r_BR_WW' :0.280, + 'mu_BR_gamgam_r_BR_WW' :0.208, + 'mu_BR_tautau_r_BR_WW' :0.600, + 'mu_BR_bb_r_BR_tautau' :0.296 + }, + 'D1_general' : { + 'l_VBF_WW' :1.0, + 'l_VBF_ZZ' :1.0, + 'l_VBF_gamgam' :1.0, + 'l_VBF_tautau' :1.5, + 'l_WH_WW' :2.0, + 'l_WH_ZZ' :10.000, + 'l_WH_bb' :2.0, + 'l_WH_gamgam' :2.0, + 'l_WH_tautau' :2.0, + 'l_ZH_WW' :5.0, + 'l_ZH_ZZ' :10.0, + 'l_ZH_bb' :1.0, + 'l_ZH_gamgam' :5.0, + 'l_ZH_tautau' :7.0, + 'l_ttH_WW' :2.0, + 'l_ttH_ZZ' :10.0, + 'l_ttH_bb' :2.0, + 'l_ttH_gamgam' :2.0, + 'l_ttH_tautau' :2.0, + 'mu_WW' :0.2, + 'mu_ZZ' :0.2, + 'mu_bb' :0.2, + 'mu_gamgam' :0.2, + 'mu_tautau' :0.2 + }, + 'D1_rank1' : { + 'mu_WW' : 0.2, + 'mu_ZZ' : 0.2, + 'mu_gamgam' : 0.2, + 'mu_tautau' : 0.2, + 'mu_bb' : 0.2, + 'l_VBF' : 0.5, + 'l_WH' : 1.0, + 'l_ZH' : 1.0, + 'l_ttH' : 2.0 + }, + 'K1' : { + 'kappa_Z' :0.104, + 'kappa_W' :0.094, + 'kappa_t' :0.143, + 'kappa_tau' :0.139, + 'kappa_b' :0.209 + }, + 'K1_mm' : { + 'kappa_Z' :0.104, + 'kappa_W' :0.094, + 'kappa_t' :0.143, + 'kappa_tau' :0.139, + 'kappa_b' :0.209, + 'kappa_mu' :1.500 + }, + 'K2' : { + 'kappa_Z' :0.109, + 'kappa_W' :0.098, + 'kappa_t' :0.226, + 'kappa_tau' :0.124, + 'kappa_b' :0.187, + 'kappa_g' :0.115, + 'kappa_gam' :0.107 + }, + 'K2_BSM' : { + 'kappa_Z' :0.109, + 'kappa_W' :0.098, + 'kappa_t' :0.226, + 'kappa_tau' :0.124, + 'kappa_b' :0.187, + 'kappa_g' :0.115, + 'kappa_gam' :0.107, + 'BRinv' :0.100 + }, + 'K2_2D' : { + 'kappa_g' :0.115, + 'kappa_gam' :0.107 + }, + 'K3' : { + 'kappa_V' :0.053, + 'kappa_F' :0.109 + }, + 'K3_5D' : { + 'kappa_F_WW' :0.206628, + 'kappa_F_ZZ' :0.546678, + 'kappa_F_bb' :0.125614, + 'kappa_F_gamgam' :0.186866, + 'kappa_F_tautau' :0.142708, + 'kappa_V_WW' :0.0760111, + 'kappa_V_ZZ' :0.152273, + 'kappa_V_bb' :4.34253, + 'kappa_V_gamgam' :0.0698755, + 'kappa_V_tautau' :0.296897 + }, + 'L1' : { + 'kappa_gZ' :0.114, + 'lambda_WZ' :0.097, + 'lambda_Zg' :0.223, + 'lambda_bZ' :0.162, + 'lambda_gamZ' :0.112, + 'lambda_tauZ' :0.137, + 'lambda_tg' :0.313 + }, + 'L2_ldu' : { + 'lambda_du' :0.125, + 'lambda_Vu' :0.133, + 'kappa_uu' :0.221 + }, + 'L2_lfv' : { + 'lambda_FV' :0.096, + 'kappa_VV' :0.098 + }, + 'L2_llq' : { + 'lambda_lq' :0.155, + 'lambda_Vq' :0.137, + 'kappa_qq' :0.166 + }, + 'M1' : { + 'M' :10.0, + 'eps' :0.03 + } +} + +assert(len(sys.argv) >= 3) + +multiplier = 1. 
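# Worked example (hypothetical invocation, grounded in the RANGES table
# above): ./fit_ranges.py K3 kappa_V 2 looks up
# RANGES['K3']['kappa_V'] = 0.053, applies the optional multiplier handled
# just below, and writes 0.106 to stdout for use in shell command
# substitution.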
+ +if len(sys.argv) >= 4: + multiplier = float(sys.argv[3]) + +sys.stdout.write(str(RANGES[sys.argv[1]][sys.argv[2]] * multiplier)) + diff --git a/HIG15002/scripts/generic2D.py b/HIG15002/scripts/generic2D.py new file mode 100755 index 00000000000..dd2ee1aa234 --- /dev/null +++ b/HIG15002/scripts/generic2D.py @@ -0,0 +1,230 @@ +#!/usr/bin/env python +import sys +import ROOT +import math +from functools import partial +import CombineHarvester.CombineTools.plotting as plot +import json +import argparse +import os.path + +def read(scan, param_x, param_y, file): + # print files + goodfiles = [f for f in [file] if plot.TFileIsGood(f)] + limit = plot.MakeTChain(goodfiles, 'limit') + graph = plot.TGraph2DFromTree(limit, param_x, param_y, '2*deltaNLL', 'quantileExpected > -0.5 && deltaNLL > 0') + best = plot.TGraphFromTree(limit, param_x, param_y, 'quantileExpected > -0.5 && deltaNLL == 0') + plot.RemoveGraphXDuplicates(best) + assert(best.GetN() == 1) + graph.SetName(scan) + best.SetName(scan+'_best') + # graph.Print() + return (graph, best) + +def fillTH2(hist2d, graph): + for x in xrange(1, hist2d.GetNbinsX()+1): + for y in xrange(1, hist2d.GetNbinsY()+1): + xc = hist2d.GetXaxis().GetBinCenter(x) + yc = hist2d.GetYaxis().GetBinCenter(y) + val = graph.Interpolate(xc, yc) + hist2d.SetBinContent(x, y, val) + +def fixZeros(hist2d): + for x in xrange(1, hist2d.GetNbinsX()+1): + for y in xrange(1, hist2d.GetNbinsY()+1): + xc = hist2d.GetXaxis().GetBinCenter(x) + yc = hist2d.GetYaxis().GetBinCenter(y) + if hist2d.GetBinContent(x, y) == 0: + # print 'Bin at (%f,%f) is zero!' % (xc, yc) + hist2d.SetBinContent(x, y, 1000) + +def makeHist(name, bins, graph2d): + len_x = graph2d.GetXmax() - graph2d.GetXmin() + binw_x = (len_x * 0.5 / (float(bins) - 1.)) - 1E-5 + len_y = graph2d.GetYmax() - graph2d.GetYmin() + binw_y = (len_y * 0.5 / (float(bins) - 1.)) - 1E-5 + hist = ROOT.TH2F(name, '', bins, graph2d.GetXmin()-binw_x, graph2d.GetXmax()+binw_x, bins, graph2d.GetYmin()-binw_y, graph2d.GetYmax()+binw_y) + return hist + +ROOT.PyConfig.IgnoreCommandLineOptions = True +ROOT.gROOT.SetBatch(ROOT.kTRUE) + +plot.ModTDRStyle(l=0.13, b=0.10, r=0.19) +plot.SetBirdPalette() + +# ROOT.gStyle.SetNdivisions(510, "XYZ") +ROOT.gStyle.SetNdivisions(506, "Y") +ROOT.gStyle.SetMarkerSize(1.0) +ROOT.gStyle.SetPadTickX(1) +ROOT.gStyle.SetPadTickY(1) + +parser = argparse.ArgumentParser() +parser.add_argument('--output', '-o', help='output name') +parser.add_argument('--file', '-f', help='named input scans') +parser.add_argument('--multi', type=int, default=1, help='scale number of bins') +parser.add_argument('--thin', type=int, default=1, help='thin graph points') +parser.add_argument('--order', default='b,tau,Z,gam,W,comb') +parser.add_argument('--x-range', default=None) +parser.add_argument('--x-axis', default='#kappa_{V}') +parser.add_argument('--y-axis', default='#kappa_{F}') +parser.add_argument('--axis-hist', default=None) +parser.add_argument('--layout', type=int, default=1) +parser.add_argument('--sm-point', default='1,1') +parser.add_argument('--translate', help='json file with POI name translation') +parser.add_argument('--title-right', default='', help='title text') + + + +args = parser.parse_args() + +if args.translate is not None: + with open(args.translate) as jsonfile: + name_translate = json.load(jsonfile) + +infile = args.file + +order = args.order.split(',') + +graph_test = read('test', args.x_axis, args.y_axis, infile)[0] + + +if args.axis_hist is not None: + hargs = args.axis_hist.split(',') + axis = 
ROOT.TH2F('hist2d', '', int(hargs[0]), float(hargs[1]), float(hargs[2]), int(hargs[3]), float(hargs[4]), float(hargs[5])) +else: + axis = makeHist('hist2d', 40 * args.multi, graph_test) + +# axis = None +x_axis = args.x_axis +if x_axis in name_translate: + x_axis = name_translate[x_axis] +y_axis = args.y_axis +if y_axis in name_translate: + y_axis = name_translate[y_axis] +axis.GetXaxis().SetTitle(x_axis) +axis.GetYaxis().SetTitle(y_axis) + + +canv = ROOT.TCanvas(args.output, args.output) +pads = plot.OnePad() +pads[0].SetGridx(False) +pads[0].SetGridy(False) +pads[0].Draw() +axis.Draw() +if args.x_range is not None: + xranges = args.x_range.split(',') + axis.GetXaxis().SetRangeUser(float(xranges[0]), float(xranges[1])) + +if args.layout == 1: + legend = ROOT.TLegend(0.14, 0.53, 0.35, 0.74, '', 'NBNDC') +if args.layout == 2: + legend = ROOT.TLegend(0.15, 0.11, 0.46, 0.27, '', 'NBNDC') + legend.SetNColumns(2) +if args.layout == 3: + legend = ROOT.TLegend(0.14, 0.53, 0.35, 0.74, '', 'NBNDC') + + +graphs = {} +bestfits = {} +hists = {} +conts68 = {} +conts95 = {} + +outfile = ROOT.TFile(args.output+'.root', 'RECREATE') +order = ['default'] +for scan in order: + graphs[scan], bestfits[scan] = read(scan, args.x_axis, args.y_axis, infile) + outfile.WriteTObject(graphs[scan], scan+'_graph') + outfile.WriteTObject(bestfits[scan]) + hists[scan] = makeHist(scan+'_hist', 40 * args.multi, graph_test) + fillTH2(hists[scan], graphs[scan]) + outfile.WriteTObject(hists[scan], hists[scan].GetName()+'_input') + fixZeros(hists[scan]) + hists[scan].GetZaxis().SetTitle('-2 #Delta ln #Lambda(%s,%s)' %( x_axis, y_axis)) + # hists[scan].GetZaxis().SetTitleOffset(0) + hists[scan].Draw('COLZSAME') + hists[scan].SetMinimum(0) + hists[scan].SetMaximum(10) + outfile.WriteTObject(hists[scan], hists[scan].GetName()+'_processed') + conts68[scan] = plot.contourFromTH2(hists[scan], ROOT.Math.chisquared_quantile_c(1-0.68, 2)) + conts95[scan] = plot.contourFromTH2(hists[scan], ROOT.Math.chisquared_quantile_c(1-0.95, 2)) + for i, c in enumerate(conts68[scan]): + if args.thin > 1: + newgr = ROOT.TGraph(c.GetN() / args.thin) + needLast = True + for a,p in enumerate(range(0, c.GetN(), args.thin)): + if p == c.GetN()-1: needLast = False + newgr.SetPoint(a, c.GetX()[p], c.GetY()[p]) + if needLast: newgr.SetPoint(newgr.GetN(), c.GetX()[c.GetN()-1], c.GetY()[c.GetN()-1]) + conts68[scan][i] = newgr + c = conts68[scan][i] + c.SetLineWidth(3) + pads[0].cd() + c.Draw('L SAME') + outfile.WriteTObject(c, 'graph68_%s_%i' % (scan, i)) + if scan in conts95: + for i, c in enumerate(conts95[scan]): + c.SetLineWidth(3) + c.SetLineStyle(7) + pads[0].cd() + outfile.WriteTObject(c, 'graph95_%s_%i' % (scan, i)) + legend.AddEntry(conts68[scan][0], 'Scan', 'F') +for scan in order: + for i, c in enumerate(conts68[scan]): + c.Draw('L SAME') + if scan in conts95: + for i, c in enumerate(conts95[scan]): + c.Draw('L SAME') + +for scan in order: + bestfits[scan].SetMarkerStyle(34) + bestfits[scan].SetMarkerSize(1.2) + if scan == 'comb': bestfits[scan].SetMarkerSize(1.5) + bestfits[scan].Draw('PSAME') + +sm_point = ROOT.TGraph() +sm_point.SetPoint(0, *[float(x) for x in args.sm_point.split(',')]) +# sm_point.SetMarkerColor(ROOT.TColor.GetColor(249, 71, 1)) +sm_point.SetMarkerColor(ROOT.kRed) +sm_point.SetMarkerStyle(33) +sm_point.SetMarkerSize(2) +sm_point.Draw('PSAME') +# sm_point.SetFillColor(ROOT.TColor.GetColor(248, 255, 1)) + +# legend.Draw() + +if args.layout == 1: + legend2 = ROOT.TLegend(0.14, 0.13, 0.79, 0.17, '', 'NBNDC') + legend2.SetNColumns(4) 
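+# Note on the contour levels used above: chisquared_quantile_c(1-0.68, 2) and
+# chisquared_quantile_c(1-0.95, 2) are roughly 2.28 and 5.99, the 2*deltaNLL
+# thresholds for 68% and 95% CL regions when two parameters are fitted.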
+if args.layout == 2: + legend2 = ROOT.TLegend(0.14, 0.53, 0.34, 0.74, '', 'NBNDC') +if args.layout == 3: + legend2 = ROOT.TLegend(0.57, 0.7, 0.82, 0.9, '', 'NBNDC') + # legend2.SetNColumns(2) +legend2.AddEntry(conts68['default'][0], '68% CL', 'L') +legend2.AddEntry(conts95['default'][0], '95% CL', 'L') +legend2.AddEntry(bestfits['default'], 'Best fit', 'P') +legend2.AddEntry(sm_point, 'SM expected', 'P') +legend2.SetMargin(0.4) +legend2.Draw() + +box = ROOT.TPave(0.15, 0.82, 0.41, 0.92, 0, 'NBNDC') +# box.Draw() +plot.DrawCMSLogo(pads[0], '#it{ATLAS}#bf{ and }CMS', '#it{LHC Run 1}', 11, 0.025, 0.035, 1.1, extraText2='#it{Internal}') +plot.DrawTitle(pads[0], args.title_right, 3) + +axis.SetMinimum(0) +axis.SetMaximum(6) + +axis.GetZaxis().SetTitleOffset(0) + +pads[0].RedrawAxis() +# plot.DrawCMSLogo(pads[0], '#it{ATLAS}#bf{+}CMS', '#it{LHC Run 1}', 11, 0.02, 0.035, 1.1, extraText2='#it{Internal}') +# pads[0].RedrawAxis() +canv.Print('.pdf') +canv.Print('.png') +outfile.Close() + + + + diff --git a/HIG15002/scripts/getPOIs.sh b/HIG15002/scripts/getPOIs.sh new file mode 100755 index 00000000000..3cf1412dbe0 --- /dev/null +++ b/HIG15002/scripts/getPOIs.sh @@ -0,0 +1,146 @@ +#!/bin/bash +WSP=$1; +POIsF="" +if [[ "$1" == "" ]]; then echo "usage: $0 model"; exit 1; fi; + BRU="param_alphaS,param_mB,param_mC,param_mt,HiggsDecayWidthTHU_hqq,HiggsDecayWidthTHU_hvv,HiggsDecayWidthTHU_hll,HiggsDecayWidthTHU_hgg,HiggsDecayWidthTHU_hzg,HiggsDecayWidthTHU_hgluglu" + THEORY="QCDscale_ggH,QCDscale_qqH,QCDscale_VH,QCDscale_ttH,pdf_Higgs_gg,pdf_Higgs_qq,pdf_Higgs_qg,pdf_Higgs_ttH,${BRU}" + B1WW_POIS="mu_XS_ggF_x_BR_WW mu_XS_VBF_r_XS_ggF mu_XS_ttH_r_XS_ggF mu_XS_WH_r_XS_ggF mu_XS_ZH_r_XS_ggF mu_BR_tautau_r_BR_WW mu_BR_bb_r_BR_WW mu_BR_gamgam_r_BR_WW mu_BR_ZZ_r_BR_WW" + B1WW_RANGES="mu_XS_VBF_r_XS_ggF=-2,5:mu_XS_WH_r_XS_ggF=-2,10:mu_XS_ZH_r_XS_ggF=-2,20:mu_XS_ttH_r_XS_ggF=-2,20:mu_BR_ZZ_r_BR_WW=-2,5:mu_BR_bb_r_BR_WW=-2,5:mu_BR_tautau_r_BR_WW=-2,5:mu_BR_gamgam_r_BR_WW=-2,5:mu_XS_ggF_x_BR_WW=-2,5" + B1ZZ_POIS="mu_XS_ggF_x_BR_ZZ mu_XS_VBF_r_XS_ggF mu_XS_ttH_r_XS_ggF mu_XS_WH_r_XS_ggF mu_XS_ZH_r_XS_ggF mu_BR_tautau_r_BR_ZZ mu_BR_bb_r_BR_ZZ mu_BR_gamgam_r_BR_ZZ mu_BR_WW_r_BR_ZZ" + B1ZZ_RANGES="mu_XS_VBF_r_XS_ggF=-2,5:mu_XS_WH_r_XS_ggF=-2,10:mu_XS_ZH_r_XS_ggF=-2,20:mu_XS_ttH_r_XS_ggF=-2,20:mu_BR_WW_r_BR_ZZ=-2,5:mu_BR_bb_r_BR_ZZ=-2,5:mu_BR_tautau_r_BR_ZZ=-2,5:mu_BR_gamgam_r_BR_ZZ=-2,5:mu_XS_ggF_x_BR_ZZ=-2,5" +case $WSP in +A1_mu) POIs="mu" ; + POIsF=",${BRU}"; + POIsR="mu=0,2" ;; +A1_4P) POIs="mu_XS_ggFbbH mu_XS_VBF mu_XS_VH mu_XS_ttHtH" ; + POIsF=",${BRU}"; + POIsR="mu_XS_ggFbbH=-2,2:mu_XS_VBF=-5,5:mu_XS_VH=-10,10:mu_XS_ttHtH=-10,10" ;; +A1_5P) POIs="mu_XS_ggFbbH mu_XS_VBF mu_XS_WH mu_XS_ZH mu_XS_ttHtH" ; + POIsF=",${BRU}"; + POIsR="mu_XS_ggFbbH=-2,2:mu_XS_VBF=-5,5:mu_XS_WH=-10,10:mu_XS_ZH=-10,10:mu_XS_ttHtH=-10,10" ;; +A1_5D) POIs="mu_BR_WW mu_BR_ZZ mu_BR_bb mu_BR_gamgam mu_BR_tautau" ; + POIsF=",${BRU}"; + POIsR="mu_BR_WW=-5,5:mu_BR_gamgam=-5,5:mu_BR_ZZ=-5,5:mu_BR_tautau=-5,5:mu_BR_bb=-5,5" ;; +A1_6D) POIs="mu_BR_WW mu_BR_ZZ mu_BR_bb mu_BR_gamgam mu_BR_tautau mu_BR_mumu" ; + POIsF=",${BRU}"; + POIsR="mu_BR_WW=-5,5:mu_BR_gamgam=-5,5:mu_BR_ZZ=-5,5:mu_BR_tautau=-5,5:mu_BR_bb=-5,5:mu_BR_mumu=-10,10" ;; +A1_5PD) POIs="mu_XS_ggFbbH_BR_WW mu_XS_VBF_BR_WW mu_XS_WH_BR_WW mu_XS_ZH_BR_WW mu_XS_ttHtH_BR_WW mu_XS_ggFbbH_BR_ZZ mu_XS_VBF_BR_ZZ mu_XS_WH_BR_ZZ mu_XS_ZH_BR_ZZ mu_XS_ttHtH_BR_ZZ mu_XS_WH_BR_bb mu_XS_ZH_BR_bb mu_XS_ttHtH_BR_bb mu_XS_ggFbbH_BR_gamgam mu_XS_VBF_BR_gamgam mu_XS_WH_BR_gamgam mu_XS_ZH_BR_gamgam 
mu_XS_ttHtH_BR_gamgam mu_XS_ggFbbH_BR_tautau mu_XS_VBF_BR_tautau mu_XS_WH_BR_tautau mu_XS_ZH_BR_tautau mu_XS_ttHtH_BR_tautau" ; + POIsF=",${THEORY}"; + #POIsR="mu_XS_ggFbbH_BR_WW=-10,10:mu_XS_VBF_BR_WW=-10,10:mu_XS_WH_BR_WW=-30,30:mu_XS_ZH_BR_WW=-30,30:mu_XS_ttHtH_BR_WW=-20,20:mu_XS_ggFbbH_BR_ZZ=-10,10:mu_XS_VBF_BR_ZZ=-100,100:mu_XS_WH_BR_ZZ=0,300:mu_XS_ZH_BR_ZZ=-300,300:mu_XS_ttHtH_BR_ZZ=-100,100:mu_XS_WH_BR_bb=-30,30:mu_XS_ZH_BR_bb=-30,30:mu_XS_ttHtH_BR_bb=-20,20:mu_XS_ggFbbH_BR_gamgam=-10,10:mu_XS_VBF_BR_gamgam=-10,10:mu_XS_WH_BR_gamgam=-30,30:mu_XS_ZH_BR_gamgam=-30,30:mu_XS_ttHtH_BR_gamgam=-20,20:mu_XS_ggFbbH_BR_tautau=-10,10:mu_XS_VBF_BR_tautau=-10,10:mu_XS_WH_BR_tautau=-30,30:mu_XS_ZH_BR_tautau=-30,30:mu_XS_ttHtH_BR_tautau=-20,20" ;; + POIsR="mu_XS_ggFbbH_BR_WW=0,10:mu_XS_VBF_BR_WW=0,10:mu_XS_WH_BR_WW=0,30:mu_XS_ZH_BR_WW=0,30:mu_XS_ttHtH_BR_WW=0,20:mu_XS_ggFbbH_BR_ZZ=0,10:mu_XS_VBF_BR_ZZ=0,100:mu_XS_WH_BR_ZZ=0,300:mu_XS_ZH_BR_ZZ=0,300:mu_XS_ttHtH_BR_ZZ=0,100:mu_XS_WH_BR_bb=0,30:mu_XS_ZH_BR_bb=0,30:mu_XS_ttHtH_BR_bb=0,20:mu_XS_ggFbbH_BR_gamgam=0,10:mu_XS_VBF_BR_gamgam=0,10:mu_XS_WH_BR_gamgam=0,30:mu_XS_ZH_BR_gamgam=0,30:mu_XS_ttHtH_BR_gamgam=0,20:mu_XS_ggFbbH_BR_tautau=0,10:mu_XS_VBF_BR_tautau=0,10:mu_XS_WH_BR_tautau=0,30:mu_XS_ZH_BR_tautau=0,30:mu_XS_ttHtH_BR_tautau=0,20" ;; +A1_5PD_cms) POIs="mu_XS_ggFbbH_BR_WW mu_XS_VBF_BR_WW mu_XS_WH_BR_WW mu_XS_ZH_BR_WW mu_XS_ttHtH_BR_WW mu_XS_ggFbbH_BR_ZZ mu_XS_WH_BR_bb mu_XS_ZH_BR_bb mu_XS_ttHtH_BR_bb mu_XS_ggFbbH_BR_gamgam mu_XS_VBF_BR_gamgam mu_XS_ttHtH_BR_gamgam mu_XS_ggFbbH_BR_tautau mu_XS_VBF_BR_tautau mu_XS_WH_BR_tautau mu_XS_ZH_BR_tautau mu_XS_ttHtH_BR_tautau mu_XS_WH_BR_gamgam mu_XS_ZH_BR_gamgam" ; + POIsF=",${BRU},mu_XS_VBF_BR_ZZ,mu_XS_ZH_BR_ZZ,mu_XS_WH_BR_ZZ,mu_XS_ttHtH_BR_ZZ"; + POIsR="mu_XS_ggFbbH_BR_WW=-10,10:mu_XS_VBF_BR_WW=-10,10:mu_XS_WH_BR_WW=-30,30:mu_XS_ZH_BR_WW=-30,30:mu_XS_ttHtH_BR_WW=-20,20:mu_XS_ggFbbH_BR_ZZ=-10,10:mu_XS_VBF_BR_ZZ=-100,100:mu_XS_WH_BR_ZZ=-300,300:mu_XS_ZH_BR_ZZ=-300,300:mu_XS_ttHtH_BR_ZZ=-100,100:mu_XS_WH_BR_bb=-30,30:mu_XS_ZH_BR_bb=-30,30:mu_XS_ttHtH_BR_bb=-20,20:mu_XS_ggFbbH_BR_gamgam=-10,10:mu_XS_VBF_BR_gamgam=-10,10:mu_XS_WH_BR_gamgam=-30,30:mu_XS_ZH_BR_gamgam=-30,30:mu_XS_ttHtH_BR_gamgam=-20,20:mu_XS_ggFbbH_BR_tautau=-10,10:mu_XS_VBF_BR_tautau=-10,10:mu_XS_WH_BR_tautau=-30,30:mu_XS_ZH_BR_tautau=-30,30:mu_XS_ttHtH_BR_tautau=-20,20" ;; + #POIsR="mu_XS_ggFbbH_BR_WW=0,10:mu_XS_VBF_BR_WW=0,10:mu_XS_WH_BR_WW=0,30:mu_XS_ZH_BR_WW=0,30:mu_XS_ttHtH_BR_WW=0,20:mu_XS_ggFbbH_BR_ZZ=0,10:mu_XS_VBF_BR_ZZ=0,100:mu_XS_WH_BR_ZZ=0,300:mu_XS_ZH_BR_ZZ=0,300:mu_XS_ttHtH_BR_ZZ=0,100:mu_XS_WH_BR_bb=0,30:mu_XS_ZH_BR_bb=0,30:mu_XS_ttHtH_BR_bb=0,20:mu_XS_ggFbbH_BR_gamgam=0,10:mu_XS_VBF_BR_gamgam=0,10:mu_XS_WH_BR_gamgam=0,30:mu_XS_ZH_BR_gamgam=0,30:mu_XS_ttHtH_BR_gamgam=0,20:mu_XS_ggFbbH_BR_tautau=0,10:mu_XS_VBF_BR_tautau=0,10:mu_XS_WH_BR_tautau=0,30:mu_XS_ZH_BR_tautau=0,30:mu_XS_ttHtH_BR_tautau=0,20" ;; +A1_5PD_atlas) POIs="mu_XS_ggFbbH_BR_WW mu_XS_VBF_BR_WW mu_XS_WH_BR_WW mu_XS_ZH_BR_WW mu_XS_ttHtH_BR_WW mu_XS_ggFbbH_BR_ZZ mu_XS_VBF_BR_ZZ mu_XS_WH_BR_ZZ mu_XS_ZH_BR_ZZ mu_XS_ttHtH_BR_ZZ mu_XS_WH_BR_bb mu_XS_ZH_BR_bb mu_XS_ttHtH_BR_bb mu_XS_ggFbbH_BR_gamgam mu_XS_VBF_BR_gamgam mu_XS_WH_BR_gamgam mu_XS_ZH_BR_gamgam mu_XS_ttHtH_BR_gamgam mu_XS_ggFbbH_BR_tautau mu_XS_VBF_BR_tautau mu_XS_WH_BR_tautau mu_XS_ZH_BR_tautau mu_XS_ttHtH_BR_tautau" ; + POIsF=",${THEORY}"; + 
#POIsR="mu_XS_ggFbbH_BR_WW=-10,10:mu_XS_VBF_BR_WW=-10,10:mu_XS_WH_BR_WW=-30,30:mu_XS_ZH_BR_WW=-30,30:mu_XS_ttHtH_BR_WW=-20,20:mu_XS_ggFbbH_BR_ZZ=-10,10:mu_XS_VBF_BR_ZZ=-100,100:mu_XS_WH_BR_ZZ=0,300:mu_XS_ZH_BR_ZZ=-300,300:mu_XS_ttHtH_BR_ZZ=-100,100:mu_XS_WH_BR_bb=-30,30:mu_XS_ZH_BR_bb=-30,30:mu_XS_ttHtH_BR_bb=-20,20:mu_XS_ggFbbH_BR_gamgam=-10,10:mu_XS_VBF_BR_gamgam=-10,10:mu_XS_WH_BR_gamgam=-30,30:mu_XS_ZH_BR_gamgam=-30,30:mu_XS_ttHtH_BR_gamgam=-20,20:mu_XS_ggFbbH_BR_tautau=-10,10:mu_XS_VBF_BR_tautau=-10,10:mu_XS_WH_BR_tautau=-30,30:mu_XS_ZH_BR_tautau=-30,30:mu_XS_ttHtH_BR_tautau=-20,20" ;; + POIsR="mu_XS_ggFbbH_BR_WW=0,10:mu_XS_VBF_BR_WW=0,10:mu_XS_WH_BR_WW=0,30:mu_XS_ZH_BR_WW=0,30:mu_XS_ttHtH_BR_WW=0,20:mu_XS_ggFbbH_BR_ZZ=0,10:mu_XS_VBF_BR_ZZ=0,100:mu_XS_WH_BR_ZZ=0,300:mu_XS_ZH_BR_ZZ=0,300:mu_XS_ttHtH_BR_ZZ=0,100:mu_XS_WH_BR_bb=0,30:mu_XS_ZH_BR_bb=0,30:mu_XS_ttHtH_BR_bb=0,20:mu_XS_ggFbbH_BR_gamgam=0,10:mu_XS_VBF_BR_gamgam=0,10:mu_XS_WH_BR_gamgam=0,30:mu_XS_ZH_BR_gamgam=0,30:mu_XS_ttHtH_BR_gamgam=0,20:mu_XS_ggFbbH_BR_tautau=0,10:mu_XS_VBF_BR_tautau=0,10:mu_XS_WH_BR_tautau=0,30:mu_XS_ZH_BR_tautau=0,30:mu_XS_ttHtH_BR_tautau=0,20" ;; +A1_5PD_lhc) POIs="mu_XS_ggFbbH_BR_WW mu_XS_VBF_BR_WW mu_XS_WH_BR_WW mu_XS_ZH_BR_WW mu_XS_ttHtH_BR_WW mu_XS_ggFbbH_BR_ZZ mu_XS_VBF_BR_ZZ mu_XS_ttHtH_BR_ZZ mu_XS_WH_BR_bb mu_XS_ZH_BR_bb mu_XS_ttHtH_BR_bb mu_XS_ggFbbH_BR_gamgam mu_XS_VBF_BR_gamgam mu_XS_WH_BR_gamgam mu_XS_ZH_BR_gamgam mu_XS_ttHtH_BR_gamgam mu_XS_ggFbbH_BR_tautau mu_XS_VBF_BR_tautau mu_XS_WH_BR_tautau mu_XS_ZH_BR_tautau mu_XS_ttHtH_BR_tautau mu_XS_ZH_BR_ZZ" ; + POIsF=",${BRU},mu_XS_WH_BR_ZZ"; + POIsR="mu_XS_ggFbbH_BR_WW=-10,10:mu_XS_VBF_BR_WW=-10,10:mu_XS_WH_BR_WW=-30,30:mu_XS_ZH_BR_WW=-30,30:mu_XS_ttHtH_BR_WW=-20,20:mu_XS_ggFbbH_BR_ZZ=-10,10:mu_XS_VBF_BR_ZZ=-100,100:mu_XS_WH_BR_ZZ=-100,100:mu_XS_ZH_BR_ZZ=-100,100:mu_XS_ttHtH_BR_ZZ=-100,100:mu_XS_WH_BR_bb=-30,30:mu_XS_ZH_BR_bb=-30,30:mu_XS_ttHtH_BR_bb=-20,20:mu_XS_ggFbbH_BR_gamgam=-10,10:mu_XS_VBF_BR_gamgam=-10,10:mu_XS_WH_BR_gamgam=-30,30:mu_XS_ZH_BR_gamgam=-30,30:mu_XS_ttHtH_BR_gamgam=-20,20:mu_XS_ggFbbH_BR_tautau=-10,10:mu_XS_VBF_BR_tautau=-10,10:mu_XS_WH_BR_tautau=-30,30:mu_XS_ZH_BR_tautau=-30,30:mu_XS_ttHtH_BR_tautau=-20,20" ;; + #POIsR="mu_XS_ggFbbH_BR_WW=0,10:mu_XS_VBF_BR_WW=0,10:mu_XS_WH_BR_WW=0,30:mu_XS_ZH_BR_WW=0,30:mu_XS_ttHtH_BR_WW=0,20:mu_XS_ggFbbH_BR_ZZ=0,10:mu_XS_VBF_BR_ZZ=0,100:mu_XS_WH_BR_ZZ=0,300:mu_XS_ZH_BR_ZZ=0,300:mu_XS_ttHtH_BR_ZZ=0,100:mu_XS_WH_BR_bb=0,30:mu_XS_ZH_BR_bb=0,30:mu_XS_ttHtH_BR_bb=0,20:mu_XS_ggFbbH_BR_gamgam=0,10:mu_XS_VBF_BR_gamgam=0,10:mu_XS_WH_BR_gamgam=0,30:mu_XS_ZH_BR_gamgam=0,30:mu_XS_ttHtH_BR_gamgam=0,20:mu_XS_ggFbbH_BR_tautau=0,10:mu_XS_VBF_BR_tautau=0,10:mu_XS_WH_BR_tautau=0,30:mu_XS_ZH_BR_tautau=0,30:mu_XS_ttHtH_BR_tautau=0,20" ;; +A1_muVmuF) POIs="mu_V_gamgam mu_V_ZZ mu_V_WW mu_V_tautau mu_V_bb mu_F_gamgam mu_F_ZZ mu_F_WW mu_F_tautau mu_F_bb" ; + POIsF=",${BRU}"; + POIsR="mu_V_ZZ=-20,20:mu_V_gamgam=-5,5:mu_V_WW=-5,5:mu_V_tautau=-5,5:mu_V_bb=-5,5:mu_F_gamgam=-5,5:mu_F_ZZ=-5,5:mu_F_WW=-5,5:mu_F_tautau=-5,5:mu_F_bb=-5,5" ;; +A2) POIs="mu_V_r_F mu_F_gamgam mu_F_ZZ mu_F_WW mu_F_tautau mu_F_bb" ; + POIsF=",mu_V_r_F_WW,mu_V_r_F_ZZ,mu_V_r_F_gamgam,mu_V_r_F_bb,mu_V_r_F_tautau,${BRU}"; + POIsR="mu_V_r_F=-5,5:mu_F_gamgam=-5,5:mu_F_ZZ=-5,5:mu_F_WW=-5,5:mu_F_tautau=-5,5:mu_F_bb=-5,5" ;; +A2_5D) POIs="mu_F_gamgam mu_F_ZZ mu_F_WW mu_F_tautau mu_F_bb mu_V_r_F_WW mu_V_r_F_ZZ mu_V_r_F_gamgam mu_V_r_F_bb mu_V_r_F_tautau" ; + POIsF=",mu_V_r_F,${BRU}" ; + 
POIsR="mu_F_gamgam=-5,5:mu_F_ZZ=-5,5:mu_F_WW=-5,5:mu_F_tautau=-5,5:mu_F_bb=-5,5:mu_V_r_F_ZZ=-20,20" ;; +B1WW) POIs="${B1WW_POIS}" ; + POIsD="mu_XS_WH_r_XS_ggF mu_XS_ZH_r_XS_ggF mu_BR_bb_r_BR_WW" ; + POIsR="${B1WW_RANGES}" ; + POIsF=",${THEORY},mu_XS7_r_XS8_VBF,mu_XS7_r_XS8_WH,mu_XS7_r_XS8_ZH,mu_XS7_r_XS8_ggF,mu_XS7_r_XS8_ttH";; +B1ZZ) POIs="${B1ZZ_POIS}" ; + POIsD="mu_XS_WH_r_XS_ggF mu_XS_ZH_r_XS_ggF mu_BR_bb_r_BR_ZZ" ; + POIsR="${B1ZZ_RANGES}" ; + POIsF=",${THEORY},mu_XS7_r_XS8_VBF,mu_XS7_r_XS8_WH,mu_XS7_r_XS8_ZH,mu_XS7_r_XS8_ggF,mu_XS7_r_XS8_ttH";; +B1WW_TH) POIs="${B1WW_POIS}" ; + POIsD="mu_XS_WH_r_XS_ggF mu_XS_ZH_r_XS_ggF mu_BR_bb_r_BR_WW" ; + POIsR="${B1WW_RANGES}" ; + POIsF=",${BRU},mu_XS7_r_XS8_VBF,mu_XS7_r_XS8_WH,mu_XS7_r_XS8_ZH,mu_XS7_r_XS8_ggF,mu_XS7_r_XS8_ttH";; +B1ZZ_TH) POIs="${B1ZZ_POIS}" ; + POIsD="mu_XS_WH_r_XS_ggF mu_XS_ZH_r_XS_ggF mu_BR_bb_r_BR_ZZ" ; + POIsR="${B1ZZ_RANGES}" ; + POIsF=",${BRU},mu_XS7_r_XS8_VBF,mu_XS7_r_XS8_WH,mu_XS7_r_XS8_ZH,mu_XS7_r_XS8_ggF,mu_XS7_r_XS8_ttH";; +B1WW_P78) POIs="${B1WW_POIS} mu_XS7_r_XS8_VBF mu_XS7_r_XS8_ggF mu_XS7_r_XS8_ttH mu_XS7_r_XS8_WH mu_XS7_r_XS8_ZH" ; + POIsR="${B1WW_RANGES}:mu_XS7_r_XS8_VBF=-5,5:mu_XS7_r_XS8_WH=-10,10:mu_XS7_r_XS8_ZH=-10,10:mu_XS7_r_XS8_ggF=-5,5:mu_XS7_r_XS8_ttH=-30,30" ; + POIsF=",${THEORY}";; +B1ZZ_P78) POIs="${B1ZZ_POIS} mu_XS7_r_XS8_VBF mu_XS7_r_XS8_ggF mu_XS7_r_XS8_ttH mu_XS7_r_XS8_WH mu_XS7_r_XS8_ZH" ; + POIsR="${B1ZZ_RANGES}:mu_XS7_r_XS8_VBF=-5,5:mu_XS7_r_XS8_WH=-10,10:mu_XS7_r_XS8_ZH=-10,10:mu_XS7_r_XS8_ggF=-5,5:mu_XS7_r_XS8_ttH=-30,30" ; + POIsF=",${THEORY}";; +D1_general) POIs="mu_WW mu_ZZ mu_gamgam mu_tautau l_VBF_gamgam l_VBF_WW l_VBF_ZZ l_VBF_tautau l_WH_gamgam l_WH_WW l_WH_ZZ l_WH_tautau l_WH_bb l_ZH_gamgam l_ZH_WW l_ZH_ZZ l_ZH_tautau l_ZH_bb l_ttH_gamgam l_ttH_WW l_ttH_ZZ l_ttH_tautau l_ttH_bb"; + #POIsR="mu_WW=-5,5:mu_ZZ=-5,5:mu_bb=-5,5:mu_gamgam=-5,5:mu_tautau=-5,5,l_VBF_gamgam=-5,5:l_VBF_WW=-5,5:l_VBF_ZZ=-10,10:l_VBF_tautau=-5,5:l_VBF_bb=-20,20:l_WH_gamgam=-30,30:l_WH_WW=-10,10:l_WH_ZZ=-30,30:l_WH_tautau=-10,10:l_WH_bb=-10,10:l_ZH_gamgam=-10,10:l_ZH_WW=-20,20:l_ZH_ZZ=-50,50:l_ZH_tautau=-20,20:l_ZH_bb=-5,5:l_ttH_gamgam=-20,20:l_ttH_WW=-10,10:l_ttH_ZZ=-50,50:l_ttH_tautau=-30,30:l_ttH_bb=-10,10"; + POIsR="mu_WW=0,5:mu_ZZ=0,5:mu_gamgam=0,5:mu_tautau=0,5:l_VBF_gamgam=0,5:l_VBF_WW=0,5:l_VBF_ZZ=0,10:l_VBF_tautau=0,5:l_WH_gamgam=0,30:l_WH_WW=0,10:l_WH_ZZ=0,30:l_WH_tautau=0,10:l_WH_bb=0,10:l_ZH_gamgam=0,10:l_ZH_WW=0,20:l_ZH_ZZ=0,50:l_ZH_tautau=0,100:l_ZH_bb=0,5:l_ttH_gamgam=0,20:l_ttH_WW=0,10:l_ttH_ZZ=0,50:l_ttH_tautau=0,30:l_ttH_bb=0,10"; + POIsF=",l_VBF,l_WH,l_ZH,l_ttH,mu_bb,${BRU}";; +D1_rank1) POIs="mu_WW mu_ZZ mu_gamgam mu_tautau mu_bb l_VBF l_WH l_ZH l_ttH"; + POIsR="mu_WW=0,5:mu_ZZ=0,5:mu_gamgam=0,5:mu_tautau=0,5:mu_bb=0,5:l_VBF=0,5:l_WH=0,10:l_ZH=0,10:l_ttH=0,10"; + POIsF=",l_VBF_gamgam,l_VBF_WW,l_VBF_ZZ,l_VBF_tautau,l_WH_gamgam,l_WH_WW,l_WH_ZZ,l_WH_tautau,l_WH_bb,l_ZH_gamgam,l_ZH_WW,l_ZH_ZZ,l_ZH_tautau,l_ZH_bb,l_ttH_gamgam,l_ttH_WW,l_ttH_ZZ,l_ttH_tautau,l_ttH_bb,${BRU}";; +B2) POIs="mu_XS_ggF_x_BR_WW mu_XS_VBF_x_BR_tautau mu_XS_WH_r_XS_VBF mu_XS_ZH_r_XS_WH mu_XS_ttH_r_XS_ggF mu_BR_ZZ_r_BR_WW mu_BR_gamgam_r_BR_WW mu_BR_tautau_r_BR_WW mu_BR_bb_r_BR_tautau" ; + POIsR="mu_XS_ggF_x_BR_WW=0,5:mu_XS_VBF_x_BR_tautau=0,5:mu_XS_WH_r_XS_VBF=-10,30:mu_XS_ZH_r_XS_WH=-20,60:mu_XS_ttH_r_XS_ggF=-10,10:mu_BR_ZZ_r_BR_WW=-5,5:mu_BR_gamgam_r_BR_WW=-5,5:mu_BR_tautau_r_BR_WW=-5,5:mu_BR_bb_r_BR_tautau=-5,5" ; + POIsF=",${THEORY},mu_XS7_r_XS8_VBF,mu_XS7_r_XS8_WH,mu_XS7_r_XS8_ZH,mu_XS7_r_XS8_ggF,mu_XS7_r_XS8_ttH";; +K1) 
POIs="kappa_W kappa_Z kappa_tau kappa_t kappa_b" ; + POIsF=",BRinv" + POIsR="kappa_W=0,2:kappa_Z=0,2:kappa_tau=0,2:kappa_t=0,2:kappa_b=0,2:BRinv=0,0.0000000" ;; +K1_mm) POIs="kappa_W kappa_Z kappa_tau kappa_t kappa_b kappa_mu" ; + POIsF=",BRinv" + POIsR="kappa_W=0,2:kappa_Z=0,2:kappa_tau=0,2:kappa_t=0,2:kappa_b=0,2:kappa_mu=0,10:BRinv=0,0.0000000" ;; +# BRinv set to 0.5 in ATLAS workspaces, we have to reset it to 0 +K2) POIs="kappa_W kappa_Z kappa_tau kappa_t kappa_b kappa_g kappa_gam" ; + POIsF=",BRinv" + POIsR="kappa_W=0,2:kappa_Z=0,2:kappa_tau=0,2:kappa_t=0,5:kappa_b=0,2:kappa_g=0,2:kappa_gam=0,2:BRinv=0,0.0000000" ;; +K2_BSM) POIs="kappa_W kappa_Z kappa_tau kappa_t kappa_b kappa_g kappa_gam BRinv" ; + POIsR="kappa_W=0,1:kappa_Z=0,1:kappa_tau=0,2:kappa_t=0,5:kappa_b=0,2:kappa_g=0,2:kappa_gam=0,2:BRinv=0,0.6" ;; +K2_BSM_N) POIs="kappa_W kappa_Z kappa_tau kappa_t kappa_b kappa_g kappa_gam BRinv" ; + POIsR="kappa_W=0,1:kappa_Z=0,1:kappa_tau=0,2:kappa_t=0,5:kappa_b=0,2:kappa_g=0,2:kappa_gam=0,2:BRinv=-1,0.6" ;; +K2_2D) POIs="kappa_g kappa_gam" ; + POIsF=",BRinv,kappa_W,kappa_Z,kappa_tau,kappa_t,kappa_b " + POIsR="kappa_g=0,2:kappa_gam=0,2:BRinv=0,0.0000000" ;; +K3) POIs="kappa_V kappa_F" ; + POIsR="kappa_V=0,2:kappa_F=0,2" ;; +K3_5D) POIs="kappa_V_gamgam kappa_F_gamgam kappa_V_WW kappa_F_WW kappa_V_ZZ kappa_F_ZZ kappa_V_tautau kappa_F_tautau kappa_V_bb kappa_F_bb" ; + POIsF=",kappa_V,kappa_F" ; + POIsR="kappa_V_gamgam=0,5:kappa_F_gamgam=0,5:kappa_V_WW=0,5:kappa_F_WW=0,5:kappa_V_ZZ=0,15:kappa_F_ZZ=0,5:kappa_V_tautau=0,5:kappa_F_tautau=0,5:kappa_V_bb=0,5:kappa_F_bb=0,5" ;; +L1) POIs="lambda_WZ lambda_Zg lambda_bZ lambda_gamZ lambda_tauZ lambda_tg kappa_gZ" ; + POIsR="lambda_WZ=0,2:lambda_tg=0,5:lambda_bZ=0,4:lambda_tauZ=0,4:lambda_Zg=0,4:lambda_gamZ=0,2:kappa_gZ=0,3" ;; +L2_ldu) POIs="kappa_uu lambda_du lambda_Vu" ; + POIsR="lambda_du=0,3:lambda_Vu=0,3:kappa_uu=0,3" ; + POIsF=",kappa_qq,lambda_lq,lambda_Vq,kappa_VV,lambda_FV" ;; +L2_llq) POIs="kappa_qq lambda_lq lambda_Vq" ; + POIsR="lambda_lq=0,3:lambda_Vq=0,3:kappa_qq=0,3" ; + POIsF=",kappa_uu,lambda_du,lambda_Vu,kappa_VV,lambda_FV" ;; +L2_lfv) POIs="kappa_VV lambda_FV" ; + POIsR="lambda_FV=0,3:kappa_VV=0,3" ; + POIsF=",kappa_uu,lambda_du,lambda_Vu,kappa_qq,lambda_lq,lambda_Vq" ;; +M1) POIs="M eps"; + POIsR="M=150,350:eps=-1,1"; + POIsF=",BRinv";; +M1_2D) POIs="M,eps"; + POIsR="M=160,340:eps=-0.2,0.2"; + POIsF=",BRinv";; +K3_2D) POIs="kappa_V,kappa_F" ; + POIsR="kappa_V=0.4,1.8:kappa_F=0.4,1.8" ;; +K3_5D_2D) POIs="kappa_V_gamgam,kappa_F_gamgam kappa_V_WW,kappa_F_WW kappa_V_ZZ,kappa_F_ZZ kappa_V_tautau,kappa_F_tautau kappa_V_bb,kappa_F_bb" ; + POIsF=",kappa_V,kappa_F" ; + POIsR="kappa_V_gamgam=0.4,1.8:kappa_F_gamgam=0,3:kappa_V_WW=0.4,1.8:kappa_F_WW=0,3:kappa_V_ZZ=0.4,1.8:kappa_F_ZZ=0,3:kappa_V_tautau=0.4,1.8:kappa_F_tautau=0,3:kappa_V_bb=0.4,1.8:kappa_F_bb=0,3" ;; +K3_5D_2D_N) POIs="kappa_V_gamgam,kappa_F_gamgam kappa_V_WW,kappa_F_WW kappa_V_ZZ,kappa_F_ZZ kappa_V_tautau,kappa_F_tautau kappa_V_bb,kappa_F_bb" ; + POIsF=",kappa_V,kappa_F" ; + POIsR="kappa_V_gamgam=0.4,1.8:kappa_F_gamgam=-2.5,0:kappa_V_WW=0.4,1.8:kappa_F_WW=-2.5,0:kappa_V_ZZ=0.4,1.8:kappa_F_ZZ=-2.5,0:kappa_V_tautau=0.4,1.8:kappa_F_tautau=-2.5,0:kappa_V_bb=0.4,1.8:kappa_F_bb=0,2.5" ;; +# ---- FOR L1 ---- +esac; +if [[ "$2" == "-r" ]]; then + echo " --redefineSignalPOIs=${POIs// /,}"; +elif [[ "$2" == "-b" ]]; then + echo " --setPhysicsModelParameterRanges=${POIsR}"; +elif [[ "$2" == "-f" ]]; then + echo "${POIsF}"; +elif [[ "$2" == "-d" ]]; then + echo "${POIsD}"; +elif [[ "$2" 
== "-," ]]; then + echo "${POIs// /,}"; +else + echo $POIs; +fi; + diff --git a/HIG15002/scripts/makeMepsBand.py b/HIG15002/scripts/makeMepsBand.py new file mode 100644 index 00000000000..4f5a153f10d --- /dev/null +++ b/HIG15002/scripts/makeMepsBand.py @@ -0,0 +1,230 @@ +import sys +import ROOT +import math +from functools import partial +import CombineHarvester.CombineTools.plotting as plot +import json +import argparse +import os.path + +ROOT.PyConfig.IgnoreCommandLineOptions = True +ROOT.gROOT.SetBatch(ROOT.kTRUE) + +def m6bandFromVEps(h2, best, xmin, xmax, points, toys): + c68 = ROOT.TGraphAsymmErrors() + c95 = ROOT.TGraphAsymmErrors() + sm = ROOT.TGraph() + h2 = h2.Clone(h2.GetName()+'_th2_prob') + for ix in xrange(1, h2.GetNbinsX()+1): + for iy in xrange(1, h2.GetNbinsY()+1): + hx = h2.GetXaxis().GetBinCenter(ix) + hy = h2.GetYaxis().GetBinCenter(iy) + h2.SetBinContent(ix, iy, ROOT.TMath.Prob(h2.GetBinContent(ix,iy),2)) + + # h2->Draw("COLZ"); c1->Print("~/public_html/drop/plot.png"); + # make histogram of log of relative shift wrt SM + v0 = 246.22 + # TH1D *histos[99]; double x[99]; + # if (points > 98) points = 98; + + histos = [] + x = [0.] * points + for p in xrange(points): + x[p] = xmin * math.exp(p*math.log(xmax/xmin)/(points-1)) + print 'x[%i] = %f' % (p, x[p]) + histos.append(ROOT.TH1D('h%d' % p,'', 10000, -5., 5.)) + + for t in xrange(toys): + v = ROOT.Double() + eps = ROOT.Double() + h2.GetRandom2(v,eps) + for p in xrange(points): + histos[p].Fill((1+eps)*math.log(x[p]/v) - math.log(x[p]/v0)) + # get 68% and 95% bands + c68.Set(points) + c95.Set(points) + sm.Set(points) + vbest = best.GetX()[0] + epsbest = best.GetY()[0] + for p in xrange(points): + ybest = math.exp((1+epsbest)*math.log(x[p]/vbest)) + ysm = math.exp(math.log(x[p]/v0)) + c68.SetPoint(p, x[p], ybest) + c95.SetPoint(p, x[p], ybest) + sm.SetPoint(p, x[p], ysm) + c68.SetPointError(p, 0.,0., 0.,0.) + c95.SetPointError(p, 0.,0., 0.,0.) 
+ histos[p].Scale(1.0/histos[p].Integral(0,histos[p].GetNbinsX()+1)) + prob0 = histos[p].GetBinContent(0) + for ix in xrange(1, histos[p].GetNbinsX() + 1): + prob = prob0 + histos[p].GetBinContent(ix) + if prob >= 0.025 and prob0 < 0.025: + c95.GetEYlow()[p] = ybest - (x[p]/v0)*math.exp(histos[p].GetBinLowEdge(ix)) + elif prob >= 0.16 and prob0 < 0.16: + c68.GetEYlow()[p] = ybest - (x[p]/v0)*math.exp(histos[p].GetBinLowEdge(ix)) + elif prob > 0.84 and prob0 <= 0.84: + c68.GetEYhigh()[p] = (x[p]/v0)*math.exp(histos[p].GetBinLowEdge(ix)) - ybest + elif prob > 0.975 and prob0 <= 0.975: + c95.GetEYhigh()[p] = (x[p]/v0)*math.exp(histos[p].GetBinLowEdge(ix)) - ybest + prob0 = prob + + #printf("at x[%d] = %.0f GeV, c = %.5f -%.5f/+%.5f\n", p, x[p], ybest, c68->GetEYlow()[p], c68->GetEYhigh()[p]); + c68.SetFillColor(ROOT.kGreen) + c95.SetFillColor(ROOT.kYellow) + return (c68, c95, sm) + +def GetFromTFile(filename, object): + fin = ROOT.TFile(filename) + obj = ROOT.gDirectory.Get(object).Clone() + fin.Close() + return obj + +ROOT.TH1.AddDirectory(False) + +parser = argparse.ArgumentParser() +parser.add_argument('--th2-input', help='input file containing the 2D NLL scan in the format FILE.root:NAME') +parser.add_argument('--best-fit-input', help='input file containing the best fit TGraph in the format FILE.root:NAME') +parser.add_argument('--kappa-results', help='json file containing kappa-model fit results') +parser.add_argument('--title-right', default='', help='title text') +parser.add_argument('--output', default='Meps', help='output filename') +parser.add_argument('--fix', action='store_true', help='fix drawing of the kappa_mu bar') +parser.add_argument('--masses', help='particle masses') +parser.add_argument('--x-range', default='0.07,300', help='custom x-axis range') +parser.add_argument('--y-range', default=None, help='custom y-axis range') +args = parser.parse_args() + +SM_VEV = 246.22 + + +plot.ModTDRStyle(l=0.18) + +# Get the TH2 input +hist_nll = GetFromTFile(*args.th2_input.split(':')) +best_fit = GetFromTFile(*args.best_fit_input.split(':')) + + +c68, c95, sm = m6bandFromVEps(hist_nll, best_fit, float(args.x_range.split(',')[0]), float(args.x_range.split(',')[1]), 40, 20000) + +best_line = c68.Clone() + + +plot.Set(sm, LineColor=ROOT.kRed, LineWidth=3, LineStyle=7) +plot.Set(best_line, LineColor=ROOT.kBlue, LineWidth=3) + +with open(args.kappa_results.split(':')[0]) as jsonfile: + k1_res = json.load(jsonfile)[args.kappa_results.split(':')[1]] + +masses = args.masses.split(',') +mass_dict = {} +for m in masses: + particle = m.split(':')[0] + mass = float(m.split(':')[1]) + mass_dict[particle] = mass + +kappa_graph = ROOT.TGraphAsymmErrors(len(k1_res.keys())) + +kappa_graph_fix = ROOT.TGraphAsymmErrors(1) + + +canv = ROOT.TCanvas(args.output, args.output) +pads = plot.OnePad() + +haxis = plot.CreateAxisHist(c68, True) +haxis.GetXaxis().SetTitle('Particle mass [GeV]') +haxis.GetYaxis().SetTitle('#kappa_{F}#frac{m_{F}}{v} or #sqrt{#kappa_{V}}#frac{m_{V}}{v}') +haxis.GetYaxis().SetTitleOffset(haxis.GetYaxis().GetTitleOffset()* 0.9) +haxis.SetMinimum(7E-5) +pads[0].SetLogx(True) +pads[0].SetLogy(True) +pads[0].cd() +if args.x_range is not None: + haxis.GetXaxis().SetLimits(float(args.x_range.split(',')[0]),float(args.x_range.split(',')[1])) +if args.y_range is not None: + haxis.SetMinimum(float(args.y_range.split(',')[0])) + haxis.SetMaximum(float(args.y_range.split(',')[1])) +haxis.Draw() + +c95.Draw('SAME3') +c68.Draw('SAME3') +sm.Draw('LSAME') +best_line.Draw('LXSAME') + +xmin = 
haxis.GetXaxis().GetXmin() +xmax = haxis.GetXaxis().GetXmax() +ymin = haxis.GetMinimum() +ymax = haxis.GetMaximum() +print (xmin, xmax, ymin, ymax) + +pl = pads[0].GetLeftMargin() +pr = pads[0].GetRightMargin() +pt = pads[0].GetTopMargin() +pb = pads[0].GetBottomMargin() + +latex = ROOT.TLatex() +latex.SetNDC() +plot.Set(latex, TextAlign=21, TextSize=0.04) + +for i, POI in enumerate(k1_res): + particle = POI.replace('kappa_','') + mass = mass_dict[particle] + kappa = k1_res[POI]['Val'] + if particle in ['W', 'Z']: + kappa = math.sqrt(kappa) + reduced = kappa * mass / SM_VEV + err_hi = (abs(k1_res[POI]['ErrorHi'])) * mass / SM_VEV + err_lo = (abs(k1_res[POI]['ErrorLo'])) * mass / SM_VEV + kappa_graph.SetPoint(i, mass, reduced) + kappa_graph.SetPointError(i, 0, 0, err_lo, err_hi) + if args.fix and particle == 'mu': + kappa_graph.SetPointError(i, 0, 0, 0, err_hi) + print (particle, reduced, reduced+err_hi, reduced-err_lo) + pad_x = (math.log(mass) - math.log(xmin))/(math.log(xmax) - math.log(xmin)) + pad_y = (math.log(reduced+err_hi) - math.log(ymin))/(math.log(ymax) - math.log(ymin)) + pad_x = pl + (1-pl-pr) * pad_x + if particle == 'W': pad_x -= 0.01 + if particle == 'Z': pad_x += 0.01 + pad_y = (pb + (1-pt-pb) * pad_y) + 0.025 + text = particle + if particle in ['tau', 'mu']: + text = '#' + text + latex.DrawLatex(pad_x, pad_y, text) + if particle == 'mu': + kappa_graph_fix.SetPoint(i, mass, reduced) + kappa_graph_fix.SetPointError(i, 0, 0, err_lo, err_hi) + + +plot.Set(kappa_graph, LineWidth=3) +plot.Set(kappa_graph_fix, LineWidth=3) +kappa_graph.Draw('P0SAME') +if args.fix: + kappa_graph_fix.Draw('P0ZSAME') + +legend = ROOT.TLegend(0.20, 0.68, 0.6, 0.78, '', 'NBNDC') +legend.AddEntry(kappa_graph, 'Observed (68% CL)', 'EP') +legend.AddEntry(sm, 'SM Higgs Boson', 'L') +legend.Draw() + +legend2 = ROOT.TLegend(0.68, 0.16, 0.93, 0.37, '', 'NBNDC') +legend2.SetHeader('(M, #epsilon) fit') +legend2.AddEntry(best_line, 'Observed', 'L') +legend2.AddEntry(c68, '68% CL', 'F') +legend2.AddEntry(c95, '95% CL', 'F') +legend2.Draw() + +plot.DrawTitle(pads[0], args.title_right, 3) +plot.DrawCMSLogo(pads[0], '#it{ATLAS}#bf{ and }CMS', '#it{LHC Run 1 Internal}', 11, 0.045, 0.035, 1.2) + +pads[0].RedrawAxis() + + +canv.Print('.pdf') +canv.Print('.png') + +fout = ROOT.TFile('%s.root' % args.output, 'RECREATE') +fout.WriteTObject(c68, 'MepsFit68') +fout.WriteTObject(c95, 'MepsFit95') +fout.WriteTObject(kappa_graph, 'ReducedKappas') +fout.WriteTObject(sm, 'SMExpected') +fout.Close() + + diff --git a/HIG15002/scripts/plot1DScan.py b/HIG15002/scripts/plot1DScan.py new file mode 100755 index 00000000000..cd76476ad94 --- /dev/null +++ b/HIG15002/scripts/plot1DScan.py @@ -0,0 +1,502 @@ +#!/usr/bin/env python +import sys +import ROOT +import math +from functools import partial +import CombineHarvester.CombineTools.plotting as plot +import json +import argparse +import os.path + +ROOT.PyConfig.IgnoreCommandLineOptions = True +ROOT.gROOT.SetBatch(ROOT.kTRUE) + +plot.ModTDRStyle(width=700, l = 0.13) +ROOT.gStyle.SetNdivisions(510, "XYZ") +ROOT.gStyle.SetMarkerSize(0.7) + +NAMECOUNTER = 0 + +def read(scan, param, files, chop, remove_near_min, rezero, remove_delta = None, improve = False): + # print files + goodfiles = [f for f in files if plot.TFileIsGood(f)] + limit = plot.MakeTChain(goodfiles, 'limit') + # require quantileExpected > -0.5 to avoid the final point which is always committed twice + # (even if the fit fails) + graph = plot.TGraphFromTree(limit, param, '2*deltaNLL', 'quantileExpected > -0.5') + 
graph.SetName(scan) + graph.Sort() + plot.RemoveGraphXDuplicates(graph) + if remove_delta is not None: plot.RemoveSmallDelta(graph, remove_delta) + plot.RemoveGraphYAbove(graph, chop) + plot.ReZeroTGraph(graph, rezero) + if remove_near_min is not None: plot.RemoveNearMin(graph, remove_near_min) + if improve: + global NAMECOUNTER + spline = ROOT.TSpline3("spline3", graph) + func = ROOT.TF1('splinefn'+str(NAMECOUNTER), partial(Eval, spline), graph.GetX()[0], graph.GetX()[graph.GetN() - 1], 1) + NAMECOUNTER += 1 + plot.ImproveMinimum(graph, func, True) + # graph.Print() + return graph + +def Eval(obj, x, params): + return obj.Eval(x[0]) + + +def ProcessEnvelope(main, others, relax_safety = 0): + '[ProcessEnvelope] Will create envelope from %i other scans' % len(others) + vals = {} + for oth in others: + gr = oth['graph'] + for i in xrange(gr.GetN()): + x = gr.GetX()[i] + y = gr.GetY()[i] + if x not in vals: vals[x] = [] + vals[x].append(y) + lengths = [] + for key in sorted(vals): + # print '%f %i' % (key,len(vals[key])) + lengths.append(len(vals[key])) + mode = max(set(lengths), key=lengths.count) + to_del = [] + for key in sorted(vals): + if len(vals[key]) < (mode - relax_safety): to_del.append(key) + for x in to_del: del vals[x] + + gr = ROOT.TGraph() + gr.Set(len(vals)) # will not contain the best fit + for i, key in enumerate(sorted(vals)): + gr.SetPoint(i, key, min(vals[key])) + + spline = ROOT.TSpline3("spline3", gr) + global NAMECOUNTER + func = ROOT.TF1('splinefn'+str(NAMECOUNTER), partial(Eval, spline), gr.GetX()[0], gr.GetX()[gr.GetN() - 1], 1) + min_x, min_y = plot.ImproveMinimum(gr, func) + gr.Set(len(vals)+1) + gr.SetPoint(len(vals), min_x, min_y) + gr.Sort() + + for i in xrange(gr.GetN()): + gr.GetY()[i] -= min_y + for oth in others: + for i in xrange(oth['graph'].GetN()): + oth['graph'].GetY()[i] -= min_y + + plot.RemoveGraphXDuplicates(gr) + gr.Print() + return gr + + + +def BuildScan(scan, param, files, color, yvals, chop, remove_near_min = None, rezero = False, envelope = False, pregraph = None, remove_delta = None, improve = False): + if pregraph is None: + graph = read(scan, param, files, chop, remove_near_min, rezero, remove_delta, improve) + else: + graph = pregraph + bestfit = None + for i in xrange(graph.GetN()): + if graph.GetY()[i] == 0.: + bestfit = graph.GetX()[i] + if envelope: plot.RemoveGraphYAll(graph, 0.) + graph.SetMarkerColor(color) + spline = ROOT.TSpline3("spline3", graph) + global NAMECOUNTER + func = ROOT.TF1('splinefn'+str(NAMECOUNTER), partial(Eval, spline), graph.GetX()[0], graph.GetX()[graph.GetN() - 1], 1) + NAMECOUNTER += 1 + func.SetLineColor(color) + func.SetLineWidth(3) + assert(bestfit is not None) + if not envelope: plot.ImproveMinimum(graph, func) + crossings = {} + cross_1sig = None + cross_2sig = None + other_1sig = [] + other_2sig = [] + val = None + val_2sig = None + for yval in yvals: + crossings[yval] = plot.FindCrossingsWithSpline(graph, func, yval) + for cr in crossings[yval]: + cr["contains_bf"] = cr["lo"] <= bestfit and cr["hi"] >= bestfit + for cr in crossings[yvals[0]]: + if cr['contains_bf']: + val = (bestfit, cr['hi'] - bestfit, cr['lo'] - bestfit) + cross_1sig = cr + else: + other_1sig.append(cr) + if len(yvals) > 1: + for cr in crossings[yvals[1]]: + if cr['contains_bf']: + val_2sig = (bestfit, cr['hi'] - bestfit, cr['lo'] - bestfit) + cross_2sig = cr + else: + other_2sig.append(cr) + else: + val_2sig = (0., 0., 0.) 
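+        # Only a single threshold was requested (e.g. an --upper-cl scan), so
+        # there is no genuine 2 sigma crossing: dummy values are used here and
+        # the 1 sigma crossing is reused just below.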
+        cross_2sig = cross_1sig
+    return {
+        "graph" : graph,
+        "spline" : spline,
+        "func" : func,
+        "crossings" : crossings,
+        "val" : val,
+        "val_2sig": val_2sig,
+        "cross_1sig" : cross_1sig,
+        "cross_2sig" : cross_2sig,
+        "other_1sig" : other_1sig,
+        "other_2sig" : other_2sig
+    }
+
+parser = argparse.ArgumentParser(
+    prog='plot1DScan.py',
+    )
+
+parser.add_argument('--no-input-label', action='store_true', help='do not draw the input label')
+parser.add_argument('--no-numbers', action='store_true', help='do not draw the numerical fit result')
+parser.add_argument('--improve', action='store_true', help='use spline interp to correct to a better minimum')
+parser.add_argument('--remove-delta', type=float, help='remove best fit and close-by points')
+parser.add_argument('--no-box', action='store_true', help='do not draw the box at the top of the frame')
+parser.add_argument('--pub', action='store_true', help='publication style: suppress scan markers and the input label')
+parser.add_argument('--signif', action='store_true', help='do significance plot')
+parser.add_argument('--envelope', action='store_true', help='do envelope plot')
+parser.add_argument('--upper-cl', type=float, help='quote upper limit instead')
+parser.add_argument('--chop', type=float, default=7., help='remove vals above')
+parser.add_argument('--y-max', type=float, default=8., help='max y to draw')
+parser.add_argument('--remove-near-min', type=float, help='remove points with this fraction of the average x-spacing to the best-fit point')
+parser.add_argument('--rezero', action='store_true', help='correct y-values if a point with a lower minimum than zero is found')
+parser.add_argument('--output', '-o', help='output name')
+parser.add_argument('--main', '-m', help='Main input file for the scan')
+parser.add_argument('--json', help='update this json file')
+parser.add_argument('--model', help='use this model identifier')
+parser.add_argument('--POI', help='use this parameter of interest')
+parser.add_argument('--translate', default='texName.json', help='json file with POI name translation')
+parser.add_argument('--main-label', default='Observed', type=str, help='legend label for the main scan')
+parser.add_argument('--main-color', default=1, type=int, help='line and marker color for main scan')
+parser.add_argument('--relax-safety', default=0, type=int, help='relax the point-count safety check when building the envelope')
+parser.add_argument('--others', nargs='*', help='add secondary scans processed as main: FILE:LABEL:COLOR')
+parser.add_argument('--breakdown', help='do quadratic error subtraction using --others')
+parser.add_argument('--meta', default='', help='Other metadata to save in format KEY:VAL,KEY:VAL')
+
+args = parser.parse_args()
+if args.pub: args.no_input_label = True
+
+
+print '--------------------------------------'
+print args.output
+print '--------------------------------------'
+
+fixed_name = args.POI
+if args.translate is not None:
+    with open(args.translate) as jsonfile:
+        name_translate = json.load(jsonfile)
+    if args.POI in name_translate:
+        fixed_name = name_translate[args.POI]
+
+yvals = [1., 4.]
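+# For a one-parameter scan, 2*deltaNLL = 1 and 4 correspond to the 68% and
+# 95% CL (1 and 2 sigma) crossings. With --upper-cl this is replaced below by
+# the matching chi-square quantile, e.g. chisquared_quantile(0.95, 1) is
+# roughly 3.84 for a 95% CL upper limit.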
+if args.upper_cl is not None: + yvals = [ROOT.Math.chisquared_quantile(args.upper_cl, 1)] + +main_scan = BuildScan(args.output, args.POI, [args.main], args.main_color, yvals, args.chop, args.remove_near_min, args.rezero, remove_delta = args.remove_delta, improve = args.improve) + +other_scans = [ ] +other_scans_opts = [ ] +if args.others is not None: + for oargs in args.others: + splitargs = oargs.split(':') + other_scans_opts.append(splitargs) + other_scans.append(BuildScan(args.output, args.POI, [splitargs[0]], int(splitargs[2]), yvals, args.chop, args.remove_near_min if args.envelope is False else None, args.rezero, args.envelope, remove_delta = args.remove_delta, improve = args.improve)) + +if args.envelope: + new_gr = ProcessEnvelope(main_scan, other_scans, args.relax_safety) + main_scan = BuildScan(args.output, args.POI, [args.main], args.main_color, yvals, args.chop, args.remove_near_min, args.rezero, pregraph = new_gr) + for other in other_scans: + color = other['func'].GetLineColor() + other['spline'] = ROOT.TSpline3("spline3", other['graph']) + other['func'] = ROOT.TF1('splinefn'+str(NAMECOUNTER), partial(Eval, other['spline']), other['graph'].GetX()[0], other['graph'].GetX()[other['graph'].GetN() - 1], 1) + NAMECOUNTER += 1 + other['func'].SetLineColor(color) + other['func'].SetLineWidth(2) + # other['func'].SetLineStyle(2) + other['graph'].SetMarkerSize(0.4) + +canv = ROOT.TCanvas(args.output, args.output) +pads = plot.OnePad() +if args.pub: main_scan['graph'].SetMarkerSize(0) +main_scan['graph'].SetMarkerColor(1) +main_scan['graph'].Draw('AP') +# main_scan['graph'].Fit('pol4') +# polfunc = main_scan['graph'].GetFunction('pol4') +# print 'Function min at %f' % polfunc.GetMinimumX(0.05, 0.25) + + +axishist = plot.GetAxisHist(pads[0]) +# axishist.SetMinimum(1E-5) +# pads[0].SetLogy(True) +axishist.SetMaximum(args.y_max) +axishist.GetYaxis().SetTitle("- 2 #Delta ln #Lambda(%s)" % fixed_name) +axishist.GetXaxis().SetTitle("%s" % fixed_name) + +# main_scan['graph'].Draw('PSAME') + + +new_min = axishist.GetXaxis().GetXmin() +new_max = axishist.GetXaxis().GetXmax() +mins = [] +maxs = [] +for other in other_scans: + mins.append(other['graph'].GetX()[0]) + maxs.append(other['graph'].GetX()[other['graph'].GetN()-1]) + +if len(other_scans) > 0: + if min(mins) < main_scan['graph'].GetX()[0]: + new_min = min(mins) - (main_scan['graph'].GetX()[0] - new_min) + if max(maxs) > main_scan['graph'].GetX()[main_scan['graph'].GetN()-1]: + new_max = max(maxs) + (new_max - main_scan['graph'].GetX()[main_scan['graph'].GetN()-1]) + axishist.GetXaxis().SetLimits(new_min, new_max) + +for other in other_scans: + if args.breakdown is not None: + other['graph'].SetMarkerSize(0.4) + if args.pub: other['graph'].SetMarkerSize(0.0) + other['graph'].Draw('PSAME') + +line = ROOT.TLine() +line.SetLineColor(ROOT.kRed) +for yval in yvals: + plot.DrawHorizontalLine(pads[0], line, yval) + if (len(other_scans) == 0 or args.upper_cl is not None) and args.pub is False: + for cr in main_scan['crossings'][yval]: + if cr['valid_lo']: line.DrawLine(cr['lo'], 0, cr['lo'], yval) + if cr['valid_hi']: line.DrawLine(cr['hi'], 0, cr['hi'], yval) + +main_scan['func'].Draw('same') +for other in other_scans: + if args.breakdown is not None: + other['func'].SetLineStyle(2) + other['func'].SetLineWidth(2) + if args.pub: + other['func'].SetLineStyle(2) + other['func'].Draw('SAME') + + +if args.envelope: + main_scan['graph'].Draw('PSAME') + +# if len(args) >= 4: +# syst_scan = BuildScan(args.output, args.POI, [args[3]], ROOT.kBlue, 
yvals) +# syst_scan['graph'].Draw('sameP') +# syst_scan['func'].Draw('same') + + +box = ROOT.TBox(axishist.GetXaxis().GetXmin(), 0.625*args.y_max, axishist.GetXaxis().GetXmax(), args.y_max) +# box = ROOT.TBox(axishist.GetXaxis().GetXmin(), 4.5, axishist.GetXaxis().GetXmax(), 7) +if not args.no_box: box.Draw() +pads[0].GetFrame().Draw() +pads[0].RedrawAxis() + +crossings = main_scan['crossings'] +val_nom = main_scan['val'] +val_2sig = main_scan['val_2sig'] + +textfit = '%s = %.3f{}^{#plus %.3f}_{#minus %.3f}' % (fixed_name, val_nom[0], val_nom[1], abs(val_nom[2])) +if args.upper_cl: + textfit = '%s < %.2f (%i%% CL)' % (fixed_name, val_nom[1], int(args.upper_cl * 100)) + +pt = ROOT.TPaveText(0.59, 0.82 - len(other_scans)*0.08, 0.95, 0.91, 'NDCNB') +if args.envelope: pt.SetY2(0.78) +if args.envelope: pt.SetY1(0.66) +pt.AddText(textfit) + +if args.breakdown is None and args.envelope is False: + for i, other in enumerate(other_scans): + textfit = '#color[%s]{%s = %.3f{}^{#plus %.3f}_{#minus %.3f}}' % (other_scans_opts[i][2], fixed_name, other['val'][0], other['val'][1], abs(other['val'][2])) + if args.upper_cl: + print 'here' + textfit = '#color[%s]{%s < %.2f (%i%% CL)}' % (other_scans_opts[i][2], fixed_name, other['val'][1], int(args.upper_cl * 100)) + pt.AddText(textfit) + +breakdown_json = {} +if args.breakdown is not None: + pt.SetX1(0.50) + if len(other_scans) >= 3: + pt.SetX1(0.19) + pt.SetX2(0.88) + pt.SetY1(0.66) + pt.SetY2(0.82) + breakdown = args.breakdown.split(',') + v_hi = [val_nom[1]] + v_lo = [val_nom[2]] + for other in other_scans: + v_hi.append(other['val'][1]) + v_lo.append(other['val'][2]) + assert(len(v_hi) == len(breakdown)) + textfit = '%s = %.3f' % (fixed_name, val_nom[0]) + for i, br in enumerate(breakdown): + if i < (len(breakdown) - 1): + if (abs(v_hi[i+1]) > abs(v_hi[i])): + print 'ERROR SUBTRACTION IS NEGATIVE FOR %s HI' % br + hi = 0. + else: + hi = math.sqrt(v_hi[i]*v_hi[i] - v_hi[i+1]*v_hi[i+1]) + if (abs(v_lo[i+1]) > abs(v_lo[i])): + print 'ERROR SUBTRACTION IS NEGATIVE FOR %s LO' % br + lo = 0. + else: + lo = math.sqrt(v_lo[i]*v_lo[i] - v_lo[i+1]*v_lo[i+1]) + else: + hi = v_hi[i] + lo = v_lo[i] + breakdown_json[br+"Hi"] = hi + breakdown_json[br+"Lo"] = abs(lo) * -1. 
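+        # Quadrature subtraction: each component is sqrt(err_i^2 - err_{i+1}^2),
+        # the uncertainty removed between consecutive scans, while the final
+        # entry keeps the remaining (typically statistical) error.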
+ textfit += '{}^{#plus %.3f}_{#minus %.3f}(%s)' % (hi, abs(lo), br) + pt.AddText(textfit) + # hi_1 = math.sqrt(val_nom[1]*val_nom[1] - other_scans[0]['val'][1]*other_scans[0]['val'][1]) + # lo_1 = math.sqrt(val_nom[2]*val_nom[2] - other_scans[0]['val'][2]*other_scans[0]['val'][2]) + # textfit = '%s = %.3f{}^{#plus %.3f}_{#minus %.3f}(%s)' % (fixed_name, val_nom[0], hi_1, lo_1, breakdown[0]) + # hi_2 = math.sqrt(other_scans[0]['val'][1]*other_scans[0]['val'][1] - other_scans[1]['val'][1]*other_scans[1]['val'][1]) + # lo_2 = math.sqrt(other_scans[0]['val'][2]*other_scans[0]['val'][2] - other_scans[1]['val'][2]*other_scans[1]['val'][2]) + # hi_3 = math.sqrt(other_scans[1]['val'][1]*other_scans[1]['val'][1] - other_scans[2]['val'][1]*other_scans[2]['val'][1]) + # lo_3 = math.sqrt(other_scans[1]['val'][2]*other_scans[1]['val'][2] - other_scans[2]['val'][2]*other_scans[2]['val'][2]) + # textfit += '{}^{#plus %.3f}_{#minus %.3f}(%s)' % (hi_3, lo_3, breakdown[2]) + # hi_4 = math.sqrt(other_scans[2]['val'][1]*other_scans[2]['val'][1]) + # lo_4 = math.sqrt(other_scans[2]['val'][2]*other_scans[2]['val'][2]) + # textfit += '{}^{#plus %.3f}_{#minus %.3f}(%s)' % (hi_4, lo_4, breakdown[3]) + + +# print textfit +# textfit = '%s = %.3f{}^{#plus %.3f}_{#minus %.3f} (stat.)' % (fixed_name, syst_scan['val'][0], abs(syst_scan['val'][1]), abs(syst_scan['val'][2])) + +signif = None +signif_x = None +signif_y = None +if args.signif: + gr = main_scan['graph'] + for i in xrange(gr.GetN()): + if abs(gr.GetX()[i] - 0.) < 1E-4: + print 'Found scan point at %s = %.6f' % (args.POI, gr.GetX()[i]) + pt.SetY1(pt.GetY1() - 0.1) + pt.SetX1(0.52) + signif = ROOT.Math.normal_quantile_c(ROOT.Math.chisquared_cdf_c(gr.GetY()[i], 1)/2., 1) + signif_x = gr.GetX()[i] + signif_y = gr.GetY()[i] + txt_cross = '[-2#DeltalnL @ %s = %.1f is %.3f]' % (fixed_name, gr.GetX()[i], gr.GetY()[i]) + txt_signif = '[Significance = %.2f#sigma]' % (signif) + pt.AddText(txt_cross) + pt.AddText(txt_signif) + print '-2#DeltalnL @ %s = %.1f is %.3f]' % (fixed_name, 1., main_scan['func'].Eval(1.0)) + + +# pt.AddText(textfit) +# if len(args) >= 4: +# syst_hi = math.sqrt(val_nom[1]*val_nom[1] - syst_scan['val'][1] * syst_scan['val'][1]) +# syst_lo = math.sqrt(val_nom[2]*val_nom[2] - syst_scan['val'][2] * syst_scan['val'][2]) +# textfit = '%s = %.3f{}^{#plus %.3f}_{#minus %.3f} (stat.){}^{#plus %.3f}_{#minus %.3f} (syst.)' % (fixed_name, syst_scan['val'][0], abs(syst_scan['val'][1]), abs(syst_scan['val'][2]), syst_hi, syst_lo) +# pt.AddText(textfit) +pt.SetTextAlign(11) +pt.SetTextFont(42) +if not args.no_numbers: pt.Draw() + + +if args.json is not None: + if os.path.isfile(args.json): + with open(args.json) as jsonfile: + js = json.load(jsonfile) + else: + js = {} + if not args.model in js: js[args.model] = {} + js[args.model][args.POI] = { + 'Val' : val_nom[0], + 'ErrorHi' : val_nom[1], + 'ErrorLo' : val_nom[2], + 'ValidErrorHi' : main_scan['cross_1sig']['valid_hi'], + 'ValidErrorLo' : main_scan['cross_1sig']['valid_lo'], + '2sig_ErrorHi' : val_2sig[1], + '2sig_ErrorLo' : val_2sig[2], + '2sig_ValidErrorHi' : main_scan['cross_2sig']['valid_hi'], + '2sig_ValidErrorLo' : main_scan['cross_2sig']['valid_lo'] + } + if signif is not None: + js[args.model][args.POI].update({ + 'Signif' : signif, + 'Signif_x' : signif_x, + 'Signif_y' : signif_y + }) + if args.breakdown is not None: + js[args.model][args.POI].update(breakdown_json) + if args.envelope is not None: + print main_scan['other_1sig'] + print main_scan['other_2sig'] + js_extra = { + 'OtherLimitLo': 0., + 
'OtherLimitHi': 0., + '2sig_OtherLimitLo': 0., + '2sig_OtherLimitHi': 0., + 'ValidOtherLimitLo': False, + 'ValidOtherLimitHi': False, + '2sig_ValidOtherLimitLo': False, + '2sig_ValidOtherLimitHi': False + } + if len(main_scan['other_1sig']) >= 1: + other = main_scan['other_1sig'][0] + js_extra['OtherLimitLo'] = other['lo'] + js_extra['OtherLimitHi'] = other['hi'] + js_extra['ValidOtherLimitLo'] = other['valid_lo'] + js_extra['ValidOtherLimitHi'] = other['valid_hi'] + if len(main_scan['other_2sig']) >= 1: + other = main_scan['other_2sig'][0] + js_extra['2sig_OtherLimitLo'] = other['lo'] + js_extra['2sig_OtherLimitHi'] = other['hi'] + js_extra['2sig_ValidOtherLimitLo'] = other['valid_lo'] + js_extra['2sig_ValidOtherLimitHi'] = other['valid_hi'] + js[args.model][args.POI].update(js_extra) + + with open(args.json, 'w') as outfile: + json.dump(js, outfile, sort_keys=True, indent=4, separators=(',', ': ')) + +collab = 'Combined' +if 'cms_' in args.output: collab = 'CMS' +if 'atlas_' in args.output: collab = 'ATLAS' + +plot.DrawCMSLogo(pads[0], '#it{ATLAS}#bf{ and }CMS', '#it{LHC Run 1 Internal}', 11, 0.045, 0.035, 1.2) +# plot.DrawCMSLogo(pads[0], '#it{ATLAS}#bf{ and }CMS', '#it{LHC Run 1 Preliminary}', 11, 0.025, 0.035, 1.1, cmsTextSize = 1.) + +if not args.no_input_label: plot.DrawTitle(pads[0], '#bf{Input:} %s' % collab, 3) +# legend_l = 0.70 if len(args) >= 4 else 0.73 +legend_l = 0.73 +if len(other_scans) > 0: + legend_l = legend_l - len(other_scans) * 0.04 +legend = ROOT.TLegend(0.15, legend_l, 0.45, 0.78, '', 'NBNDC') +if len(other_scans) >= 3: + if args.envelope: + legend = ROOT.TLegend(0.58, 0.79, 0.95, 0.93, '', 'NBNDC') + legend.SetNColumns(2) + else: + legend = ROOT.TLegend(0.46, 0.83, 0.95, 0.93, '', 'NBNDC') + legend.SetNColumns(2) + +legend.AddEntry(main_scan['func'], args.main_label, 'L') +for i, other in enumerate(other_scans): + legend.AddEntry(other['func'], other_scans_opts[i][1], 'L') +# if len(args) >= 4: legend.AddEntry(syst_scan['func'], 'Stat. 
Only', 'L') +legend.Draw() + +save_graph = main_scan['graph'].Clone() +save_graph.GetXaxis().SetTitle('%s = %.3f %+.3f/%+.3f' % (fixed_name, val_nom[0], val_nom[2], val_nom[1])) +outfile = ROOT.TFile(args.output+'.root', 'RECREATE') +outfile.WriteTObject(save_graph) +outfile.Close() +canv.Print('.pdf') +canv.Print('.png') + +meta = {} + +if args.meta != '': + meta_list = args.meta.split(',') + for m in meta_list: + meta_pair = m.split(':') + meta[meta_pair[0]] = meta_pair[1] + with open(args.output+'.json', 'w') as outmeta: + json.dump(meta, outmeta, sort_keys=True, indent=4, separators=(',', ': ')) + +# canv.Print('.C') diff --git a/HIG15002/scripts/plot2DLimits.py b/HIG15002/scripts/plot2DLimits.py new file mode 100755 index 00000000000..5489c23f57b --- /dev/null +++ b/HIG15002/scripts/plot2DLimits.py @@ -0,0 +1,96 @@ +#!/usr/bin/env python +import sys +import ROOT +import math +from functools import partial +import CombineHarvester.CombineTools.plotting as plot +import json +import argparse +import os.path + +def read(scan, param_x, param_y, file): + # print files + goodfiles = [f for f in [file] if plot.TFileIsGood(f)] + limit = plot.MakeTChain(goodfiles, 'limit') + graph = plot.TGraph2DFromTree(limit, param_x, param_y, '2*deltaNLL', 'quantileExpected > -0.5 && deltaNLL > 0 && deltaNLL < 100') + best = plot.TGraphFromTree(limit, param_x, param_y, 'quantileExpected > -0.5 && deltaNLL == 0') + plot.RemoveGraphXDuplicates(best) + assert(best.GetN() == 1) + graph.SetName(scan) + best.SetName(scan+'_best') + # graph.Print() + return (graph, best) + +def fillTH2(hist2d, graph): + for x in xrange(1, hist2d.GetNbinsX()+1): + for y in xrange(1, hist2d.GetNbinsY()+1): + xc = hist2d.GetXaxis().GetBinCenter(x) + yc = hist2d.GetYaxis().GetBinCenter(y) + val = graph.Interpolate(xc, yc) + hist2d.SetBinContent(x, y, val) + +def fixZeros(hist2d): + for x in xrange(1, hist2d.GetNbinsX()+1): + for y in xrange(1, hist2d.GetNbinsY()+1): + xc = hist2d.GetXaxis().GetBinCenter(x) + yc = hist2d.GetYaxis().GetBinCenter(y) + if hist2d.GetBinContent(x, y) == 0: + # print 'Bin at (%f,%f) is zero!' 
% (xc, yc) + hist2d.SetBinContent(x, y, 1000) + +def makeHist(name, bins, graph2d): + len_x = graph2d.GetXmax() - graph2d.GetXmin() + binw_x = (len_x * 0.5 / (float(bins) - 1.)) - 1E-5 + len_y = graph2d.GetYmax() - graph2d.GetYmin() + binw_y = (len_y * 0.5 / (float(bins) - 1.)) - 1E-5 + hist = ROOT.TH2F(name, '', bins, graph2d.GetXmin()-binw_x, graph2d.GetXmax()+binw_x, bins, graph2d.GetYmin()-binw_y, graph2d.GetYmax()+binw_y) + return hist + +ROOT.PyConfig.IgnoreCommandLineOptions = True +ROOT.gROOT.SetBatch(ROOT.kTRUE) + +plot.ModTDRStyle(l=0.13, b=0.10) +# ROOT.gStyle.SetNdivisions(510, "XYZ") +ROOT.gStyle.SetNdivisions(506, "Y") +ROOT.gStyle.SetMarkerSize(1.0) +ROOT.gStyle.SetPadTickX(1) +ROOT.gStyle.SetPadTickY(1) + + +fin = ROOT.TFile('asymptotic_grid.root') +hist = fin.Get('h_observed') +axis = hist.Clone('axis') +axis.Reset() + +# axis = None +axis.GetXaxis().SetTitle('m_{A} (GeV)') +axis.GetYaxis().SetTitle('tan#beta') + +canv = ROOT.TCanvas('asymptotic_grid', 'asymptotic_grid') +pads = plot.OnePad() +pads[0].SetGridx(False) +pads[0].SetGridy(False) +pads[0].Draw() +axis.Draw() + +hist.Draw('COLSAME') + +conts = plot.contourFromTH2(hist, 0.05, mult = 4) +for i, c in enumerate(conts): + c.SetFillColor(ROOT.kBlue) + c.SetLineColor(ROOT.kBlack) + c.SetLineWidth(3) + pads[0].cd() + # c.Draw('F SAME') + c.Draw('L SAME') + # c.Draw('P SAME') + +# plot.DrawCMSLogo(pads[0], '#it{ATLAS}#bf{ and }CMS', '#it{LHC Run 1}', 11, 0.025, 0.035, 1.1, extraText2='#it{Preliminary}') +# plot.DrawCMSLogo(pads[0], '#it{ATLAS}#bf{+}CMS', '#it{LHC Run 1}', 11, 0.02, 0.035, 1.1, extraText2='#it{Internal}') +# pads[0].RedrawAxis() +canv.Print('.pdf') +canv.Print('.png') + + + + diff --git a/HIG15002/scripts/plotCovMatrix.py b/HIG15002/scripts/plotCovMatrix.py new file mode 100755 index 00000000000..504da573111 --- /dev/null +++ b/HIG15002/scripts/plotCovMatrix.py @@ -0,0 +1,91 @@ +#!/usr/bin/env python +import sys +import ROOT +import math +from functools import partial +import CombineHarvester.CombineTools.plotting as plot +import json +import argparse +import os.path + +ROOT.PyConfig.IgnoreCommandLineOptions = True +ROOT.gROOT.SetBatch(ROOT.kTRUE) + +plot.ModTDRStyle(width=750, height=600, l = 0.15, b = 0.07, r = 0.17) +plot.SetBirdPalette() +ROOT.gStyle.SetNdivisions(510, "XYZ") +ROOT.gStyle.SetMarkerSize(0.7) +ROOT.gStyle.SetPaintTextFormat('.2f') + + +def ColorLabels(text): + # text = text.replace('#tau#tau', '#color[2]{#tau#tau}') + # text = text.replace('WW', '#color[8]{WW}') + # text = text.replace('ZZ', '#color[9]{ZZ}') + # text = text.replace('bb', '#color[6]{bb}') + # text = text.replace('#gamma#gamma', '#color[43]{#gamma#gamma}') + return text + + +parser = argparse.ArgumentParser( + prog='plot1DScan.py', + ) + + +parser.add_argument('--output', '-o', help='output name') +parser.add_argument('--input', '-i', help='Main input file') +parser.add_argument('--label', '-l', help='Main input file') +parser.add_argument('--translate', default='texName.json', help='json file with POI name translation') +parser.add_argument('--cov', action='store_true', help='json file with POI name translation') + +args = parser.parse_args() + +if args.cov: + ROOT.gStyle.SetPaintTextFormat('.3f') + + +if args.translate is not None: + with open(args.translate) as jsonfile: + name_translate = json.load(jsonfile) + + +inputs = args.input.split(':') +fin = ROOT.TFile(inputs[0]) +hist = fin.Get(inputs[1]) + +for i in xrange(1,hist.GetXaxis().GetNbins()+1): + new_name = hist.GetXaxis().GetBinLabel(i) + if new_name in 
name_translate: + new_name = name_translate[new_name] + new_name = ColorLabels(new_name) + hist.GetXaxis().SetBinLabel(i, new_name) + +for i in xrange(1,hist.GetYaxis().GetNbins()+1): + new_name = hist.GetYaxis().GetBinLabel(i) + if new_name in name_translate: + new_name = name_translate[new_name] + new_name = ColorLabels(new_name) + hist.GetYaxis().SetBinLabel(i, new_name) + +hist.GetXaxis().LabelsOption('h') +hist.GetXaxis().SetLabelFont(62) +hist.GetYaxis().SetLabelFont(62) +hist.GetXaxis().SetLabelSize(0.04) +hist.GetYaxis().SetLabelSize(0.041) +if not args.cov: + hist.SetMinimum(-1) + hist.SetMaximum(+1) +hist.SetContour(255) + +canv = ROOT.TCanvas(args.output, args.output) +pads = plot.OnePad() + +hist.Draw('COLZTEXT') + +plot.DrawCMSLogo(pads[0], '#it{ATLAS}#bf{ and }CMS', 'LHC Run 1 Internal', 0, 0.38, 0.035, 1.2) +if args.label is not None: plot.DrawTitle(pads[0], args.label, 3) + + +canv.Print('.pdf') +canv.Print('.png') +# canv.Print('.C') diff --git a/HIG15002/scripts/plotKVKF.py b/HIG15002/scripts/plotKVKF.py new file mode 100755 index 00000000000..4dcdedede5d --- /dev/null +++ b/HIG15002/scripts/plotKVKF.py @@ -0,0 +1,339 @@ +#!/usr/bin/env python +import sys +import ROOT +import math +from functools import partial +import CombineHarvester.CombineTools.plotting as plot +import json +import argparse +import os.path + +def read(scan, param_x, param_y, file): + # print files + goodfiles = [f for f in [file] if plot.TFileIsGood(f)] + limit = plot.MakeTChain(goodfiles, 'limit') + graph = plot.TGraph2DFromTree(limit, param_x, param_y, '2*deltaNLL', 'quantileExpected > -0.5 && deltaNLL > 0 && deltaNLL < 100') + best = plot.TGraphFromTree(limit, param_x, param_y, 'quantileExpected > -0.5 && deltaNLL == 0') + plot.RemoveGraphXDuplicates(best) + assert(best.GetN() == 1) + graph.SetName(scan) + best.SetName(scan+'_best') + # graph.Print() + return (graph, best) + +def fillTH2(hist2d, graph): + for x in xrange(1, hist2d.GetNbinsX()+1): + for y in xrange(1, hist2d.GetNbinsY()+1): + xc = hist2d.GetXaxis().GetBinCenter(x) + yc = hist2d.GetYaxis().GetBinCenter(y) + val = graph.Interpolate(xc, yc) + hist2d.SetBinContent(x, y, val) + +def fixZeros(hist2d): + for x in xrange(1, hist2d.GetNbinsX()+1): + for y in xrange(1, hist2d.GetNbinsY()+1): + xc = hist2d.GetXaxis().GetBinCenter(x) + yc = hist2d.GetYaxis().GetBinCenter(y) + if hist2d.GetBinContent(x, y) == 0: + # print 'Bin at (%f,%f) is zero!' 
% (xc, yc) + hist2d.SetBinContent(x, y, 1000) + +def makeHist(name, bins, graph2d): + len_x = graph2d.GetXmax() - graph2d.GetXmin() + binw_x = (len_x * 0.5 / (float(bins) - 1.)) - 1E-5 + len_y = graph2d.GetYmax() - graph2d.GetYmin() + binw_y = (len_y * 0.5 / (float(bins) - 1.)) - 1E-5 + hist = ROOT.TH2F(name, '', bins, graph2d.GetXmin()-binw_x, graph2d.GetXmax()+binw_x, bins, graph2d.GetYmin()-binw_y, graph2d.GetYmax()+binw_y) + return hist + +SETTINGSC = { + 'tau' : { + 'xvar' : 'kappa_V_tautau', + 'yvar' : 'kappa_F_tautau', + 'fill_color' : ROOT.TColor.GetColor(204,204,102), + 'line_color' : ROOT.TColor.GetColor(51,51,0), + 'legend' : 'H#rightarrow#tau#tau', + 'multi' : 2 + }, + 'b' : { + 'xvar' : 'kappa_V_bb', + 'yvar' : 'kappa_F_bb', + 'fill_color' : ROOT.TColor.GetColor(255, 252, 102), + 'line_color' : ROOT.TColor.GetColor(102, 102, 0), + 'legend' : 'H#rightarrowbb', + 'multi' : 4 + }, + 'Z' : { + 'xvar' : 'kappa_V_ZZ', + 'yvar' : 'kappa_F_ZZ', + 'fill_color' : ROOT.TColor.GetColor(102, 102, 255), + 'line_color' : ROOT.TColor.GetColor(21, 21, 183), + 'legend' : 'H#rightarrowZZ', + 'multi' : 2 + }, + 'gam' : { + 'xvar' : 'kappa_V_gamgam', + 'yvar' : 'kappa_F_gamgam', + 'fill_color' : ROOT.TColor.GetColor(102, 204, 103), + 'line_color' : ROOT.TColor.GetColor(23, 102, 0), + 'legend' : 'H#rightarrow#gamma#gamma', + 'multi' : 2 + }, + 'W' : { + 'xvar' : 'kappa_V_WW', + 'yvar' : 'kappa_F_WW', + 'fill_color' : ROOT.TColor.GetColor(153, 153, 255), + 'line_color' : ROOT.TColor.GetColor(90, 85, 216), + 'legend' : 'H#rightarrowWW', + 'multi' : 2 + }, + 'comb' : { + 'xvar' : 'kappa_V', + 'yvar' : 'kappa_F', + 'fill_color' : ROOT.TColor.GetColor(153, 153, 153), + 'line_color' : ROOT.TColor.GetColor(0, 0, 0), + 'legend' : 'ATLAS+CMS', + 'multi' : 2 + }, + 'cms' : { + 'xvar' : 'kappa_V', + 'yvar' : 'kappa_F', + 'fill_color' : ROOT.TColor.GetColor(250, 226, 235), + 'line_color' : ROOT.kRed, + 'legend' : 'CMS', + 'multi' : 2 + }, + 'atlas' : { + 'xvar' : 'kappa_V', + 'yvar' : 'kappa_F', + 'fill_color' : ROOT.TColor.GetColor(234, 239, 255), + 'line_color' : ROOT.kBlue, + 'legend' : 'ATLAS', + 'multi' : 2 + } +} + +SETTINGSA = { + 'tau' : { + 'xvar' : 'kappa_V_tautau', + 'yvar' : 'kappa_F_tautau', + 'fill_color' : ROOT.TColor.GetColor(240, 234, 245), + 'line_color' : ROOT.TColor.GetColor(102, 51, 153), + 'legend' : 'H#rightarrow#tau#tau', + 'multi' : 4 + }, + 'b' : { + 'xvar' : 'kappa_V_bb', + 'yvar' : 'kappa_F_bb', + 'fill_color' : ROOT.TColor.GetColor(230, 249, 249), + 'line_color' : ROOT.TColor.GetColor(51 , 204, 204), + 'legend' : 'H#rightarrowbb', + 'multi' : 4 + }, + 'Z' : { + 'xvar' : 'kappa_V_ZZ', + 'yvar' : 'kappa_F_ZZ', + 'fill_color' : ROOT.TColor.GetColor(230, 249, 236 ), + 'line_color' : ROOT.TColor.GetColor(56, 204, 51 ), + 'legend' : 'H#rightarrowZZ', + 'multi' : 2 + }, + 'gam' : { + 'xvar' : 'kappa_V_gamgam', + 'yvar' : 'kappa_F_gamgam', + 'fill_color' : ROOT.TColor.GetColor(254, 234, 235 ), + 'line_color' : ROOT.TColor.GetColor(249, 51 , 51 ), + 'legend' : 'H#rightarrow#gamma#gamma', + 'multi' : 2 + }, + 'W' : { + 'xvar' : 'kappa_V_WW', + 'yvar' : 'kappa_F_WW', + 'fill_color' : ROOT.TColor.GetColor(208, 236, 249 ), + 'line_color' : ROOT.TColor.GetColor(7 , 111, 249 ), + 'legend' : 'H#rightarrowWW', + 'multi' : 2 + }, + 'comb' : { + 'xvar' : 'kappa_V', + 'yvar' : 'kappa_F', + 'fill_color' : ROOT.TColor.GetColor(200, 200, 200 ), + 'line_color' : ROOT.TColor.GetColor(0, 0, 0), + 'legend' : 'Combined', + 'multi' : 2 + }, + 'cms' : { + 'xvar' : 'kappa_V', + 'yvar' : 'kappa_F', + 'fill_color' 
: ROOT.TColor.GetColor(250, 226, 235), + 'line_color' : ROOT.kRed, + 'legend' : 'CMS', + 'multi' : 2 + }, + 'atlas' : { + 'xvar' : 'kappa_V', + 'yvar' : 'kappa_F', + 'fill_color' : ROOT.TColor.GetColor(234, 239, 255), + 'line_color' : ROOT.kBlue, + 'legend' : 'ATLAS', + 'multi' : 2 + } +} + +SETTINGS = SETTINGSA + + + +ROOT.PyConfig.IgnoreCommandLineOptions = True +ROOT.gROOT.SetBatch(ROOT.kTRUE) + +plot.ModTDRStyle(l=0.13, b=0.10) +# ROOT.gStyle.SetNdivisions(510, "XYZ") +ROOT.gStyle.SetNdivisions(506, "Y") +ROOT.gStyle.SetMarkerSize(1.0) +ROOT.gStyle.SetPadTickX(1) +ROOT.gStyle.SetPadTickY(1) + +parser = argparse.ArgumentParser() +parser.add_argument('--output', '-o', help='output name') +parser.add_argument('--files', '-f', help='named input scans') +parser.add_argument('--multi', type=int, default=1, help='scale number of bins') +parser.add_argument('--thin', type=int, default=1, help='thin graph points') +parser.add_argument('--order', default='b,tau,Z,gam,W,comb') +parser.add_argument('--x-range', default=None) +parser.add_argument('--x-axis', default='#kappa_{V}') +parser.add_argument('--y-axis', default='#kappa_{F}') +parser.add_argument('--axis-hist', default=None) +parser.add_argument('--layout', type=int, default=1) +args = parser.parse_args() + +infiles = {key: value for (key, value) in [tuple(x.split('=')) for x in args.files.split(',')]} +print infiles + +order = args.order.split(',') + +graph_test = read('test', SETTINGS[order[0]]['xvar'], SETTINGS[order[0]]['yvar'], infiles[order[0]])[0] + + +if args.axis_hist is not None: + hargs = args.axis_hist.split(',') + axis = ROOT.TH2F('hist2d', '', int(hargs[0]), float(hargs[1]), float(hargs[2]), int(hargs[3]), float(hargs[4]), float(hargs[5])) +else: + axis = makeHist('hist2d', 40 * args.multi, graph_test) + +# axis = None +axis.GetXaxis().SetTitle(args.x_axis) +axis.GetYaxis().SetTitle(args.y_axis) + +canv = ROOT.TCanvas(args.output, args.output) +pads = plot.OnePad() +pads[0].SetGridx(False) +pads[0].SetGridy(False) +pads[0].Draw() +axis.Draw() +if args.x_range is not None: + xranges = args.x_range.split(',') + axis.GetXaxis().SetRangeUser(float(xranges[0]), float(xranges[1])) + +if args.layout == 1: + legend = ROOT.TLegend(0.14, 0.53, 0.35, 0.74, '', 'NBNDC') +if args.layout == 2: + legend = ROOT.TLegend(0.15, 0.11, 0.46, 0.27, '', 'NBNDC') + legend.SetNColumns(2) + + +graphs = {} +bestfits = {} +hists = {} +conts68 = {} +conts95 = {} + +outfile = ROOT.TFile(args.output+'.root', 'RECREATE') + +for scan in order: + if scan not in infiles: continue + graphs[scan], bestfits[scan] = read(scan, SETTINGS[scan]['xvar'], SETTINGS[scan]['yvar'], infiles[scan]) + outfile.WriteTObject(graphs[scan], scan+'_graph') + outfile.WriteTObject(bestfits[scan]) + hists[scan] = makeHist(scan+'_hist', 40 * SETTINGS[scan]['multi'] * args.multi, graph_test) + fillTH2(hists[scan], graphs[scan]) + outfile.WriteTObject(hists[scan], hists[scan].GetName()+'_input') + fixZeros(hists[scan]) + outfile.WriteTObject(hists[scan], hists[scan].GetName()+'_processed') + conts68[scan] = plot.contourFromTH2(hists[scan], ROOT.Math.chisquared_quantile_c(1-0.68, 2), mult = SETTINGS[scan]['multi'] * 4) + if scan in ['cms', 'atlas', 'comb']: + conts95[scan] = plot.contourFromTH2(hists[scan], ROOT.Math.chisquared_quantile_c(1-0.95, 2)) + for i, c in enumerate(conts68[scan]): + if args.thin > 1: + newgr = ROOT.TGraph(c.GetN() / args.thin) + needLast = True + for a,p in enumerate(range(0, c.GetN(), args.thin)): + if p == c.GetN()-1: needLast = False + newgr.SetPoint(a, 
c.GetX()[p], c.GetY()[p]) + if needLast: newgr.SetPoint(newgr.GetN(), c.GetX()[c.GetN()-1], c.GetY()[c.GetN()-1]) + conts68[scan][i] = newgr + c = conts68[scan][i] + c.SetFillColor(SETTINGS[scan]['fill_color']) + c.SetLineColor(SETTINGS[scan]['line_color']) + c.SetLineWidth(3) + pads[0].cd() + c.Draw('F SAME') + outfile.WriteTObject(c, 'graph68_%s_%i' % (scan, i)) + if scan in conts95: + for i, c in enumerate(conts95[scan]): + c.SetLineColor(SETTINGS[scan]['line_color']) + c.SetLineWidth(3) + c.SetLineStyle(9) + pads[0].cd() + outfile.WriteTObject(c, 'graph95_%s_%i' % (scan, i)) + legend.AddEntry(conts68[scan][0], SETTINGS[scan]['legend'], 'F') +for scan in order: + for i, c in enumerate(conts68[scan]): + c.Draw('L SAME') + if scan in conts95: + for i, c in enumerate(conts95[scan]): + c.Draw('C SAME') + +for scan in order: + bestfits[scan].SetMarkerColor(SETTINGS[scan]['line_color']) + bestfits[scan].SetMarkerStyle(34) + bestfits[scan].SetMarkerSize(1.2) + if scan == 'comb': bestfits[scan].SetMarkerSize(1.5) + bestfits[scan].Draw('PSAME') + +sm_point = ROOT.TGraph() +sm_point.SetPoint(0, 1, 1) +# sm_point.SetMarkerColor(ROOT.TColor.GetColor(249, 71, 1)) +sm_point.SetMarkerColor(ROOT.kBlack) +sm_point.SetMarkerStyle(33) +sm_point.SetMarkerSize(2) +sm_point.Draw('PSAME') +# sm_point.SetFillColor(ROOT.TColor.GetColor(248, 255, 1)) + +legend.Draw() + +if args.layout == 1: + legend2 = ROOT.TLegend(0.15, 0.11, 0.94, 0.15, '', 'NBNDC') + legend2.SetNColumns(4) +if args.layout == 2: + legend2 = ROOT.TLegend(0.14, 0.53, 0.34, 0.74, '', 'NBNDC') + # legend2.SetNColumns(2) +legend2.AddEntry(conts68['comb'][0], '68% CL', 'L') +legend2.AddEntry(conts95['comb'][0], '95% CL', 'L') +legend2.AddEntry(bestfits['comb'], 'Best fit', 'P') +legend2.AddEntry(sm_point, 'SM expected', 'P') +legend2.SetMargin(0.4) +legend2.Draw() + +box = ROOT.TPave(0.15, 0.82, 0.41, 0.92, 0, 'NBNDC') +box.Draw() +plot.DrawCMSLogo(pads[0], '#it{ATLAS}#bf{ and }CMS', '#it{LHC Run 1}', 11, 0.025, 0.035, 1.1, extraText2='#it{Preliminary}') +# plot.DrawCMSLogo(pads[0], '#it{ATLAS}#bf{+}CMS', '#it{LHC Run 1}', 11, 0.02, 0.035, 1.1, extraText2='#it{Internal}') +# pads[0].RedrawAxis() +canv.Print('.pdf') +canv.Print('.png') +outfile.Close() + + + + diff --git a/HIG15002/scripts/printResults.py b/HIG15002/scripts/printResults.py new file mode 100755 index 00000000000..e6dc7b317a2 --- /dev/null +++ b/HIG15002/scripts/printResults.py @@ -0,0 +1,30 @@ +#!/usr/bin/env python +import sys +import json +import os + +""" +Usage: printResults.py [json files] [model] + +e.g. 
printResults.py lhc.json A1_5PD + +""" + +args = sys.argv[1:] + +file = args[0] +model = args[1] + +with open(file) as jsonfile: + js = json.load(jsonfile) + if model not in js: + print 'Model %s not in json file, available models:' % model + print [str(x) for x in js.keys()] + sys.exit(0) + js = js[model] + print '%-30s %7s %11s %11s' % ('POI', 'Val', '+1sig', '-1sig') + for POI, res in sorted(js.iteritems()): + if 'ErrorHi' in res and 'ErrorLo' in res: + print '%-30s %+7.3f %+7.3f (%i) %+7.3f (%i)' % (POI, res['Val'], res['ErrorHi'], res['ValidErrorHi'], res['ErrorLo'], res['ValidErrorLo']) + else: + print '%-30s %+7.3f' % (POI, res['Val']) \ No newline at end of file diff --git a/HIG15002/scripts/scan1DFrom2D.py b/HIG15002/scripts/scan1DFrom2D.py new file mode 100755 index 00000000000..19f6909b8bf --- /dev/null +++ b/HIG15002/scripts/scan1DFrom2D.py @@ -0,0 +1,100 @@ +#!/usr/bin/env python +import sys +import ROOT +import math +from functools import partial +import CombineHarvester.CombineTools.plotting as plot +import json +import argparse +import os.path +from array import array + +def read(scan, param_x, param_y, file): + # print files + goodfiles = [f for f in [file] if plot.TFileIsGood(f)] + limit = plot.MakeTChain(goodfiles, 'limit') + graph = plot.TGraph2DFromTree(limit, param_x, param_y, '2*deltaNLL', 'quantileExpected > -0.5 && deltaNLL > 0') + best = plot.TGraphFromTree(limit, param_x, param_y, 'quantileExpected > -0.5 && deltaNLL == 0') + plot.RemoveGraphXDuplicates(best) + assert(best.GetN() == 1) + graph.SetName(scan) + best.SetName(scan+'_best') + # graph.Print() + return (graph, best) + +def Eval(obj, y, x, params): + return obj.Interpolate(x[0], y) + + +ROOT.PyConfig.IgnoreCommandLineOptions = True +ROOT.gROOT.SetBatch(ROOT.kTRUE) + +plot.ModTDRStyle(l=0.13, b=0.10, r=0.15) +# plot.SetBirdPalette() + +# # ROOT.gStyle.SetNdivisions(510, "XYZ") +# ROOT.gStyle.SetNdivisions(506, "Y") +# ROOT.gStyle.SetMarkerSize(1.0) +# ROOT.gStyle.SetPadTickX(1) +# ROOT.gStyle.SetPadTickY(1) + +parser = argparse.ArgumentParser() +parser.add_argument('--output', '-o', help='output name') +parser.add_argument('--file', '-f', help='named input scans') +parser.add_argument('--x-range', default=None) +parser.add_argument('--x-axis', default='#kappa_{V}') +parser.add_argument('--y-axis', default='#kappa_{F}') +args = parser.parse_args() + +infile = args.file + +graph_test, best = read('test', args.x_axis, args.y_axis, infile) + +y_vals = set() +x_vals = set() + +for i in xrange(0, graph_test.GetN()): + if graph_test.GetZ()[i] > 99: graph_test.GetZ()[i] = 99 + if graph_test.GetZ()[i] == 0.: graph_test.GetZ()[i] = 99 + y_vals.add(graph_test.GetY()[i]) + x_vals.add(graph_test.GetX()[i]) + +y_list = sorted(y_vals) +print y_list + + +fout = ROOT.TFile('%s.root' % (args.output), 'RECREATE') +tout = ROOT.TTree('limit', 'limit') +a_r = array( 'f', [ 0 ] ) +a_deltaNLL = array( 'f', [ 0 ] ) +a_quantileExpected = array( 'f', [ 1 ] ) +tout.Branch( args.y_axis, a_r, '%s/f'%args.y_axis ) +tout.Branch( 'deltaNLL', a_deltaNLL, 'deltaNLL/f' ) +tout.Branch( 'quantileExpected', a_quantileExpected, 'quantileExpected/f' ) + +a_r[0] = best.GetY()[0] +tout.Fill() + +for i, yval in enumerate(y_list): + func = ROOT.TF1('fn', partial(Eval, graph_test, yval), min(x_vals), max(x_vals), 1) + min_x = func.GetMinimumX(-20 if i <= 17 else -44, 44) + min_y = func.Eval(min_x) + print '[%i] %s = %f, -2lnL = %f at %s = %s' % (i, args.y_axis, yval, min_y, args.x_axis, min_x) + if min_y > 0.: + a_r[0] = yval + a_deltaNLL[0] = min_y 
+ tout.Fill()
+
+tout.Write()
+fout.Close()
+ # if i == 14:
+ # canv = ROOT.TCanvas(args.output, args.output)
+ # pads = plot.OnePad()
+ # func.Draw('L')
+ # canv.Print('.pdf')
+ # canv.Print('.png')
+
+
+
+
diff --git a/HIG15002/scripts/texName.json b/HIG15002/scripts/texName.json new file mode 100644 index 00000000000..e0834d79f25 --- /dev/null +++ b/HIG15002/scripts/texName.json @@ -0,0 +1,167 @@
+{
+"lambda_WZ":"#lambda_{WZ}",
+"lambda_Zg":"#lambda_{Zg}",
+"lambda_bZ":"#lambda_{bZ}",
+"lambda_gamZ":"#lambda_{#gammaZ}",
+"lambda_tauZ":"#lambda_{#tauZ}",
+"lambda_tg":"#lambda_{tg}",
+"kappa_gZ":"#kappa_{gZ}",
+
+"kappa_W":"#kappa_{W}",
+"kappa_Z":"#kappa_{Z}",
+"kappa_tau":"#kappa_{#tau}",
+"kappa_t":"#kappa_{t}",
+"kappa_b":"#kappa_{b}",
+"kappa_mu":"#kappa_{#mu}",
+
+"kappa_g":"#kappa_{g}",
+"kappa_gam":"#kappa_{#gamma}",
+"BRinv":"BR_{BSM}",
+
+"kappa_V":"#kappa_{V}",
+"kappa_F":"#kappa_{F}",
+
+"kappa_V_gamgam":"#kappa_{V}^{#gamma#gamma}",
+"kappa_F_gamgam":"#kappa_{F}^{#gamma#gamma}",
+"kappa_V_WW":"#kappa_{V}^{WW}",
+"kappa_F_WW":"#kappa_{F}^{WW}",
+"kappa_V_ZZ":"#kappa_{V}^{ZZ}",
+"kappa_F_ZZ":"#kappa_{F}^{ZZ}",
+"kappa_V_tautau":"#kappa_{V}^{#tau#tau}",
+"kappa_F_tautau":"#kappa_{F}^{#tau#tau}",
+"kappa_V_bb":"#kappa_{V}^{bb}",
+"kappa_F_bb":"#kappa_{F}^{bb}",
+
+"kappa_uu":"#kappa_{uu}",
+"lambda_du":"#lambda_{du}",
+"lambda_Vu":"#lambda_{Vu}",
+"kappa_qq":"#kappa_{qq}",
+"lambda_lq":"#lambda_{lq}",
+"lambda_Vq":"#lambda_{Vq}",
+"kappa_VV":"#kappa_{VV}",
+"lambda_FV":"#lambda_{FV}",
+
+"mu_BR_WW":"#mu^{WW}",
+"mu_BR_ZZ":"#mu^{ZZ}",
+"mu_BR_bb":"#mu^{bb}",
+"mu_BR_gamgam":"#mu^{#gamma#gamma}",
+"mu_BR_tautau":"#mu^{#tau#tau}",
+"mu_BR_mumu":"#mu^{#mu#mu}",
+
+"mu_XS_ggFbbH":"#mu_{ggF}",
+"mu_XS_VBF":"#mu_{VBF}",
+"mu_XS_WH":"#mu_{WH}",
+"mu_XS_ZH":"#mu_{ZH}",
+"mu_XS_VH":"#mu_{VH}",
+"mu_XS_ttHtH":"#mu_{ttH}",
+
+"mu_V_gamgam":"#mu_{V}^{#gamma#gamma}",
+"mu_V_ZZ":"#mu_{V}^{ZZ}",
+"mu_V_WW":"#mu_{V}^{WW}",
+"mu_V_tautau":"#mu_{V}^{#tau#tau}",
+"mu_V_bb":"#mu_{V}^{bb}",
+"mu_F_gamgam":"#mu_{F}^{#gamma#gamma}",
+"mu_F_ZZ":"#mu_{F}^{ZZ}",
+"mu_F_WW":"#mu_{F}^{WW}",
+"mu_F_tautau":"#mu_{F}^{#tau#tau}",
+"mu_F_bb":"#mu_{F}^{bb}",
+
+"mu":"#mu",
+
+"mu_V_r_F":"#mu_{V}/#mu_{F}",
+
+"mu_V_r_F_WW":"#mu_{V}^{WW}/#mu_{F}^{WW}",
+"mu_V_r_F_ZZ":"#mu_{V}^{ZZ}/#mu_{F}^{ZZ}",
+"mu_V_r_F_gamgam":"#mu_{V}^{#gamma#gamma}/#mu_{F}^{#gamma#gamma}",
+"mu_V_r_F_bb":"#mu_{V}^{bb}/#mu_{F}^{bb}",
+"mu_V_r_F_tautau":"#mu_{V}^{#tau#tau}/#mu_{F}^{#tau#tau}",
+
+"mu_XS_ggF_x_BR_ZZ":"#mu_{ggF}^{ZZ}",
+"mu_XS_VBF_r_XS_ggF":"#mu_{VBF}/#mu_{ggF}",
+"mu_XS_ttH_r_XS_ggF":"#mu_{ttH}/#mu_{ggF}",
+"mu_XS_WH_r_XS_ggF":"#mu_{WH}/#mu_{ggF}",
+"mu_XS_ZH_r_XS_ggF":"#mu_{ZH}/#mu_{ggF}",
+"mu_BR_tautau_r_BR_ZZ":"#mu^{#tau#tau}/#mu^{ZZ}",
+"mu_BR_bb_r_BR_ZZ":"#mu^{bb}/#mu^{ZZ}",
+"mu_BR_gamgam_r_BR_ZZ":"#mu^{#gamma#gamma}/#mu^{ZZ}",
+"mu_BR_WW_r_BR_ZZ":"#mu^{WW}/#mu^{ZZ}",
+
+"mu_XS_ggF_x_BR_WW":"#mu_{ggF}^{WW}",
+"mu_XS_VBF_r_XS_ggF":"#mu_{VBF}/#mu_{ggF}",
+"mu_XS_ttH_r_XS_ggF":"#mu_{ttH}/#mu_{ggF}",
+"mu_XS_WH_r_XS_ggF":"#mu_{WH}/#mu_{ggF}",
+"mu_XS_ZH_r_XS_ggF":"#mu_{ZH}/#mu_{ggF}",
+"mu_BR_tautau_r_BR_WW":"#mu^{#tau#tau}/#mu^{WW}",
+"mu_BR_bb_r_BR_WW":"#mu^{bb}/#mu^{WW}",
+"mu_BR_gamgam_r_BR_WW":"#mu^{#gamma#gamma}/#mu^{WW}",
+"mu_BR_ZZ_r_BR_WW":"#mu^{ZZ}/#mu^{WW}",
+
+"mu_XS7_r_XS8_VBF":"#mu_{VBF}^{7/8}",
+"mu_XS7_r_XS8_WH":"#mu_{WH}^{7/8}",
+"mu_XS7_r_XS8_ZH":"#mu_{ZH}^{7/8}",
+"mu_XS7_r_XS8_ggF":"#mu_{ggF}^{7/8}",
+"mu_XS7_r_XS8_ttH":"#mu_{ttH}^{7/8}",
+
+"mu_XS_ggF_x_BR_WW":"#mu_{ggF}^{WW}",
+"mu_XS_VBF_x_BR_tautau":"#mu_{VBF}^{#tau#tau}", + +"mu_XS_WH_r_XS_VBF":"#mu_{WH}/#mu_{VBF}", +"mu_XS_ZH_r_XS_WH":"#mu_{ZH}/#mu_{WH}", +"mu_BR_bb_r_BR_tautau":"#mu^{bb}/#mu^{#tau#tau}", + +"mu_WW":"#mu^{WW}", +"mu_ZZ":"#mu^{ZZ}", +"mu_gamgam":"#mu^{#gamma#gamma}", +"mu_tautau":"#mu^{#tau#tau}", +"mu_bb":"#mu^{bb}", +"l_VBF_gamgam":"#lambda_{VBF}^{#gamma#gamma}", +"l_VBF_WW":"#lambda_{VBF}^{WW}", +"l_VBF_ZZ":"#lambda_{VBF}^{ZZ}", +"l_VBF_tautau":"#lambda_{VBF}^{#tau#tau}", +"l_WH_gamgam":"#lambda_{WH}^{#gamma#gamma}", +"l_WH_WW":"#lambda_{WH}^{WW}", +"l_WH_ZZ":"#lambda_{WH}^{ZZ}", +"l_WH_tautau":"#lambda_{WH}^{#tau#tau}", +"l_WH_bb":"#lambda_{WH}^{bb}", +"l_ZH_gamgam":"#lambda_{ZH}^{#gamma#gamma}", +"l_ZH_WW":"#lambda_{ZH}^{WW}", +"l_ZH_ZZ":"#lambda_{ZH}^{ZZ}", +"l_ZH_tautau":"#lambda_{ZH}^{#tau#tau}", +"l_ZH_bb":"#lambda_{ZH}^{bb}", +"l_ttH_gamgam":"#lambda_{ttH}^{#gamma#gamma}", +"l_ttH_WW":"#lambda_{ttH}^{WW}", +"l_ttH_ZZ":"#lambda_{ttH}^{ZZ}", +"l_ttH_tautau":"#lambda_{ttH}^{#tau#tau}", +"l_ttH_bb":"#lambda_{ttH}^{bb}", +"l_VBF":"#lambda_{VBF}", +"l_WH":"#lambda_{WH}", +"l_ZH":"#lambda_{ZH}", +"l_ttH":"#lambda_{ttH}", + +"mu_XS_ggFbbH_BR_WW" : "#mu_{ggF}^{WW}", +"mu_XS_VBF_BR_WW" : "#mu_{VBF}^{WW}", +"mu_XS_WH_BR_WW" : "#mu_{WH}^{WW}", +"mu_XS_ZH_BR_WW" : "#mu_{ZH}^{WW}", +"mu_XS_ttHtH_BR_WW" : "#mu_{ttH}^{WW}", +"mu_XS_ggFbbH_BR_ZZ" : "#mu_{ggF}^{ZZ}", +"mu_XS_VBF_BR_ZZ" : "#mu_{VBF}^{ZZ}", +"mu_XS_WH_BR_ZZ" : "#mu_{WH}^{ZZ}", +"mu_XS_ZH_BR_ZZ" : "#mu_{ZH}^{ZZ}", +"mu_XS_ttHtH_BR_ZZ" : "#mu_{ttH}^{ZZ}", +"mu_XS_WH_BR_bb" : "#mu_{WH}^{bb}", +"mu_XS_ZH_BR_bb" : "#mu_{ZH}^{bb}", +"mu_XS_ttHtH_BR_bb" : "#mu_{ttH}^{bb}", +"mu_XS_ggFbbH_BR_gamgam" : "#mu_{ggF}^{#gamma#gamma}", +"mu_XS_VBF_BR_gamgam" : "#mu_{VBF}^{#gamma#gamma}", +"mu_XS_WH_BR_gamgam" : "#mu_{WH}^{#gamma#gamma}", +"mu_XS_ZH_BR_gamgam" : "#mu_{ZH}^{#gamma#gamma}", +"mu_XS_ttHtH_BR_gamgam" : "#mu_{ttH}^{#gamma#gamma}", +"mu_XS_ggFbbH_BR_tautau" : "#mu_{ggF}^{#tau#tau}", +"mu_XS_VBF_BR_tautau" : "#mu_{VBF}^{#tau#tau}", +"mu_XS_WH_BR_tautau" : "#mu_{WH}^{#tau#tau}", +"mu_XS_ZH_BR_tautau" : "#mu_{ZH}^{#tau#tau}", +"mu_XS_ttHtH_BR_tautau" : "#mu_{ttH}^{#tau#tau}", + +"M" : "M", +"eps" : "#epsilon" +} diff --git a/MSSM_13TeV/BuildFile.xml b/MSSM_13TeV/BuildFile.xml new file mode 100644 index 00000000000..abdd6096c29 --- /dev/null +++ b/MSSM_13TeV/BuildFile.xml @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + + + + + diff --git a/MSSM_13TeV/bin/BuildFile.xml b/MSSM_13TeV/bin/BuildFile.xml new file mode 100644 index 00000000000..517bd54b4df --- /dev/null +++ b/MSSM_13TeV/bin/BuildFile.xml @@ -0,0 +1,16 @@ + + + + + + + + + + + + + + + + diff --git a/MSSM_13TeV/bin/MorphingMSSMRun2-NoModel.cpp b/MSSM_13TeV/bin/MorphingMSSMRun2-NoModel.cpp new file mode 100644 index 00000000000..637c07cf57f --- /dev/null +++ b/MSSM_13TeV/bin/MorphingMSSMRun2-NoModel.cpp @@ -0,0 +1,192 @@ +#include +#include +#include +#include +#include +#include +#include +#include "boost/algorithm/string/predicate.hpp" +#include "boost/program_options.hpp" +#include "CombineHarvester/CombineTools/interface/CombineHarvester.h" +#include "CombineHarvester/CombineTools/interface/Observation.h" +#include "CombineHarvester/CombineTools/interface/Process.h" +#include "CombineHarvester/CombineTools/interface/Utilities.h" +#include "CombineHarvester/CombineTools/interface/Systematics.h" +#include "CombineHarvester/CombineTools/interface/BinByBin.h" +#include "CombineHarvester/CombinePdfs/interface/MorphFunctions.h" +#include 
"CombineHarvester/CombineTools/interface/HttSystematics.h" +#include "RooWorkspace.h" +#include "RooRealVar.h" +#include "TH2.h" + +using namespace std; +using boost::starts_with; +namespace po = boost::program_options; + +int main(int argc, char** argv) { + // First define the location of the "auxiliaries" directory where we can + // source the input files containing the datacard shapes + string SM125= ""; + string mass = "mA"; + po::variables_map vm; + po::options_description config("configuration"); + config.add_options() + ("mass,m", po::value(&mass)->default_value(mass)) + ("SM125,h", po::value(&SM125)->default_value(SM125)); + po::store(po::command_line_parser(argc, argv).options(config).run(), vm); + po::notify(vm); + + typedef vector VString; + typedef vector> Categories; + string input_dir = + string(getenv("CMSSW_BASE")) + "/src/auxiliaries/shapes/Imperial/"; + + VString chns = + {"mt","et"}; + + RooRealVar mA(mass.c_str(), mass.c_str(), 160., 1000.); + RooRealVar mH("mH", "mH", 160., 1000.); + RooRealVar mh("mh", "mh", 160., 1000.); + + map bkg_procs; + bkg_procs["et"] = {"W", "QCD", "ZL", "ZJ", "TT", "VV","ZTT"}; + bkg_procs["mt"] = {"W", "QCD", "ZL", "ZJ", "TT", "VV","ZTT"}; +// bkg_procs["em"] = {"QCD", "ZLL", "TT", "VV", "W","ZTT"}; + + + // Create an empty CombineHarvester instance that will hold all of the + // datacard configuration and histograms etc. + ch::CombineHarvester cb; + // Uncomment this next line to see a *lot* of debug information + // cb.SetVerbosity(3); + + // Here we will just define two categories for an 8TeV analysis. Each entry in + // the vector below specifies a bin name and corresponding bin_id. + // + map cats; + cats["et_13TeV"] = { + {8, "et_nobtagnotwoprong"}, + {9, "et_btagnotwoprong"} + }; + + cats["mt_13TeV"] = { + {8, "mt_nobtagnotwoprong"}, + {9, "mt_btagnotwoprong"} + }; + + + vector masses = {"90","100","110","120","130","140","160","180", "200", "250", "300", "350", "400", "450", "500", "600", "700", "800", "900","1000"}; + + map signal_types = { + {"ggH", {"ggh_htautau", "ggH_Htautau", "ggA_Atautau"}}, + {"bbH", {"bbh_htautau", "bbH_Htautau", "bbA_Atautau"}} + }; + if(mass=="MH"){ + signal_types = { + {"ggH", {"ggH"}}, + {"bbH", {"bbH"}} + }; + } + vector sig_procs = {"ggH","bbH"}; + for(auto chn : chns){ + cb.AddObservations({"*"}, {"htt"}, {"13TeV"}, {chn}, cats[chn+"_13TeV"]); + + cb.AddProcesses({"*"}, {"htt"}, {"13TeV"}, {chn}, bkg_procs[chn], cats[chn+"_13TeV"], false); + + cb.AddProcesses(masses, {"htt"}, {"13TeV"}, {chn}, signal_types["ggH"], cats[chn+"_13TeV"], true); + cb.AddProcesses(masses, {"htt"}, {"13TeV"}, {chn}, signal_types["bbH"], cats[chn+"_13TeV"], true); + } + + + ch::AddMSSMRun2Systematics(cb); + + //! 
[part7] + for (string chn:chns){ + cb.cp().channel({chn}).backgrounds().ExtractShapes( + input_dir + "htt_"+chn+".inputs-mssm-13TeV.root", + "$BIN/$PROCESS", + "$BIN/$PROCESS_$SYSTEMATIC"); + cb.cp().channel({chn}).process(signal_types["ggH"]).ExtractShapes( + input_dir + "htt_"+chn+".inputs-mssm-13TeV.root", + "$BIN/ggH$MASS", + "$BIN/ggH$MASS_$SYSTEMATIC"); + cb.cp().channel({chn}).process(signal_types["bbH"]).ExtractShapes( + input_dir + "htt_"+chn+".inputs-mssm-13TeV.root", + "$BIN/bbH$MASS", + "$BIN/bbH$MASS_$SYSTEMATIC"); + } + //Replacing observation with the sum of the backgrounds (asimov) - nice to ensure blinding + auto bins = cb.cp().bin_set(); + for (auto b : bins) { + cb.cp().bin({b}).ForEachObs([&](ch::Observation *obs) { + obs->set_shape(cb.cp().bin({b}).backgrounds().GetShape(), true); + }); + } + + + // This function modifies every entry to have a standardised bin name of + // the form: {analysis}_{channel}_{bin_id}_{era} + // which is commonly used in the htt analyses + ch::SetStandardBinNames(cb); + //! [part8] + + //! [part9] + // First we generate a set of bin names: + RooWorkspace ws("htt", "htt"); + + TFile demo("htt_mssm_demo.root", "RECREATE"); + + bool do_morphing = true; + map mass_var = { + {"ggh_htautau", &mh}, {"ggH_Htautau", &mH}, {"ggA_Atautau", &mA}, + {"bbh_htautau", &mh}, {"bbH_Htautau", &mH}, {"bbA_Atautau", &mA} + }; + if(mass=="MH"){ + mass_var = { + {"ggH", &mA}, + {"bbH", &mA} + }; + } + if (do_morphing) { + auto bins = cb.bin_set(); + for (auto b : bins) { + auto procs = cb.cp().bin({b}).signals().process_set(); + for (auto p : procs) { + ch::BuildRooMorphing(ws, cb, b, p, *(mass_var[p]), + "norm", true, true, false, &demo); + } + } + } + demo.Close(); + cb.AddWorkspace(ws); + //cb.cp().signals().ExtractPdfs(cb, "htt", "$BIN_$PROCESS_morph"); + cb.cp().process(ch::JoinStr({signal_types["ggH"], signal_types["bbH"]})).ExtractPdfs(cb, "htt", "$BIN_$PROCESS_morph"); + cb.PrintAll(); + + string folder = "output/mssm_run2/cmb"; + boost::filesystem::create_directories(folder); + + cout << "Writing datacards ..."; + TFile output((folder + "/htt_mssm_input.root").c_str(), "RECREATE"); + for (string chn : chns) { + auto bins = cb.cp().channel({chn}).bin_set(); + for (auto b : bins) { + cb.cp().channel({chn}).bin({b}).mass({"*"}).WriteDatacard(folder + "/" + b + ".txt", output); + } + } + cb.cp().mass({"*"}).WriteDatacard(folder + "/htt_mssm.txt", output); + output.Close(); + + for (string chn : chns) { + string folderchn = "output/mssm_run2/"+chn; + boost::filesystem::create_directories(folderchn); + TFile outputchn((folderchn + "/htt_"+chn+"_mssm_input.root").c_str(), "RECREATE"); + cb.cp().channel({chn}).mass({"*"}).WriteDatacard(folderchn + "/htt_"+chn+"_mssm.txt", outputchn); + outputchn.Close(); + } + + cout << " done\n"; + + +} + diff --git a/Run1BSMComb/BuildFile.xml b/Run1BSMComb/BuildFile.xml new file mode 100644 index 00000000000..abdd6096c29 --- /dev/null +++ b/Run1BSMComb/BuildFile.xml @@ -0,0 +1,18 @@ + + + + + + + + + + + + + + + + + + diff --git a/CombineTools/bin/AZhExample.cpp b/Run1BSMComb/bin/AZh.cpp similarity index 89% rename from CombineTools/bin/AZhExample.cpp rename to Run1BSMComb/bin/AZh.cpp index 3347d6a4a41..da953e6c3ee 100644 --- a/CombineTools/bin/AZhExample.cpp +++ b/Run1BSMComb/bin/AZh.cpp @@ -84,6 +84,15 @@ int main() { } } + //This part is rather hacky, but needed to reproduce the legacy results. + //The legacy results are set in fb, so the signal processes are all divided + //by 1000. 
Here the precision is also explicitly set to match that + //in the legacy datacards. + + cb.cp().process({"AZh"}).ForEachProc([&](ch::Process *proc) { + proc->set_rate(std::roundf(((proc->rate() / 1000)*10000.0))/10000.0 ); + + }); cout << ">> Generating bbb uncertainties...\n"; auto bbb = ch::BinByBinFactory() @@ -96,7 +105,7 @@ int main() { cout << ">> Setting standardised bin names...\n"; ch::SetStandardBinNames(cb); - string folder = "output/AZh_cards"; + string folder = "output/AZh_cards_fb"; boost::filesystem::create_directories(folder); for (string chn :chns){ boost::filesystem::create_directories(folder+"/"+chn); diff --git a/CombinePdfs/bin/AdaptChargedHiggs.cpp b/Run1BSMComb/bin/AdaptChargedHiggs.cpp similarity index 100% rename from CombinePdfs/bin/AdaptChargedHiggs.cpp rename to Run1BSMComb/bin/AdaptChargedHiggs.cpp diff --git a/Run1BSMComb/bin/AdaptHbb.cpp b/Run1BSMComb/bin/AdaptHbb.cpp new file mode 100644 index 00000000000..31e43a18ba5 --- /dev/null +++ b/Run1BSMComb/bin/AdaptHbb.cpp @@ -0,0 +1,192 @@ +#include +#include +#include "boost/algorithm/string/predicate.hpp" +#include "boost/program_options.hpp" +#include "CombineHarvester/CombineTools/interface/CombineHarvester.h" +#include "CombineHarvester/CombineTools/interface/Utilities.h" +#include "CombineHarvester/CombineTools/interface/CardWriter.h" +#include "CombineHarvester/CombineTools/interface/TFileIO.h" +#include "CombineHarvester/CombineTools/interface/CopyTools.h" +#include "CombineHarvester/CombinePdfs/interface/MorphFunctions.h" + +#include "RooWorkspace.h" +#include "RooRealVar.h" + +using namespace std; +using boost::starts_with; +namespace po = boost::program_options; + +int main(int argc, char* argv[]) { + std::string mass = "mA"; + po::variables_map vm; + po::options_description config("configuration"); + bool single_resonance = false; + config.add_options() + ("mass,m", po::value(&mass)->default_value(mass)) + ("single,s", po::value(&single_resonance)->implicit_value(true)); + po::store(po::command_line_parser(argc, argv).options(config).run(), vm); + po::notify(vm); + + typedef vector> Categories; + typedef vector VString; + + // We will need to source some inputs from the "auxiliaries" repo + string auxiliaries = string(getenv("CMSSW_BASE")) + "/src/auxiliaries/"; + string aux_cards = "Hbb"; + + auto masses = ch::MassesFromRange("100,140,160,200,300,350,400,500"); + + // RooFit will be quite noisy if we don't set this + // RooMsgService::instance().setGlobalKillBelow(RooFit::WARNING); + + RooRealVar mA(mass.c_str(), mass.c_str(), 100., 500.); + RooRealVar mH("mH", "mH", 100., 500.); + RooRealVar mh("mh", "mh", 100., 500.); + + TH1::AddDirectory(false); + ch::CombineHarvester cb; + for (auto m : masses) { + cb.ParseDatacard( + aux_cards + "/Comb_7TeV_HIG14017_HighMass2012_Packed_M"+m+"_card_theoryUnc.txt", "hbb", + "", "hbb", 0, m); + } + + + // Rename categories and harmonize signal process names + // ch1 --> inclusive (8 TeV) + // ch2 --> semileptonic (7 TeV) + // ch3 --> hadronic (7 TeV) + // In the 7TeV the analysis is split into low mass (100, 120, 160) and + // high mass (200,300,250) event selections + cb.ForEachObj([](ch::Object *obj) { + double m = boost::lexical_cast(obj->mass()); + if (obj->bin() == "ch1" || obj->bin() == "bbHTo4b") { // it's bbHTo4b if we take the 8 TeV only cards + obj->set_bin("hbb_inclusive_8TeV"); + obj->set_era("8TeV"); + } else if (obj->bin() == "ch2" && m < 180.) 
{ + obj->set_bin("hbb_semilept_low_7TeV"); + obj->set_era("7TeV"); + } else if (obj->bin() == "ch2" && m > 180.) { + obj->set_bin("hbb_semilept_high_7TeV"); + obj->set_era("7TeV"); + } else if (obj->bin() == "ch3" && m < 180.) { + obj->set_bin("hbb_hadronic_low_7TeV"); + obj->set_era("7TeV"); + } else if (obj->bin() == "ch3" && m > 180.) { + obj->set_bin("hbb_hadronic_high_7TeV"); + obj->set_era("7TeV"); + } + if (obj->process() == "sgnBBB" || obj->process() == "bbH") { + obj->set_process("bbA_Abb"); + } + }); + + for (auto const& bin : cb.bin_set()) { + auto bin_masses = cb.cp().bin({bin}).mass_set(); + if (bin_masses.size() == 0) continue; + auto collapse = *(bin_masses.begin()); + std::cout << "Collapse procs in bin " << bin << " to mass point " << collapse << "\n"; + cb.FilterAll([&](ch::Object *obj) { + return (obj->bin() == bin && !obj->signal() && obj->mass() != collapse); + }); + cb.cp().bin({bin}).ForEachObj([&](ch::Object *obj) { + if (!obj->signal()) obj->set_mass("*"); + }); + } + + // Unscale the signal cross sections + std::map hmap; + TFile model_file_8TeV(TString(auxiliaries) + + "models/out.mhmax-mu+200-8TeV-tanbHigh-nnlo.root"); + hmap["h_xs_bbA_8TeV"] = (TH2F*)(gDirectory->Get("h_bbH_xsec_A")->Clone()); + hmap["h_xs_bbH_8TeV"] = (TH2F*)(gDirectory->Get("h_bbH_xsec_H")->Clone()); + hmap["h_xs_bbh_8TeV"] = (TH2F*)(gDirectory->Get("h_bbH_xsec_h")->Clone()); + hmap["h_br_Abb_8TeV"] = (TH2F*)(gDirectory->Get("h_brbb_A")->Clone()); + hmap["h_br_Hbb_8TeV"] = (TH2F*)(gDirectory->Get("h_brbb_H")->Clone()); + hmap["h_br_hbb_8TeV"] = (TH2F*)(gDirectory->Get("h_brbb_h")->Clone()); + model_file_8TeV.Close(); + TFile model_file_7TeV(TString(auxiliaries) + + "models/out.mhmax-mu+200-7TeV-tanbHigh-nnlo.root"); + hmap["h_xs_bbA_7TeV"] = (TH2F*)(gDirectory->Get("h_bbH_xsec_A")->Clone()); + hmap["h_xs_bbH_7TeV"] = (TH2F*)(gDirectory->Get("h_bbH_xsec_H")->Clone()); + hmap["h_xs_bbh_7TeV"] = (TH2F*)(gDirectory->Get("h_bbH_xsec_h")->Clone()); + hmap["h_br_Abb_7TeV"] = (TH2F*)(gDirectory->Get("h_brbb_A")->Clone()); + hmap["h_br_Hbb_7TeV"] = (TH2F*)(gDirectory->Get("h_brbb_H")->Clone()); + hmap["h_br_hbb_7TeV"] = (TH2F*)(gDirectory->Get("h_brbb_h")->Clone()); + model_file_7TeV.Close(); + + double fixed_tanb = 30.; + + cb.cp().signals().ForEachProc([&](ch::Process *p) { + double m = boost::lexical_cast(p->mass()); + auto e = p->era(); + double tot = hmap["h_xs_bbA_"+e]->GetBinContent(hmap["h_xs_bbA_"+e]->FindBin(m, fixed_tanb)) * + hmap["h_br_Abb_"+e]->GetBinContent(hmap["h_br_Abb_"+e]->FindBin(m, fixed_tanb)); + if (m < 125.) { + tot += hmap["h_xs_bbh_"+e]->GetBinContent(hmap["h_xs_bbh_"+e]->FindBin(m, fixed_tanb)) * + hmap["h_br_hbb_"+e]->GetBinContent(hmap["h_br_hbb_"+e]->FindBin(m, fixed_tanb)); + } else if (m < 135.) 
{ + tot += hmap["h_xs_bbh_"+e]->GetBinContent(hmap["h_xs_bbh_"+e]->FindBin(m, fixed_tanb)) * + hmap["h_br_hbb_"+e]->GetBinContent(hmap["h_br_hbb_"+e]->FindBin(m, fixed_tanb)); + tot += hmap["h_xs_bbH_"+e]->GetBinContent(hmap["h_xs_bbH_"+e]->FindBin(m, fixed_tanb)) * + hmap["h_br_Hbb_"+e]->GetBinContent(hmap["h_br_Hbb_"+e]->FindBin(m, fixed_tanb)); + } else { + tot += hmap["h_xs_bbH_"+e]->GetBinContent(hmap["h_xs_bbH_"+e]->FindBin(m, fixed_tanb)) * + hmap["h_br_Hbb_"+e]->GetBinContent(hmap["h_br_Hbb_"+e]->FindBin(m, fixed_tanb)); + } + p->set_rate(p->rate() / tot); + }); + + // Drop signal theory uncertainties + cb.syst_name({"QCDscale", "pdf_gg", "QCD_scale", "PDF"}, false); + + // Now clone signal procs to create the H and the h + ch::CloneProcsAndSysts(cb.cp().process({"bbA_Abb"}), cb, [](ch::Object *p) { + p->set_process("bbH_Hbb"); + }); + ch::CloneProcsAndSysts(cb.cp().process({"bbA_Abb"}), cb, [](ch::Object *p) { + p->set_process("bbh_hbb"); + }); + + RooWorkspace ws("hbb", "hbb"); + TFile debug("hbb_morphing_debug.root", "RECREATE"); + + map mass_var = { + {"bbh_hbb", &mh}, {"bbH_Hbb", &mH}, {"bbA_Abb", &mA} + }; + + // If we're only doing a one-bump search we will just drop the other signal + // processes we created + if (single_resonance) { + cb.process({"bbh_hbb", "bbH_Hbb"}, false); + } + + ch::CardWriter writer("$TAG/$MASS/$BIN.txt", "$TAG/common/$BIN.input.root"); + writer.WriteCards("output/hbb", cb); + + + bool do_morphing = true; + if (do_morphing) { + auto bins = cb.bin_set(); + for (auto b : bins) { + auto procs = cb.cp().bin({b}).signals().process_set(); + for (auto p : procs) { + ch::BuildRooMorphing(ws, cb, b, p, *(mass_var.at(p)), + "norm", false, true, false, &debug); + } + } + } + debug.Close(); + cb.AddWorkspace(ws); + cb.cp().signals().ExtractPdfs(cb, "hbb", "$BIN_$PROCESS_morph"); + + + cb.PrintAll(); + + ch::CardWriter writer2("$TAG/morphed/$BIN.txt", "$TAG/common/hbb.input.root"); + writer2.SetVerbosity(1); + writer2.SetWildcardMasses({}); + writer2.WriteCards("output/hbb", cb); +} + + diff --git a/Run1BSMComb/bin/BuildFile.xml b/Run1BSMComb/bin/BuildFile.xml new file mode 100644 index 00000000000..24fb0bc68a7 --- /dev/null +++ b/Run1BSMComb/bin/BuildFile.xml @@ -0,0 +1,23 @@ + + + + + + + + + + + + + + + + + + + + + + + diff --git a/CombineTools/bin/HhhExample.cpp b/Run1BSMComb/bin/Hhh.cpp similarity index 100% rename from CombineTools/bin/HhhExample.cpp rename to Run1BSMComb/bin/Hhh.cpp diff --git a/CombinePdfs/bin/AZhMorphing-NoModel.cpp b/Run1BSMComb/bin/MorphingAZh.cpp similarity index 96% rename from CombinePdfs/bin/AZhMorphing-NoModel.cpp rename to Run1BSMComb/bin/MorphingAZh.cpp index bec5f98ea19..17d5ca6dc0f 100644 --- a/CombinePdfs/bin/AZhMorphing-NoModel.cpp +++ b/Run1BSMComb/bin/MorphingAZh.cpp @@ -102,6 +102,10 @@ int main() { } } + cb.cp().signals().ForEachProc([&](ch::Process *proc) { + proc->set_rate(std::roundf(((proc->rate() / 1000)*10000.0))/10000.0 ); + + }); cout << ">> Generating bbb uncertainties...\n"; auto bbb = ch::BinByBinFactory() diff --git a/CombinePdfs/bin/HhhMorphingExample-NoModel.cpp b/Run1BSMComb/bin/MorphingHhh.cpp similarity index 100% rename from CombinePdfs/bin/HhhMorphingExample-NoModel.cpp rename to Run1BSMComb/bin/MorphingHhh.cpp diff --git a/CombinePdfs/bin/MorphingMSSM-NoModel.cpp b/Run1BSMComb/bin/MorphingMSSMLegacy.cpp similarity index 81% rename from CombinePdfs/bin/MorphingMSSM-NoModel.cpp rename to Run1BSMComb/bin/MorphingMSSMLegacy.cpp index e7da005f89d..aef5be20c60 100644 --- 
a/CombinePdfs/bin/MorphingMSSM-NoModel.cpp +++ b/Run1BSMComb/bin/MorphingMSSMLegacy.cpp @@ -1,6 +1,8 @@ #include #include #include +#include "boost/algorithm/string/predicate.hpp" +#include "boost/program_options.hpp" #include "CombineHarvester/CombineTools/interface/CombineHarvester.h" #include "CombineHarvester/CombineTools/interface/Utilities.h" #include "CombineHarvester/CombineTools/interface/TFileIO.h" @@ -12,19 +14,31 @@ #include "TH2.h" using namespace std; +using boost::starts_with; +namespace po = boost::program_options; -int main() { +int main(int argc, char** argv) { typedef vector> Categories; typedef vector VString; + string SM125= ""; + string mass = "mA"; + po::variables_map vm; + po::options_description config("configuration"); + config.add_options() + ("mass,m", po::value(&mass)->default_value(mass)) + ("SM125,h", po::value(&SM125)->default_value(SM125)); + po::store(po::command_line_parser(argc, argv).options(config).run(), vm); + po::notify(vm); // We will need to source some inputs from the "auxiliaries" repo string auxiliaries = string(getenv("CMSSW_BASE")) + "/src/auxiliaries/"; string aux_shapes = auxiliaries +"shapes/"; + string input_dir = string(getenv("CMSSW_BASE")) + "/src/CombineHarvester/CombineTools/input"; // RooFit will be quite noisy if we don't set this // RooMsgService::instance().setGlobalKillBelow(RooFit::WARNING); - RooRealVar mA("mA", "mA", 90., 1000.); + RooRealVar mA(mass.c_str(), mass.c_str(), 90., 1000.); RooRealVar mH("mH", "mH", 90., 1000.); RooRealVar mh("mh", "mh", 90., 1000.); @@ -55,6 +69,12 @@ int main() { {"ggH", {"ggh_htautau", "ggH_Htautau", "ggA_Atautau"}}, {"bbH", {"bbh_htautau", "bbH_Htautau", "bbA_Atautau"}} }; + if(mass=="MH"){ + signal_types = { + {"ggH", {"ggH"}}, + {"bbH", {"bbH"}} + }; + } cb.AddProcesses(masses, {"htt"}, {"8TeV"}, {"mt"}, signal_types["ggH"], {mt_cats[0]}, true); cb.AddProcesses(masses, {"htt"}, {"8TeV"}, {"mt"}, @@ -91,7 +111,7 @@ int main() { for (string p : {"ggH", "bbH"}) { cout << "Scaling for process " << p << " and era " << e << "\n"; auto gr = ch::TGraphFromTable( - "input/xsecs_brs/mssm_" + p + "_" + e + "_accept.txt", "mPhi", + input_dir+"/xsecs_brs/mssm_" + p + "_" + e + "_accept.txt", "mPhi", "accept"); cb.cp().process(signal_types[p]).era({e}).ForEachProc([&](ch::Process *proc) { double m = boost::lexical_cast(proc->mass()); @@ -113,6 +133,12 @@ int main() { {"ggh_htautau", &mh}, {"ggH_Htautau", &mH}, {"ggA_Atautau", &mA}, {"bbh_htautau", &mh}, {"bbH_Htautau", &mH}, {"bbA_Atautau", &mA} }; + if(mass=="MH"){ + mass_var = { + {"ggH", &mA}, + {"bbH", &mA} + }; + } if (do_morphing) { auto bins = cb.bin_set(); for (auto b : bins) { @@ -125,7 +151,7 @@ int main() { } demo.Close(); cb.AddWorkspace(ws); - cb.cp().signals().ExtractPdfs(cb, "htt", "$BIN_$PROCESS_morph"); + cb.cp().process(ch::JoinStr({signal_types["ggH"], signal_types["bbH"]})).ExtractPdfs(cb, "htt", "$BIN_$PROCESS_morph"); cb.PrintAll(); string folder = "output/mssm_nomodel"; diff --git a/CombinePdfs/bin/MorphingMSSMUpdate-NoModel.cpp b/Run1BSMComb/bin/MorphingMSSMUpdate.cpp similarity index 97% rename from CombinePdfs/bin/MorphingMSSMUpdate-NoModel.cpp rename to Run1BSMComb/bin/MorphingMSSMUpdate.cpp index 2c302885687..80ed054adb6 100644 --- a/CombinePdfs/bin/MorphingMSSMUpdate-NoModel.cpp +++ b/Run1BSMComb/bin/MorphingMSSMUpdate.cpp @@ -95,22 +95,26 @@ int main(int argc, char** argv) { cout << " done\n"; cout << "Adding signal processes..."; + //! 
[part1] // Unlike in previous MSSM H->tautau analyses we will create a separate // process for each Higgs in the datacards map signal_types = { {"ggH", {"ggh_htautau", "ggH_Htautau", "ggA_Atautau"}}, {"bbH", {"bbh_htautau", "bbH_Htautau", "bbA_Atautau"}} }; + //! [part1] if(mass=="MH"){ signal_types = { {"ggH", {"ggH"}}, {"bbH", {"bbH"}} }; } + //! [part2] for (auto chn : chns) { cb.AddProcesses(masses, {"htt"}, {"8TeV"}, {chn}, signal_types["ggH"], cats[chn+"_8TeV"], true); cb.AddProcesses(masses, {"htt"}, {"8TeV"}, {chn}, signal_types["bbH"], cats[chn+"_8TeV"], true); } + //! [part2] cout << " done\n"; cout << "Adding systematic uncertainties..."; @@ -123,17 +127,19 @@ int main(int argc, char** argv) { cout << "Extracting histograms from input root files..."; for (string chn : chns) { string file = aux_shapes + input_folders[chn] + "/htt_" + chn + - ".inputs-mssm-" + "8TeV" + "-0.root"; + ".inputs-mssm-" + "8TeV" + "-0-CH-HIG-14-029.root"; cb.cp().channel({chn}).era({"8TeV"}).backgrounds().ExtractShapes (file, "$CHANNEL/$PROCESS", "$CHANNEL/$PROCESS_$SYSTEMATIC"); if(SM125==string("signal_SM125")) cb.cp().channel({chn}).era({"8TeV"}).process(SM_procs).ExtractShapes(file, "$BIN/$PROCESS", "$BIN/$PROCESS_$SYSTEMATIC"); // We have to map each Higgs signal process to the same histogram, i.e: // {ggh, ggH, ggA} --> ggH // {bbh, bbH, bbA} --> bbH + //! [part3] cb.cp().channel({chn}).era({"8TeV"}).process(signal_types["ggH"]).ExtractShapes (file, "$CHANNEL/ggH$MASS", "$CHANNEL/ggH$MASS_$SYSTEMATIC"); cb.cp().channel({chn}).era({"8TeV"}).process(signal_types["bbH"]).ExtractShapes (file, "$CHANNEL/bbH$MASS", "$CHANNEL/bbH$MASS_$SYSTEMATIC"); + //! [part3] } cout << " done\n"; @@ -142,7 +148,7 @@ int main(int argc, char** argv) { for (string p : {"ggH", "bbH"}) { cout << "Scaling for process " << p << " and era " << e << "\n"; auto gr = ch::TGraphFromTable( - "input/xsecs_brs/mssm_" + p + "_" + e + "_accept.txt", "mPhi", + input_dir+"/xsecs_brs/mssm_" + p + "_" + e + "_accept.txt", "mPhi", "accept"); cb.cp().process(signal_types[p]).era({e}).ForEachProc([&](ch::Process *proc) { double m = boost::lexical_cast(proc->mass()); @@ -180,6 +186,7 @@ int main(int argc, char** argv) { TFile demo("htt_mssm_demo.root", "RECREATE"); + //! [part4] bool do_morphing = true; map mass_var = { {"ggh_htautau", &mh}, {"ggH_Htautau", &mH}, {"ggA_Atautau", &mA}, @@ -205,6 +212,7 @@ int main(int argc, char** argv) { cb.cp().process(ch::JoinStr({signal_types["ggH"], signal_types["bbH"]})).ExtractPdfs(cb, "htt", "$BIN_$PROCESS_morph"); cb.PrintAll(); cout << "done\n"; + //! 
[part4] string folder = "output/mssm_nomodel"; boost::filesystem::create_directories(folder); diff --git a/Run1BSMComb/scripts/hbb_high_mass_grid.json b/Run1BSMComb/scripts/hbb_high_mass_grid.json new file mode 100644 index 00000000000..3854e0091d4 --- /dev/null +++ b/Run1BSMComb/scripts/hbb_high_mass_grid.json @@ -0,0 +1,8 @@
+{
+ "opts" : "--singlePoint 1.0",
+ "POIs" : ["mA", "tanb"],
+ "grids" : [
+ ["200,300,350,400,500", "1,5,10,10:60|2", ""]
+ ],
+ "hist_binning" : [87, 130, 1000, 60, 1, 60]
+}
diff --git a/Run1BSMComb/scripts/hbb_low_mass_grid.json b/Run1BSMComb/scripts/hbb_low_mass_grid.json new file mode 100644 index 00000000000..0de199fc1b8 --- /dev/null +++ b/Run1BSMComb/scripts/hbb_low_mass_grid.json @@ -0,0 +1,8 @@
+{
+ "opts" : "--singlePoint 1.0",
+ "POIs" : ["mA", "tanb"],
+ "grids" : [
+ ["100,140,160", "1,5,10,10:60|2", ""]
+ ],
+ "hist_binning" : [87, 130, 1000, 60, 1, 60]
+}
diff --git a/docs/BSM-ModelIndependent-Limits-HhhAZh.md b/docs/BSM-ModelIndependent-Limits-HhhAZh.md new file mode 100644 index 00000000000..0b5ff299f71 --- /dev/null +++ b/docs/BSM-ModelIndependent-Limits-HhhAZh.md @@ -0,0 +1,37 @@
+BSM Model independent Limits using MorphingHhh or MorphingAZh {#ModelIndepHhhAZh}
+=================================================================
+
+The model independent limits for the H->hh and A->Zh analyses (HIG-14-034) are slightly simpler than for the MSSM H->tautau analysis, since only one signal process is considered and hence no profiling is required. This removes the need for any kind of physics model at the text2workspace step.
+
+Creating datacards {#Hhh-Azh-p1}
+========================
+
+The first step is to create the datacards, which will be used to produce the limit later on. To do this, go into the folder CombineHarvester/Run1BSMComb/ and then execute MorphingHhh or MorphingAZh. All of the programs in the following steps also need to be executed from this folder. Also make sure that all the files have been compiled beforehand:
+
+    cd CombineHarvester/Run1BSMComb/
+    MorphingAZh
+    MorphingHhh
+
+MorphingHhh.cpp and MorphingAZh.cpp are set up similarly to Example2 and MorphingMSSMUpdate, as documented previously. Unlike for the MSSMUpdate cards, no additional option -m MH is needed, because only one Higgs boson is considered in these analyses.
+
+
+Creating the workspace {#Hhh-Azh-p2}
+=======================================
+
+Now that the datacards have been created, we can create a workspace as follows:
+
+    text2workspace.py -b output/AZh_cards_nomodel/htt_cmb_AZh.txt -o output/AZh_cards_nomodel/htt_cmb_AZh.root --default-morphing shape2
+    text2workspace.py -b output/hhh_cards_nomodel/htt_cmb_Hhh.txt -o output/hhh_cards_nomodel/htt_cmb_Hhh.root --default-morphing shape2
+
+This creates a workspace based on the combined datacard.
+
+
+Calculating values {#Hhh-Azh-p3}
+========================
+
+Now that we have created the workspace, we can run combineTool.py to calculate the limit for a given mass point:
+
+    python ../CombineTools/scripts/combineTool.py -m 260 -M Asymptotic output/hhh_cards_nomodel/htt_cmb_Hhh.root --freezeNuisances mH --setPhysicsModelParameters mH=260
+    python ../CombineTools/scripts/combineTool.py -m 220 -M Asymptotic output/AZh_cards_nomodel/htt_cmb_AZh.root --freezeNuisances mA --setPhysicsModelParameters mA=220
+
+This will run the asymptotic limit calculation at mH=260 for the H->hh cards and at mA=220 for the A->Zh cards.
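+As a minimal sketch (the mass points chosen here are purely illustrative), a scan over several masses for the H->hh workspace can be scripted in the shell as:
+
+    for M in 260 270 280 290 300; do
+        python ../CombineTools/scripts/combineTool.py -m $M -M Asymptotic output/hhh_cards_nomodel/htt_cmb_Hhh.root --freezeNuisances mH --setPhysicsModelParameters mH=$M
+    done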
+Note that it is necessary to use the combine options to set the mass point and to freeze the parameter controlling it, so that it is not floated in the fit.
diff --git a/docs/BSM-ModelIndependent-Limits-MSSMHTT.md b/docs/BSM-ModelIndependent-Limits-MSSMHTT.md new file mode 100644 index 00000000000..9105a478efb --- /dev/null +++ b/docs/BSM-ModelIndependent-Limits-MSSMHTT.md @@ -0,0 +1,81 @@
+BSM Model independent Limits using MorphingMSSMUpdate {#MSSMUpdateNoModel}
+=================================================================
+
+These instructions explain how to produce model independent limits using the 8TeV part of the MSSM (CMS-HIG-14-039) analysis. Below we describe how to set limits on one signal process (ggH) while another is left floating (bbH).
+
+Creating datacards {#MSSMUpdate-p1}
+========================
+
+The first step is to create the datacards, which will be used to produce the limit later on. To do this, go into the folder CombineHarvester/Run1BSMComb/ and then execute MorphingMSSMUpdate. All of the programs in the following steps also need to be executed from this folder. Also make sure that all the files have been compiled beforehand:
+
+    cd CombineHarvester/Run1BSMComb/
+    MorphingMSSMUpdate -m MH
+
+MorphingMSSMUpdate.cpp is set up similarly to Example2.cpp; more information about the datacard steps can be found in the respective example. Adding the option "-m MH" when executing MorphingMSSMUpdate is necessary to ensure that the signal types assume one single narrow resonance produced via ggH and bbH, instead of distinguishing between the three neutral MSSM bosons h, A and H.
+It should be mentioned that CombineHarvester needs to use specific shape root files, which have been edited to reproduce the original result, since tail-fitting is not yet included in Combine Harvester.
+The output will be a set of datacards. Note that a separate datacard is not created for each mass; instead, a workspace is produced for the signals containing all centrally produced mass hypotheses. During the calculation the signal is morphed to the chosen mass. If, for example, MC signal templates exist for m=100GeV and 200GeV, one can still calculate limits for m=150GeV ("combine -m 150 ..."). The created root file, named "htt_mssm_demo.root", will be in the CombinePdfs folder. It contains the centrally available MC signals for each channel, category and mass. By default a combined datacard "htt_mssm.txt" is created, which contains the information of all the other datacards together.
+
+Scaling the workspace accordingly {#MSSMUpdate-p2}
+=======================================
+
+Now that the datacards have been created, we can specify which model we would like to study. We need a workspace in which the signals for each process are scaled correctly according to the selected model.
+
+    text2workspace.py -b output/mssm_nomodel/htt_mssm.txt -o output/mssm_nomodel/htt_ggPhi_mssm.root -P CombineHarvester.CombinePdfs.ModelIndependent:floatingMSSMXSHiggs --PO 'modes=ggH' --PO 'ggHRange=0:20'
+
+This creates a workspace "output/mssm_nomodel/htt_ggPhi_mssm.root" based on the combined datacard. The physics model "floatingMSSMXSHiggs" is built to split the signal into the processes ggH and bbH. A signal scaling parameter is assigned to one process while the other is left floating, and is therefore treated as a nuisance parameter in the fit later on. In this example we would like to set a limit on the xs*BR of the ggH (to tau tau) process; a fit range of 0 to 20 is set for this parameter (make sure that the fit range contains the minimum and is sensible). Here the bbH process is left floating.
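+For reference, a minimal sketch of the opposite configuration, where bbH is scaled and ggH is left floating, might look like this (assuming the model accepts a 'bbHRange' option analogous to 'ggHRange'; the output file name here is just a placeholder):
+
+    text2workspace.py -b output/mssm_nomodel/htt_mssm.txt -o output/mssm_nomodel/htt_bbPhi_mssm.root -P CombineHarvester.CombinePdfs.ModelIndependent:floatingMSSMXSHiggs --PO 'modes=bbH' --PO 'bbHRange=0:20'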
+By changing "ggH" to "bbH" in the text2workspace.py step, as sketched above, it is easy to swap these roles.
+More advanced users might extend the physics model to allow other processes (such as ttH) to be scaled.
+
+
+Calculating values {#MSSMUpdate-p3}
+========================
+
+Now that we have created the workspace htt_ggPhi_mssm.root, which can again be found in the output folder, we run combineTool.py, which performs the calculation of all the values that will later be used to create a plot.
+
+    python ../CombineTools/scripts/combineTool.py -m 100:200:20,90,100,250:500:50,600:1000:100 -M Asymptotic --boundlist $CMSSW_BASE/src/CombineHarvester/CombinePdfs/scripts/mssm_ggh_boundaries.json output/mssm_nomodel/htt_ggPhi_mssm.root --freezeNuisances MH
+
+This program by itself only creates lists of jobs for the program "combine". These jobs can be run interactively on your own computer, or they can be sent elsewhere using the option "--job-mode" (for example --job-mode 'lxbatch' when using CERN's computing resources). To avoid sending too many jobs, we may use "--merge" to include a certain number of combine calls in one job (for example --merge=8 will include up to 8 combine calls in one single job).
+Other than that, we need to specify which method to use, as well as the workspace. The method is specified with the option "-M"; in this example, the asymptotic limits are calculated. The mass range can be included with the option "-m". In this example, 19 different combine calls will be produced for the Higgs masses seen in the table at the bottom of this page. Since sensible boundaries on the parameter of interest (here ggH) can be mass dependent, an important option is to add a list of boundaries of the POI for each mass via an external json file, here "scripts/mssm_ggh_boundaries.json". This will set the range of the relevant physics model parameters, depending on the production process and the mass.
+The option "--freezeNuisances MH" is important to fix the hypothetical Higgs mass of the considered process to the one selected via the option "-m", instead of letting it float freely in the fit.
+
+
+Collecting the results in a single file {#MSSMUpdate-p4}
+=============================================
+
+Once all the jobs sent in the previous step are done, we collect all relevant data from the created root files into a single file. To do this, we run combineTool.py again, but this time with the method "CollectLimits", as well as different options.
+
+    python ../CombineTools/scripts/combineTool.py -M CollectLimits -i higgsCo* -o mssm.json
+
+The option "-i" is used to include all root files which have been previously created. These filenames usually all begin with "higgsCombine*". The filename specified after "-o" is the name of the json file which will be created. This json file contains all the necessary computed values from the root files that are needed to plot the limit. These include the values for the observation, the expectation and the -2, -1, +1, +2 sigma uncertainties.
+
+
+Plotting the limits {#MSSMUpdate-p5}
+=========================
+
+Finally we can produce the plot. To do this, we use the program "plotBSMxsBRLimit.py".
+ + python ../CombineTools/scripts/plotBSMxsBRLimit.py --file=mssm.json + +The filename specified as "--file" is the json file produced in the previous step. We may also add more options regarding the aesthetics of the plot, such as changing the range of the x- and y-axis, or enabling logarithmic scaling. Executing this program along with the additional parameters will create the desired plot as a png- and pdf file. It will also create a textfile "mssm_limit_table.txt", which contains a list of the exact values for all mass points. +The limits of the described example should agree with the following numbers: + +| mass | minus2sigma | minus1sigma | expected | plus1sigma | plus2sigma | observed | +|--------|------------------|------------------|------------------|------------------|-----------------|-------------------| +| 90.0 | 7.44689941406 | 10.4886741638 | 15.3125 | 22.3316726685 | 31.1344871521 | 21.4247200433 | +| 100.0 | 6.0858001709 | 8.5330581665 | 12.4140625 | 18.1046066284 | 25.0876998901 | 17.6605401733 | +| 120.0 | 1.18392789364 | 1.57787442207 | 2.17265629768 | 3.03002619743 | 4.02200269699 | 2.97035384799 | +| 130.0 | 0.639678955078 | 0.856723308563 | 1.1953125 | 1.68129837513 | 2.2536046505 | 1.59457176432 | +| 140.0 | 0.438079833984 | 0.584083616734 | 0.815625011921 | 1.14398777485 | 1.52501606941 | 0.94770346896 | +| 160.0 | 0.253028869629 | 0.337358653545 | 0.47109374404 | 0.660751521587 | 0.886857688427 | 0.460536925886 | +| 180.0 | 0.180056750774 | 0.240920364857 | 0.332812488079 | 0.466799587011 | 0.626536250114 | 0.302229212372 | +| 200.0 | 0.143707275391 | 0.19228386879 | 0.265625 | 0.370445460081 | 0.49172270298 | 0.227834272278 | +| 250.0 | 0.0768411234021 | 0.102051369846 | 0.142031252384 | 0.198079377413 | 0.264753729105 | 0.117494322717 | +| 300.0 | 0.0484149158001 | 0.0645136460662 | 0.090468749404 | 0.126169368625 | 0.168638512492 | 0.06854323815 | +| 350.0 | 0.0495315566659 | 0.0657380968332 | 0.0886718779802 | 0.118715122342 | 0.152319088578 | 0.0842439601436 | +| 400.0 | 0.0397595204413 | 0.052680041641 | 0.0721874982119 | 0.0972210913897 | 0.12731602788 | 0.0725939537161 | +| 450.0 | 0.0276245102286 | 0.035754583776 | 0.0484374985099 | 0.06581415236 | 0.086501725018 | 0.0503201500801 | +| 500.0 | 0.0173510741442 | 0.0233783721924 | 0.0321874991059 | 0.0452741757035 | 0.0613017454743 | 0.0379442905513 | +| 600.0 | 0.0101513667032 | 0.0140254208818 | 0.019687499851 | 0.02816282399 | 0.0385656952858 | 0.0255840519774 | +| 700.0 | 0.00771972676739 | 0.00984832737595 | 0.0145312501118 | 0.0205551572144 | 0.0290479976684 | 0.0209467474855 | +| 800.0 | 0.0062255859375 | 0.00794219970703 | 0.01171875 | 0.0165767390281 | 0.0222346615046 | 0.0163398869015 | +| 900.0 | 0.00439453125 | 0.00567626953125 | 0.0078125 | 0.0113002872095 | 0.0163606926799 | 0.0107711296097 | +| 1000.0 | 0.00259765610099 | 0.00385009753518 | 0.00593749992549 | 0.00858821813017 | 0.0124341268092 | 0.008082145785341 | diff --git a/docs/ChargedHiggs.md b/docs/ChargedHiggs.md index 31ae53ecf89..7ae77bbb7c2 100644 --- a/docs/ChargedHiggs.md +++ b/docs/ChargedHiggs.md @@ -3,7 +3,7 @@ Charged Higgs datacards with RooMorphingPdf # Getting the cards -The cards from the CMS HCG svn need to be copied into the auxilliaries directory: +The cards from the CMS HCG svn need to be copied into the auxiliaries directory: cp -r /afs/cern.ch/work/a/agilbert/public/CombineTools/data/HIG-14-020.r6636 $CMSSW_BASE/src/auxiliaries/datacards/ diff --git a/docs/HybridNewGrid.md b/docs/HybridNewGrid.md new file mode 
100644 index 00000000000..57bdee9fb15 --- /dev/null +++ b/docs/HybridNewGrid.md @@ -0,0 +1,51 @@
+# Calculating grids of CLs values using toys
+
+The following are basic instructions for using the **HybridNewGrid** method of `combineTool.py`. In a similar fashion to the **AsymptoticGrid** method, most of the settings are specified in a json file. An example is given in `CombinePdfs/scripts/mssm_hybrid_grid.json`:
+
+```json
+{
+ "verbose" : false,
+ "opts" : "--testStat=TEV --frequentist --singlePoint 1.0 --saveHybridResult --clsAcc 0 --fullBToys --fork 0",
+ "POIs" : ["mA", "tanb"],
+ "grids" : [
+ ["580:600:20", "10:30:4", ""]
+ ],
+ "toys_per_cycle" : 500,
+ "min_toys" : 500,
+ "max_toys" : 5000,
+ "signif" : 3.0,
+ "CL" : 0.95,
+ "contours" : ["obs", "exp-2", "exp-1", "exp0", "exp+1", "exp+2"],
+ "make_plots" : false,
+ "plot_settings" : {
+ "one_sided" : false,
+ "model_label" : "m_{H}^{mod+}",
+ "poi_labels" : ["m_{A}", "tan#beta"],
+ "null_label" : "SM",
+ "alt_label" : "MSSM",
+ "cms_subtitle" : "Internal",
+ "formats" : [".pdf", ".png"]
+ },
+ "zipfile" : "collected.zip",
+ "output" : "HybridNewGridMSSM.root",
+ "output_incomplete" : true
+}
+```
+
+The `combineTool.py` method is then invoked as follows:
+
+```sh
+combineTool.py -M HybridNewGrid scripts/mssm_hybrid_grid.json --cycles [cycles] -d [workspace]
+```
+
+This method will generate the specified number of toy cycles for each model point, where one cycle implies one run of combine with a new random seed. The setting `toys_per_cycle` controls the number of background-only and signal+background toys that will be generated in each combine job. Several options are also provided which can be used to concentrate the generation of toys around the relevant observed and expected exclusion contours. These specify a set of criteria which, when fulfilled, prevent the creation of any further combine jobs for that model point. These settings are:
+
+ - `min_toys`: Require that at least this many toys are run at each point
+ - `max_toys`: When this number of toys is reached do not run any more jobs, even if other criteria are still not fulfilled
+ - `signif`: Require that the CLs value at each point is at least this many standard deviations from the specified confidence level value (1 - CL), i.e. `abs(CLs - (1-CL)) / CLsErr > signif`, where `CLsErr` is the statistical uncertainty on the CLs value. This criterion must be met for the observed and/or expected quantiles listed in `contours`. Essentially this requires the probability that the CLs value we have determined lies on the wrong side of the critical CLs value to be very small.
+
+The `combineTool.py` command above can be repeatedly invoked until a sufficient number of points have met the chosen criteria. At any point the TGraph2Ds of the CLs values can be produced by adding the option `--output`. To just check the progress of the points, run with `--cycles 0`.
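+For example, a progress check that also writes the current output graphs could look like this (the workspace name here is just a placeholder):
+
+```sh
+combineTool.py -M HybridNewGrid scripts/mssm_hybrid_grid.json --cycles 0 --output -d workspace.root
+```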
+It is possible to choose whether model points which have not yet met these criteria should be included in the output graphs, via the option `output_incomplete`. When set to `false` only the points passing the validation will be included.
+
+Plots of the test statistic distributions at each model point can be created by setting the option `make_plots` to `true`.
+
+Due to the potentially huge number of output files expected for large grids, and because this can cause problems for some networked file-systems, it is possible to automatically transfer the output to a single zip file and have ROOT read the files directly from this archive. This is enabled by setting the value of `zipfile` to any non-empty string. Each time `combineTool.py` is invoked it will first look for output files in the given zip file (creating it if it doesn't already exist), then it will look for any other files in the local directory and append these to the zip file before deleting the local version.
\ No newline at end of file
diff --git a/docs/ReproduceRun1HTTDatacards.md b/docs/ReproduceRun1HTTDatacards.md new file mode 100644 index 00000000000..66dfa1fb15d --- /dev/null +++ b/docs/ReproduceRun1HTTDatacards.md @@ -0,0 +1,95 @@
+Reproducing Run 1 H->tautau results {#introrun1HTT}
+=========================
+
+Using all of the techniques described previously, both in terms of datacard production and usage of the RooMorphingPdf object for signal processes, code to reproduce many of the Run 1 H->tautau results is included in the package. For some analyses, development was first performed for the datacard production without morphing applied, and fully validated, before moving to using morphing for the signal process. Note that in the non-morphing version many of the analyses have an equivalent python version. Once the usage of RooMorphingPdf for signal was validated in several use cases, some additional analyses were added making use of this method only. This section describes how to find the code for each of the legacy analyses and details any specifics beyond the previous examples. More detail on the validation that was performed can be found in the analysis note AN-15-235. Note that to run the examples below, the shape files must exist in the auxiliaries directory and be linked correctly. For more information on running the statistical results with the produced datacards, see later sections.
+
+
+Systematics for legacy H->tautau results {#run1HTTsystematics}
+=========================
+
+**Files** CombineTools/interface/HttSystematics.h, CombineTools/python/systematics
+
+For analyses with a large number of systematic uncertainties, it is neater to detail these in a separate file from the main datacard production code. Files for the different analyses can be found at the paths above, either in c++ or python.
+
+Legacy SM H->tautau results {#run1HTTSM}
+=========================
+
+**Files** : CombineTools/bin/SMLegacyExample.cpp, CombineTools/scripts/SMLegacyExample.py
+
+**Files** : CombinePdfs/bin/SMLegacyMorphing.cpp
+
+The datacards for the legacy SM H->tautau results can be produced using the files listed above, available in both c++ and python. The code currently includes all the main tautau channels: ee, mm, mt, et, em and tt, but not the VH channels. The datacards are up to date with those used in the most recent CMS-ATLAS combination.
+
+The code itself makes use of functions similar to those already described for Example1 and Example2. A few new features are used as well:
+
+1) Scaling signals by a cross-section times branching ratio factor
+
+Generally for the SM analysis, limits are set on the best fit signal strength mu with reference to the SM prediction for cross section times branching ratio. Thus it makes sense to scale the signal according to this value when filling the datacard. This can be done via code like:
+
+\snippet CombineTools/bin/SMLegacyExample.cpp part1
+
+This reads the values for cross section times branching ratio from a text file and then uses them to scale each of the signal processes by the appropriate value.
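+A minimal sketch of this pattern (not the exact snippet above; the table name and column labels here are placeholders, and an existing CombineHarvester instance `cb` is assumed) would be:
+
+    auto xs_br = ch::TGraphFromTable("input/xsecs_brs/sm_xs_br.txt", "mH", "xs_br");
+    cb.cp().signals().ForEachProc([&](ch::Process *proc) {
+      // Multiply each signal rate by the xs*BR value interpolated at its mass
+      proc->set_rate(proc->rate() * xs_br.Eval(boost::lexical_cast<double>(proc->mass())));
+    });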
2) Merging bin by bin uncertainties

The addition of bin by bin uncertainties was illustrated in Example2. The SM H->tautau code uses an additional option in the BinByBinFactory which allows the user to merge uncertainties from different background processes in a given bin. This can be useful when an analysis has a large number of bins and/or contributing processes, such that adding a separate nuisance for each case would be costly in computing time for the limit calculations. The code starts in the same way as in Example2, by setting up the BinByBinFactory as follows:

\snippet CombineTools/bin/SMLegacyExample.cpp part2

In this example the merge threshold is set to 0.5, which controls the proportion of the total error in a bin that is allowed to be merged. The add threshold sets the minimum fractional statistical uncertainty a bin must have for a bin-by-bin nuisance parameter to be created in the first place.
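A short python sketch of this factory configuration is given below, assuming the CombineHarvester python bindings; the threshold values are illustrative only:

```python
import CombineHarvester.CombineTools.ch as ch

cb = ch.CombineHarvester()
# ... datacard contents assumed to be loaded here ...

# Configure a BinByBinFactory as described above: merge up to 50% of the
# total per-bin error, and only create nuisances for bins whose fractional
# statistical uncertainty is above 10% (illustrative value).
bbb = ch.BinByBinFactory()
bbb.SetMergeThreshold(0.5)
bbb.SetAddThreshold(0.1)
bbb.SetFixNorm(True)  # keep the total process normalisations fixed

# Merge the errors and add the bin-by-bin nuisances for the backgrounds
bbb.MergeBinErrors(cb.cp().backgrounds())
bbb.AddBinByBin(cb.cp().backgrounds(), cb)
```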
The uncertainties for different processes can then be added and merged simultaneously using calls like:

\snippet CombineTools/bin/SMLegacyExample.cpp part3

The filters for era, bin id and channel can be used in this way to add the specific requirements for each channel and category, of which there were many for the legacy SM analysis.

3) Pruning uncertainties

Alongside the merging of bin by bin uncertainties, another trick is employed in the SM analysis to help reduce the computing time for the limits. The method is to first run a maximum likelihood fit in each of the channels separately, using the full uncertainty model, and then use the information on the impacts of the uncertainties to judge which ones have a negligible effect on the fit result. These are then removed from the uncertainty model for the full combined fit. The procedure is to read in a text file which contains the list of uncertainties to be dropped (this was created at the time using the old statistical framework, but could easily be created again if required). The systematics added to the CH instance are then filtered using this list, with code as follows:

\snippet CombineTools/bin/SMLegacyExample.cpp part4
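A rough python equivalent of this filtering step might look as follows, assuming the python bindings expose the same filter methods as the C++ interface; the file name `uncertainties_to_drop.txt` is a hypothetical placeholder for the list described above:

```python
import CombineHarvester.CombineTools.ch as ch

cb = ch.CombineHarvester()
# ... datacard contents and systematics assumed to be added here ...

# Read the list of nuisance parameters to prune (hypothetical file name)
with open("uncertainties_to_drop.txt") as f:
    drop_list = {line.strip() for line in f if line.strip()}

# Remove every systematic whose name appears in the drop list
cb.FilterSysts(lambda s: s.name() in drop_list)
```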
+* The "norm_prefix" is an optional additional prefix to add to the normalisation term +controlling the PDF. +* The "allow_morph" option controls whether the PDF is +designed such that for masses inbetween those for which signal templates are +available the signal yield and shape are taken via interpolation, or whether the +nearest template is simply taken. +* Simply controls the verbose output of the code +* The option "force_template_limit" is an optional setting to force the +normalisation of the PDF to go to zero outside of the range of available masses. +* An optional TFile to contain debug information. + +The first step of the function is to work out the list of available masspoints +for the signal process, and sort and count them: + +\snippet CombinePdfs/src/MorphFunctions.cc part2 + +Then it is necessary to track all the information on the systematics affecting +the signal process. This is done separately for the shape and normalisation +systematics. After some manipulation of this information (including special +treatment for the case where a shape systematic also alters the normalisation, +i.e. has a value different from 1.00), it is possible to build a RooArgList of +the parameters controlling the vertical template morphing which is internally +used by combine to apply the effect of the shape systematics to the signal. + +\snippet CombinePdfs/src/MorphFunctions.cc part3 + +Note that the above code takes care over the possibility that the shape +systematics are not the same for all masspoints - this is not currently +supported in this function. The created "ss_list" is used later to create the +vertical template morphing PDFs. + +For the normalisation systematics we must consider separately the two cases: 1) +in the first case the uncertainty is the same for each masspoint, so we can +leave this in the datacard in the usual way, but in case 2) where the +uncertainty is not the same for each masspoint, we have to include this +information in the signal PDF by creating a RooFit object that makes the +uncertainty a function off mass. Finally a list is built of all objects required +for the interpolation. + +A 1D spline is built directly from the array of rates and masses: + +\snippet CombinePdfs/src/MorphFunctions.cc part4 + +Here the option "force_template_limit" allows the user to use an alternative +rate array where the rate falls to 0 for masses only very slightly outside of +those in the mass array. Then a normalisation term is built which contains all +the terms which go into the normalisation - the original rate and the +normalisation systematics. The array of vertical morphing PDFs is also built +using the information from earlier. Finally, the RooMorphingPdf is built with +the following obtained information: + +\snippet CombinePdfs/src/MorphFunctions.cc part5 + +with the arguments: +* xvar : the fixed "CMS_th1x" x-axis variable with uniform integer binning +* mass_var: the floating mass value for the interpolation +* vpdf_list: the list of vertical-interpolation pdfs +* m_vec: the corresponding list of mass points +* allow_morph: if false will just evaluate to the closest pdf in mass +* data_hist.GetXaxis(): The original (non-uniform) target binning +* proc_hist.GetXaxis(): The original (non-uniform) morphing binning + +The final two arguments are needed to support the possibility that the signal +template being interpolated has a finer binning than the target binning for the +fit. 
The final two arguments are needed to support the possibility that the signal template being interpolated has a finer binning than the target binning for the fit, in order to avoid known problems with the RMS of a peaking distribution not being morphed smoothly from mass point to mass point if the binning is too wide. A RooProduct is added for the normalisation of this PDF:

\snippet CombinePdfs/src/MorphFunctions.cc part6

The creation of this term is very important. When building the workspace, this term will be searched for and tied to the PDF. It is possible at the workspace building step to add additional terms to this normalisation term, for example a scaling for cross-section times branching ratio. Example usage of this will be discussed later.

The final step in the function is to tidy up the CH instance before it is returned to the main code to write out the datacards. The signal yield in the datacards is replaced with a single value of 1.0, such that the normalisation will now be read from the normalisation object just created.


Example usage for SM analysis {#SMMorph}
=========================

The code to produce SM datacards is almost exactly the same as the example described previously, with the exception of a few additional lines to call the BuildRooMorphing function. The call to BuildRooMorphing is as follows:

\snippet CombinePdfs/bin/SMLegacyMorphing.cpp part1

A single call is made for each signal process of each channel and category. The produced workspace is then imported into the CH instance and the created PDFs extracted. The workspace is stored in the shape file created for the limit calculations, alongside the text datacards.

For cases like the SM analysis, this method can be used simply to produce one combined workspace for all masses, making the mass a continuous parameter of the workspace. The rest of the calculation of the statistical results on the workspace is unchanged, except for the requirement to tell combine to fix the mass rather than float it. The workspace is built using the command:

    text2workspace.py -b combinedCard.txt -o combinedCard.root --default-morphing shape2

Note that the mass is not specified here, since we want to build a combined workspace for all possible masses. Then, to run the maximum likelihood fit for a given mass:

    combine -M MaxLikelihoodFit --freezeNuisances MH -m 125 combinedCard.root

where in this case the mass must be specified.
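Once built, the workspace can be opened and inspected interactively. The following PyROOT sketch assumes the default combine conventions (a workspace named "w" inside combinedCard.root, with the mass parameter named MH as in the commands above):

```python
import ROOT

# Open the workspace produced by text2workspace.py and inspect it
# (assumes the default workspace name "w" used by combine).
f = ROOT.TFile("combinedCard.root")
w = f.Get("w")
w.Print()  # lists all pdfs, functions and variables, including the morphing pdfs

# The mass is a floating parameter of the workspace; fix it by hand
mh = w.var("MH")
mh.setVal(125.0)
mh.setConstant(True)
```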
Creating 1 or 3 Higgs bosons for an MSSM analysis {#MSSMMorph}
=========================

For an MSSM or 2HDM interpretation, datacards must be set up containing 3 neutral Higgs bosons. The extension of the previous description to create datacards with three Higgs bosons, and a signal PDF for each, is simple:

\snippet Run1BSMComb/bin/MorphingMSSMUpdate.cpp part4

In this case the BuildRooMorphing function is passed the signal process corresponding to each of the three Higgs bosons, each controlled by their separate mass terms. Note the option "mass", which if set to "MH" will only build the datacards with one Higgs boson - this is reserved for the case of running the single resonance model independent limits and will instead place only one Higgs boson in the datacards.

Applying a model at text2workspace step {#MSSMModel}
=========================

Having produced datacards containing signal PDFs for the appropriate number of Higgs bosons, we can go one step further and make extra use of the normalisation term associated with the PDFs. One specific use of this, as discussed in the introduction, is in the model dependent exclusions provided for example in the MSSM H->tautau analysis. In this case, each point in mA-tanb phase space has a different signal model containing contributions from the three Higgs bosons. At each point the masses of the Higgs bosons and the cross-sections times branching ratios are different. The normal procedure in this case would be to produce a workspace for each model point, with a specific signal template built and added by hand.

Now that we have RooMorphingPdfs we can do this in a more clever way, by attaching terms controlling the normalisation of the signal to its normalisation term. This can be done within the text2workspace.py step, where the .txt datacard is converted into a RooWorkspace as input to combine. In this step, it is possible to apply a physics model which performs some scaling and manipulation of the different quantities in the datacards. By designing an appropriate physics model, we can add terms to the normalisation to control the cross section times branching ratio of the three Higgs bosons. These can be added as a function of mA and tanb, which can be made parameters of the workspace as RooRealVars:

\snippet CombinePdfs/python/MSSM.py part1

The histograms containing the information on the different cross-sections and branching ratios are included as TH2Ds inside a file for a particular model. These are read in and turned into a RooHistFunc via a helper function called doHistFunc:

\snippet CombinePdfs/python/MSSM.py part2

Note the option 'interpolate', which is off by default - this allows the possibility of interpolating the model inputs to reach values beyond the granularity that is provided.

RooHistFuncs are then built for the mass terms controlling the two Higgs bosons which are not the A boson (since that is already controlled by the parameter mA), as well as for the cross-sections and branching ratios, for example using terms:

\snippet CombinePdfs/python/MSSM.py part3

In this code snippet, 'era' controls the centre of mass energy of the model, currently either 7TeV, 8TeV or 13TeV. Note that theoretical uncertainties, which are also provided from the model inputs as a function of mA and tanb, are applied at this stage as well, by building the relevant terms and adding them to the workspace. Finally the relevant parameters are combined to give a complete term used to control the normalisation of the signal.
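For orientation, the following PyROOT sketch shows the kind of conversion a doHistFunc-style helper performs, turning a TH2 from a model file into a RooHistFunc of (mA, tanb). The file and histogram names below are illustrative assumptions, not necessarily the exact ones used by MSSM.py:

```python
import ROOT

# Open a model file and pick up a TH2 binned in (mA, tanb);
# the file and histogram names here are illustrative only.
infile = ROOT.TFile("out.mhmodp-8TeV-tanbHigh-nnlo.root")
hist = infile.Get("h_ggF_xsec_A")

mA = ROOT.RooRealVar("mA", "mA", 300.0)
tanb = ROOT.RooRealVar("tanb", "tanb", 20.0)

# TH2 -> RooDataHist -> RooHistFunc, as described in the text
dh = ROOT.RooDataHist("dh", "dh", ROOT.RooArgList(mA, tanb),
                      ROOT.RooFit.Import(hist))
hf = ROOT.RooHistFunc("ggF_xsec_A", "ggF_xsec_A",
                      ROOT.RooArgSet(mA, tanb), dh)
print(hf.getVal())  # the cross section at the current (mA, tanb) point
```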
+ + diff --git a/docs/customdoxygen.css b/docs/customdoxygen.css index 3ce6de388ca..1417da8e94e 100644 --- a/docs/customdoxygen.css +++ b/docs/customdoxygen.css @@ -1,7 +1,31 @@ body, table, div, p, dl { - font: 400 14px/21px "Open Sans",sans-serif; + -webkit-text-size-adjust: 100%; + text-size-adjust: 100%; + color: #333; + font-family: "Helvetica Neue", Helvetica, "Segoe UI", Arial, freesans, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + font-size: 16px; + line-height: 1.6; + word-wrap: break-word; } +/*Copied from markdown css*/ +a { + background-color: transparent; +} + +/*Copied from markdown css*/ +a:active, +a:hover { + outline: 0; +} + +/*Copied from markdown css*/ + +strong { + font-weight: bold; +} + + div#top { width: 270px; float: left; @@ -17,7 +41,7 @@ div#titlearea td { } #MSearchField { - font: 9pt 'Open Sans', sans-serif; + font: 9pt "Helvetica Neue", Helvetica, "Segoe UI", Arial, freesans, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; } #MSearchClose { @@ -25,7 +49,7 @@ div#titlearea td { } .SelectItem { - font: 8pt 'Open Sans', sans-serif; + font: 8pt "Helvetica Neue", Helvetica, "Segoe UI", Arial, freesans, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; } div#MSearchResultsWindow { @@ -36,18 +60,23 @@ div#MSearchResultsWindow { border-bottom-right-radius: 7px; border-top-left-radius: 7px; border-bottom-left-radius: 7px; + border: none; +} + +.SRPage .SREntry { + font-size: 10pt; } .SRSymbol { - font-family: 'Open Sans', sans-serif !important; + font-family: "Helvetica Neue", Helvetica, "Segoe UI", Arial, freesans, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; } a.SRScope { - font-family: 'Open Sans', sans-serif !important; + font-family:"Helvetica Neue", Helvetica, "Segoe UI", Arial, freesans, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol" !important; } span.SRScope { - font-family: 'Open Sans', sans-serif !important; + font-family: "Helvetica Neue", Helvetica, "Segoe UI", Arial, freesans, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol" !important; } #nav-tree { @@ -58,7 +87,7 @@ span.SRScope { } #nav-tree .label a { - font: 12px 'Open Sans',Geneva,Helvetica,Arial,sans-serif; + font: 12px "Helvetica Neue", Helvetica, "Segoe UI", Arial, freesans, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; color: rgb(164,164,164); } @@ -80,13 +109,33 @@ span.SRScope { } code { - font-family: Menlo; - font-size: 12px; - padding: 2px 4px; - white-space: nowrap; - background-color: rgba(102, 128, 153, 0.0745098); + font-family: Consolas, "Liberation Mono", Menlo, Courier, monospace; + padding: 0; + padding-top: 0.2em; + padding-bottom: 0.2em; + margin: 0; + font-size: 85%; + background-color: rgba(0,0,0,0.04); + border-radius: 3px; +} + +code:before, +code:after { + letter-spacing: -0.2em; + content: "\00a0"; +} + +pre>code { + padding: 0; + margin: 0; + font-size: 100%; + word-break: normal; + white-space: pre; + background: transparent; + border: 0; } + body { font: 400 16px/24px sans-serif; } @@ -96,16 +145,63 @@ body { } .title { - font: 400 14px/28px "Open Sans",sans-serif; - font-size: 150%; + font: 400 14px/28px "Helvetica Neue", Helvetica, "Segoe UI", Arial, freesans, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + font-size: 1.9em; font-weight: bold; margin: 10px 2px; } h1, h2, h3, h4, h5, h6 { - font-family: 'Open Sans', sans-serif; + font-family: "Helvetica Neue", Helvetica, "Segoe UI", Arial, 
freesans, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; +} + + +h1, +h2, +h3, +h4, +h5, +h6 { + margin-top: 1em; + margin-bottom: 16px; + font-weight: bold; + line-height: 1.4; +} + +h1 { + padding-bottom: 0.3em; + font-size: 1.75em; + line-height: 1.2; + border-bottom: 1px solid #eee; +} + +h2 { + padding-bottom: 0.3em; + font-size: 1.5em; + line-height: 1.225; + border-bottom: 1px solid #eee; +} + +h3 { + font-size: 1.25em; + line-height: 1.43; +} + +h4 { + font-size: 1em; +} + +h5 { + font-size: 1em; +} + +h6 { + font-size: 1em; + color: #777; } + + h1.glow, h2.glow, h3.glow, h4.glow, h5.glow, h6.glow { text-shadow: 0 0 15px rgb(61, 87, 140); } @@ -123,7 +219,7 @@ a.el { } -pre.fragment { +/*pre.fragment { border: 0px solid #C4CFE5; background-color: rgba(0,0,0,0.03); border-left: 0px; @@ -137,6 +233,36 @@ pre.fragment { font-family: Menlo, monospace, fixed; font-size: 12px; } +*/ + +pre.fragment { + overflow: auto; + font: 12px Consolas, "Liberation Mono", Menlo, Courier, monospace; + margin-top: 0px; + margin-bottom: 0px; + margin-left: 0px; + margin-right: 0px; + padding: 16px; + font-size: 85%; + line-height: 1.45; + background-color: #f7f7f7; + border-radius: 3px; + word-wrap: normal; + border: none; +} + +dd pre.fragment { + overflow: auto; + font: 12px Consolas, "Liberation Mono", Menlo, Courier, monospace; + margin: 4px 0px 4px 0px; + padding: 6px; + font-size: 85%; + line-height: 1.45; + background-color: rgba(0, 0, 0, 0.03); + border-radius: 3px; + word-wrap: normal; + border: none; +} div.fragment { padding: 4px 13px; @@ -148,10 +274,10 @@ div.fragment { } div.line { - font-family: "Menlo", monospace, fixed; - font-size: 12px; + font-family: Consolas, "Liberation Mono", Menlo, Courier, monospace; + font-size: 85%; min-height: 12px; - line-height: 100%; + line-height: 1; text-wrap: unrestricted; white-space: -moz-pre-wrap; /* Moz */ white-space: -pre-wrap; /* Opera 4-6 */ @@ -237,7 +363,7 @@ blockquote { box-shadow: none; border-top-right-radius: 0; border-top-left-radius: 0; - -moz-box-shadow: none + -moz-box-shadow: none; -moz-border-radius-topright: 0; -moz-border-radius-topleft: 0; -webkit-box-shadow: none; @@ -260,26 +386,34 @@ blockquote { table.doxtable { - margin: 20px; - width: 85%; - font-size: 14px; + display: block; + width: 100%; + overflow: auto; + word-break: normal; + word-break: keep-all; +} + +table.doxtable th { + font-weight: bold; + background-color: rgba(0, 0, 0, 0); + color: rgb(51, 51, 51); } + +table.doxtable th, table.doxtable td { - border: 0px solid #FFFFFF; - border-bottom: 1px solid #FFFFFF; - border-top: 1px solid transparent; - padding: 4px; - color: #000000; + font-size: 16px; + padding: 6px 13px; + border: 1px solid #ddd; } -table.doxtable th { - background-color: #FFFFFF; - border: 0px solid #000000; - border-bottom: 2px solid #606860; - font-size: 100%; - padding: 4px; - color: #000000; +table.doxtable tr { + background-color: #fff; + border-top: 1px solid #ccc; +} + +table.doxtable tr:nth-child(2n) { + background-color: #f8f8f8; } table.fieldtable { @@ -404,12 +538,13 @@ dl.section dd { #projectname { - font: 140% "Open Sans", Arial,sans-serif; + font: 140% "Helvetica Neue", Helvetica, "Segoe UI", Arial, freesans, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; + color: rgb(255, 255, 255); } #projectbrief { - font: 70% "Open Sans", Arial,sans-serif; + font: 70% "Helvetica Neue", Helvetica, "Segoe UI", Arial, freesans, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; } @@ 
-434,11 +569,11 @@ div.toc { } div.toc li { - font: 11px/1.2 "Open Sans",sans-serif; + font: 11px/1.2 "Helvetica Neue", Helvetica, "Segoe UI", Arial, freesans, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; } div.toc h3 { - font: bold 11px/1.2 'Open Sans',FreeSans,sans-serif; + font: bold 11px/1.2 "Helvetica Neue", Helvetica, "Segoe UI", Arial, freesans, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; } @@ -455,5 +590,5 @@ div.toc li.level4 { } #powerTip div { - font: 12px/16px "Open Sans",sans-serif; + font: 12px/16px "Helvetica Neue", Helvetica, "Segoe UI", Arial, freesans, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol"; } diff --git a/docs/doxypypy/__init__.py b/docs/doxypypy/__init__.py new file mode 100755 index 00000000000..e69de29bb2d diff --git a/docs/doxypypy/doxypypy.py b/docs/doxypypy/doxypypy.py new file mode 100755 index 00000000000..833a723af9b --- /dev/null +++ b/docs/doxypypy/doxypypy.py @@ -0,0 +1,836 @@ +#!/usr/bin/env python +# -*- coding: utf-8 -*- +""" +Filters Python code for use with Doxygen, using a syntax-aware approach. + +Rather than implementing a partial Python parser with regular expressions, this +script uses Python's own abstract syntax tree walker to isolate meaningful +constructs. It passes along namespace information so Doxygen can construct a +proper tree for nested functions, classes, and methods. It understands bed lump +variables are by convention private. It groks Zope-style Python interfaces. +It can automatically turn PEP 257 compliant that follow the more restrictive +Google style guide into appropriate Doxygen tags, and is even aware of +doctests. +""" + +from ast import NodeVisitor, parse, iter_fields, AST, Name, get_docstring +from re import compile as regexpCompile, IGNORECASE, MULTILINE +from types import GeneratorType +from sys import stderr +from os import linesep +from string import whitespace +from codeop import compile_command + + +def coroutine(func): + """Basic decorator to implement the coroutine pattern.""" + def __start(*args, **kwargs): + """Automatically calls next() on the internal generator function.""" + __cr = func(*args, **kwargs) + next(__cr) + return __cr + return __start + + +class AstWalker(NodeVisitor): + """ + A walker that'll recursively progress through an AST. + + Given an abstract syntax tree for Python code, walk through all the + nodes looking for significant types (for our purposes we only care + about module starts, class definitions, function definitions, variable + assignments, and function calls, as all the information we want to pass + to Doxygen is found within these constructs). If the autobrief option + is set, it further attempts to parse docstrings to create appropriate + Doxygen tags. + """ + + # We have a number of regular expressions that we use. They don't + # vary across instances and so are compiled directly in the class + # definition. + __indentRE = regexpCompile(r'^(\s*)\S') + __newlineRE = regexpCompile(r'^#', MULTILINE) + __blanklineRE = regexpCompile(r'^\s*$') + __docstrMarkerRE = regexpCompile(r"\s*([uUbB]*[rR]?(['\"]{3}))") + __docstrOneLineRE = regexpCompile(r"\s*[uUbB]*[rR]?(['\"]{3})(.+)\1") + + __implementsRE = regexpCompile(r"^(\s*)(?:zope\.)?(?:interface\.)?" + r"(?:module|class|directly)?" + r"(?:Provides|Implements)\(\s*(.+)\s*\)", + IGNORECASE) + __interfaceRE = regexpCompile(r"^\s*class\s+(\S+)\s*\(\s*(?:zope\.)?" + r"(?:interface\.)?" 
+ r"Interface\s*\)\s*:", IGNORECASE) + __attributeRE = regexpCompile(r"^(\s*)(\S+)\s*=\s*(?:zope\.)?" + r"(?:interface\.)?" + r"Attribute\s*\(['\"]{1,3}(.*)['\"]{1,3}\)", + IGNORECASE) + + __singleLineREs = { + ' @author: ': regexpCompile(r"^(\s*Authors?:\s*)(.*)$", IGNORECASE), + ' @copyright ': regexpCompile(r"^(\s*Copyright:\s*)(.*)$", IGNORECASE), + ' @date ': regexpCompile(r"^(\s*Date:\s*)(.*)$", IGNORECASE), + ' @file ': regexpCompile(r"^(\s*File:\s*)(.*)$", IGNORECASE), + ' @version: ': regexpCompile(r"^(\s*Version:\s*)(.*)$", IGNORECASE), + ' @note ': regexpCompile(r"^(\s*Note:\s*)(.*)$", IGNORECASE), + ' @warning ': regexpCompile(r"^(\s*Warning:\s*)(.*)$", IGNORECASE) + } + __argsStartRE = regexpCompile(r"^(\s*(?:(?:Keyword\s+)?" + r"(?:A|Kwa)rg(?:ument)?|Attribute)s?" + r"\s*:\s*)$", IGNORECASE) + __argsRE = regexpCompile(r"^\s*(?P\w+)\s*(?P\(?\S*\)?)?\s*" + r"(?:-|:)+\s+(?P.+)$") + __returnsStartRE = regexpCompile(r"^\s*(?:Return|Yield)s:\s*$", IGNORECASE) + __raisesStartRE = regexpCompile(r"^\s*(Raises|Exceptions|See Also):\s*$", + IGNORECASE) + __listRE = regexpCompile(r"^\s*(([\w\.]+),\s*)+(&|and)?\s*([\w\.]+)$") + __singleListItemRE = regexpCompile(r'^\s*([\w\.]+)\s*$') + __listItemRE = regexpCompile(r'([\w\.]+),?\s*') + __examplesStartRE = regexpCompile(r"^\s*(?:Example|Doctest)s?:\s*$", + IGNORECASE) + __sectionStartRE = regexpCompile(r"^\s*(([A-Z]\w* ?){1,2}):\s*$") + # The error line should match traceback lines, error exception lines, and + # (due to a weird behavior of codeop) single word lines. + __errorLineRE = regexpCompile(r"^\s*((?:\S+Error|Traceback.*):?\s*(.*)|@?[\w.]+)\s*$", + IGNORECASE) + + def __init__(self, lines, options, inFilename): + """Initialize a few class variables in preparation for our walk.""" + self.lines = lines + self.options = options + self.inFilename = inFilename + self.docLines = [] + + @staticmethod + def _stripOutAnds(inStr): + """Takes a string and returns the same without ands or ampersands.""" + assert isinstance(inStr, str) + return inStr.replace(' and ', ' ').replace(' & ', ' ') + + @staticmethod + def _endCodeIfNeeded(line, inCodeBlock): + """Simple routine to append end code marker if needed.""" + assert isinstance(line, str) + if inCodeBlock: + line = '# @endcode{0}{1}'.format(linesep, line.rstrip()) + inCodeBlock = False + return line, inCodeBlock + + @coroutine + def _checkIfCode(self, inCodeBlock): + """Checks whether or not a given line appears to be Python code.""" + while True: + line, lines, lineNum = (yield) + testLineNum = 1 + currentLineNum = 0 + testLine = line.strip() + lineOfCode = None + while lineOfCode is None: + match = AstWalker.__errorLineRE.match(testLine) + if not testLine or testLine == '...' or match: + # These are ambiguous. + line, lines, lineNum = (yield) + testLine = line.strip() + #testLineNum = 1 + elif testLine.startswith('>>> '): + # This is definitely code. + lineOfCode = True + else: + try: + compLine = compile_command(testLine) + if compLine and lines[currentLineNum].strip().startswith('#'): + lineOfCode = True + else: + line, lines, lineNum = (yield) + line = line.strip() + if line.startswith('>>> '): + # Definitely code, don't compile further. + lineOfCode = True + else: + testLine += linesep + line + testLine = testLine.strip() + testLineNum += 1 + except (SyntaxError, RuntimeError): + # This is definitely not code. + lineOfCode = False + except Exception: + # Other errors are ambiguous. 
+ line, lines, lineNum = (yield) + testLine = line.strip() + #testLineNum = 1 + currentLineNum = lineNum - testLineNum + if not inCodeBlock and lineOfCode: + inCodeBlock = True + lines[currentLineNum] = '{0}{1}# @code{1}'.format( + lines[currentLineNum], + linesep + ) + elif inCodeBlock and lineOfCode is False: + # None is ambiguous, so strict checking + # against False is necessary. + inCodeBlock = False + lines[currentLineNum] = '{0}{1}# @endcode{1}'.format( + lines[currentLineNum], + linesep + ) + + @coroutine + def __alterDocstring(self, tail='', writer=None): + """ + Runs eternally, processing docstring lines. + + Parses docstring lines as they get fed in via send, applies appropriate + Doxygen tags, and passes them along in batches for writing. + """ + assert isinstance(tail, str) and isinstance(writer, GeneratorType) + + lines = [] + timeToSend = False + inCodeBlock = False + inSection = False + prefix = '' + firstLineNum = -1 + sectionHeadingIndent = 0 + codeChecker = self._checkIfCode(False) + proseChecker = self._checkIfCode(True) + while True: + lineNum, line = (yield) + if firstLineNum < 0: + firstLineNum = lineNum + # Don't bother doing extra work if it's a sentinel. + if line is not None: + # Also limit work if we're not parsing the docstring. + if self.options.autobrief: + for doxyTag, tagRE in AstWalker.__singleLineREs.items(): + match = tagRE.search(line) + if match: + # We've got a simple one-line Doxygen command + lines[-1], inCodeBlock = self._endCodeIfNeeded( + lines[-1], inCodeBlock) + writer.send((firstLineNum, lineNum - 1, lines)) + lines = [] + firstLineNum = lineNum + line = line.replace(match.group(1), doxyTag) + timeToSend = True + + if inSection: + # The last line belonged to a section. + # Does this one too? (Ignoring empty lines.) + match = AstWalker.__blanklineRE.match(line) + if not match: + indent = len(line.expandtabs(self.options.tablength)) - \ + len(line.expandtabs(self.options.tablength).lstrip()) + if indent <= sectionHeadingIndent: + inSection = False + else: + if lines[-1] == '#': + # If the last line was empty, but we're still in a section + # then we need to start a new paragraph. + lines[-1] = '# @par' + + match = AstWalker.__returnsStartRE.match(line) + if match: + # We've got a "returns" section + line = line.replace(match.group(0), ' @return\t').rstrip() + prefix = '@return\t' + else: + match = AstWalker.__argsStartRE.match(line) + if match: + # We've got an "arguments" section + line = line.replace(match.group(0), '').rstrip() + if 'attr' in match.group(0).lower(): + prefix = '@property\t' + else: + prefix = '@param\t' + lines[-1], inCodeBlock = self._endCodeIfNeeded( + lines[-1], inCodeBlock) + lines.append('#' + line) + continue + else: + match = AstWalker.__argsRE.match(line) + if match and not inCodeBlock: + # We've got something that looks like an item / + # description pair. 
+ if 'property' in prefix: + line = '# {0}\t{1[name]}{2}# {1[desc]}'.format( + prefix, match.groupdict(), linesep) + else: + line = ' {0}\t{1[name]}\t{1[desc]}'.format( + prefix, match.groupdict()) + else: + match = AstWalker.__raisesStartRE.match(line) + if match: + line = line.replace(match.group(0), '').rstrip() + if 'see' in match.group(1).lower(): + # We've got a "see also" section + prefix = '@sa\t' + else: + # We've got an "exceptions" section + prefix = '@exception\t' + lines[-1], inCodeBlock = self._endCodeIfNeeded( + lines[-1], inCodeBlock) + lines.append('#' + line) + continue + else: + match = AstWalker.__listRE.match(line) + if match and not inCodeBlock: + # We've got a list of something or another + itemList = [] + for itemMatch in AstWalker.__listItemRE.findall(self._stripOutAnds( + match.group(0))): + itemList.append('# {0}\t{1}{2}'.format( + prefix, itemMatch, linesep)) + line = ''.join(itemList)[1:] + else: + match = AstWalker.__examplesStartRE.match(line) + if match and lines[-1].strip() == '#' \ + and self.options.autocode: + # We've got an "example" section + inCodeBlock = True + line = line.replace(match.group(0), + ' @b Examples{0}# @code'.format(linesep)) + else: + match = AstWalker.__sectionStartRE.match(line) + if match: + # We've got an arbitrary section + prefix = '' + inSection = True + # What's the indentation of the section heading? + sectionHeadingIndent = len(line.expandtabs(self.options.tablength)) \ + - len(line.expandtabs(self.options.tablength).lstrip()) + line = line.replace( + match.group(0), + ' @par {0}'.format(match.group(1)) + ) + if lines[-1] == '# @par': + lines[-1] = '#' + lines[-1], inCodeBlock = self._endCodeIfNeeded( + lines[-1], inCodeBlock) + lines.append('#' + line) + continue + elif prefix: + match = AstWalker.__singleListItemRE.match(line) + if match and not inCodeBlock: + # Probably a single list item + line = ' {0}\t{1}'.format( + prefix, match.group(0)) + elif self.options.autocode and inCodeBlock: + proseChecker.send( + ( + line, lines, + lineNum - firstLineNum + ) + ) + elif self.options.autocode: + codeChecker.send( + ( + line, lines, + lineNum - firstLineNum + ) + ) + + # If we were passed a tail, append it to the docstring. + # Note that this means that we need a docstring for this + # item to get documented. + if tail and lineNum == len(self.docLines) - 1: + line = '{0}{1}# {2}'.format(line.rstrip(), linesep, tail) + + # Add comment marker for every line. + line = '#{0}'.format(line.rstrip()) + # Ensure the first line has the Doxygen double comment. + if lineNum == 0: + line = '#' + line + + lines.append(line.replace(' ' + linesep, linesep)) + else: + # If we get our sentinel value, send out what we've got. + timeToSend = True + + if timeToSend: + lines[-1], inCodeBlock = self._endCodeIfNeeded(lines[-1], + inCodeBlock) + writer.send((firstLineNum, lineNum, lines)) + lines = [] + firstLineNum = -1 + timeToSend = False + + @coroutine + def __writeDocstring(self): + """ + Runs eternally, dumping out docstring line batches as they get fed in. + + Replaces original batches of docstring lines with modified versions + fed in via send. + """ + while True: + firstLineNum, lastLineNum, lines = (yield) + newDocstringLen = lastLineNum - firstLineNum + 1 + while len(lines) < newDocstringLen: + lines.append('') + # Substitute the new block of lines for the original block of lines. 
+ self.docLines[firstLineNum: lastLineNum + 1] = lines + + def _processDocstring(self, node, tail='', **kwargs): + """ + Handles a docstring for functions, classes, and modules. + + Basically just figures out the bounds of the docstring and sends it + off to the parser to do the actual work. + """ + typeName = type(node).__name__ + # Modules don't have lineno defined, but it's always 0 for them. + curLineNum = startLineNum = 0 + if typeName != 'Module': + startLineNum = curLineNum = node.lineno - 1 + # Figure out where both our enclosing object and our docstring start. + line = '' + while curLineNum < len(self.lines): + line = self.lines[curLineNum] + match = AstWalker.__docstrMarkerRE.match(line) + if match: + break + curLineNum += 1 + docstringStart = curLineNum + # Figure out where our docstring ends. + if not AstWalker.__docstrOneLineRE.match(line): + # Skip for the special case of a single-line docstring. + curLineNum += 1 + while curLineNum < len(self.lines): + line = self.lines[curLineNum] + if line.find(match.group(2)) >= 0: + break + curLineNum += 1 + endLineNum = curLineNum + 1 + + # Isolate our enclosing object's declaration. + defLines = self.lines[startLineNum: docstringStart] + # Isolate our docstring. + self.docLines = self.lines[docstringStart: endLineNum] + + # If we have a docstring, extract information from it. + if self.docLines: + # Get rid of the docstring delineators. + self.docLines[0] = AstWalker.__docstrMarkerRE.sub('', + self.docLines[0]) + self.docLines[-1] = AstWalker.__docstrMarkerRE.sub('', + self.docLines[-1]) + # Handle special strings within the docstring. + docstringConverter = self.__alterDocstring( + tail, self.__writeDocstring()) + for lineInfo in enumerate(self.docLines): + docstringConverter.send(lineInfo) + docstringConverter.send((len(self.docLines) - 1, None)) + + # Add a Doxygen @brief tag to any single-line description. + if self.options.autobrief: + safetyCounter = 0 + while len(self.docLines) > 0 and self.docLines[0].lstrip('#').strip() == '': + del self.docLines[0] + self.docLines.append('') + safetyCounter += 1 + if safetyCounter >= len(self.docLines): + # Escape the effectively empty docstring. + break + if len(self.docLines) == 1 or (len(self.docLines) >= 2 and ( + self.docLines[1].strip(whitespace + '#') == '' or + self.docLines[1].strip(whitespace + '#').startswith('@'))): + self.docLines[0] = "## @brief {0}".format(self.docLines[0].lstrip('#')) + if len(self.docLines) > 1 and self.docLines[1] == '# @par': + self.docLines[1] = '#' + + if defLines: + match = AstWalker.__indentRE.match(defLines[0]) + indentStr = match and match.group(1) or '' + self.docLines = [AstWalker.__newlineRE.sub(indentStr + '#', docLine) + for docLine in self.docLines] + + # Taking away a docstring from an interface method definition sometimes + # leaves broken code as the docstring may be the only code in it. + # Here we manually insert a pass statement to rectify this problem. 
+ if typeName != 'Module': + if docstringStart < len(self.lines): + match = AstWalker.__indentRE.match(self.lines[docstringStart]) + indentStr = match and match.group(1) or '' + else: + indentStr = '' + containingNodes = kwargs.get('containingNodes', []) or [] + fullPathNamespace = self._getFullPathName(containingNodes) + parentType = fullPathNamespace[-2][1] + if parentType == 'interface' and typeName == 'FunctionDef' \ + or fullPathNamespace[-1][1] == 'interface': + defLines[-1] = '{0}{1}{2}pass'.format(defLines[-1], + linesep, indentStr) + elif self.options.autobrief and typeName == 'ClassDef': + # If we're parsing docstrings separate out class attribute + # definitions to get better Doxygen output. + for firstVarLineNum, firstVarLine in enumerate(self.docLines): + if '@property\t' in firstVarLine: + break + lastVarLineNum = len(self.docLines) + if '@property\t' in firstVarLine: + while lastVarLineNum > firstVarLineNum: + lastVarLineNum -= 1 + if '@property\t' in self.docLines[lastVarLineNum]: + break + lastVarLineNum += 1 + if firstVarLineNum < len(self.docLines): + indentLineNum = endLineNum + indentStr = '' + while not indentStr and indentLineNum < len(self.lines): + match = AstWalker.__indentRE.match(self.lines[indentLineNum]) + indentStr = match and match.group(1) or '' + indentLineNum += 1 + varLines = ['{0}{1}'.format(linesep, docLine).replace( + linesep, linesep + indentStr) + for docLine in self.docLines[ + firstVarLineNum: lastVarLineNum]] + defLines.extend(varLines) + self.docLines[firstVarLineNum: lastVarLineNum] = [] + # After the property shuffling we will need to relocate + # any existing namespace information. + namespaceLoc = defLines[-1].find('\n# @namespace') + if namespaceLoc >= 0: + self.docLines[-1] += defLines[-1][namespaceLoc:] + defLines[-1] = defLines[-1][:namespaceLoc] + + # For classes and functions, apply our changes and reverse the + # order of the declaration and docstring, and for modules just + # apply our changes. + if typeName != 'Module': + self.lines[startLineNum: endLineNum] = self.docLines + defLines + else: + self.lines[startLineNum: endLineNum] = defLines + self.docLines + + @staticmethod + def _checkMemberName(name): + """ + See if a member name indicates that it should be private. + + Private variables in Python (starting with a double underscore but + not ending in a double underscore) and bed lumps (variables that + are not really private but are by common convention treated as + protected because they begin with a single underscore) get Doxygen + tags labeling them appropriately. + """ + assert isinstance(name, str) + restrictionLevel = None + if not name.endswith('__'): + if name.startswith('__'): + restrictionLevel = 'private' + elif name.startswith('_'): + restrictionLevel = 'protected' + return restrictionLevel + + def _processMembers(self, node, contextTag): + """ + Mark up members if they should be private. + + If the name indicates it should be private or protected, apply + the appropriate Doxygen tags. + """ + restrictionLevel = self._checkMemberName(node.name) + if restrictionLevel: + workTag = '{0}{1}# @{2}'.format(contextTag, + linesep, + restrictionLevel) + else: + workTag = contextTag + return workTag + + def generic_visit(self, node, **kwargs): + """ + Extract useful information from relevant nodes including docstrings. + + This is virtually identical to the standard version contained in + NodeVisitor. 
It is only overridden because we're tracking extra + information (the hierarchy of containing nodes) not preserved in + the original. + """ + for field, value in iter_fields(node): + if isinstance(value, list): + for item in value: + if isinstance(item, AST): + self.visit(item, containingNodes=kwargs['containingNodes']) + elif isinstance(value, AST): + self.visit(value, containingNodes=kwargs['containingNodes']) + + def visit(self, node, **kwargs): + """ + Visit a node and extract useful information from it. + + This is virtually identical to the standard version contained in + NodeVisitor. It is only overridden because we're tracking extra + information (the hierarchy of containing nodes) not preserved in + the original. + """ + containingNodes = kwargs.get('containingNodes', []) + method = 'visit_' + node.__class__.__name__ + visitor = getattr(self, method, self.generic_visit) + return visitor(node, containingNodes=containingNodes) + + def _getFullPathName(self, containingNodes): + """ + Returns the full node hierarchy rooted at module name. + + The list representing the full path through containing nodes + (starting with the module itself) is returned. + """ + assert isinstance(containingNodes, list) + return [(self.options.fullPathNamespace, 'module')] + containingNodes + + def visit_Module(self, node, **kwargs): + """ + Handles the module-level docstring. + + Process the module-level docstring and create appropriate Doxygen tags + if autobrief option is set. + """ + if self.options.debug: + stderr.write("# Module {0}{1}".format(self.options.fullPathNamespace, + linesep)) + if get_docstring(node): + self._processDocstring(node) + # Visit any contained nodes (in this case pretty much everything). + self.generic_visit(node, containingNodes=kwargs.get('containingNodes', + [])) + + def visit_Assign(self, node, **kwargs): + """ + Handles assignments within code. + + Variable assignments in Python are used to represent interface + attributes in addition to basic variables. If an assignment appears + to be an attribute, it gets labeled as such for Doxygen. If a variable + name uses Python mangling or is just a bed lump, it is labeled as + private for Doxygen. + """ + lineNum = node.lineno - 1 + # Assignments have one Doxygen-significant special case: + # interface attributes. + match = AstWalker.__attributeRE.match(self.lines[lineNum]) + if match: + self.lines[lineNum] = '{0}## @property {1}{2}{0}# {3}{2}' \ + '{0}# @hideinitializer{2}{4}{2}'.format( + match.group(1), + match.group(2), + linesep, + match.group(3), + self.lines[lineNum].rstrip() + ) + if self.options.debug: + stderr.write("# Attribute {0.id}{1}".format(node.targets[0], + linesep)) + if isinstance(node.targets[0], Name): + match = AstWalker.__indentRE.match(self.lines[lineNum]) + indentStr = match and match.group(1) or '' + restrictionLevel = self._checkMemberName(node.targets[0].id) + if restrictionLevel: + self.lines[lineNum] = '{0}## @var {1}{2}{0}' \ + '# @hideinitializer{2}{0}# @{3}{2}{4}{2}'.format( + indentStr, + node.targets[0].id, + linesep, + restrictionLevel, + self.lines[lineNum].rstrip() + ) + # Visit any contained nodes. + self.generic_visit(node, containingNodes=kwargs['containingNodes']) + + def visit_Call(self, node, **kwargs): + """ + Handles function calls within code. + + Function calls in Python are used to represent interface implementations + in addition to their normal use. If a call appears to mark an + implementation, it gets labeled as such for Doxygen. 
+ """ + lineNum = node.lineno - 1 + # Function calls have one Doxygen-significant special case: interface + # implementations. + match = AstWalker.__implementsRE.match(self.lines[lineNum]) + if match: + self.lines[lineNum] = '{0}## @implements {1}{2}{0}{3}{2}'.format( + match.group(1), match.group(2), linesep, + self.lines[lineNum].rstrip()) + if self.options.debug: + stderr.write("# Implements {0}{1}".format(match.group(1), + linesep)) + # Visit any contained nodes. + self.generic_visit(node, containingNodes=kwargs['containingNodes']) + + def visit_FunctionDef(self, node, **kwargs): + """ + Handles function definitions within code. + + Process a function's docstring, keeping well aware of the function's + context and whether or not it's part of an interface definition. + """ + if self.options.debug: + stderr.write("# Function {0.name}{1}".format(node, linesep)) + # Push either 'interface' or 'class' onto our containing nodes + # hierarchy so we can keep track of context. This will let us tell + # if a function is nested within another function or even if a class + # is nested within a function. + containingNodes = kwargs.get('containingNodes', []) or [] + containingNodes.append((node.name, 'function')) + if self.options.topLevelNamespace: + fullPathNamespace = self._getFullPathName(containingNodes) + contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace) + modifiedContextTag = self._processMembers(node, contextTag) + tail = '@namespace {0}'.format(modifiedContextTag) + else: + tail = self._processMembers(node, '') + if get_docstring(node): + self._processDocstring(node, tail, + containingNodes=containingNodes) + # Visit any contained nodes. + self.generic_visit(node, containingNodes=containingNodes) + # Remove the item we pushed onto the containing nodes hierarchy. + containingNodes.pop() + + def visit_ClassDef(self, node, **kwargs): + """ + Handles class definitions within code. + + Process the docstring. Note though that in Python Class definitions + are used to define interfaces in addition to classes. + If a class definition appears to be an interface definition tag it as an + interface definition for Doxygen. Otherwise tag it as a class + definition for Doxygen. + """ + lineNum = node.lineno - 1 + # Push either 'interface' or 'class' onto our containing nodes + # hierarchy so we can keep track of context. This will let us tell + # if a function is a method or an interface method definition or if + # a class is fully contained within another class. + containingNodes = kwargs.get('containingNodes', []) or [] + match = AstWalker.__interfaceRE.match(self.lines[lineNum]) + if match: + if self.options.debug: + stderr.write("# Interface {0.name}{1}".format(node, linesep)) + containingNodes.append((node.name, 'interface')) + else: + if self.options.debug: + stderr.write("# Class {0.name}{1}".format(node, linesep)) + containingNodes.append((node.name, 'class')) + if self.options.topLevelNamespace: + fullPathNamespace = self._getFullPathName(containingNodes) + contextTag = '.'.join(pathTuple[0] for pathTuple in fullPathNamespace) + tail = '@namespace {0}'.format(contextTag) + else: + tail = '' + # Class definitions have one Doxygen-significant special case: + # interface definitions. 
+ if match: + contextTag = '{0}{1}# @interface {2}'.format(tail, + linesep, + match.group(1)) + else: + contextTag = tail + contextTag = self._processMembers(node, contextTag) + if get_docstring(node): + self._processDocstring(node, contextTag, + containingNodes=containingNodes) + # Visit any contained nodes. + self.generic_visit(node, containingNodes=containingNodes) + # Remove the item we pushed onto the containing nodes hierarchy. + containingNodes.pop() + + def parseLines(self): + """Form an AST for the code and produce a new version of the source.""" + inAst = parse(''.join(self.lines), self.inFilename) + # Visit all the nodes in our tree and apply Doxygen tags to the source. + self.visit(inAst) + + def getLines(self): + """Return the modified file once processing has been completed.""" + return linesep.join(line.rstrip() for line in self.lines) + + +def main(): + """ + Starts the parser on the file given by the filename as the first + argument on the command line. + """ + from optparse import OptionParser, OptionGroup + from os import sep + from os.path import basename + from sys import argv, exit as sysExit + + def optParse(): + """ + Parses command line options. + + Generally we're supporting all the command line options that doxypy.py + supports in an analogous way to make it easy to switch back and forth. + We additionally support a top-level namespace argument that is used + to trim away excess path information. + """ + + parser = OptionParser(prog=basename(argv[0])) + + parser.set_usage("%prog [options] filename") + parser.add_option( + "-a", "--autobrief", + action="store_true", dest="autobrief", + help="parse the docstring for @brief description and other information" + ) + parser.add_option( + "-c", "--autocode", + action="store_true", dest="autocode", + help="parse the docstring for code samples" + ) + parser.add_option( + "-n", "--ns", + action="store", type="string", dest="topLevelNamespace", + help="specify a top-level namespace that will be used to trim paths" + ) + parser.add_option( + "-t", "--tablength", + action="store", type="int", dest="tablength", default=4, + help="specify a tab length in spaces; only needed if tabs are used" + ) + group = OptionGroup(parser, "Debug Options") + group.add_option( + "-d", "--debug", + action="store_true", dest="debug", + help="enable debug output on stderr" + ) + parser.add_option_group(group) + + ## Parse options based on our definition. + (options, filename) = parser.parse_args() + + # Just abort immediately if we are don't have an input file. + if not filename: + stderr.write("No filename given." + linesep) + sysExit(-1) + + # Turn the full path filename into a full path module location. + fullPathNamespace = filename[0].replace(sep, '.')[:-3] + # Use any provided top-level namespace argument to trim off excess. + realNamespace = fullPathNamespace + if options.topLevelNamespace: + namespaceStart = fullPathNamespace.find(options.topLevelNamespace) + if namespaceStart >= 0: + realNamespace = fullPathNamespace[namespaceStart:] + options.fullPathNamespace = realNamespace + + return options, filename[0] + + # Figure out what is being requested. + (options, inFilename) = optParse() + + # Read contents of input file. + inFile = open(inFilename) + lines = inFile.readlines() + inFile.close() + # Create the abstract syntax tree for the input file. + astWalker = AstWalker(lines, options, inFilename) + astWalker.parseLines() + # Output the modified source. + print(astWalker.getLines()) + +# See if we're running as a script. 
+if __name__ == "__main__": + main() diff --git a/docs/header.html b/docs/header.html index 0c71f80cce9..6d2e889001a 100644 --- a/docs/header.html +++ b/docs/header.html @@ -2,7 +2,6 @@ - diff --git a/docs/mA-tanb-Limits.md b/docs/mA-tanb-Limits.md new file mode 100644 index 00000000000..b5e90ce9808 --- /dev/null +++ b/docs/mA-tanb-Limits.md @@ -0,0 +1,67 @@ +Model dependent Limits using MorphingMSSMUpdate {#MSSMUpdateModelDep} +================================================================= + +These instruction shall elaborate how to produce MSSM model dependent limits using the 8TeV part of the MSSM (CMS-PAS-HIG-14-029) analysis. + + +Creating datacards {#p1} +======================== + +First we create the datacards. + + cd CombineHarvester/Run1BSMComb/ + MorphingMSSMUpdate + +The created datacards contain six signals: Three for each considered production process, ggH and bbH and each of them separated in the three neutral MSSM Higgs bosons h, H and A. The particular interesting feature of the "-NoModel" creation of the datacards is that all considered signal masses are stored in the workspace instead of having a single datacard for each mass. This has the advantage that high-level morphing tools of combine could be directly used instead of having to perform manual morphing using fixed mass datacards. + + +Scaling the workspace accordingly {#p2} +======================================= + +The combined datacard "htt_mssm.txt" is now transfered to a MSSM model dependent workspace. + + text2workspace.py -b output/mssm_nomodel/htt_mssm.txt -o output/mssm_nomodel/htt_cmb_mhmodp.root -P CombineHarvester.CombinePdfs.MSSMv2:MSSM --PO filePrefix=$CMSSW_BASE/src/auxiliaries/models/ --PO modelFiles=8TeV,out.mhmodp-8TeV-tanbHigh-nnlo.root,0 + +Therefore, a physic model "MSSMv2.py" is used. It will try to read model files from the directory auxiliaries/models/, so we need to add this path as well as the model files themselves as additional parameters. The syntax for setting the path to the models is as above. The general syntax for the model files themselves is "--PO modelFiles=ERA,FILE,VERSION". In this example, we choose the mhmod+ model for high values of tanb at 8TeV. For a list of all available options for text2workspace, run text2workspace.py -h. For upcoming 13/14TeV runs VERSION=1 should be set. +After the creation the workspace "htt_cmb_mhmodp.root" contains the model information like BR/xs/masses for all considered mA/tanb which are set as parameters of interest. The six signal processes dependent on the parameters of interest. + + +Calculating values {#p3} +======================== + +In the next step we calculate the limits in the considered MSSM model phase space. + + python ../CombineTools/scripts/combineTool.py -M AsymptoticGrid scripts/mssm_asymptotic_grid.json -d output/mssm_nomodel/htt_cmb_mssm.root --job-mode 'lxbatch' --task-name 'mssm_mhodp' --sub-opts '-q 1nh' --merge=8 + +The scanned grid (=considered mA/tanb points) in the MSSM model is defined in the json file "mssm_asymptotic_grid.json". As example: + + "opts" : "--singlePoint 1.0", + "POIs" : ["mA", "tanb"], + "grids" : [ + ["130:150|10", "1:3|1", "0"], + ["130:150|10", "4:60|20", ""] + ], + "hist_binning" : [87, 130, 1000, 60, 1, 60] + +The "opts", "POIS" are mandatory. The "hist_binning" has no influence yet. The list of grids can be set like above. The first row, ["130:150|10", "1:3|1", "0"], defines a grid of mA=130, 140 and 150 GeV scanning tanb=1,2 and 3. 
The third command, here "0", sets the CLs limit to the given value instead of calculating it. This could be used to exclude some regions of the phase space which for example are known to be excluded by other theoretical constraints. If the third option is empty "" the CLs limit for the given region will be computed. Like in the second row, ["130:150|10", "4:60|20", ""], where a grid of mA=130, 140 and 150 GeV and tanb=4, 24 and 44 is scanned. +Here, the lxbatch computing system is used. Eight grid points are merged in one job. + + +Collecting the results in a single file {#p4} +============================================= + +After all jobs have finished, the results can be collected by simple rerunning the calculating command. + + python ../CombineTools/scripts/combineTool.py -M AsymptoticGrid scripts/mssm_asymptotic_grid.json -d output/mssm_nomodel/htt_cmb_mhmodp.root --task-name 'mssm_mhodp' + +The limits for the median expected, expected error bands and observed are stored in TGraph2D. The resulting file "asymptotic_grid.root" is needed for the plotting. + + +Plotting the limits {#p5} +========================= + +The plotting is done be the "MSSMtanbPlot.py" script. + + python ../CombineTools/scripts/MSSMtanbPlot.py --file=asymptotic_grid.root --scenario="mhmodp" + +Again, the filename after "--file" has to be the root file which was created in the previous step. MSSMtanbPlot.py also requires a name for the scenario, which will be written in the plot. MSSMtanbPlot.py will only produce the plots as a png- and pdf file. The plotting is still work in progress. diff --git a/docs/py_filter b/docs/py_filter new file mode 100755 index 00000000000..37d846a3011 --- /dev/null +++ b/docs/py_filter @@ -0,0 +1,2 @@ +#!/bin/bash +python docs/doxypypy/doxypypy.py -a -c $1