diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md index 6db29002b..7ef2cafe2 100644 --- a/.github/ISSUE_TEMPLATE/bug_report.md +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -24,11 +24,7 @@ A clear and concise description of what you want to happen (i.e. instead of the If applicable, add screenshots to help explain your problem. **Environment:** - - Inside the container: - - Output of `ldmx-container-config` - - Outside the container: - - OS of computer you are running on - - Version of Geant4, ROOT, ONNXRuntime, Xerces, and gcc +Output of `ldmx config`: **Additional context** Add any other context about the problem here. diff --git a/.github/actions/common.sh b/.github/actions/common.sh index e7853af7c..40db7cd88 100644 --- a/.github/actions/common.sh +++ b/.github/actions/common.sh @@ -43,7 +43,7 @@ set_output() { local _key="$1" local _val="$2" echo "${_key} = ${_val}" - echo "::set-output name=${_key}::${_val}" + echo "${_key}=${_val}" >> $GITHUB_OUTPUT } # GitHub workflow command to start an group of output messages diff --git a/.github/actions/setup/setup.sh b/.github/actions/setup/setup.sh index 8343b6015..ccf4c9c90 100644 --- a/.github/actions/setup/setup.sh +++ b/.github/actions/setup/setup.sh @@ -15,6 +15,11 @@ set -e source ${GITHUB_ACTION_PATH}/../common.sh __main__() { + start_group Pull the Environment + docker pull ${LDMX_DOCKER_TAG} + docker inspect ${LDMX_DOCKER_TAG} + end_group + start_group Configure the Build local _build=${LDMX_BASE}/ldmx-sw/build mkdir ${_build} diff --git a/.github/actions/validate/gold_label b/.github/actions/validate/gold_label index 6d260c3af..4671105d7 100644 --- a/.github/actions/validate/gold_label +++ b/.github/actions/validate/gold_label @@ -1 +1 @@ -v3.2.0 +v3.2.7 diff --git a/.github/validation_samples/ecal_pn/config.py b/.github/validation_samples/ecal_pn/config.py index 99fb6debd..7d7719d95 100644 --- a/.github/validation_samples/ecal_pn/config.py +++ b/.github/validation_samples/ecal_pn/config.py @@ -61,5 +61,6 @@ TrigScintClusterProducer.pad2(), TrigScintClusterProducer.pad3(), trigScintTrack, - count, TriggerProcessor('trigger') + count, TriggerProcessor('trigger'), + dqm.PhotoNuclearDQM(verbose=True), ] + dqm.all_dqm) diff --git a/.github/validation_samples/ecal_pn/gold.root b/.github/validation_samples/ecal_pn/gold.root index 9e62b8902..3d99de6ab 100644 Binary files a/.github/validation_samples/ecal_pn/gold.root and b/.github/validation_samples/ecal_pn/gold.root differ diff --git a/.github/validation_samples/hcal/gold.root b/.github/validation_samples/hcal/gold.root index db9d204ee..991c352bc 100644 Binary files a/.github/validation_samples/hcal/gold.root and b/.github/validation_samples/hcal/gold.root differ diff --git a/.github/validation_samples/inclusive/config.py b/.github/validation_samples/inclusive/config.py index 107e44073..f638f255f 100644 --- a/.github/validation_samples/inclusive/config.py +++ b/.github/validation_samples/inclusive/config.py @@ -61,5 +61,6 @@ TrigScintClusterProducer.pad2(), TrigScintClusterProducer.pad3(), trigScintTrack, - count, TriggerProcessor('trigger') + count, TriggerProcessor('trigger'), + dqm.PhotoNuclearDQM(verbose=False), ] + dqm.all_dqm) diff --git a/.github/validation_samples/inclusive/gold.root b/.github/validation_samples/inclusive/gold.root index d73f09585..283cb7709 100644 Binary files a/.github/validation_samples/inclusive/gold.root and b/.github/validation_samples/inclusive/gold.root differ diff --git 
a/.github/validation_samples/it_pileup/config.py b/.github/validation_samples/it_pileup/config.py index a340b8274..3d0ac4414 100644 --- a/.github/validation_samples/it_pileup/config.py +++ b/.github/validation_samples/it_pileup/config.py @@ -95,8 +95,11 @@ trigScintTrack, count, TriggerProcessor('trigger'), dqm.SimObjects(sim_pass=thisPassName), - ecalDigiVerify,dqm.EcalShowerFeatures(), - dqm.HCalDQM()]+dqm.recoil_dqm+dqm.trigger_dqm) + ecalDigiVerify,dqm.EcalShowerFeatures(), + dqm.PhotoNuclearDQM(verbose=False), + dqm.HCalDQM(), + +]+dqm.recoil_dqm+dqm.trigger_dqm) p.inputFiles = ['ecal_pn.root'] p.outputFiles= ['events.root'] diff --git a/.github/validation_samples/it_pileup/gold.root b/.github/validation_samples/it_pileup/gold.root index b45023030..bf26fd755 100644 Binary files a/.github/validation_samples/it_pileup/gold.root and b/.github/validation_samples/it_pileup/gold.root differ diff --git a/.github/workflows/basic_test.yml b/.github/workflows/basic_test.yml index 2145ab6f3..e300bb097 100644 --- a/.github/workflows/basic_test.yml +++ b/.github/workflows/basic_test.yml @@ -14,7 +14,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout ldmx-sw - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: submodules: 'recursive' diff --git a/.github/workflows/build_production_image.yml b/.github/workflows/build_production_image.yml index ebae56dba..3ede7ed6c 100644 --- a/.github/workflows/build_production_image.yml +++ b/.github/workflows/build_production_image.yml @@ -25,19 +25,19 @@ jobs: steps: - name: Setup QEMU - uses: docker/setup-qemu-action@v1 + uses: docker/setup-qemu-action@v2 - name: Set up Docker Buildx - uses: docker/setup-buildx-action@v1 + uses: docker/setup-buildx-action@v2 - name: Login to DockerHub - uses: docker/login-action@v1 + uses: docker/login-action@v2 with: username: ${{ secrets.DOCKER_USERNAME }} password: ${{ secrets.DOCKER_PASSWORD }} - name: Git Build Context - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: submodules: recursive ref: ${{ github.event.inputs.branch }} @@ -77,7 +77,7 @@ jobs: - name: Build the Image id: docker_build - uses: docker/build-push-action@v2 + uses: docker/build-push-action@v3 with: tags: ${{ steps.generate_tag.outputs.tags }} push: true diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 3369c64dd..66c1278ea 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -13,7 +13,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout ldmx-sw - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: submodules: 'recursive' @@ -29,14 +29,14 @@ jobs: # Runs doxygen doxygen.conf in the docs/ directory - name: Run Doxygen to build C++ Docs - uses: mattnotmitt/doxygen-action@v1.1.0 + uses: mattnotmitt/doxygen-action@v1.9 with: doxyfile-path: doxygen.conf/doxyfile #relative to working directory working-directory: docs #docs subdirectory # sphinx is a python package, so we need to setup python on this runner - name: Setup Python for Sphinx - uses: actions/setup-python@v2 + uses: actions/setup-python@v3 # Runs sphinx-apidoc and sphinx-build in the docs/ directory # sphinx-apidoc requires the python files to be packaged together diff --git a/.github/workflows/generate_pr_gold.yml b/.github/workflows/generate_pr_gold.yml index 7b4345595..198d6a239 100644 --- a/.github/workflows/generate_pr_gold.yml +++ b/.github/workflows/generate_pr_gold.yml @@ -16,7 +16,7 @@ jobs: job_matrix: ${{steps.gen-mat.outputs.job_matrix}} steps: - name: Checkout ldmx-sw - uses: actions/checkout@v2 + 
uses: actions/checkout@v3 with: submodules: 'recursive' @@ -27,7 +27,7 @@ jobs: run: tar cf ldmx-sw-package.tar install/ .github/ - name: Upload ldmx-sw Package - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: ldmx-sw-build-${{ github.sha }} path: ldmx-sw-package.tar @@ -45,7 +45,7 @@ jobs: matrix: ${{fromJson(needs.compile-ldmx-sw.outputs.job_matrix)}} steps: - name: Download ldmx-sw Package - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: ldmx-sw-build-${{ github.sha }} @@ -62,7 +62,7 @@ jobs: no_comp: true - name: Upload New Golden Histograms - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: ${{matrix.sample}}-new-gold path: ${{steps.validation.outputs.hists}} @@ -72,14 +72,14 @@ jobs: runs-on: ubuntu-latest steps: - name: Checkout ldmx-sw - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: submodules: 'recursive' ref: trunk - name: Download All the Gold id: download - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: path: '../' @@ -116,7 +116,7 @@ jobs: shell: bash - name: Open PR for the New Gold - uses: peter-evans/create-pull-request@v3 + uses: peter-evans/create-pull-request@v4 with: commit-message: 'Updated Gold for release ${{ steps.update-gold.outputs.new-label }}' title: 'New Gold for Generated by release ${{ steps.update-gold.outputs.new-label }}' diff --git a/.github/workflows/new_pre_release.yml b/.github/workflows/new_pre_release.yml index 3a5b9ff4f..9d5027142 100644 --- a/.github/workflows/new_pre_release.yml +++ b/.github/workflows/new_pre_release.yml @@ -23,7 +23,7 @@ jobs: job_matrix: ${{steps.gen-mat.outputs.job_matrix}} steps: - name: Checkout ldmx-sw - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: submodules: 'recursive' ref: ${{ github.event.inputs.branch }} @@ -35,7 +35,7 @@ jobs: run: tar cf ldmx-sw-package.tar install/ .github/ - name: Upload ldmx-sw Package - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: ldmx-sw-build-${{ github.sha }} path: ldmx-sw-package.tar @@ -55,7 +55,7 @@ jobs: matrix: ${{fromJson(needs.compile-ldmx-sw.outputs.job_matrix)}} steps: - name: Download ldmx-sw Package - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: ldmx-sw-build-${{ github.sha }} @@ -72,7 +72,7 @@ jobs: no_comp: true - name: Upload Histogram File - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: ${{matrix.sample}}-${{matrix.run}}-hists path: ${{steps.validation.outputs.hists}} @@ -82,7 +82,7 @@ jobs: runs-on: ubuntu-latest steps: - name: Download Generated Histograms - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: path: '.' 
diff --git a/.github/workflows/pr_validation.yml b/.github/workflows/pr_validation.yml index 4b24fc7fb..346527e29 100644 --- a/.github/workflows/pr_validation.yml +++ b/.github/workflows/pr_validation.yml @@ -17,7 +17,7 @@ jobs: job_matrix: ${{steps.gen-mat.outputs.job_matrix}} steps: - name: Checkout ldmx-sw - uses: actions/checkout@v2 + uses: actions/checkout@v3 with: submodules: 'recursive' fetch-depth: 0 @@ -33,7 +33,7 @@ jobs: run: tar cf ldmx-sw-package.tar install/ .github/ - name: Upload ldmx-sw Package - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: ldmx-sw-build-${{ github.sha }} path: ldmx-sw-package.tar @@ -52,7 +52,7 @@ jobs: fail-fast: false steps: - name: Download ldmx-sw Package - uses: actions/download-artifact@v2 + uses: actions/download-artifact@v3 with: name: ldmx-sw-build-${{ github.sha }} @@ -68,7 +68,7 @@ jobs: sample: ${{matrix.sample}} - name: Upload Validation Plots - uses: actions/upload-artifact@v2 + uses: actions/upload-artifact@v3 with: name: ${{matrix.sample}}-pr-validation path: ${{ steps.validation.outputs.plots }} diff --git a/Biasing/include/Biasing/PhotoNuclearTopologyFilters.h b/Biasing/include/Biasing/PhotoNuclearTopologyFilters.h new file mode 100644 index 000000000..1c5abf248 --- /dev/null +++ b/Biasing/include/Biasing/PhotoNuclearTopologyFilters.h @@ -0,0 +1,124 @@ +#ifndef PHOTONUCLEARTOPOLOGYFILTERS_H +#define PHOTONUCLEARTOPOLOGYFILTERS_H + +/*~~~~~~~~~~~~~*/ +/* SimCore */ +/*~~~~~~~~~~~~~*/ +#include "SimCore/UserAction.h" +#include "SimCore/UserTrackInformation.h" +/*~~~~~~~~~~~~~~~*/ +/* Framework */ +/*~~~~~~~~~~~~~~~*/ +#include +#include + +#include "Framework/Configure/Parameters.h" +// Forward declaration + +namespace biasing { + +/** + * Abstract base class for a user action used to filter out photo-nuclear events + * that don't match the topology the user is interested in studying. Derived + * classes implement the event rejection + * + * Similar to the PhotoNuclearProductsFilter, this user action will only process + * steps whose associated track has been tagged as a "PN Gamma". This tag is + * currently only set in ECalProcessFilter and needs to be placed in the + * UserAction pipeline before this class. + * + */ +class PhotoNuclearTopologyFilter : public simcore::UserAction { + public: + /** + * Constructor + * + * @param[in] name The name of this class instance. + * @param[in] parameters The parameters used to configure this class. + */ + PhotoNuclearTopologyFilter(const std::string& name, + framework::config::Parameters& parameters); + + /// Destructor + ~PhotoNuclearTopologyFilter() = default; + + /** + * Callback that allows a user to take some actions at the end of + * a step. + * + * @param[in] step The Geant4 step containing transient information + * about the step taken by a track. + */ + void stepping(const G4Step* step) override; + + virtual bool rejectEvent(const std::vector& secondaries) const = 0; + + /// Retrieve the type of actions this class defines + std::vector getTypes() final override { + return {simcore::TYPE::STEPPING}; + } + + protected: + /** + * Check if the PDG code corresponds to a light ion nucleus. + * + * Nuclear PDG codes are given by ±10LZZZAAAI So to find the atomic + * number, we first divide by 10 (to lose the I-component) and then + * take the modulo with 1000. + * + * TODO: Repeated code from SimCore, could probably live elsewhere. + * + */ + constexpr bool isLightIon(const int pdgCode) const { + // + // TODO: Is the < check necessary? 
+    if (pdgCode > 1000000000 && pdgCode < 10000000000) {
+      // Check if the atomic number is less than or equal to 4
+      return ((pdgCode / 10) % 1000) <= 4;
+    }
+    return false;
+  }
+
+  /**
+   * Whether or not to include a particular particle type in any counting.
+   * Unless \ref count_light_ions_ is set, we don't count anything with a
+   * nuclear PDG code. This is consistent with the counting behaviour used in
+   * the PhotoNuclearDQM.
+   *
+   * If \ref count_light_ions_ is set, we also match PDG codes for nuclei with
+   * atomic number < 4.
+   *
+   * TODO: Repeated code from SimCore, could probably live elsewhere.
+   * @see isLightIon
+   *
+   */
+  constexpr bool skipCountingParticle(const int pdgcode) const {
+    return !(pdgcode < 10000 || count_light_ions_ && isLightIon(pdgcode));
+  }
+
+  constexpr bool isNeutron(const int pdgID) const { return pdgID == 2112; }
+
+  bool count_light_ions_;
+  double hard_particle_threshold_;
+};  // PhotoNuclearTopologyFilter
+
+class SingleNeutronFilter : public PhotoNuclearTopologyFilter {
+ public:
+  SingleNeutronFilter(const std::string& name,
+                      framework::config::Parameters& parameters)
+      : PhotoNuclearTopologyFilter{name, parameters} {}
+
+  bool rejectEvent(
+      const std::vector<const G4Track*>& secondaries) const override;
+};
+
+class NothingHardFilter : public PhotoNuclearTopologyFilter {
+ public:
+  NothingHardFilter(const std::string& name,
+                    framework::config::Parameters& parameters)
+      : PhotoNuclearTopologyFilter{name, parameters} {}
+
+  bool rejectEvent(
+      const std::vector<const G4Track*>& secondaries) const override;
+};
+}  // namespace biasing
+
+#endif /* PHOTONUCLEARTOPOLOGYFILTERS_H */
diff --git a/Biasing/include/Biasing/TaggerVetoFilter.h b/Biasing/include/Biasing/TaggerVetoFilter.h
index e1b2d8f4c..ae36b185b 100644
--- a/Biasing/include/Biasing/TaggerVetoFilter.h
+++ b/Biasing/include/Biasing/TaggerVetoFilter.h
@@ -27,7 +27,7 @@ namespace biasing {
  *
  */
 class TaggerVetoFilter : public simcore::UserAction {
- public:
+public:
   /**
    * Constructor.
    *
    * @param[in] name the name of the instance of this UserAction.
    * @param[in] parameters the parameters used to configure this
    * UserAction.
    */
-  TaggerVetoFilter(const std::string& name,
-                   framework::config::Parameters& parameters);
+  TaggerVetoFilter(const std::string &name,
+                   framework::config::Parameters &parameters);
 
   /// Destructor
   ~TaggerVetoFilter();
 
+  /**
+   *
+   * Action called at the start of an event to reset
+   *
+   */
+  void BeginOfEventAction(const G4Event *) final override;
+  /**
+   *
+   * Action called at the end of an event to veto events where the primary
+   * particle never entered the tagger region.
+   *
+   */
+
+  void EndOfEventAction(const G4Event *) final override;
   /**
    * Stepping action called when a step is taken during tracking of
    * a particle.
    *
    * @param[in] step Geant4 step
    */
-  void stepping(const G4Step* step) final override;
+  void stepping(const G4Step *step) final override;
 
   /// Retrieve the type of actions this class defines
   std::vector<simcore::TYPE> getTypes() final override {
-    return {simcore::TYPE::STEPPING};
+    return {simcore::TYPE::STEPPING, simcore::TYPE::EVENT};
   }
 
- private:
+
+private:
+  /**
+   * Did the primary particle enter the tagger region? Reset at the start of
+   * each event
+   *
+   */
+  bool primary_entered_tagger_region_{false};
+
   /// Energy below which an incident electron should be vetoed.
   double threshold_{0};
-};  // TaggerVetoFilter
+  // Should the EndOfEventAction reject events where the primary particle never
+  // entered the tagger region?
+ bool reject_primaries_missing_tagger_{true}; + +}; // TaggerVetoFilter -} // namespace biasing +} // namespace biasing -#endif // BIASING_TAGGERVETOFILTER_H +#endif // BIASING_TAGGERVETOFILTER_H diff --git a/Biasing/include/Biasing/TargetDarkBremFilter.h b/Biasing/include/Biasing/TargetDarkBremFilter.h index 0bcbb26b6..e68631413 100644 --- a/Biasing/include/Biasing/TargetDarkBremFilter.h +++ b/Biasing/include/Biasing/TargetDarkBremFilter.h @@ -62,9 +62,16 @@ class TargetDarkBremFilter : public simcore::UserAction { * @return list of action types this class does */ std::vector getTypes() final override { - return {simcore::TYPE::STEPPING}; + return {simcore::TYPE::STEPPING, simcore::TYPE::EVENT}; } + /** + * Reset flag signaling finding of A' to false + * + * @param[in] e event being started, unused + */ + void BeginOfEventAction(const G4Event* e) final override; + /** * Looking for A' while primary is stepping. * @@ -78,6 +85,14 @@ class TargetDarkBremFilter : public simcore::UserAction { */ void stepping(const G4Step* step); + /** + * Check flag signaling finding of A', if false, + * abort the event so it isn't saved. + * + * @param[in] event being ended, check if it is already aborted + */ + void EndOfEventAction(const G4Event* event) final override; + private: /** * Check if the volume is outside the target region @@ -127,6 +142,11 @@ class TargetDarkBremFilter : public simcore::UserAction { */ double threshold_; + /** + * flag to signal that we saw an A' + */ + bool found_aprime_; + }; // TargetDarkBremFilter } // namespace biasing diff --git a/Biasing/python/eat.py b/Biasing/python/eat.py index 3f193f38e..8df7b30c8 100644 --- a/Biasing/python/eat.py +++ b/Biasing/python/eat.py @@ -123,19 +123,15 @@ def dark_brem( ap_mass , lhe, detector ) : #Activiate dark bremming with a certain A' mass and LHE library from LDMX.SimCore import dark_brem - db_model = dark_brem.VertexLibraryModel( lhe ) + db_model = dark_brem.G4DarkBreMModel( lhe ) db_model.threshold = 2. #GeV - minimum energy electron needs to have to dark brem db_model.epsilon = 0.01 #decrease epsilon from one to help with Geant4 biasing calculations sim.dark_brem.activate( ap_mass , db_model ) #Biasing dark brem up inside of the ecal volumes - from math import log10 - #need a higher power for the higher mass A' - mass_power = max(log10(sim.dark_brem.ap_mass),2.) - from LDMX.SimCore import bias_operators sim.biasing_operators = [ - bias_operators.DarkBrem.ecal(sim.dark_brem.ap_mass**mass_power / db_model.epsilon**2) + bias_operators.DarkBrem.ecal(sim.dark_brem.ap_mass**2 / db_model.epsilon**2) ] sim.actions = [ diff --git a/Biasing/python/ecal.py b/Biasing/python/ecal.py index 4cdb667cb..3b9bb2c17 100644 --- a/Biasing/python/ecal.py +++ b/Biasing/python/ecal.py @@ -74,3 +74,56 @@ def photo_nuclear( detector, generator ) : ]) return sim + +def gamma_mumu(detector, generator) : + """Example configuration for biasing gamma to mu+ mu- conversions in the ecal. + + In this particular example, 4 GeV elecrons are fired upstream of the + tagger tracker. The TargetBremFilter filters out all events that don't + produce a brem in the target with an energy greater than 2.5 GeV. + + Parameters + ---------- + + detector : str + Path to the detector + + Returns + ------- + Instance of the sim configured for target gamma to muon conversions. 
+ + Example + ------- + + ecal_mumu_sim = ecal.gamma_mumu('ldmx-det-v12') + + """ + + # Initiate the sim + sim = simulator.simulator("ecal_gammamumu") + + # Set the path to the detector to use + # Also tell the simulator to include scoring planes + sim.setDetector( detector, True ) + + # Set run parameters + sim.description = "gamma --> mu+ mu-, xsec bias 3e4" + sim.beamSpotSmear = [20., 80., 0.] + + sim.generators.append(generator) + + # Enable and configure the biasing + sim.biasing_operators = [ bias_operators.GammaToMuPair('ecal', 3.E4, 2500.) ] + + # the following filters are in a library that needs to be included + includeBiasing.library() + + # Configure the sequence in which user actions should be called + sim.actions.extend([ + filters.TaggerVetoFilter(), + filters.TargetBremFilter(), + filters.EcalProcessFilter(process='GammaToMuPair'), + util.TrackProcessFilter.gamma_mumu() + ]) + + return sim diff --git a/Biasing/python/filters.py b/Biasing/python/filters.py index 69b126e72..bebd6b087 100644 --- a/Biasing/python/filters.py +++ b/Biasing/python/filters.py @@ -84,6 +84,17 @@ def __init__(self) : self.process = 'photonNuclear' +class TargetGammaMuMuFilter(simcfg.UserAction) : + """ Configuration for filtering muon conversion events in the target.""" + + def __init__(self) : + super().__init__("target_process_filter", "biasing::TargetProcessFilter") + + from LDMX.Biasing import include + include.library() + + self.process = 'GammaToMuPair' + class EcalDarkBremFilter(simcfg.UserAction): """ Configuration for filtering A' events @@ -125,15 +136,18 @@ class TaggerVetoFilter(simcfg.UserAction): ---------- thresh : float Minimum energy [MeV] that electron should have + reject_events_missing_tagger : bool + Also veto events where the primary particle misses the tagger region """ - def __init__(self,thresh=3800.) : + def __init__(self,thresh=3800., reject_events_missing_tagger=True) : super().__init__('tagger_veto_filter','biasing::TaggerVetoFilter') from LDMX.Biasing import include include.library() self.threshold = thresh + self.reject_events_missing_tagger = reject_events_missing_tagger class PrimaryToEcalFilter(simcfg.UserAction) : """ Configuration used to reject events where the primary doesn't reach the ecal with a mimimum energy diff --git a/Biasing/python/particle_filter.py b/Biasing/python/particle_filter.py index 6fea4593b..300dad13e 100644 --- a/Biasing/python/particle_filter.py +++ b/Biasing/python/particle_filter.py @@ -43,3 +43,69 @@ def kaon() : 321 # K^+ ] return particle_filter + +class PhotoNuclearTopologyFilter(simcfg.UserAction): + """Configuration for keeping events with a PN reaction producing a + particular event topology. + + Parameters + ---------- + name : str + Name for this filter + filter_class: + Name of the class that implements this filter. Should correspond to the + invocation of DECLARE_ACTION in PhotoNuclearTopologyFilters.cxx + + Attributes + ---------- + hard_particle_threshold: float + The kinetic energy threshold required to count a particle as "hard" + + count_light_ions: bool + Whether or not light ions (e.g. deutrons) should be considered when + applying the filter. Setting this to False can produce misleading + results such as classifying events with very high kinetic energy light + ions as "Nothing Hard" but it is the current default choice in the + PhotoNuclearDQM. 
+ + """ + def __init__(self, name, filter_class): + super().__init__(name, filter_class) + from LDMX.Biasing import include + include.library() + self.count_light_ions = True + + + def SingleNeutronFilter(): + """Configuration for keeping photonuclear events where a single neutron + carries most of the kinetic energy from the interaction + + + Returns + ------- + Instance of configured PhotoNuclearTopologyFilter + + """ + filter = PhotoNuclearTopologyFilter(name='SingleNeutron', + filter_class='biasing::SingleNeutronFilter' ) + filter.hard_particle_threshold = 200. + return filter + + def NothingHardFilter(): + """Configuration for keeping photonuclear events no particles received + significant kinetic energy. + + Returns + ------- + Instance of configured PhotoNuclearTopologyFilter + + """ + filter = PhotoNuclearTopologyFilter(name='NothingHard', + filter_class='biasing::NothingHardFilter') + filter.hard_particle_threshold = 200. + return filter + + + + + diff --git a/Biasing/python/target.py b/Biasing/python/target.py index 9bff37f22..a39a36d98 100644 --- a/Biasing/python/target.py +++ b/Biasing/python/target.py @@ -161,7 +161,7 @@ def gamma_mumu( detector, generator ) : sim.generators.append(generator) # Enable and configure the biasing - sim.biasing_operators = [ bias_operators.GammaToMuPair('target', 1.E9, 2500.) ] + sim.biasing_operators = [ bias_operators.GammaToMuPair('target', 1.E4, 2500.) ] # the following filters are in a library that needs to be included includeBiasing.library() @@ -169,7 +169,10 @@ def gamma_mumu( detector, generator ) : # Configure the sequence in which user actions should be called. sim.actions.extend([ # Only consider events where a hard brem occurs + filters.TaggerVetoFilter(), filters.TargetBremFilter(), + filters.TargetGammaMuMuFilter(), + util.TrackProcessFilter.gamma_mumu() ]) return sim @@ -216,17 +219,14 @@ def dark_brem( ap_mass , lhe, detector ) : #Activiate dark bremming with a certain A' mass and LHE library from LDMX.SimCore import dark_brem - db_model = dark_brem.VertexLibraryModel( lhe ) + db_model = dark_brem.G4DarkBreMModel(lhe) db_model.threshold = 2. #GeV - minimum energy electron needs to have to dark brem db_model.epsilon = 0.01 #decrease epsilon from one to help with Geant4 biasing calculations sim.dark_brem.activate( ap_mass , db_model ) #Biasing dark brem up inside of the target - #need to bias up high mass A' by more than 2 so that they can actually happen - from math import log10 - mass_power = max(log10(sim.dark_brem.ap_mass),2.) sim.biasing_operators = [ - bias_operators.DarkBrem.target(sim.dark_brem.ap_mass**mass_power / db_model.epsilon**2) + bias_operators.DarkBrem.target(sim.dark_brem.ap_mass**2 / db_model.epsilon**2) ] sim.actions.extend([ diff --git a/Biasing/python/util.py b/Biasing/python/util.py index f44363b15..e48d9a35a 100644 --- a/Biasing/python/util.py +++ b/Biasing/python/util.py @@ -91,7 +91,16 @@ def dark_brem() : Instance of TrackProcessFilter configured to tag dark brem tracks. """ - return TrackProcessFilter('eDarkBrem') + return TrackProcessFilter('DarkBrem') + + def gamma_mumu() : + """ Configuration used to tag all gamma --> mu+ mu- tracks to persist them to the event. + + Return + ------ + Instance of TrackProcessFilter configured to tag gamma --> mu+ mu- tracks. 
+ """ + return TrackProcessFilter('GammaToMuPair') class DecayChildrenKeeper(BiasingUtilityAction): """ Configuration used to store children of specific particle decays diff --git a/Biasing/src/Biasing/EcalDarkBremFilter.cxx b/Biasing/src/Biasing/EcalDarkBremFilter.cxx index 58157ff53..78276b9d8 100644 --- a/Biasing/src/Biasing/EcalDarkBremFilter.cxx +++ b/Biasing/src/Biasing/EcalDarkBremFilter.cxx @@ -10,8 +10,8 @@ #include "Biasing/EcalDarkBremFilter.h" #include "G4LogicalVolumeStore.hh" //for the store -#include "SimCore/DarkBrem/G4APrime.h" //checking if particles match A' -#include "SimCore/DarkBrem/G4eDarkBremsstrahlung.h" //checking for dark brem secondaries +#include "G4DarkBreM/G4APrime.h" //checking if particles match A' +#include "G4DarkBreM/G4DarkBremsstrahlung.h" //checking for dark brem secondaries #include "SimCore/UserTrackInformation.h" //make sure A' is saved namespace biasing { @@ -60,7 +60,7 @@ void EcalDarkBremFilter::BeginOfEventAction(const G4Event*) { G4ClassificationOfNewTrack EcalDarkBremFilter::ClassifyNewTrack( const G4Track* aTrack, const G4ClassificationOfNewTrack& cl) { - if (aTrack->GetParticleDefinition() == simcore::darkbrem::G4APrime::APrime()) { + if (aTrack->GetParticleDefinition() == G4APrime::APrime()) { // there is an A'! Yay! /* Debug message std::cout << "[ EcalDarkBremFilter ]: " @@ -98,12 +98,12 @@ void EcalDarkBremFilter::PostUserTrackingAction(const G4Track* track) { const G4VProcess* creator = track->GetCreatorProcess(); if (creator and creator->GetProcessName().contains( - simcore::darkbrem::G4eDarkBremsstrahlung::PROCESS_NAME)) { + G4DarkBremsstrahlung::PROCESS_NAME)) { // make sure all secondaries of dark brem process are saved simcore::UserTrackInformation* userInfo = simcore::UserTrackInformation::get(track); // make sure A' is persisted into output file userInfo->setSaveFlag(true); - if (track->GetParticleDefinition() == simcore::darkbrem::G4APrime::APrime()) { + if (track->GetParticleDefinition() == G4APrime::APrime()) { // check if A' was made in the desired volume and has the minimum energy if (not inDesiredVolume(track)) { AbortEvent("A' wasn't produced inside of the requested volume."); diff --git a/Biasing/src/Biasing/PhotoNuclearTopologyFilters.cxx b/Biasing/src/Biasing/PhotoNuclearTopologyFilters.cxx new file mode 100644 index 000000000..22ef7c4d5 --- /dev/null +++ b/Biasing/src/Biasing/PhotoNuclearTopologyFilters.cxx @@ -0,0 +1,79 @@ +#include "Biasing/PhotoNuclearTopologyFilters.h" + +namespace biasing { + +bool NothingHardFilter::rejectEvent( + const std::vector& secondaries) const { + for (const auto& secondary : secondaries) { + // Get the PDG ID of the track + const auto pdgID{ + std::abs(secondary->GetParticleDefinition()->GetPDGEncoding())}; + if (skipCountingParticle(pdgID)) { + continue; + } + auto energy{secondary->GetKineticEnergy()}; + if (energy > hard_particle_threshold_) { + return true; + } + } + return false; +} +bool SingleNeutronFilter::rejectEvent( + const std::vector& secondaries) const { + int hard_particles{0}; + int hard_neutrons{0}; + for (const auto& secondary : secondaries) { + // Get the PDG ID of the track + const auto pdgID{ + std::abs(secondary->GetParticleDefinition()->GetPDGEncoding())}; + if (skipCountingParticle(pdgID)) { + continue; + } + auto energy{secondary->GetKineticEnergy()}; + if (energy > hard_particle_threshold_) { + hard_particles++; + if (isNeutron(pdgID)) { + hard_neutrons++; + } + } + } + auto reject{hard_particles != hard_neutrons || hard_particles != 1}; + return reject; +} + 
+PhotoNuclearTopologyFilter::PhotoNuclearTopologyFilter(
+    const std::string& name, framework::config::Parameters& parameters)
+    : UserAction{name, parameters},
+      hard_particle_threshold_{
+          parameters.getParameter<double>("hard_particle_threshold")},
+      count_light_ions_{parameters.getParameter<bool>("count_light_ions")} {}
+
+void PhotoNuclearTopologyFilter::stepping(const G4Step* step) {
+  // Get the track associated with this step.
+  auto track{step->GetTrack()};
+
+  // Get the track info and check if this track has been tagged as the
+  // photon that underwent a photo-nuclear reaction. Only those tracks
+  // tagged as PN photons will be processed. The track is currently only
+  // tagged by the UserAction ECalProcessFilter which needs to be run
+  // before this UserAction.
+  auto trackInfo{simcore::UserTrackInformation::get(track)};
+  if ((trackInfo != nullptr) && !trackInfo->isPNGamma()) return;
+
+  // Get the PN photon daughters.
+  auto secondaries{step->GetSecondary()};
+
+  if (rejectEvent(*secondaries)) {
+    track->SetTrackStatus(fKillTrackAndSecondaries);
+    G4RunManager::GetRunManager()->AbortEvent();
+  }
+
+  // Once the PN gamma has been processed, untag it so it's not reprocessed
+  // again.
+  trackInfo->tagPNGamma(false);
+}
+
+}  // namespace biasing
+
+DECLARE_ACTION(biasing, NothingHardFilter)
+DECLARE_ACTION(biasing, SingleNeutronFilter)
diff --git a/Biasing/src/Biasing/TaggerVetoFilter.cxx b/Biasing/src/Biasing/TaggerVetoFilter.cxx
index ab87dba5f..b899e56ac 100644
--- a/Biasing/src/Biasing/TaggerVetoFilter.cxx
+++ b/Biasing/src/Biasing/TaggerVetoFilter.cxx
@@ -9,20 +9,31 @@
 namespace biasing {
 
-TaggerVetoFilter::TaggerVetoFilter(const std::string& name,
-                                   framework::config::Parameters& parameters)
+TaggerVetoFilter::TaggerVetoFilter(const std::string &name,
+                                   framework::config::Parameters &parameters)
     : simcore::UserAction(name, parameters) {
   threshold_ = parameters.getParameter<double>("threshold");
+  reject_primaries_missing_tagger_ =
+      parameters.getParameter<bool>("reject_events_missing_tagger");
 }
 
 TaggerVetoFilter::~TaggerVetoFilter() {}
 
-void TaggerVetoFilter::stepping(const G4Step* step) {
+void TaggerVetoFilter::BeginOfEventAction(const G4Event *) {
+  primary_entered_tagger_region_ = false;
+}
+void TaggerVetoFilter::EndOfEventAction(const G4Event *) {
+  if (reject_primaries_missing_tagger_ && !primary_entered_tagger_region_) {
+    G4RunManager::GetRunManager()->AbortEvent();
+  }
+}
+void TaggerVetoFilter::stepping(const G4Step *step) {
   // Get the track associated with this step
   auto track{step->GetTrack()};
 
   // Only process the primary electron track
-  if (track->GetParentID() != 0) return;
+  if (track->GetParentID() != 0)
+    return;
 
   // Get the PDG ID of the track and make sure it's an electron. If
   // another particle type is found, throw an exception.
@@ -36,16 +47,24 @@ void TaggerVetoFilter::stepping(const G4Step* step) {
       region.compareTo("tagger") != 0)
     return;
 
+  primary_entered_tagger_region_ = true;
   // If the energy of the particle falls below threshold, stop
   // processing the event.
   if (auto energy{step->GetPostStepPoint()->GetTotalEnergy()};
       energy < threshold_) {
     track->SetTrackStatus(fKillTrackAndSecondaries);
     G4RunManager::GetRunManager()->AbortEvent();
+    /* debug printout
+    std::cout << "[ TaggerVetoFilter ]: ("
+              << G4EventManager::GetEventManager()
+                     ->GetConstCurrentEvent()->GetEventID()
+              << ") Primary lost too much energy before the target. Aborting event."
+ << std::endl; + */ return; } } -} // namespace biasing +} // namespace biasing DECLARE_ACTION(biasing, TaggerVetoFilter) diff --git a/Biasing/src/Biasing/TargetDarkBremFilter.cxx b/Biasing/src/Biasing/TargetDarkBremFilter.cxx index 35fc16fe9..214f6dfc6 100644 --- a/Biasing/src/Biasing/TargetDarkBremFilter.cxx +++ b/Biasing/src/Biasing/TargetDarkBremFilter.cxx @@ -10,7 +10,7 @@ #include "Biasing/TargetDarkBremFilter.h" #include "G4Electron.hh" //to check if track is electron -#include "SimCore/DarkBrem/G4APrime.h" //checking if particles match A' +#include "G4DarkBreM/G4APrime.h" //checking if particles match A' #include "SimCore/UserTrackInformation.h" //make sure A' is saved namespace biasing { @@ -21,6 +21,10 @@ TargetDarkBremFilter::TargetDarkBremFilter(const std::string& name, threshold_ = parameters.getParameter("threshold"); } +void TargetDarkBremFilter::BeginOfEventAction(const G4Event*) { + found_aprime_ = false; +} + void TargetDarkBremFilter::stepping(const G4Step* step) { // don't process if event is already aborted if (G4EventManager::GetEventManager()->GetConstCurrentEvent()->IsAborted()) @@ -32,7 +36,7 @@ void TargetDarkBremFilter::stepping(const G4Step* step) { // Leave if track is not an electron auto particle_def{track->GetParticleDefinition()}; if (particle_def != G4Electron::Electron()) { - if (particle_def == simcore::darkbrem::G4APrime::APrime() and + if (particle_def == G4APrime::APrime() and track->GetCurrentStepNumber() == 1) { /** check on first step of A' to see if it originated in correct volume * this needs to be here because sometimes the @@ -48,6 +52,8 @@ void TargetDarkBremFilter::stepping(const G4Step* step) { */ if (isOutsideTargetRegion(track->GetLogicalVolumeAtVertex())) AbortEvent("A' was not created within target."); + // A' was found and originated in correct region + found_aprime_ = true; } // first step of A' return; } @@ -71,8 +77,7 @@ void TargetDarkBremFilter::stepping(const G4Step* step) { } else { // check secondaries to see if we made a dark brem for (auto& secondary_track : *secondaries) { - if (secondary_track->GetParticleDefinition() == - simcore::darkbrem::G4APrime::APrime()) { + if (secondary_track->GetParticleDefinition() == G4APrime::APrime()) { // we found an A', woo-hoo! if (secondary_track->GetTotalEnergy() < threshold_) { @@ -114,6 +119,12 @@ void TargetDarkBremFilter::stepping(const G4Step* step) { return; } +void TargetDarkBremFilter::EndOfEventAction(const G4Event* event) { + if (not event->IsAborted() and not found_aprime_) { + AbortEvent("Did not find an A' after entire event was simulated."); + } +} + void TargetDarkBremFilter::AbortEvent(const std::string& reason) const { if (G4RunManager::GetRunManager()->GetVerboseLevel() > 1) { std::cout << "[ TargetDarkBremFilter ]: " diff --git a/Biasing/src/Biasing/TargetProcessFilter.cxx b/Biasing/src/Biasing/TargetProcessFilter.cxx index 0d1a7f6bb..84efeca16 100644 --- a/Biasing/src/Biasing/TargetProcessFilter.cxx +++ b/Biasing/src/Biasing/TargetProcessFilter.cxx @@ -50,19 +50,38 @@ void TargetProcessFilter::stepping(const G4Step* step) { // Get the track associated with this step. auto track{step->GetTrack()}; + if (G4EventManager::GetEventManager()->GetConstCurrentEvent()->IsAborted()) + return; + // Get the track info and check if this track is a brem candidate auto trackInfo{simcore::UserTrackInformation::get(track)}; if ((trackInfo != nullptr) && !trackInfo->isBremCandidate()) return; - // Get the region the particle is currently in. 
Continue processing - // the particle only if it's in the calorimeter region. + // Get the particles daughters. + auto secondaries{step->GetSecondary()}; + + // Get the region the particle is currently in. Continue processing + // the particle only if it's in the target region. if (auto region{ track->GetVolume()->GetLogicalVolume()->GetRegion()->GetName()}; - region.compareTo("target") != 0) + region.compareTo("target") != 0) { + // If secondaries were produced outside of the volume of interest, + // and there aren't additional brems to process, abort the event. + // Otherwise, suspend the track and move on to the next brem. + if (secondaries->size() != 0) { + if (getEventInfo()->bremCandidateCount() == 1) { + track->SetTrackStatus(fKillTrackAndSecondaries); + G4RunManager::GetRunManager()->AbortEvent(); + currentTrack_ = nullptr; + } else { + currentTrack_ = track; + track->SetTrackStatus(fSuspend); + getEventInfo()->decBremCandidateCount(); + trackInfo->tagBremCandidate(false); + } + } return; - - // Get the particles daughters. - auto secondaries{step->GetSecondary()}; + } // If the brem photon doesn't undergo any reaction in the target, stop // processing the rest of the event if the particle is exiting the @@ -72,7 +91,7 @@ void TargetProcessFilter::stepping(const G4Step* step) { * Check if the electron will be exiting the target * * The 'recoil_PV' volume name is automatically constructed by Geant4's - * GDML parser and was found by inspecting the geometry using a + * GDML parser and was found by inspecting the geometry using a * visualization. This Physical Volume (PV) is associated with the * recoil parent volume and so it will break if the recoil parent volume * changes its name. @@ -81,23 +100,23 @@ void TargetProcessFilter::stepping(const G4Step* step) { * an air gap between the target region and the recoil tracker. */ if (auto volume{track->GetNextVolume()->GetName()}; - volume.compareTo("recoil_PV") == 0 or + volume.compareTo("recoil_PV") == 0 or volume.compareTo("World_PV") == 0) { - if (secondaries->size() != 0) { - if (getEventInfo()->bremCandidateCount() == 1) { - track->SetTrackStatus(fKillTrackAndSecondaries); - G4RunManager::GetRunManager()->AbortEvent(); - currentTrack_ = nullptr; - } else { - currentTrack_ = track; - track->SetTrackStatus(fSuspend); - getEventInfo()->decBremCandidateCount(); - trackInfo->tagBremCandidate(false); - } + if (getEventInfo()->bremCandidateCount() == 1) { + track->SetTrackStatus(fKillTrackAndSecondaries); + G4RunManager::GetRunManager()->AbortEvent(); + currentTrack_ = nullptr; + } else { + currentTrack_ = track; + track->SetTrackStatus(fSuspend); + getEventInfo()->decBremCandidateCount(); + trackInfo->tagBremCandidate(false); } - return; } + return; } else { + // If the brem gamma interacts and produced secondaries, get the + // process used to create them. G4String processName = secondaries->at(0)->GetCreatorProcess()->GetProcessName(); @@ -113,12 +132,19 @@ void TargetProcessFilter::stepping(const G4Step* step) { getEventInfo()->decBremCandidateCount(); trackInfo->tagBremCandidate(false); } + return; } - std::cout << "[ TargetProcessFilter ]: " - << "Brem photon produced " << secondaries->size() + if (G4RunManager::GetRunManager()->GetVerboseLevel() > 1) { + std::cout << "[ TargetProcessFilter ]: " + << G4EventManager::GetEventManager() + ->GetConstCurrentEvent() + ->GetEventID() + << " Brem photon produced " << secondaries->size() << " particle via " << processName << " process." 
<< std::endl; + } trackInfo->tagBremCandidate(false); + trackInfo->setSaveFlag(true); trackInfo->tagPNGamma(); getEventInfo()->decBremCandidateCount(); } diff --git a/Biasing/test/ecal_db.py b/Biasing/test/ecal_db.py index 1d128c675..4005b041c 100644 --- a/Biasing/test/ecal_db.py +++ b/Biasing/test/ecal_db.py @@ -1,14 +1,13 @@ from LDMX.Framework import ldmxcfg p = ldmxcfg.Process('ecal_db') from LDMX.Biasing import eat -from LDMX.SimCore import makePath from LDMX.Ecal import EcalGeometry from LDMX.Hcal import HcalGeometry p.sequence = [ eat.dark_brem( - 10., #MeV - mass of A' - makePath.makeLHEPath(10.), #str - full path to directory containing LHE vertex files - 'ldmx-det-v12' , #name of geometry to use + 100., #MeV - mass of A' + 'SimCore/G4DarkBreM/data/electron_tungsten_MaxE_4.0_MinE_0.2_RelEStep_0.1_UndecayedAP_mA_0.1_run_3000.csv.gz', + 'ldmx-det-v14' , #name of geometry to use ) ] p.maxTriesPerEvent = 1000 diff --git a/Biasing/test/ecal_pn.py b/Biasing/test/ecal_pn.py index be25175a6..db7cab31e 100644 --- a/Biasing/test/ecal_pn.py +++ b/Biasing/test/ecal_pn.py @@ -7,7 +7,7 @@ import LDMX.Hcal.HcalGeometry p.sequence = [ ecal.photo_nuclear( - 'ldmx-det-v12' , + 'ldmx-det-v14' , generators.single_4gev_e_upstream_tagger() ) ] diff --git a/Biasing/test/energy_sort.py b/Biasing/test/energy_sort.py index 4133621bf..8694240be 100644 --- a/Biasing/test/energy_sort.py +++ b/Biasing/test/energy_sort.py @@ -8,7 +8,7 @@ import LDMX.Ecal.EcalGeometry import LDMX.Hcal.HcalGeometry mySim = sim.simulator( "mySim" ) -mySim.setDetector( 'ldmx-det-v12' ) +mySim.setDetector( 'ldmx-det-v14' ) from LDMX.SimCore import generators as gen mySim.generators.append( gen.single_4gev_e_upstream_tagger() ) mySim.description = 'Basic test Simulation' diff --git a/Biasing/test/old_target_db.py b/Biasing/test/old_target_db.py deleted file mode 100644 index 03c3771c3..000000000 --- a/Biasing/test/old_target_db.py +++ /dev/null @@ -1,30 +0,0 @@ -from LDMX.Framework import ldmxcfg - -p = ldmxcfg.Process('old_signal') - -from LDMX.SimCore import simulator -sim = simulator.simulator('old_signal_sim') -sim.description = 'Place dark brem events inside target volume.' -sim.setDetector( 'ldmx-det-v12' , True ) - -ap_mass = 1000. 
#MeV -from LDMX.SimCore import makePath -db_event_lib_path = makePath.makeLHEPath(ap_mass) -print(db_event_lib_path) - -import glob -possible_lhes = glob.glob( db_event_lib_path+'/*4.0*.lhe' ) - -if len(possible_lhes) == 1 : - the_lhe = possible_lhes[0].strip() -else : - raise Exception("Not exactly one LHE file simulated for 4GeV Beam and input mass") - -p.maxEvents = 10 # assume LHE has at least 10 events in it - -from LDMX.SimCore import generators -sim.dark_brem.activate(ap_mass) -sim.generators = [ generators.lhe('dark_brem', the_lhe ) ] -sim.beamSpotSmear = [ 20., 80., 0.3504 ] #mm - -p.outputFiles = [ '/tmp/old_signal.root' ] diff --git a/Biasing/test/target_db.py b/Biasing/test/target_db.py index e1cd2a1e1..8845a7bfd 100644 --- a/Biasing/test/target_db.py +++ b/Biasing/test/target_db.py @@ -1,14 +1,13 @@ from LDMX.Framework import ldmxcfg p = ldmxcfg.Process('target_db') from LDMX.Biasing import target -from LDMX.SimCore import makePath import LDMX.Ecal.EcalGeometry import LDMX.Hcal.HcalGeometry p.sequence = [ target.dark_brem( - 10., #MeV - mass of A' - makePath.makeLHEPath(10.), #str - full path to directory containing LHE vertex files - 'ldmx-det-v12' , #name of geometry to use + 100., #MeV - mass of A' + 'SimCore/G4DarkBreM/data/electron_tungsten_MaxE_4.0_MinE_0.2_RelEStep_0.1_UndecayedAP_mA_0.1_run_3000.csv.gz', + 'ldmx-det-v14', #name of geometry to use ) ] p.maxEvents = 1000 diff --git a/Biasing/test/target_en.py b/Biasing/test/target_en.py index 37f648155..2a01aaf3b 100644 --- a/Biasing/test/target_en.py +++ b/Biasing/test/target_en.py @@ -6,7 +6,7 @@ import LDMX.Hcal.HcalGeometry p.sequence = [ target.electro_nuclear( - 'ldmx-det-v12' , + 'ldmx-det-v14' , generators.single_4gev_e_upstream_tagger() ) ] diff --git a/Biasing/test/target_mumu.py b/Biasing/test/target_mumu.py index 6addac007..3ca7a2c90 100644 --- a/Biasing/test/target_mumu.py +++ b/Biasing/test/target_mumu.py @@ -6,7 +6,7 @@ import LDMX.Hcal.HcalGeometry p.sequence = [ target.gamma_mumu( - 'ldmx-det-v12' , + 'ldmx-det-v14' , generators.single_4gev_e_upstream_tagger() ) ] diff --git a/Biasing/test/target_pn.py b/Biasing/test/target_pn.py index dde9e045f..24afad1b2 100644 --- a/Biasing/test/target_pn.py +++ b/Biasing/test/target_pn.py @@ -6,7 +6,7 @@ import LDMX.Hcal.HcalGeometry p.sequence = [ target.photo_nuclear( - 'ldmx-det-v12' , + 'ldmx-det-v14' , generators.single_4gev_e_upstream_tagger() ) ] diff --git a/CMakeLists.txt b/CMakeLists.txt index 382d5e88c..69707a95e 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -70,6 +70,7 @@ add_subdirectory(Ecal ${CMAKE_BINARY_DIR}/EcalEvent) add_subdirectory(Hcal ${CMAKE_BINARY_DIR}/HcalEvent) add_subdirectory(TrigScint ${CMAKE_BINARY_DIR}/TrigScintEvent) add_subdirectory(BeamInstrumentation ${CMAKE_BINARY_DIR}/BeamInstrumentationEvent) +add_subdirectory(Tracking ${CMAKE_BINARY_DIR}/TrackingEvent) # Once the event libraries have been built, turn off the global option. set(BUILD_EVENT_ONLY OFF CACHE BOOL "Build the SimCore library." 
FORCE) diff --git a/Conditions b/Conditions index 823da3bc2..4056cf868 160000 --- a/Conditions +++ b/Conditions @@ -1 +1 @@ -Subproject commit 823da3bc28fa4b911d70544d252bc1b16905bcf7 +Subproject commit 4056cf8684bd6eea0e13e4ab3e24958ccf8124e3 diff --git a/DQM/include/DQM/HCalDQM.h b/DQM/include/DQM/HCalDQM.h index 0eb45591d..94f0260ff 100644 --- a/DQM/include/DQM/HCalDQM.h +++ b/DQM/include/DQM/HCalDQM.h @@ -14,18 +14,23 @@ /*~~~~~~~~~~~~~~~*/ /* Framework */ /*~~~~~~~~~~~~~~~*/ +#include "DetDescr/HcalGeometry.h" +#include "DetDescr/HcalID.h" #include "Framework/Configure/Parameters.h" #include "Framework/Event.h" +#include "Framework/EventFile.h" #include "Framework/EventProcessor.h" - +#include "Hcal/Event/HcalHit.h" +#include "Hcal/Event/HcalVetoResult.h" +#include "SimCore/Event/SimCalorimeterHit.h" #include "Tools/AnalysisUtils.h" - +#include namespace dqm { class HCalDQM : public framework::Analyzer { - public: +public: /** Constructor */ - HCalDQM(const std::string& name, framework::Process& process); + HCalDQM(const std::string &name, framework::Process &process); /** Destructor */ ~HCalDQM() {} @@ -35,16 +40,38 @@ class HCalDQM : public framework::Analyzer { * * @param parameters Set of parameters used to configure this processor. */ - void configure(framework::config::Parameters& parameters) final override; + void configure(framework::config::Parameters ¶meters) final override; /** * Process the event and make histograms ro summaries. * * @param event The event to analyze. */ - void analyze(const framework::Event& event); + void analyze(const framework::Event &event) override; + + bool skipHit(const ldmx::HcalID &id) { + const auto section{id.section()}; + return (section != section_ && section_ != -1); + } + void analyzeRecHits(const std::vector &hits); + void analyzeSimHits(const std::vector &hits); - private: + bool hitPassesVeto(const ldmx::HcalHit &hit, int section) { + if (hit.getPE() < pe_veto_threshold || hit.getTime() > max_hit_time_) { + return true; + } + if (section == ldmx::HcalID::HcalSection::BACK && hit.getMinPE() < 1) { + return true; + } + return false; + } + +private: + /// Hcal Sim Hits collection name + std::string sim_coll_name_; + + /// Hcal Sim Hits pass name + std::string sim_pass_name_; /// Hcal Rec Hits collection name std::string rec_coll_name_; @@ -56,8 +83,13 @@ class HCalDQM : public framework::Analyzer { /// Hcal Veto pass name std::string veto_pass_; + + // Veto threshold for photo-electrons + float pe_veto_threshold; + int section_; + double max_hit_time_; }; -} // namespace dqm +} // namespace dqm -#endif // _DQM_HCAL_DQM_H_ +#endif // _DQM_HCAL_DQM_H_ diff --git a/DQM/include/DQM/HcalInefficiencyDQM.h b/DQM/include/DQM/HcalInefficiencyDQM.h new file mode 100644 index 000000000..fb67a0d57 --- /dev/null +++ b/DQM/include/DQM/HcalInefficiencyDQM.h @@ -0,0 +1,57 @@ +#ifndef HCALINEFFICIENCYDQM_H +#define HCALINEFFICIENCYDQM_H +#include "Framework/Configure/Parameters.h" +#include "Framework/Event.h" +#include "Framework/EventProcessor.h" +#include "SimCore/Event/SimCalorimeterHit.h" +#include +#include +#include +#include +#include +namespace dqm { +class HcalInefficiencyAnalyzer : public framework::Analyzer { +public: + HcalInefficiencyAnalyzer(const std::string &name, framework::Process &process) + : framework::Analyzer{name, process} {} + + enum vetoCategories { + back = 0, + top = 1, + bottom = 2, + right = 3, + left = 4, + any = 5, + both = 6, + back_only = 7, + side_only = 8, + neither = 9 + }; + void configure(framework::config::Parameters 
&parameters) override;
+
+  bool hitPassesVeto(const ldmx::HcalHit &hit, int section) {
+    if (hit.getPE() < pe_veto_threshold || hit.getTime() > max_hit_time_) {
+      return true;
+    }
+    if (section == ldmx::HcalID::HcalSection::BACK && hit.getMinPE() < 1) {
+      return true;
+    }
+    return false;
+  }
+
+  void analyze(const framework::Event &event) override;
+
+private:
+  std::string hcalSimHitsCollection_{"HcalSimHits"};
+  std::string hcalRecHitsCollection_{"HcalRecHits"};
+  std::string hcalSimHitsPassName_{""};
+  std::string hcalRecHitsPassName_{""};
+
+  // Veto threshold for photo-electrons
+  float pe_veto_threshold;
+  double max_hit_time_;
+};
+
+} // namespace dqm
+
+#endif /* HCALINEFFICIENCYDQM_H */
diff --git a/DQM/include/DQM/PhotoNuclearDQM.h b/DQM/include/DQM/PhotoNuclearDQM.h
index 64386cf8f..72fe7085b 100644
--- a/DQM/include/DQM/PhotoNuclearDQM.h
+++ b/DQM/include/DQM/PhotoNuclearDQM.h
@@ -16,9 +16,43 @@ class Event;
 class SimParticle;
 
 class PhotoNuclearDQM : public framework::Analyzer {
- public:
+
+  // Classification schemes for PN events
+  enum class CompactEventType {
+    single_neutron = 0,
+    single_charged_kaon = 1,
+    single_neutral_kaon = 2,
+    two_neutrons = 3,
+    soft = 4,
+    other = 5,
+  };
+  enum class EventType {
+    nothing_hard = 0,
+    single_neutron = 1,
+    two_neutrons = 2,
+    three_or_more_neutrons = 3,
+    single_charged_pion = 4,
+    two_charged_pions = 5,
+    single_neutral_pion = 6,
+    single_charged_pion_and_nucleon = 7,
+    single_charged_pion_and_two_nucleons = 8,
+    two_charged_pions_and_nucleon = 9,
+    single_neutral_pion_and_nucleon = 10,
+    single_neutral_pion_and_two_nucleons = 11,
+    single_neutral_pion_charged_pion_and_nucleon = 12,
+    single_proton = 13,
+    two_protons = 14,
+    proton_neutron = 15,
+    klong = 16,
+    charged_kaon = 17,
+    kshort = 18,
+    exotics = 19,
+    multibody = 20,
+  };
+
+public:
   /// Constructor
-  PhotoNuclearDQM(const std::string& name, framework::Process& process);
+  PhotoNuclearDQM(const std::string &name, framework::Process &process);
 
   /// Destructor
   ~PhotoNuclearDQM();
@@ -29,49 +63,96 @@ class PhotoNuclearDQM : public framework::Analyzer {
    * @param parameters Set of parameters used to configure this
    * analyzer.
    */
-  void configure(framework::config::Parameters& parameters) final override;
+  void configure(framework::config::Parameters &parameters) final override;
 
   /**
    * Process the event and create the histogram summaries.
    *
    * @param event The event to analyze.
    */
-  void analyze(const framework::Event& event) final override;
+  void analyze(const framework::Event &event) final override;
 
   /// Method executed before processing of events begins.
   void onProcessStart();
 
- private:
+private:
+  /** Method used to classify events. Note: Assumes that daughters is sorted by
+   * kinetic energy. */
+  EventType
+  classifyEvent(const std::vector<const ldmx::SimParticle *> daughters,
+                double threshold);
+
+  /** Method used to classify events in a compact manner. */
+  CompactEventType
+  classifyCompactEvent(const ldmx::SimParticle *pnGamma,
+                       const std::vector<const ldmx::SimParticle *> daughters,
+                       double threshold);
+
+  /**
+   * Fill the recoil electron-histograms
+   * */
+  void findRecoilProperties(const ldmx::SimParticle *recoil);
+
+  /**
-   * Print the particle tree.
    *
-   * @param[in] particleMap The map containing the SimParticles.
-   */
-  void printParticleTree(std::map<int, ldmx::SimParticle> particleMap);
+   * Find all daughter particles of a parent. Particles are included if they
+   *
+   * - Are in the particle map,
+   * - Are not photons or nuclear fragment, and
+   * - Are not a light ion (Z < 4) if the \ref count_light_ions_ parameter is
+   set to false
+   *
+   * The products are sorted by kinetic energy, in descending order.
+   *
+   **/
+  std::vector<const ldmx::SimParticle *>
+  findDaughters(const std::map<int, ldmx::SimParticle> particleMap,
+                const ldmx::SimParticle *parent) const;
 
   /**
-   * Print the daughters of a particle.
    *
-   * @param[in] particleMap The map containing the SimParticles.
-   * @param[in] particle The particle whose daughters will be printed.
-   * @param[in] depth The tree depth.
+   * Fill histograms related to kinematics of PN products.
    *
-   * @return[out] A vector with the track IDs of particles that have
-   * already been printed.
-   */
-  std::vector<int> printDaughters(std::map<int, ldmx::SimParticle> particleMap,
-                                  const ldmx::SimParticle particle, int depth);
+   **/
+  void findParticleKinematics(
+      const std::vector<const ldmx::SimParticle *> &pnDaughters);
 
-  /** Method used to classify events. */
-  int classifyEvent(const std::vector<const ldmx::SimParticle *> daughters,
-                    double threshold);
+  /**
+   *
+   * Fill histograms related to the kinematics and subleading particles for 1n,
+   * kaon, and 2n type events. Note: This assumes that the daughter particles
+   * are sorted by energy.
+   **/
+  void findSubleadingKinematics(
+      const ldmx::SimParticle *pnGamma,
+      const std::vector<const ldmx::SimParticle *> &pnDaughters, //
+      const EventType eventType);
 
-  /** Method used to classify events in a compact manner. */
-  int classifyCompactEvent(
-      const ldmx::SimParticle* pnGamma,
-      const std::vector<const ldmx::SimParticle *> daughters, double threshold);
+public:
+  /**
+   * Check if the PDG code corresponds to a light ion.
+   *
+   * Nuclear PDG codes are given by ±10LZZZAAAI So to find the atomic
+   * number, we first divide by 10 (to lose the I-component) and then
+   * take the modulo with 1000.
+   *
+   * TODO: Repeated code from SimCore, could probably live elsewhere.
+   *
+   */
+  constexpr bool isLightIon(const int pdgCode) const {
+    //
+    // TODO: Is the < check necessary?
+    if (pdgCode > 1000000000 && pdgCode < 10000000000) {
+      // Check if the atomic number is less than or equal to 4
+      return ((pdgCode / 10) % 1000) <= 4;
+    }
+    return false;
+  }
+
+  bool verbose_;
+  bool count_light_ions_;
 };
-}  // namespace dqm
+} // namespace dqm
 
-#endif  // _DQM_ECAL_PN_H_
+#endif // _DQM_ECAL_PN_H_
diff --git a/DQM/python/dqm.py b/DQM/python/dqm.py
index ceabaff67..96ade9216 100644
--- a/DQM/python/dqm.py
+++ b/DQM/python/dqm.py
@@ -1,6 +1,140 @@
 """Configuration for DQM analyzers"""
 from LDMX.Framework import ldmxcfg
 
+class HCalDQM(ldmxcfg.Analyzer) :
+    """Configured HCalDQM python object
+
+    Contains an instance of HCalDQM that
+    has already been configured.
+
+    Builds the necessary histograms as well.
+ + Examples + -------- + from LDMX.DQM import dqm + p.sequence.append( dqm.HCalDQM() ) + """ + + + + def __init__(self,name="hcal_dqm", pe_threshold=5, section=0, max_hit_time = 50.0) : + self.section = section + section_names = ['back', 'top', 'bottom', 'right', 'left'] + section_name = section_names[section] + super().__init__(name + f'_{section_name}','dqm::HCalDQM','DQM') + + self.pe_veto_threshold = float(pe_threshold) + self.max_hit_time = max_hit_time + self.rec_coll_name = 'HcalRecHits' + self.rec_pass_name = '' + self.sim_coll_name = 'HcalSimHits' + self.sim_pass_name = '' + + pe_bins = [1500, 0, 1500] + time_bins = [100, -100, 500] + layer_bins = [100,0,100] + multiplicity_bins = [400,0,400] + energy_bins = [200,0,200] + total_energy_bins = [1000, 0, 1000] + self.build1DHistogram('sim_along_x', 'x', 1200, -3000,3000) + self.build1DHistogram('sim_along_y', 'y', 1200, -3000,3000) + self.build1DHistogram('sim_along_z', 'z', 1200, 0,6000) + self.build1DHistogram('along_x', 'x', 1200, -3000,3000) + self.build1DHistogram('along_y', 'y', 1200, -3000,3000) + self.build1DHistogram('along_z', 'z', 1200, 0,6000) + # Per hit + self.build1DHistogram("pe", + f"Photoelectrons in the HCal ({section_name})", + *pe_bins) + self.build1DHistogram('hit_time', f'HCal hit time ({section_name}) [ns]', + *time_bins) + self.build1DHistogram('sim_hit_time', f'HCal hit time ({section_name}) [ns]', + *time_bins) + self.build1DHistogram("layer", f"Layer number ({section_name})", + *layer_bins) + self.build1DHistogram("sim_layer", f"Layer number ({section_name})", + *layer_bins) + self.build1DHistogram("noise", + f"Is pure noise hit? ({section_name})", 2, 0, 1) + + self.build1DHistogram("energy", + f"Reconstructed hit energy in the HCal ({section_name})", + *energy_bins) + + self.build1DHistogram("sim_energy", + f"Simulated hit energy in the HCal ({section_name})", + *energy_bins) + self.build1DHistogram("sim_energy_per_bar", + f"Simulated hit energy per bar in the HCal ({section_name})", + *energy_bins) + # Once per event + self.build1DHistogram("total_energy", + f"Total reconstructed energy in the HCal ({section_name})", + *total_energy_bins) + self.build1DHistogram("sim_total_energy", + f"Total simulated energy in the HCal ({section_name})", + *total_energy_bins) + self.build1DHistogram("total_pe", + f"Total photoelectrons in the HCal ({section_name})", + 200,0,10000) + self.build1DHistogram('max_pe', + f"Maximum photoelectrons in the HCal ({section_name})", + *pe_bins) + self.build2DHistogram('sim_layer:strip', + f'HCal Layer ({section_name})', + *layer_bins, + 'Back HCal Strip', 62,0,62 ) + self.build2DHistogram('layer:strip', + f'HCal Layer ({section_name})', + *layer_bins, + 'Back HCal Strip', 62,0,62 ) + self.build1DHistogram("hit_multiplicity", + f"HCal hit multiplicity ({section_name})", + *multiplicity_bins) + self.build1DHistogram("sim_hit_multiplicity", + f"HCal hit multiplicity ({section_name})", + *multiplicity_bins) + self.build1DHistogram("sim_num_bars_hit", + f"HCal hit multiplicity ({section_name})", + *multiplicity_bins) + self.build1DHistogram("vetoable_hit_multiplicity", + f"Multiplicity of vetoable hits at {pe_threshold} PE ({section_name})", + *multiplicity_bins) + self.build1DHistogram('max_pe_time', + f"Max PE hit time ({section_name}) [ns]", + *time_bins) + self.build1DHistogram('hit_z', 'Reconstructed Z position in the HCal ({section_name}) [mm]', + 1000, 0, 6000 + ) + + + +class HcalInefficiencyAnalyzer(ldmxcfg.Analyzer): + def __init__(self,name="HcalInefficiencyAnalyzer", 
num_sections=5,
+                 pe_threshold=5, max_hit_time=50.0):
+        super().__init__(name,'dqm::HcalInefficiencyAnalyzer','DQM')
+
+        self.sim_coll_name = "HcalSimHits"
+        self.sim_pass_name = "" #use whatever pass is available
+
+        self.rec_coll_name= "HcalRecHits"
+        self.rec_pass_name= "" #use whatever pass is available
+
+        self.pe_veto_threshold = float(pe_threshold)
+        self.max_hit_time = max_hit_time
+
+        section_names = ['back', 'top', 'bottom', 'right', 'left']
+        inefficiency_depth_bins = [6000, 0., 6000.]
+        inefficiency_layer_bins = [100, 0, 100]
+        # Overall, Back, Side, Top, Bottom, Left, Right, Both,
+        # Back only, Side Only, Neither
+        self.build1DHistogram('efficiency', "", 12, -1, 11)
+        for section in range(num_sections):
+            section_name = section_names[section]
+            self.build1DHistogram(f"inefficiency_{section_name}",
+                                  f"Inefficiency ({section_name})",
+                                  *inefficiency_layer_bins
+                                  )
 class EcalDigiVerify(ldmxcfg.Analyzer) :
     """Configured EcalDigiVerifier python object
@@ -92,58 +226,6 @@ def __init__(self,name='sim_dqm',sim_pass='') :
         super().__init__(name,'dqm::SimObjects','DQM')
         self.sim_pass = sim_pass
-class HCalDQM(ldmxcfg.Analyzer) :
-    """Configured HCalDQM python object
-
-    Contains an instance of HCalDQM that
-    has already been configured.
-
-    Builds the necessary histograms as well.
-
-    Examples
-    --------
-    from LDMX.DQM import dqm
-    p.sequence.append( dqm.HCalDQM() )
-    """
-
-    def __init__(self,name="hcal_dqm") :
-        super().__init__(name,'dqm::HCalDQM','DQM')
-
-        self.rec_coll_name = 'HcalRecHits'
-        self.rec_pass_name = ''
-
-        # every hit in hcal
-        self.build1DHistogram("pe", "Photoelectrons in an HCal Module", 1500, 0, 1500)
-        self.build1DHistogram("hit_time", "HCal hit time (ns)", 1600, -100, 1500)
-        self.build2DHistogram("back_pe:layer",
-                              "Photoelectrons in a Back HCal Layer",10,0,10,
-                              "Back HCal Layer",100,0,100)
-        self.build2DHistogram("back_layer:strip",
-                              "Back HCal Layer",100,0,100,
-                              "Back HCal Strip",62,0,62)
-        self.build2DHistogram("side_pe:layer",
-                              "Photoelectrons in a Side HCal Layer",10,0,10,
-                              "Side HCal Layer",20,0,20)
-        self.build2DHistogram("side_layer:strip",
-                              "Side HCal Layer",20,0,20,
-                              "Side HCal Strip",30,0,30)
-
-        # once per event
-        self.build1DHistogram("n_hits", "HCal hit multiplicity", 300, 0, 300)
-        self.build1DHistogram("total_pe", "Total Photoelectrons", 3000, 0, 3000)
-        self.build1DHistogram("back_total_pe", "Total Photoelectrons in Back", 3000, 0, 3000)
-        self.build1DHistogram("max_pe",
-                              "Max Photoelectrons in an HCal Module", 1500, 0, 1500)
-        self.build1DHistogram("hit_time_max_pe",
-                              "Max PE hit time (ns)", 1600, -100, 1500)
-        self.build2DHistogram("max_pe:time",
-                              "Max Photoelectrons in an HCal Module", 1500, 0, 1500,
-                              "HCal max PE hit time (ns)", 1500, 0, 1500)
-        self.build1DHistogram("min_time_hit_above_thresh",
-                              "Earliest time of HCal hit above threshold (ns)", 1600, -100, 1500)
-        self.build2DHistogram("min_time_hit_above_thresh:pe",
-                              "Photoelectrons in an HCal Module", 1500, 0, 1500,
-                              "Earliest time of HCal hit above threshold (ns)", 1600, -100, 1500)
 class HCalRawDigi(ldmxcfg.Analyzer) :
     def __init__(self, input_name) :
@@ -202,9 +284,11 @@ class PhotoNuclearDQM(ldmxcfg.Analyzer) :
     p.sequence.append( dqm.PhotoNuclearDQM() )
     """
-    def __init__(self,name='PN') :
+    def __init__(self,name='PN', verbose=False, count_light_ions=True) :
         super().__init__(name,'dqm::PhotoNuclearDQM','DQM')
+        self.count_light_ions=count_light_ions
+        self.verbose = verbose
         self.build1DHistogram("event_type" , "", 24, -1, 23)
         self.build1DHistogram("event_type_500mev"
, "", 24, -1, 23) self.build1DHistogram("event_type_2000mev" , "", 24, -1, 23) @@ -212,8 +296,11 @@ def __init__(self,name='PN') : self.build1DHistogram("event_type_compact_500mev" , "", 8, -1, 7) self.build1DHistogram("event_type_compact_2000mev" , "", 8, -1, 7) self.build1DHistogram("1n_event_type" , "", 7, -1, 6) - self.build1DHistogram("pn_particle_mult" , "Photo-nuclear Multiplicity", 100, 0, 200) + self.build1DHistogram("pn_particle_mult" , "Photo-nuclear Multiplicity", 200, 0, 200) + self.build1DHistogram("pn_neutron_mult", "Photo-nuclear Neutron Multiplicity", 200,0, 200) self.build1DHistogram("pn_gamma_energy" , "#gamma Energy (MeV)", 500, 0, 5000) + self.build1DHistogram("pn_total_ke" , "Total Kineitc Energy of Photo-Nuclear Products(MeV)", 500, 0, 5000) + self.build1DHistogram("pn_total_neutron_ke" , "Total Kineitc Energy of Photo-Nuclear Neutrons (MeV)", 500, 0, 5000) self.build1DHistogram("1n_neutron_energy" , "Neutron Energy (MeV)", 500, 0, 5000) self.build1DHistogram("1n_energy_diff" , "E(#gamma_{PN}) - E(n) (MeV)", 500, 0, 5000) self.build1DHistogram("1n_energy_frac" , "E(n)/E(#gamma_{PN}) (MeV)", 500, 0, 1) @@ -417,13 +504,29 @@ def __init__(self,name='Trigger',coll='Trigger') : ] hcal_dqm = [ - HCalDQM() - ] + HCalDQM(pe_threshold=5, + section=0 + ), + HCalDQM(pe_threshold=5, + section=1 + ), + HCalDQM(pe_threshold=5, + section=2 + ), + HCalDQM(pe_threshold=5, + section=3 + ), + HCalDQM(pe_threshold=5, + section=4 + ), + HcalInefficiencyAnalyzer(), + ] recoil_dqm = [ RecoilTrackerDQM() ] + trigScint_dqm = [ TrigScintSimDQM('TrigScintSimPad1','TriggerPad1SimHits','pad1'), TrigScintSimDQM('TrigScintSimPad2','TriggerPad2SimHits','pad2'), diff --git a/DQM/src/DQM/HCalDQM.cxx b/DQM/src/DQM/HCalDQM.cxx index 885cb8089..1cad2dc0c 100644 --- a/DQM/src/DQM/HCalDQM.cxx +++ b/DQM/src/DQM/HCalDQM.cxx @@ -1,80 +1,164 @@ #include "DQM/HCalDQM.h" - -#include "Hcal/Event/HcalHit.h" -#include "Hcal/Event/HcalVetoResult.h" - -#include "DetDescr/HcalID.h" - namespace dqm { -HCalDQM::HCalDQM(const std::string& name, framework::Process& process) +HCalDQM::HCalDQM(const std::string &name, framework::Process &process) : framework::Analyzer(name, process) {} -void HCalDQM::configure(framework::config::Parameters& ps) { +void HCalDQM::configure(framework::config::Parameters &ps) { rec_coll_name_ = ps.getParameter("rec_coll_name"); rec_pass_name_ = ps.getParameter("rec_pass_name"); + sim_coll_name_ = ps.getParameter("sim_coll_name"); + sim_pass_name_ = ps.getParameter("sim_pass_name"); + pe_veto_threshold = ps.getParameter("pe_veto_threshold"); + section_ = ps.getParameter("section"); + max_hit_time_ = ps.getParameter("max_hit_time"); } -void HCalDQM::analyze(const framework::Event& event) { - // hard-code PE threshold because I'm lazy - static const float pe_veto_threshold{5.}; - +void HCalDQM::analyze(const framework::Event &event) { // Get the collection of HCalDQM digitized hits if the exists - const auto& hcalHits{event.getCollection(rec_coll_name_,rec_pass_name_)}; + const auto &hcalHits{ + event.getCollection(rec_coll_name_, rec_pass_name_)}; + + const auto &hcalSimHits{event.getCollection( + sim_coll_name_, sim_pass_name_)}; + analyzeSimHits(hcalSimHits); + analyzeRecHits(hcalHits); + const auto &geometry = getCondition( + ldmx::HcalGeometry::CONDITIONS_OBJECT_NAME); +} +void HCalDQM::analyzeSimHits(const std::vector &hits) { - float totalPE{0}, total_back_pe{0}, minTime{999}, minTimePE{-1}; - float maxPE{-1}, maxPETime{-1}; + const auto &geometry = getCondition( + 
ldmx::HcalGeometry::CONDITIONS_OBJECT_NAME); - // Loop through all HCal hits in the event - // Get non-noise generated hits into new vector for sorting - for (const ldmx::HcalHit& hit : hcalHits) { - ldmx::HcalID id(hit.getID()); + std::map simEnergyPerBar; + int hitMultiplicity{0}; - histograms_.fill("pe", hit.getPE()); - histograms_.fill("hit_time", hit.getTime()); - totalPE += hit.getPE(); + for (const auto &hit : hits) { - std::string section{"side"}; - if (id.section() == ldmx::HcalID::HcalSection::BACK) { - section = "back"; - total_back_pe += hit.getPE(); + ldmx::HcalID id(hit.getID()); + if (skipHit(id)) { + continue; + } + const auto energy{hit.getEdep()}; + if (simEnergyPerBar.count(id) == 0) { + simEnergyPerBar[id] = energy; + } else { + simEnergyPerBar[id] += energy; + } + const auto orientation{geometry.getScintillatorOrientation(id)}; + const auto section{id.section()}; + const auto layer{id.layer()}; + const auto strip{id.strip()}; + const auto pos{hit.getPosition()}; + const auto x{pos[0]}; + const auto y{pos[1]}; + const auto z{pos[2]}; + const auto t{hit.getTime()}; + hitMultiplicity++; + histograms_.fill("sim_hit_time", t); + histograms_.fill("sim_layer", layer); + histograms_.fill("sim_layer:strip", layer, strip); + histograms_.fill("sim_energy", energy); + switch (orientation) { + case ldmx::HcalGeometry::ScintillatorOrientation::horizontal: + histograms_.fill("sim_along_x", x); + break; + case ldmx::HcalGeometry::ScintillatorOrientation::vertical: + histograms_.fill("sim_along_y", y); + break; + case ldmx::HcalGeometry::ScintillatorOrientation::depth: + histograms_.fill("sim_along_z", z); + break; } + } - histograms_.fill(section+"_pe:layer", hit.getPE(), id.layer()); - histograms_.fill(section+"_layer:strip", id.layer(), id.strip()); + histograms_.fill("sim_hit_multiplicity", hitMultiplicity); + histograms_.fill("sim_num_bars_hit", simEnergyPerBar.size()); - /** - * Get earliest (min time) non-noise (time > -999.ns) hit - * above the PE veto threshold. - */ - if (hit.getTime() > -999. and hit.getPE() > pe_veto_threshold and hit.getTime() < minTime) { - minTime = hit.getTime(); - minTimePE = hit.getPE(); + double total_energy{0}; + for (const auto [id, energy] : simEnergyPerBar) { + histograms_.fill("sim_energy_per_bar", energy); + total_energy += energy; + } + histograms_.fill("sim_total_energy", total_energy); +} +void HCalDQM::analyzeRecHits(const std::vector &hits) { + + const auto &geometry = getCondition( + ldmx::HcalGeometry::CONDITIONS_OBJECT_NAME); + + float totalPE{0}; + float maxPE{-1}; + float maxPETime{-1}; + float E{0}; + float totalE{0}; + int vetoableHitMultiplicity{0}; + int hitMultiplicity{0}; + + for (const ldmx::HcalHit &hit : hits) { + ldmx::HcalID id(hit.getID()); + const auto orientation{geometry.getScintillatorOrientation(id)}; + const auto section{id.section()}; + const auto layer{id.layer()}; + const auto strip{id.strip()}; + if (skipHit(id)) { + continue; } - /** - * Get PE value and time of hit - * with maximum PE deposited in event. 
- */ - if (hit.getPE() > maxPE) { - maxPE = hit.getPE(); - maxPETime = hit.getTime(); + if (hit.isNoise()) { + histograms_.fill("noise", 1); + } else { + histograms_.fill("noise", 0); } - } + if (hitPassesVeto(hit, section)) { + hitMultiplicity++; + } else { + hitMultiplicity++; + vetoableHitMultiplicity++; + } + const auto pe{hit.getPE()}; + const auto t{hit.getTime()}; + const auto e{hit.getEnergy()}; + const auto x{hit.getXPos()}; + const auto y{hit.getYPos()}; + const auto z{hit.getZPos()}; + switch (orientation) { + case ldmx::HcalGeometry::ScintillatorOrientation::horizontal: + histograms_.fill("along_x", x); + break; + case ldmx::HcalGeometry::ScintillatorOrientation::vertical: + histograms_.fill("along_y", y); + break; + case ldmx::HcalGeometry::ScintillatorOrientation::depth: + histograms_.fill("along_z", z); + break; + } + + totalE += e; + totalPE += pe; - // let's fill our once-per-event histograms - histograms_.fill("n_hits", hcalHits.size()); + if (pe > maxPE) { + maxPE = pe; + maxPETime = t; + } + histograms_.fill("layer:strip", layer, strip); + histograms_.fill("pe", pe); + histograms_.fill("hit_time", t); + histograms_.fill("layer", layer); + histograms_.fill("noise", hit.isNoise()); + histograms_.fill("energy", e); + histograms_.fill("hit_z", z); + } + histograms_.fill("total_energy", totalE); histograms_.fill("total_pe", totalPE); - histograms_.fill("back_total_pe", total_back_pe); histograms_.fill("max_pe", maxPE); - histograms_.fill("hit_time_max_pe", maxPETime); - histograms_.fill("max_pe:time", maxPE, maxPETime); - histograms_.fill("min_time_hit_above_thresh", minTime); - histograms_.fill("min_time_hit_above_thresh:pe", minTimePE, minTime); - + histograms_.fill("max_pe_time", maxPETime); + histograms_.fill("hit_multiplicity", hitMultiplicity); + histograms_.fill("vetoable_hit_multiplicity", vetoableHitMultiplicity); } -} // namespace dqm +} // namespace dqm DECLARE_ANALYZER_NS(dqm, HCalDQM) diff --git a/DQM/src/DQM/HcalInefficiencyDQM.cxx b/DQM/src/DQM/HcalInefficiencyDQM.cxx new file mode 100644 index 000000000..92302404b --- /dev/null +++ b/DQM/src/DQM/HcalInefficiencyDQM.cxx @@ -0,0 +1,76 @@ + +#include "DQM/HcalInefficiencyDQM.h" + +namespace dqm { +void HcalInefficiencyAnalyzer::analyze(const framework::Event &event) { + const auto hcalSimHits = event.getCollection( + hcalSimHitsCollection_, hcalSimHitsPassName_); + const auto hcalRecHits = event.getCollection( + hcalRecHitsCollection_, hcalRecHitsPassName_); + + const int failedVeto{999}; + // Check veto for each section, combined side hcal veto + std::vector firstLayersHit{failedVeto, failedVeto, failedVeto, + failedVeto, failedVeto}; + + const std::vector sectionNames{"back", "top", "bottom", "right", + "left"}; + for (const auto hit : hcalRecHits) { + const ldmx::HcalID id{hit.getID()}; + const auto section{id.section()}; + const auto z{hit.getZPos()}; + const auto layer{id.layer()}; + if (hitPassesVeto(hit, section)) { + if (layer < firstLayersHit[section]) { + firstLayersHit[section] = layer; + } + } + } + + bool vetoedByBack{firstLayersHit[ldmx::HcalID::HcalSection::BACK] != + failedVeto}; + bool vetoedByTop{firstLayersHit[ldmx::HcalID::HcalSection::TOP]}; + bool vetoedByBottom{firstLayersHit[ldmx::HcalID::HcalSection::BOTTOM]}; + bool vetoedByRight{firstLayersHit[ldmx::HcalID::HcalSection::RIGHT]}; + bool vetoedByLeft{firstLayersHit[ldmx::HcalID::HcalSection::LEFT]}; + bool vetoedBySide{vetoedByTop || vetoedByBottom || vetoedByRight || + vetoedByLeft}; + + for (int section{0}; section < 
firstLayersHit.size(); ++section) { + const auto layer{firstLayersHit[section]}; + const auto sectionName{sectionNames[section]}; + if (layer != failedVeto) { + histograms_.fill("inefficiency_" + sectionName, layer); + histograms_.fill("efficiency", section); + } + } + if (vetoedByBack || vetoedBySide) { + histograms_.fill("efficiency", vetoCategories::any); + if (vetoedByBack && vetoedBySide) { + histograms_.fill("efficiency", vetoCategories::both); + } else if (vetoedByBack && !vetoedBySide) { + histograms_.fill("efficiency", vetoCategories::back_only); + } else if (vetoedBySide && !vetoedByBack) { + histograms_.fill("efficiency", vetoCategories::side_only); + } + } else { + histograms_.fill("efficiency", vetoCategories::neither); + } +} + +void HcalInefficiencyAnalyzer::configure( + + framework::config::Parameters ¶meters) { + + hcalSimHitsCollection_ = + parameters.getParameter("sim_coll_name"); + hcalRecHitsCollection_ = + parameters.getParameter("rec_coll_name"); + hcalSimHitsPassName_ = parameters.getParameter("sim_pass_name"); + hcalRecHitsPassName_ = parameters.getParameter("rec_pass_name"); + pe_veto_threshold = parameters.getParameter("pe_veto_threshold"); + max_hit_time_ = parameters.getParameter("max_hit_time"); +} +} // namespace dqm + +DECLARE_ANALYZER_NS(dqm, HcalInefficiencyAnalyzer); diff --git a/DQM/src/DQM/NtuplizeDarkBremInteraction.cxx b/DQM/src/DQM/NtuplizeDarkBremInteraction.cxx new file mode 100644 index 000000000..15c99bdd7 --- /dev/null +++ b/DQM/src/DQM/NtuplizeDarkBremInteraction.cxx @@ -0,0 +1,151 @@ +#include "Framework/EventProcessor.h" +#include "SimCore/Event/SimParticle.h" + +namespace dqm { +class NtuplizeDarkBremInteraction : public framework::Analyzer { + public: + NtuplizeDarkBremInteraction(const std::string& n, framework::Process& p) + : framework::Analyzer(n,p) {} + virtual void onProcessStart() final override; + virtual void analyze(const framework::Event& e) final override; +}; + +void NtuplizeDarkBremInteraction::onProcessStart() { + getHistoDirectory(); + ntuple_.create("dbint"); + ntuple_.addVar("dbint","x"); + ntuple_.addVar("dbint","y"); + ntuple_.addVar("dbint","z"); + ntuple_.addVar("dbint","weight"); + ntuple_.addVar("dbint","incident_pdg"); + ntuple_.addVar("dbint","incident_genstatus"); + ntuple_.addVar("dbint","incident_mass"); + ntuple_.addVar("dbint","incident_energy"); + ntuple_.addVar("dbint","incident_px"); + ntuple_.addVar("dbint","incident_py"); + ntuple_.addVar("dbint","incident_pz"); + ntuple_.addVar("dbint","recoil_pdg"); + ntuple_.addVar("dbint","recoil_genstatus"); + ntuple_.addVar("dbint","recoil_mass"); + ntuple_.addVar("dbint","recoil_energy"); + ntuple_.addVar("dbint","recoil_px"); + ntuple_.addVar("dbint","recoil_py"); + ntuple_.addVar("dbint","recoil_pz"); + ntuple_.addVar("dbint","aprime_pdg"); + ntuple_.addVar("dbint","aprime_genstatus"); + ntuple_.addVar("dbint","aprime_mass"); + ntuple_.addVar("dbint","aprime_energy"); + ntuple_.addVar("dbint","aprime_px"); + ntuple_.addVar("dbint","aprime_py"); + ntuple_.addVar("dbint","aprime_pz"); + ntuple_.addVar("dbint","visible_energy"); + ntuple_.addVar("dbint","beam_energy"); +} + +static double energy(const std::vector& p, const double& m) { + return sqrt(p.at(0)*p.at(0)+ p.at(1)*p.at(1)+ p.at(2)*p.at(2)+ m*m); +} + +/** + * extract the kinematics of the dark brem interaction from the SimParticles + * + * Sometimes the electron that undergoes the dark brem is not in a region + * where it should be saved (i.e. it is a shower electron inside of the ECal). 
+ * In this case, we need to reconstruct the incident momentum from the outgoing
+ * products (the recoil electron and the dark photon) which should be saved by
+ * the biasing filter used during the simulation.
+ *
+ * Since the dark brem model does not include a nucleus, it is only able to
+ * conserve momentum, so we need to reconstruct the incident particle's 3-momentum
+ * and then use the electron mass to calculate its total energy.
+ */
+void NtuplizeDarkBremInteraction::analyze(const framework::Event& e) {
+  const auto& particle_map{e.getMap<int, ldmx::SimParticle>("SimParticles")};
+  const ldmx::SimParticle *recoil{nullptr}, *aprime{nullptr}, *beam{nullptr};
+  for (const auto& [track_id, particle] : particle_map) {
+    if (track_id == 1) beam = &particle;
+    if (particle.getProcessType() == ldmx::SimParticle::ProcessType::eDarkBrem) {
+      if (particle.getPdgID() == 62) {
+        if (aprime != nullptr) {
+          EXCEPTION_RAISE("BadEvent", "Found multiple A' in event.");
+        }
+        aprime = &particle;
+      } else {
+        recoil = &particle;
+      }
+    }
+  }
+
+  if (recoil == nullptr and aprime == nullptr) {
+    /* dark brem did not occur during the simulation
+     * IF PROPERLY CONFIGURED, this occurs because the simulation
+     * exhausted the maximum number of tries to get a dark brem
+     * to occur. We just leave early so that the entries in the
+     * ntuple are the unphysical numeric minimum.
+     *
+     * This can also happen during development, so I leave a debug
+     * printout here to be uncommented when developing the dark
+     * brem simulation.
+    std::cout << "Event " << e.getEventNumber()
+              << " did not have a dark brem occur within it." << std::endl;
+     */
+    return;
+  }
+
+  if (recoil == nullptr or aprime == nullptr or beam == nullptr) {
+    // we are going to end processing so let's take our time to
+    // construct a nice error message
+    std::stringstream err_msg;
+    err_msg
+      << "Unable to find all necessary particles for DarkBrem interaction."
+      << " Missing: [ "
+      << (recoil == nullptr ? "recoil " : "")
+      << (aprime == nullptr ? "aprime " : "")
+      << (beam == nullptr ?
"beam " : "") + << "]" << std::endl; + EXCEPTION_RAISE("BadEvent", err_msg.str()); + return; + } + + const auto& recoil_p = recoil->getMomentum(); + const auto& aprime_p = aprime->getMomentum(); + + std::vector incident_p = recoil_p; + for (std::size_t i{0}; i < recoil_p.size(); ++i) incident_p[i] += aprime_p.at(i); + + double incident_energy = energy(incident_p, recoil->getMass()); + double recoil_energy = energy(recoil_p, recoil->getMass()); + double visible_energy = (beam->getEnergy() - incident_energy) + recoil_energy; + + ntuple_.setVar("x", aprime->getVertex().at(0)); + ntuple_.setVar("y", aprime->getVertex().at(1)); + ntuple_.setVar("z", aprime->getVertex().at(2)); + ntuple_.setVar("weight", e.getEventWeight()); + ntuple_.setVar("incident_pdg", recoil->getPdgID()); + ntuple_.setVar("incident_genstatus", -1); + ntuple_.setVar("incident_mass", recoil->getMass()); + ntuple_.setVar("incident_energy", incident_energy); + ntuple_.setVar("incident_px", incident_p.at(0)); + ntuple_.setVar("incident_py", incident_p.at(1)); + ntuple_.setVar("incident_pz", incident_p.at(2)); + ntuple_.setVar("recoil_pdg", recoil->getPdgID()); + ntuple_.setVar("recoil_genstatus", recoil->getGenStatus()); + ntuple_.setVar("recoil_mass", recoil->getMass()); + ntuple_.setVar("recoil_energy", recoil_energy); + ntuple_.setVar("recoil_px", recoil_p.at(0)); + ntuple_.setVar("recoil_py", recoil_p.at(1)); + ntuple_.setVar("recoil_pz", recoil_p.at(2)); + ntuple_.setVar("aprime_pdg", aprime->getPdgID()); + ntuple_.setVar("aprime_genstatus", aprime->getGenStatus()); + ntuple_.setVar("aprime_mass", aprime->getMass()); + ntuple_.setVar("aprime_energy", energy(aprime_p,aprime->getMass())); + ntuple_.setVar("aprime_px", aprime_p.at(0)); + ntuple_.setVar("aprime_py", aprime_p.at(1)); + ntuple_.setVar("aprime_pz", aprime_p.at(2)); + ntuple_.setVar("beam_energy", beam->getEnergy()); + ntuple_.setVar("visible_energy", visible_energy); +} + +} + +DECLARE_ANALYZER_NS(dqm,NtuplizeDarkBremInteraction); diff --git a/DQM/src/DQM/PhotoNuclearDQM.cxx b/DQM/src/DQM/PhotoNuclearDQM.cxx index 7e867aa59..f0a9e11fc 100644 --- a/DQM/src/DQM/PhotoNuclearDQM.cxx +++ b/DQM/src/DQM/PhotoNuclearDQM.cxx @@ -11,7 +11,6 @@ //----------// #include "TH1F.h" #include "TH2F.h" -#include "TVector3.h" //----------// // LDMX // @@ -19,40 +18,189 @@ #include "Framework/Event.h" #include "Tools/AnalysisUtils.h" +#include + namespace dqm { -PhotoNuclearDQM::PhotoNuclearDQM(const std::string& name, - framework::Process& process) +PhotoNuclearDQM::PhotoNuclearDQM(const std::string &name, + framework::Process &process) : framework::Analyzer(name, process) {} PhotoNuclearDQM::~PhotoNuclearDQM() {} +std::vector PhotoNuclearDQM::findDaughters( + const std::map particleMap, + const ldmx::SimParticle *parent) const { + std::vector pnDaughters; + for (const auto &daughterTrackID : parent->getDaughters()) { + // skip daughters that weren't saved + if (particleMap.count(daughterTrackID) == 0) { + continue; + } + + auto daughter{&(particleMap.at(daughterTrackID))}; + + // Get the PDG ID + auto pdgID{daughter->getPdgID()}; + + // Ignore photons and nuclei + if (pdgID == 22 || + (pdgID > 10000 && (!count_light_ions_ || !isLightIon(pdgID)))) { + continue; + } + pnDaughters.push_back(daughter); + } + + std::sort(pnDaughters.begin(), pnDaughters.end(), + [](const auto &lhs, const auto &rhs) { + double lhs_ke = lhs->getEnergy() - lhs->getMass(); + double rhs_ke = rhs->getEnergy() - rhs->getMass(); + return lhs_ke > rhs_ke; + }); + + return pnDaughters; +} +void 
PhotoNuclearDQM::findRecoilProperties(const ldmx::SimParticle *recoil) { + + histograms_.fill("recoil_vertex_x", recoil->getVertex()[0]); + histograms_.fill("recoil_vertex_y", recoil->getVertex()[1]); + histograms_.fill("recoil_vertex_z", recoil->getVertex()[2]); + histograms_.fill("recoil_vertex_x:recoil_vertex_y", recoil->getVertex()[0], + recoil->getVertex()[1]); +} +void PhotoNuclearDQM::findParticleKinematics( + const std::vector &pnDaughters) { + double hardest_ke{-1}, hardest_theta{-1}; + double hardest_proton_ke{-1}, hardest_proton_theta{-1}; + double hardest_neutron_ke{-1}, hardest_neutron_theta{-1}; + double hardest_pion_ke{-1}, hardest_pion_theta{-1}; + double total_ke{0}; + double total_neutron_ke{0}; + int neutron_multiplicity{0}; + // Loop through all of the PN daughters and extract kinematic + // information. + for (const auto *daughter : pnDaughters) { + // skip daughters that weren't saved + + // Get the PDG ID + auto pdgID{daughter->getPdgID()}; + + // Calculate the kinetic energy + double ke{daughter->getEnergy() - daughter->getMass()}; + total_ke += ke; + + std::vector vec{daughter->getMomentum()}; + TVector3 pvec(vec[0], vec[1], vec[2]); + + // Calculate the polar angle + auto theta{pvec.Theta() * (180 / 3.14159)}; + + if (hardest_ke < ke) { + hardest_ke = ke; + hardest_theta = theta; + } + + if ((pdgID == 2112)) { + total_neutron_ke += ke; + neutron_multiplicity++; + if (hardest_neutron_ke < ke) { + hardest_neutron_ke = ke; + hardest_neutron_theta = theta; + } + } + + if ((pdgID == 2212) && (hardest_proton_ke < ke)) { + hardest_proton_ke = ke; + hardest_proton_theta = theta; + } + + if (((std::abs(pdgID) == 211) || (pdgID == 111)) && + (hardest_pion_ke < ke)) { + hardest_pion_ke = ke; + hardest_pion_theta = theta; + } + } + histograms_.fill("hardest_ke", hardest_ke); + histograms_.fill("hardest_theta", hardest_theta); + histograms_.fill("h_ke_h_theta", hardest_ke, hardest_theta); + histograms_.fill("hardest_p_ke", hardest_proton_ke); + histograms_.fill("hardest_p_theta", hardest_proton_theta); + histograms_.fill("hardest_n_ke", hardest_neutron_ke); + histograms_.fill("hardest_n_theta", hardest_neutron_theta); + histograms_.fill("hardest_pi_ke", hardest_pion_ke); + histograms_.fill("hardest_pi_theta", hardest_pion_theta); + + histograms_.fill("pn_neutron_mult", neutron_multiplicity); + histograms_.fill("pn_total_ke", total_ke); + histograms_.fill("pn_total_neutron_ke", total_neutron_ke); +} +void PhotoNuclearDQM::findSubleadingKinematics( + const ldmx::SimParticle *pnGamma, + const std::vector &pnDaughters, + const PhotoNuclearDQM::EventType eventType) { + + // Note: Assumes sorted by energy + + double subleading_ke{-9999}; + double nEnergy{-9999}, energyDiff{-9999}, energyFrac{-9999}; + + nEnergy = pnDaughters[0]->getEnergy() - pnDaughters[0]->getMass(); + subleading_ke = -9999; + if (pnDaughters.size() > 1) { + subleading_ke = pnDaughters[1]->getEnergy() - pnDaughters[1]->getMass(); + } + energyDiff = pnGamma->getEnergy() - nEnergy; + energyFrac = nEnergy / pnGamma->getEnergy(); + + if (eventType == EventType::single_neutron) { + histograms_.fill("1n_ke:2nd_h_ke", nEnergy, subleading_ke); + histograms_.fill("1n_neutron_energy", nEnergy); + histograms_.fill("1n_energy_diff", energyDiff); + histograms_.fill("1n_energy_frac", energyFrac); + } else if (eventType == EventType::two_neutrons) { + histograms_.fill("2n_n2_energy", subleading_ke); + auto energyFrac2n = (nEnergy + subleading_ke) / pnGamma->getEnergy(); + histograms_.fill("2n_energy_frac", energyFrac2n); + 
histograms_.fill("2n_energy_other", pnGamma->getEnergy() - energyFrac2n); + + } else if (eventType == EventType::charged_kaon) { + histograms_.fill("1kp_ke:2nd_h_ke", nEnergy, subleading_ke); + histograms_.fill("1kp_energy", nEnergy); + histograms_.fill("1kp_energy_diff", energyDiff); + histograms_.fill("1kp_energy_frac", energyFrac); + } else if (eventType == EventType::klong || eventType == EventType::kshort) { + histograms_.fill("1k0_ke:2nd_h_ke", nEnergy, subleading_ke); + histograms_.fill("1k0_energy", nEnergy); + histograms_.fill("1k0_energy_diff", energyDiff); + histograms_.fill("1k0_energy_frac", energyFrac); + } +} void PhotoNuclearDQM::onProcessStart() { std::vector labels = {"", - "Nothing hard", // 0 - "1 n", // 1 - "2 n", // 2 - "#geq 3 n", // 3 - "1 #pi", // 4 - "2 #pi", // 5 - "1 #pi_{0}", // 6 - "1 #pi A", // 7 - "1 #pi 2 A", // 8 - "2 #pi A", // 9 - "1 #pi_{0} A", // 10 - "1 #pi_{0} 2 A", // 11 - "#pi_{0} #pi A", // 12 - "1 p", // 13 - "2 p", // 14 - "pn", // 15 - "K^{0}_{L} X", // 16 - "K X", // 17 - "K^{0}_{S} X", // 18 - "exotics", // 19 - "multi-body", // 20 + "Nothing hard", // 0 + "1 n", // 1 + "2 n", // 2 + "#geq 3 n", // 3 + "1 #pi", // 4 + "2 #pi", // 5 + "1 #pi_{0}", // 6 + "1 #pi A", // 7 + "1 #pi 2 A", // 8 + "2 #pi A", // 9 + "1 #pi_{0} A", // 10 + "1 #pi_{0} 2 A", // 11 + "#pi_{0} #pi A", // 12 + "1 p", // 13 + "2 p", // 14 + "pn", // 15 + "K^{0}_{L} X", // 16 + "K X", // 17 + "K^{0}_{S} X", // 18 + "exotics", // 19 + "multi-body", // 20 ""}; - std::vector hists = { + std::vector hists = { histograms_.get("event_type"), histograms_.get("event_type_500mev"), histograms_.get("event_type_2000mev"), @@ -60,18 +208,18 @@ void PhotoNuclearDQM::onProcessStart() { }; for (int ilabel{1}; ilabel < labels.size(); ++ilabel) { - for (auto& hist : hists) { + for (auto &hist : hists) { hist->GetXaxis()->SetBinLabel(ilabel, labels[ilabel - 1].c_str()); } } labels = {"", - "1 n", // 0 - "K#pm X", // 1 - "1 K^{0}", // 2 - "2 n", // 3 - "Soft", // 4 - "Other", // 5 + "1 n", // 0 + "K#pm X", // 1 + "1 K^{0}", // 2 + "2 n", // 3 + "Soft", // 4 + "Other", // 5 ""}; hists = { @@ -81,49 +229,54 @@ void PhotoNuclearDQM::onProcessStart() { }; for (int ilabel{1}; ilabel < labels.size(); ++ilabel) { - for (auto& hist : hists) { + for (auto &hist : hists) { hist->GetXaxis()->SetBinLabel(ilabel, labels[ilabel - 1].c_str()); } } - std::vector n_labels = {"", "", - "nn", // 1 - "pn", // 2 - "#pi^{+}n", // 3 - "#pi^{0}n", // 4 + std::vector n_labels = {"", + "nn", // 0 + "pn", // 1 + "#pi^{+}n", // 2 + "#pi^{0}n", // 3 + "other", // 4 ""}; - TH1* hist = histograms_.get("1n_event_type"); + TH1 *hist = histograms_.get("1n_event_type"); for (int ilabel{1}; ilabel < n_labels.size(); ++ilabel) { hist->GetXaxis()->SetBinLabel(ilabel, n_labels[ilabel - 1].c_str()); } } -void PhotoNuclearDQM::configure(framework::config::Parameters& parameters) {} +void PhotoNuclearDQM::configure(framework::config::Parameters ¶meters) { + verbose_ = parameters.getParameter("verbose"); + count_light_ions_ = parameters.getParameter("count_light_ions", true); +} -void PhotoNuclearDQM::analyze(const framework::Event& event) { +void PhotoNuclearDQM::analyze(const framework::Event &event) { // Get the particle map from the event. If the particle map is empty, // don't process the event. 
auto particleMap{event.getMap("SimParticles")}; - if (particleMap.size() == 0) return; + if (particleMap.size() == 0) { + return; + } // Get the recoil electron auto [trackID, recoil] = Analysis::getRecoil(particleMap); - - histograms_.fill("recoil_vertex_x", recoil->getVertex()[0]); - histograms_.fill("recoil_vertex_y", recoil->getVertex()[1]); - histograms_.fill("recoil_vertex_z", recoil->getVertex()[2]); - histograms_.fill("recoil_vertex_x:recoil_vertex_y", recoil->getVertex()[0], - recoil->getVertex()[1]); + findRecoilProperties(recoil); // Use the recoil electron to retrieve the gamma that underwent a // photo-nuclear reaction. auto pnGamma{Analysis::getPNGamma(particleMap, recoil, 2500.)}; if (pnGamma == nullptr) { - std::cout << "[ PhotoNuclearDQM ]: PN Daughter is lost, skipping." - << std::endl; + if (verbose_) { + std::cout << "[ PhotoNuclearDQM ]: PN Daughter is lost, skipping." + << std::endl; + } return; } + const auto pnDaughters{findDaughters(particleMap, pnGamma)}; + findParticleKinematics(pnDaughters); histograms_.fill("pn_particle_mult", pnGamma->getDaughters().size()); histograms_.fill("pn_gamma_energy", pnGamma->getEnergy()); @@ -132,272 +285,169 @@ void PhotoNuclearDQM::analyze(const framework::Event& event) { histograms_.fill("pn_gamma_vertex_y", pnGamma->getVertex()[1]); histograms_.fill("pn_gamma_vertex_z", pnGamma->getVertex()[2]); - double lke{-1}, lt{-1}; - double lpke{-1}, lpt{-1}; - double lnke{-1}, lnt{-1}; - double lpike{-1}, lpit{-1}; - - std::vector pnDaughters; - - // Loop through all of the PN daughters and extract kinematic - // information. - for (const auto& daughterTrackID : pnGamma->getDaughters()) { - // skip daughters that weren't saved - if (particleMap.count(daughterTrackID) == 0) continue; - - auto daughter{&(particleMap.at(daughterTrackID))}; - - // Get the PDG ID - auto pdgID{daughter->getPdgID()}; - - // Ignore photons and nuclei - if (pdgID == 22 || pdgID > 10000) continue; - - // Calculate the kinetic energy - double ke{daughter->getEnergy() - daughter->getMass()}; - - std::vector vec{daughter->getMomentum()}; - TVector3 pvec(vec[0], vec[1], vec[2]); - - // Calculate the polar angle - auto theta{pvec.Theta() * (180 / 3.14159)}; - - if (lke < ke) { - lke = ke; - lt = theta; - } - - if ((pdgID == 2112) && (lnke < ke)) { - lnke = ke; - lnt = theta; - } - - if ((pdgID == 2212) && (lpke < ke)) { - lpke = ke; - lpt = theta; - } - - if (((abs(pdgID) == 211) || (pdgID == 111)) && (lpike < ke)) { - lpike = ke; - lpit = theta; - } - - pnDaughters.push_back(daughter); - } - - histograms_.fill("hardest_ke", lke); - histograms_.fill("hardest_theta", lt); - histograms_.fill("h_ke_h_theta", lke, lt); - histograms_.fill("hardest_p_ke", lpke); - histograms_.fill("hardest_p_theta", lpt); - histograms_.fill("hardest_n_ke", lnke); - histograms_.fill("hardest_n_theta", lnt); - histograms_.fill("hardest_pi_ke", lpike); - histograms_.fill("hardest_pi_theta", lpit); - // Classify the event auto eventType{classifyEvent(pnDaughters, 200)}; auto eventType500MeV{classifyEvent(pnDaughters, 500)}; auto eventType2000MeV{classifyEvent(pnDaughters, 2000)}; auto eventTypeComp{classifyCompactEvent(pnGamma, pnDaughters, 200)}; - auto eventTypeComp500MeV{classifyCompactEvent(pnGamma, pnDaughters, 200)}; - auto eventTypeComp2000MeV{classifyCompactEvent(pnGamma, pnDaughters, 200)}; - - histograms_.fill("event_type", eventType); - histograms_.fill("event_type_500mev", eventType500MeV); - histograms_.fill("event_type_2000mev", eventType2000MeV); - - 
histograms_.fill("event_type_compact", eventTypeComp); - histograms_.fill("event_type_compact_500mev", eventTypeComp500MeV); - histograms_.fill("event_type_compact_2000mev", eventTypeComp2000MeV); - - double slke{-9999}; - double nEnergy{-9999}, energyDiff{-9999}, energyFrac{-9999}; - - if (eventType == 1 || eventType == 17 || eventType == 16 || eventType == 18 || - eventType == 2) { - std::sort(pnDaughters.begin(), pnDaughters.end(), - [](const auto& lhs, const auto& rhs) { - double lhs_ke = lhs->getEnergy() - lhs->getMass(); - double rhs_ke = rhs->getEnergy() - rhs->getMass(); - return lhs_ke > rhs_ke; - }); - - nEnergy = pnDaughters[0]->getEnergy() - pnDaughters[0]->getMass(); - slke = -9999; - if (pnDaughters.size() > 1) - slke = pnDaughters[1]->getEnergy() - pnDaughters[1]->getMass(); - energyDiff = pnGamma->getEnergy() - nEnergy; - energyFrac = nEnergy / pnGamma->getEnergy(); - - if (eventType == 1) { - histograms_.fill("1n_ke:2nd_h_ke", nEnergy, slke); - histograms_.fill("1n_neutron_energy", nEnergy); - histograms_.fill("1n_energy_diff", energyDiff); - histograms_.fill("1n_energy_frac", energyFrac); - } else if (eventType == 2) { - histograms_.fill("2n_n2_energy", slke); - auto energyFrac2n = (nEnergy + slke) / pnGamma->getEnergy(); - histograms_.fill("2n_energy_frac", energyFrac2n); - histograms_.fill("2n_energy_other", pnGamma->getEnergy() - energyFrac2n); - - } else if (eventType == 17) { - histograms_.fill("1kp_ke:2nd_h_ke", nEnergy, slke); - histograms_.fill("1kp_energy", nEnergy); - histograms_.fill("1kp_energy_diff", energyDiff); - histograms_.fill("1kp_energy_frac", energyFrac); - } else if (eventType == 16 || eventType == 18) { - histograms_.fill("1k0_ke:2nd_h_ke", nEnergy, slke); - histograms_.fill("1k0_energy", nEnergy); - histograms_.fill("1k0_energy_diff", energyDiff); - histograms_.fill("1k0_energy_frac", energyFrac); + auto eventTypeComp500MeV{classifyCompactEvent(pnGamma, pnDaughters, 500)}; + auto eventTypeComp2000MeV{classifyCompactEvent(pnGamma, pnDaughters, 2000)}; + + histograms_.fill("event_type", static_cast(eventType)); + histograms_.fill("event_type_500mev", static_cast(eventType500MeV)); + histograms_.fill("event_type_2000mev", static_cast(eventType2000MeV)); + + histograms_.fill("event_type_compact", static_cast(eventTypeComp)); + histograms_.fill("event_type_compact_500mev", + static_cast(eventTypeComp500MeV)); + histograms_.fill("event_type_compact_2000mev", + static_cast(eventTypeComp2000MeV)); + + switch (eventType) { + case EventType::single_neutron: + if (eventType == EventType::single_neutron) { + if (pnDaughters.size() > 1) { + auto secondHardestPdgID{abs(pnDaughters[1]->getPdgID())}; + auto nEventType{-10}; + if (secondHardestPdgID == 2112) { + nEventType = 0; // n + n + } else if (secondHardestPdgID == 2212) { + nEventType = 1; // p + n + } else if (secondHardestPdgID == 211) { + nEventType = 2; // Pi+/- + n + } else if (secondHardestPdgID == 111) { + nEventType = 3; // Pi0 + n + } else { + nEventType = 4; // other + } + histograms_.fill("1n_event_type", nEventType); + } } - - auto nPdgID{abs(pnDaughters[0]->getPdgID())}; - auto nEventType{-10}; - if (nPdgID == 2112) - nEventType = 1; - else if (nPdgID == 2212) - nEventType = 2; - else if (nPdgID == 211) - nEventType = 3; - else if (nPdgID == 111) - nEventType = 4; - - histograms_.fill("1n_event_type", nEventType); + [[fallthrough]]; // Remaining code is important for 1n as well + case EventType::two_neutrons: + case EventType::charged_kaon: + case EventType::klong: + case EventType::kshort: + 
findSubleadingKinematics(pnGamma, pnDaughters, eventType); + break; } } -int PhotoNuclearDQM::classifyEvent( - const std::vector daughters, double threshold) { +PhotoNuclearDQM::EventType PhotoNuclearDQM::classifyEvent( + const std::vector daughters, double threshold) { short n{0}, p{0}, pi{0}, pi0{0}, exotic{0}, k0l{0}, kp{0}, k0s{0}, lambda{0}; // Loop through all of the PN daughters and extract kinematic // information. - for (const auto& daughter : daughters) { + for (const auto &daughter : daughters) { // Calculate the kinetic energy auto ke{daughter->getEnergy() - daughter->getMass()}; - // If the kinetic energy is below threshold, continue - if (ke <= threshold) continue; + // Assuming the daughters are sorted by kinetic energy, if the kinetic + // energy is below threshold, we don't need to look at any further + // particles. + if (ke <= threshold) { + break; + } // Get the PDG ID auto pdgID{abs(daughter->getPdgID())}; - if (pdgID == 2112) + if (pdgID == 2112) { n++; - else if (pdgID == 2212) + } else if (pdgID == 2212) { p++; - else if (pdgID == 211) + } else if (pdgID == 211) { pi++; - else if (pdgID == 111) + } else if (pdgID == 111) { pi0++; - else if (pdgID == 130) + } else if (pdgID == 130) { k0l++; - else if (pdgID == 321) + } else if (pdgID == 321) { kp++; - else if (pdgID == 310) + } else if (pdgID == 310) { k0s++; - else + } else { exotic++; + } } int kaons = k0l + kp + k0s; int nucleons = n + p; int pions = pi + pi0; int count = nucleons + pions + exotic + kaons; - int count_a = p + pions + exotic + kaons; - int count_b = pions + exotic + kaons; - int count_c = nucleons + pi0 + exotic + kaons; - int count_d = n + pi0 + exotic + k0l + kp + k0s; - int count_e = p + pi0 + exotic + k0l + kp + k0s; - int count_f = n + p + pi + exotic + k0l + kp + k0s; - int count_g = n + pi + pi0 + exotic + k0l + kp + k0s; - int count_h = n + p + pi + pi0 + k0l + kp + k0s; - int count_i = p + pi + exotic + k0l + kp + k0s; - int count_j = n + pi + exotic + k0l + kp + k0s; - int count_k = nucleons + pions + exotic + kp + k0s; - int count_l = nucleons + pions + exotic + k0l + k0s; - int count_m = nucleons + pions + exotic + kp + k0l; - int count_n = pi0 + exotic + kaons; - int count_o = pi + exotic + kaons; - int count_p = exotic + kaons; - - if (count == 0) return 0; // Nothing hard - - if (n == 1) { - if (count_a == 0) - return 1; // 1n - else if ((p == 1) && (count_b == 0)) - return 15; // pn - } - if ((n == 2) && (count_a == 0)) return 2; // 2 n - - if ((n >= 3) && (count_a == 0)) return 3; // >= 3 n - - if (pi == 1) { - if (count_c == 0) - return 4; // 1 pi - else if ((p == 1) && (count_d == 0)) - return 7; // 1 pi 1 p - else if ((p == 2) && (count_d == 0)) - return 8; // 1 pi 1 p - else if ((n == 1) && (count_e == 0)) - return 7; // 1 pi 1 n - else if ((n == 2) && (count_e == 0)) - return 8; // 1 pi 1 n - else if ((n == 1) && (p == 1) && (count_n == 0)) - return 8; + if (count == 0) { + return EventType::nothing_hard; } - - if (pi == 2) { - if (count_c == 0) - return 5; // 2pi - else if ((p == 1) && (count_d == 0)) - return 9; // 2pi p - else if ((n == 1) && (count_e == 0)) - return 9; // 2pi n + if (count == 1) { + if (n == 1) { + return EventType::single_neutron; + } else if (p == 1) { + return EventType::single_proton; + } else if (pi0 == 1) { + return EventType::single_neutral_pion; + } else if (pi == 1) { + return EventType::single_charged_pion; + } } - - if (pi0 == 1) { - if (count_f == 0) - return 6; // 1 pi0 - else if ((n == 1) && (count_i == 0)) - return 10; // 1pi0 1 p - else if 
((n == 2) && (count_i == 0)) - return 11; // 1pi0 1 p - else if ((p == 1) && (count_j == 0)) - return 10; // 1pi0 1 n - else if ((p == 2) && (count_j == 0)) - return 11; - else if ((n == 1) && (p == 1) && (count_o == 0)) - return 11; - else if ((pi == 1) && ((p == 1) || (n == 1)) && (count_p == 0)) - return 12; + if (count == 2) { + if (n == 2) { + return EventType::two_neutrons; + } else if (n == 1 && p == 1) { + return EventType::proton_neutron; + } else if (p == 2) { + return EventType::two_protons; + } else if (pi == 2) { + return EventType::two_charged_pions; + } else if (pi == 1 && nucleons == 1) { + return EventType::single_charged_pion_and_nucleon; + } else if (pi0 == 1 && nucleons == 1) { + return EventType::single_neutral_pion_and_nucleon; + } } - if ((p == 1) && (count_g == 0)) return 13; // 1 p - if ((p == 2) && (count_g == 0)) return 14; // 2 p - - if (k0l == 1) return 16; - if (kp == 1) return 17; - if (k0s == 1) return 18; + if (count == 3) { + if (pi == 1 && nucleons == 2) { + return EventType::single_charged_pion_and_two_nucleons; + } else if (pi == 2 && nucleons == 1) { + return EventType::two_charged_pions_and_nucleon; + } // else + else if (pi0 == 1 && nucleons == 2) { + return EventType::single_neutral_pion_and_two_nucleons; + } else if (pi0 == 1 && nucleons == 1 && pi == 1) { + return EventType::single_neutral_pion_charged_pion_and_nucleon; + } + } + if (count >= 3 && count == n) { + return EventType::three_or_more_neutrons; + } - if ((exotic > 0) && (count_h == 0)) return 19; + if (kaons == 1) { + if (k0l == 1) { + return EventType::klong; + } else if (kp == 1) { + return EventType::charged_kaon; + } else if (k0s == 1) { + return EventType::kshort; + } + } + if (exotic == count && count != 0) { + return EventType::exotics; + } - return 20; + return EventType::multibody; } -int PhotoNuclearDQM::classifyCompactEvent( - const ldmx::SimParticle* pnGamma, - const std::vector daughters, double threshold) { +PhotoNuclearDQM::CompactEventType PhotoNuclearDQM::classifyCompactEvent( + const ldmx::SimParticle *pnGamma, + const std::vector daughters, double threshold) { short n{0}, n_t{0}, k0l{0}, kp{0}, k0s{0}, soft{0}; // Loop through all of the PN daughters and extract kinematic // information. 
- for (const auto& daughter : daughters) { + for (const auto &daughter : daughters) { // Calculate the kinetic energy auto ke{daughter->getEnergy() - daughter->getMass()}; @@ -410,80 +460,44 @@ int PhotoNuclearDQM::classifyCompactEvent( } if (ke >= 0.8 * pnGamma->getEnergy()) { - if (pdgID == 2112) + if (pdgID == 2112) { n++; - else if (pdgID == 130) + } else if (pdgID == 130) { k0l++; - else if (pdgID == 321) + } else if (pdgID == 321) { kp++; - else if (pdgID == 310) + } else if (pdgID == 310) { k0s++; + } continue; } - if ((pdgID == 2112) && ke > threshold) n_t++; + if ((pdgID == 2112) && ke > threshold) { + n_t++; + } } int neutral_kaons{k0l + k0s}; - if (n != 0) return 0; - if (kp != 0) return 1; - if (neutral_kaons != 0) return 2; - if (n_t == 2) return 3; - if (soft == daughters.size()) return 4; - - return 5; -} - -void PhotoNuclearDQM::printParticleTree( - std::map particleMap) { - std::vector printedParticles; - - // Loop through the particle map - for (auto const& [trackID, simParticle] : particleMap) { - // Print the particle only if it has daughters - if ((simParticle.getDaughters().size() != 0) & - (std::find(printedParticles.begin(), printedParticles.end(), trackID) == - printedParticles.end())) { - simParticle.Print(); - printedParticles.push_back(trackID); - - // Print the daughters - std::vector printedDaughters = - printDaughters(particleMap, simParticle, 1); - printedParticles.insert(printedParticles.end(), printedDaughters.begin(), - printedDaughters.end()); - } + if (n != 0) { + return PhotoNuclearDQM::CompactEventType::single_neutron; } -} - -std::vector PhotoNuclearDQM::printDaughters( - std::map particleMap, - const ldmx::SimParticle particle, int depth) { - std::vector printedParticles; - - // Don't print anything if a particle doesn't have any daughters - if (particle.getDaughters().size() == 0) return printedParticles; - - // Generate the prefix - std::string prefix{""}; - for (auto i{0}; i < depth; ++i) prefix += "\t"; - - // Loop through all of the daughter particles and print them - for (const auto& daughter : particle.getDaughters()) { - // Print the ith daughter particle - std::cout << prefix; - particleMap[daughter].Print(); - printedParticles.push_back(daughter); - - // Print the Daughters - std::vector printedDaughters = - printDaughters(particleMap, particleMap[daughter], depth + 1); - printedParticles.insert(printedParticles.end(), printedDaughters.begin(), - printedDaughters.end()); + if (kp != 0) { + return PhotoNuclearDQM::CompactEventType::single_charged_kaon; } + if (neutral_kaons != 0) { + return PhotoNuclearDQM::CompactEventType::single_neutral_kaon; + } + if (n_t == 2) { + return PhotoNuclearDQM::CompactEventType::two_neutrons; + } + if (soft == daughters.size()) { + return PhotoNuclearDQM::CompactEventType::soft; + } + + return PhotoNuclearDQM::CompactEventType::other; } -} // namespace dqm +} // namespace dqm DECLARE_ANALYZER_NS(dqm, PhotoNuclearDQM) diff --git a/DetDescr/CMakeLists.txt b/DetDescr/CMakeLists.txt index 505dbd9e4..24e78b3da 100644 --- a/DetDescr/CMakeLists.txt +++ b/DetDescr/CMakeLists.txt @@ -14,7 +14,7 @@ setup_python(package_name ${PYTHON_PACKAGE_NAME}/DetDescr) # Setup the test setup_test(dependencies DetDescr::DetDescr) -option(BUILD_DETECTORID_BINDINGS "Build the python bindings for the the DetDescr/DetectorID components" OFF) +option(BUILD_DETECTORID_BINDINGS "Build the python bindings for the the DetDescr/DetectorID components" ON) if(BUILD_DETECTORID_BINDINGS) find_package(Python COMPONENTS Development) diff --git 
a/DetDescr/include/DetDescr/HcalGeometry.h b/DetDescr/include/DetDescr/HcalGeometry.h index d1d2bfa35..69b0357fc 100644 --- a/DetDescr/include/DetDescr/HcalGeometry.h +++ b/DetDescr/include/DetDescr/HcalGeometry.h @@ -32,21 +32,36 @@ class HcalGeometryProvider; * */ class HcalGeometry : public framework::ConditionsObject { - public: +public: /** * Conditions object: * The name of the python configuration calling this class * (Hcal/python/HcalGeometry.py) needs to match the CONDITIONS_OBJECT_NAME * exactly. */ - static constexpr const char* CONDITIONS_OBJECT_NAME{"HcalGeometry"}; + static constexpr const char *CONDITIONS_OBJECT_NAME{"HcalGeometry"}; + + /** + * Encodes the orientation of a bar. + * horizontal : The length of the bar is along the x-axis + * vertical : The length of the bar is along the y-axis + * depth : The length of the bar is along the z-axis + */ + enum class ScintillatorOrientation { + horizontal = 0, + vertical = 1, + depth = 2 + }; /** * Class destructor. * * Does nothing because the stl containers clean up automatically. */ - ~HcalGeometry() = default; + ~HcalGeometry() = default; + + ScintillatorOrientation + getScintillatorOrientation(const ldmx::HcalID id) const; /** * Get a strip center position from a combined hcal ID. @@ -68,23 +83,33 @@ class HcalGeometry : public framework::ConditionsObject { } /** Check whether a given layer corresponds to a horizontal (scintillator - * length along the x-axis) or vertical layer. See the horizontal_parity_ - * member for details. + * length along the x-axis) or vertical layer in the back HCal. See the + * back_horizontal_parity_ member for details. */ - bool layerIsHorizontal(const int layer) const { - return layer % 2 == horizontal_parity_; + bool backLayerIsHorizontal(const int layer) const { + return layer % 2 == back_horizontal_parity_; } /** - * Get the half total width of a layer for a given section(strip) for back(side) Hcal. + * Get the half total width of a layer for a given section(strip) for + * back(side) Hcal. * @param section * @param layer * @return half total width [mm] */ double getHalfTotalWidth(int isection, int layer = 1) const { + // Layer numbering starts at 1, but a vector is zero-indexed auto layer_index = layer - 1; return half_total_width_.at(isection).at(layer_index); } + /** + * Get the length of a scintillator bar + * @param id The HcalID of the bar + * @return The length of the bar with ID `id` [mm] + */ + double getScintillatorLength(ldmx::HcalID id) const { + return scint_length_.at(id.section()).at(id.layer() - 1); + } /** * Get the scitillator width */ @@ -116,7 +141,6 @@ class HcalGeometry : public framework::ConditionsObject { return zero_strip_.at(isection).at(layer_index); } - /** * Get the length of the Ecal in (x) for the side Hcal. */ @@ -127,13 +151,53 @@ class HcalGeometry : public framework::ConditionsObject { */ double getEcalDy() const { return ecal_dy_; } - private: + /** + * Does the Side Hcal have 3D readout? + * + * In other words, does the side hcal layers alter in scintillator direction + * (z vs x/y). + */ + bool hasSide3DReadout() const { return side_3d_readout_; } + + /* + * Is the hcal geometry one of the geometries used for the CERN testbeam + * activities? + */ + bool isPrototype() const { return is_prototype_; } + + /** + * Coordinates that are given by Geant4 are typically global. These can be + * transformed into corresponding local coordinates of a volume with a + * TopTransform. 
However, a TopTransform translates to the local bar but does + * not do the rotation (this is because we don't do a rotation when placing + * the bars in the GDML). This is used primarily for recording pre and post + * step positions in local coordinates of the volume in HcalSD. + * + * the logic below does the rotation to the local coordiates where + * x : short transverse side of bar + * y : long transverse side of bar + * z : along length of bar + * + * @note This logic only applies to the v14 and prototype detector; however, + * support for v12 is not broken because no studies using these pre/post step + * positions have been (or should be) done with the v12 detector. + / + * @note: The native position type in Geant4 is typically a G4ThreeVector, + * which is a typedef for a CLHEP::Hep3Vector. However, DetDescr currently + * does not have a dependency on Geant4/CLHEP so we are taking the position + * as a vector of floats (which is what is used by SimCalorimeterHit) + **/ + std::vector + rotateGlobalToLocalBarPosition(const std::vector &globalPosition, + const ldmx::HcalID &id) const; + +private: /** * Class constructor, for use only by the provider * * @param ps Parameters to configure the HcalGeometry */ - HcalGeometry(const framework::config::Parameters& ps); + HcalGeometry(const framework::config::Parameters &ps); friend class hcal::HcalGeometryProvider; /** @@ -153,33 +217,18 @@ class HcalGeometry : public framework::ConditionsObject { * * @param section The section number to print, see HcalID for details. */ - void printPositionMap(int section) const; + void printPositionMap(int section) const; /** * Debugging utility, prints out the HcalID and corresponding value of all * entries in the strip_position_map_. For printing only one of the sections, * see the overloaded version of this function taking a section parameter. * */ - void printPositionMap() const { - for (int section = 0; section < num_sections_ ; ++section) { - printPositionMap(section); - } + void printPositionMap() const { + for (int section = 0; section < num_sections_; ++section) { + printPositionMap(section); } - - // Deserves a better name - // - // Takes a parameter which only has dimensions of section and creates a - // version with both section and layer. All layers in a given section will be - // copies. - template - std::vector> makeCanonicalLayeredParameter(const std::vector& parameter) { - std::vector> result; - for(auto section = 0; section < parameter.size(); ++section) { - result.push_back(std::vector(num_layers_[section], parameter[section])); - } - return result; - } - + } private: /// Parameters that apply to all types of geometries @@ -210,14 +259,11 @@ class HcalGeometry : public framework::ConditionsObject { // Defines what parity (0/1, i.e. even/odd parity) of a layer number in the // geometry that corresponds to a horizontal layer (scintillator bar length - // along the x-axis). - int horizontal_parity_{}; + // along the x-axis) in the back HCal. + int back_horizontal_parity_{}; // 3D readout for side Hcal int side_3d_readout_{}; - - /// Canonical layered version of parameters that differ between geometry - /// versions. 
/// Number of strips per layer in each section and each layer std::vector> num_strips_; @@ -225,7 +271,11 @@ class HcalGeometry : public framework::ConditionsObject { std::vector> zero_strip_; /// Half Total Width of Strips [mm] std::vector> half_total_width_; - + /// Length of strips [mm] + std::vector> scint_length_; + + bool is_prototype_{}; + /** Map of the HcalID position of strip centers relative to world geometry. The map is not configurable and is calculated by buildStripPositionMap(). @@ -233,6 +283,6 @@ class HcalGeometry : public framework::ConditionsObject { std::map strip_position_map_; }; -} // namespace ldmx +} // namespace ldmx #endif diff --git a/DetDescr/include/DetDescr/HcalID.h b/DetDescr/include/DetDescr/HcalID.h index 39ed48fce..88cdb0596 100644 --- a/DetDescr/include/DetDescr/HcalID.h +++ b/DetDescr/include/DetDescr/HcalID.h @@ -21,7 +21,7 @@ class HcalID : public HcalAbstractID { /** * Encodes the section of the HCal based on the 'section' field value. */ - enum HcalSection { BACK = 0, TOP = 1, BOTTOM = 2, LEFT = 4, RIGHT = 3 }; + enum HcalSection { BACK = 0, TOP = 1, BOTTOM = 2, RIGHT = 3, LEFT = 4 }; static const RawValue SECTION_MASK{0x7}; // space for up to 7 sections static const RawValue SECTION_SHIFT{18}; diff --git a/DetDescr/python/HcalGeometry.py b/DetDescr/python/HcalGeometry.py index c0d52ff80..8f2921e65 100644 --- a/DetDescr/python/HcalGeometry.py +++ b/DetDescr/python/HcalGeometry.py @@ -38,8 +38,8 @@ class HcalReadoutGeometry: Used in the regular geometry to describe the dimensions of the Ecal. Since there is no Ecal in the prototype geometry, these are both set to 0. - horizontal_parity: - Layers with odd parity (1) are horizontal on the x-axis + back_horizontal_parity: + Layers with odd parity (1) are horizontal on the x-axis in the back HCal """ def __init__(self): @@ -48,17 +48,18 @@ def __init__(self): self.detectors_valid = [] self.scint_thickness = 0.0 self.scint_width = 0.0 + self.scint_length = [[]] self.zero_layer = [] - self.zero_strip = [] + self.zero_strip = [[]] self.layer_thickness = [] self.num_layers = [] - self.num_strips = [] - self.half_total_width = [] + self.num_strips = [[]] + self.half_total_width = [[]] self.ecal_dx = 0.0 self.ecal_dy = 0.0 self.num_sections = 0 self.verbose = 0 - self.horizontal_parity = 1 + self.back_horizontal_parity = 1 self.side_3d_readout = 0 def __str__(self): @@ -74,6 +75,7 @@ def __str__(self): Half total width of layers: {} [mm] Number of strips per layer: {} Location of zero-th strip per layer: {} [mm] + Scintillator length: {} [mm] }}, Ecal DX, DY: {}, {} [mm], Valid detector regexps: {} @@ -88,6 +90,7 @@ def __str__(self): self.half_total_width, self.num_strips, self.zero_strip, + self.scint_length, self.ecal_dx, self.ecal_dy, self.detectors_valid, @@ -116,8 +119,28 @@ def make_v13(self): """ self.v13 = HcalReadoutGeometry() + self.v13.num_sections = 5 + self.v13.num_layers = [100, 28, 28, 26, 26] self.v13.scint_thickness = 20.0 self.v13.scint_width = 50.0 + + back_scint_length = 3100.0 + # See https://github.com/LDMX-Software/ldmx-sw/blob/trunk/Detectors/data/ldmx-det-v13/hcal.gdml#L21 + # and https://github.com/LDMX-Software/ldmx-sw/blob/trunk/Detectors/data/ldmx-det-v13/hcal.gdml#L177 + # and the corresponding discussion https://github.com/LDMX-Software/ldmx-sw/pull/1135#discussion_r1178068211 + side_TB_scint_length = 1944 + + # See https://github.com/LDMX-Software/ldmx-sw/blob/trunk/Detectors/data/ldmx-det-v13/hcal.gdml#L22 + # and 
https://github.com/LDMX-Software/ldmx-sw/blob/trunk/Detectors/data/ldmx-det-v13/hcal.gdml#L181 + # and the corresponding discussion https://github.com/LDMX-Software/ldmx-sw/pull/1135#discussion_r1178070801 + side_LR_scint_length = 1832 + self.v13.scint_length = [[back_scint_length for layer in range(self.v13.num_layers[0])], + [side_TB_scint_length for layer in range(self.v13.num_layers[1])], + [side_TB_scint_length for layer in range(self.v13.num_layers[2])], + [side_LR_scint_length for layer in range(self.v13.num_layers[3])], + [side_LR_scint_length for layer in range(self.v13.num_layers[4])], + ] + self.v13.zero_layer = [ 220.0 + 600.0 + 25.0 + 2 * 2.0, 600.0 / 2 + 20.0 + 2 * 2.0, @@ -125,7 +148,11 @@ def make_v13(self): 800.0 / 2 + 20.0 + 2 * 2.0, 800.0 / 2 + 20.0 + 2 * 2.0, ] - self.v13.zero_strip = [3100.0 / 2, 220.0, 220.0, 220.0, 220.0] + self.v13.zero_strip = [[back_scint_length / 2 for layer in range(self.v13.num_layers[0])], + [220.0 for layer in range(self.v13.num_layers[1])], + [220.0 for layer in range(self.v13.num_layers[2])], + [220.0 for layer in range(self.v13.num_layers[3])], + [220.0 for layer in range(self.v13.num_layers[4])]] self.v13.layer_thickness = [ 25.0 + self.v13.scint_thickness + 2 * 2.0, 20.0 + self.v13.scint_thickness + 2 * 2.0, @@ -133,21 +160,27 @@ def make_v13(self): 20.0 + self.v13.scint_thickness + 2 * 2.0, 20.0 + self.v13.scint_thickness + 2 * 2.0, ] - self.v13.num_sections = 5 - self.v13.num_layers = [100, 28, 28, 26, 26] - self.v13.num_strips = [62, 12, 12, 12, 12] + self.v13.num_strips = [[62 for layer in range(self.v13.num_layers[0])], + [12 for layer in range(self.v13.num_layers[1])], + [12 for layer in range(self.v13.num_layers[2])], + [12 for layer in range(self.v13.num_layers[3])], + [12 for layer in range(self.v13.num_layers[4])]] self.v13.ecal_dx = 800.0 self.v13.ecal_dy = 600.0 self.v13.half_total_width = [ - (self.v13.num_strips[0] * self.v13.scint_width) / 2, - (self.v13.num_layers[1] * self.v13.layer_thickness[1] + self.v13.ecal_dx) - / 2, - (self.v13.num_layers[2] * self.v13.layer_thickness[2] + self.v13.ecal_dx) - / 2, - (self.v13.num_layers[3] * self.v13.layer_thickness[3] + self.v13.ecal_dy) - / 2, - (self.v13.num_layers[4] * self.v13.layer_thickness[4] + self.v13.ecal_dy) - / 2, + [(self.v13.num_strips[0][layer] * self.v13.scint_width) / 2 for layer in range(self.v13.num_layers[0])], + [(self.v13.num_layers[1] * self.v13.layer_thickness[1] + self.v13.ecal_dx) + / 2 + for layer in range(self.v13.num_layers[1])], + [(self.v13.num_layers[2] * self.v13.layer_thickness[2] + self.v13.ecal_dx) + / 2 + for layer in range(self.v13.num_layers[2])], + [(self.v13.num_layers[3] * self.v13.layer_thickness[3] + self.v13.ecal_dy) + / 2 + for layer in range(self.v13.num_layers[3])], + [(self.v13.num_layers[4] * self.v13.layer_thickness[4] + self.v13.ecal_dy) + / 2 + for layer in range(self.v13.num_layers[4])], ] self.v13.detectors_valid = [ "ldmx-det-v13", @@ -158,8 +191,8 @@ def make_v13(self): "ldmx-det-v11", ] # Layers with odd parity (1) are horizontal (scintillator bar length - # along the x-axis) - self.v13.horizontal_parity = 1 + # along the x-axis) in the back hcal + self.v13.back_horizontal_parity = 1 self.v13.side_3d_readout = 0 def make_v1_prototype(self): @@ -188,6 +221,7 @@ def make_v1_prototype(self): self.v1_prototype.scint_thickness = scint_thickness self.v1_prototype.scint_width = scint_bar_width + self.v1_prototype.scint_length = [[scint_bar_length for layer in range(num_layers)] ] # Note that this seems to be location of the first 
scintillator layer self.v1_prototype.zero_layer = [-dz / 2 + air_thickness + absorber_thickness] @@ -196,11 +230,12 @@ def make_v1_prototype(self): self.v1_prototype.num_layers = [num_layers] num_strips_front = [num_bars_front for i in range(num_layers_front)] num_strips_back = [num_bars_back for i in range(num_layers_back)] - self.v1_prototype.num_strips = num_strips_front + num_strips_back + num_strips_total = num_strips_front + num_strips_back + self.v1_prototype.num_strips = [num_strips_total] # zero_strip and half_total_width are identical - self.v1_prototype.zero_strip = [ - N * scint_bar_width / 2 for N in self.v1_prototype.num_strips - ] + self.v1_prototype.zero_strip = [[ + N * scint_bar_width / 2 for N in num_strips_total + ]] self.v1_prototype.half_total_width = self.v1_prototype.zero_strip self.v1_prototype.ecal_dx = 0.0 self.v1_prototype.ecal_dy = 0.0 @@ -209,8 +244,8 @@ def make_v1_prototype(self): "ldmx-hcal-prototype-v1.0[.].*", ] # Layers with odd parity (1) are horizontal (scintillator bar length - # along the x-axis) - self.v1_prototype.horizontal_parity = 1 + # along the x-axis) in the back HCal + self.v1_prototype.back_horizontal_parity = 1 self.v1_prototype.side_3d_readout = 0 def make_v2_prototype(self): @@ -245,6 +280,7 @@ def make_v2_prototype(self): self.v2_prototype.scint_thickness = scint_thickness self.v2_prototype.scint_width = scint_bar_width + self.v2_prototype.scint_length = [[scint_bar_length for layer in range(num_layers)] ] self.v2_prototype.zero_layer = [ -dz / 2 @@ -257,11 +293,12 @@ def make_v2_prototype(self): self.v2_prototype.num_layers = [num_layers] num_strips_front = [num_bars_front for i in range(num_layers_front)] num_strips_back = [num_bars_back for i in range(num_layers_back)] - self.v2_prototype.num_strips = num_strips_front + num_strips_back + num_strips_total = num_strips_front + num_strips_back + self.v2_prototype.num_strips = [num_strips_total] # zero_strip and half_total_width are identical - self.v2_prototype.zero_strip = [ - N * scint_bar_width / 2 for N in self.v2_prototype.num_strips - ] + self.v2_prototype.zero_strip = [[ + N * scint_bar_width / 2 for N in num_strips_total + ]] self.v2_prototype.half_total_width = self.v2_prototype.zero_strip self.v2_prototype.ecal_dx = 0.0 self.v2_prototype.ecal_dy = 0.0 @@ -270,8 +307,8 @@ def make_v2_prototype(self): "ldmx-hcal-prototype-v2.0[.].*", ] # Layers with even parity (0) are horizontal (scintillator bar length - # along the x-axis) - self.v2_prototype.horizontal_parity = 0 + # along the x-axis) in the back HCal + self.v2_prototype.back_horizontal_parity = 0 self.v2_prototype.side_3d_readout = 0 def make_v14(self): @@ -288,18 +325,19 @@ def make_v14(self): back_hcal_layerThick = ( back_hcal_absoThick + hcal_scintThick + 2.0 * hcal_airThick ) - back_hcal_dx = 2000.0 - back_hcal_dy = 2000.0 + back_hcal_scint_length = 2000.0 + back_hcal_dx = back_hcal_scint_length + back_hcal_dy = back_hcal_scint_length back_hcal_dz = back_hcal_numLayers * back_hcal_layerThick side_hcal_absoThick = 20.0 side_hcal_dz = 600.0 side_hcal_numModules = 4 side_hcal_numSections = 4 - side_hcal_length = [1800.0, 1600.0, 1400.0, 1200.0] + side_hcal_scint_length = [1800.0, 1600.0, 1400.0, 1200.0] side_hcal_numLayers = [4, 3, 2, 3] side_hcal_numPrevLayers = [0, 4, 7, 9] - side_hcal_numScintZ = [m / hcal_scintWidth for m in side_hcal_length] + side_hcal_numScintZ = [m / hcal_scintWidth for m in side_hcal_scint_length] side_hcal_numScintXY = side_hcal_dz / hcal_scintWidth # Number of layers oriented in x,y. 
Multiply by 2 to get the total number of layers side_hcal_numTotalLayers = ( @@ -312,7 +350,7 @@ def make_v14(self): side_hcal_absoThick + 2.0 * hcal_airThick + hcal_scintThick ) side_hcal_moduleWidth = side_hcal_numTotalLayers * side_hcal_layerThick - side_hcal_moduleLength = side_hcal_length[0] + side_hcal_moduleLength = side_hcal_scint_length[0] hcal_envelope_dx = 3000.0 hcal_envelope_dy = 3000.0 @@ -356,7 +394,7 @@ def make_v14(self): self.v14.side_3d_readout = 1 self.v14.side_num_modules = side_hcal_numModules # In back hcal: odd layers are horizontal, even layers are vertical - self.v14.horizontal_parity = 1 + self.v14.back_horizontal_parity = 1 # In side hcal: odd layers have strips oriented in z zero_strip_odd = [ -ecal_side_dx / 2.0, @@ -365,6 +403,25 @@ def make_v14(self): ecal_side_dy / 2.0, ] + self.v14.scint_length = [[back_hcal_scint_length for layer in range(back_hcal_numLayers)], + [0.] * side_hcal_numTotalLayers, # Filled below + [0.] * side_hcal_numTotalLayers, + [0.] * side_hcal_numTotalLayers, + [0.] * side_hcal_numTotalLayers] + for s in range(side_hcal_numSections): + for m in range(self.v14.side_num_modules): + for l in range(side_hcal_numLayers[m] * 2): + # Layer numbering starts at 1 + layer = l + 1 + # The back hcal (section 0) has already been handled + section_index = s + 1 + layer_index = l + side_hcal_numPrevLayers[m] * 2 + if layer % 2 == 0: + # Layer number is even, length is along X/Y + self.v14.scint_length[section_index][layer_index] = side_hcal_scint_length[m] + else: + # Layer number is odd, length is along Z + self.v14.scint_length[section_index][layer_index] = side_hcal_dz # side properties # num strips # for layer 1: side_hcal_numScintZ (odd layers have strips oriented in z) @@ -375,10 +432,12 @@ def make_v14(self): for m in range(self.v14.side_num_modules): for l in range(side_hcal_numLayers[m] * 2): if (l + 1) % 2 == 0: + # Layer number is even half_total_width_side.append(side_hcal_dz / 2) num_strips_side.append(int(side_hcal_numScintXY)) else: - half_total_width_side.append(side_hcal_length[m] / 2) + # Layer number is odd + half_total_width_side.append(side_hcal_scint_length[m] / 2) num_strips_side.append(int(side_hcal_numScintZ[m])) zero_strip_side = [] diff --git a/DetDescr/src/DetDescr/HcalGeometry.cxx b/DetDescr/src/DetDescr/HcalGeometry.cxx index 69476367f..290f370bc 100644 --- a/DetDescr/src/DetDescr/HcalGeometry.cxx +++ b/DetDescr/src/DetDescr/HcalGeometry.cxx @@ -8,7 +8,7 @@ namespace ldmx { -HcalGeometry::HcalGeometry(const framework::config::Parameters& ps) +HcalGeometry::HcalGeometry(const framework::config::Parameters &ps) : framework::ConditionsObject(HcalGeometry::CONDITIONS_OBJECT_NAME) { scint_thickness_ = ps.getParameter("scint_thickness"); scint_width_ = ps.getParameter("scint_width"); @@ -19,49 +19,107 @@ HcalGeometry::HcalGeometry(const framework::config::Parameters& ps) ecal_dx_ = ps.getParameter("ecal_dx"); ecal_dy_ = ps.getParameter("ecal_dy"); verbose_ = ps.getParameter("verbose"); - horizontal_parity_ = ps.getParameter("horizontal_parity"); + back_horizontal_parity_ = ps.getParameter("back_horizontal_parity"); side_3d_readout_ = ps.getParameter("side_3d_readout"); - + auto detectors_valid = ps.getParameter>("detectors_valid"); // If one of the strings in detectors_valid is "ldmx-hcal-prototype", we will // use prototype geometry initialization - auto is_prototype = - std::find_if(detectors_valid.cbegin(), detectors_valid.cend(), - [](const auto detector) { - return detector.find("ldmx-hcal-prototype") != - 
std::string::npos; - }) != detectors_valid.cend(); - - if (is_prototype) { - auto zero_strip_prototype_ = - ps.getParameter>("zero_strip", {}); - auto num_strips_prototype_ = - ps.getParameter>("num_strips", {}); - auto half_total_width_prototype_ = - ps.getParameter>("half_total_width", {}); - // The prototype only has one section, so we only have one entry into these - // vectors - zero_strip_ = {zero_strip_prototype_}; - num_strips_ = {num_strips_prototype_}; - half_total_width_ = {half_total_width_prototype_}; + is_prototype_ = std::find_if(detectors_valid.cbegin(), detectors_valid.cend(), + [](const auto detector) { + return detector.find("ldmx-hcal-prototype") != + std::string::npos; + }) != detectors_valid.cend(); + + num_strips_ = ps.getParameter>>("num_strips"); + half_total_width_ = + ps.getParameter>>("half_total_width"); + zero_strip_ = ps.getParameter>>("zero_strip"); + scint_length_ = + ps.getParameter>>("scint_length"); + + buildStripPositionMap(); + if (verbose_ > 0) { + printPositionMap(); } - else if (side_3d_readout_==1) { - num_strips_ = ps.getParameter>>("num_strips", {}); - half_total_width_ = ps.getParameter>>("half_total_width", {}); - zero_strip_ = ps.getParameter>>("zero_strip", {}); +} + +std::vector HcalGeometry::rotateGlobalToLocalBarPosition( + const std::vector &globalPosition, const ldmx::HcalID &id) const { + const auto orientation{getScintillatorOrientation(id)}; + switch (id.section()) { + case ldmx::HcalID::HcalSection::BACK: + switch (orientation) { + case ScintillatorOrientation::horizontal: + return {globalPosition[2], globalPosition[1], globalPosition[0]}; + case ScintillatorOrientation::vertical: + return {globalPosition[2], globalPosition[0], globalPosition[1]}; + } + case ldmx::HcalID::HcalSection::TOP: + case ldmx::HcalID::HcalSection::BOTTOM: + switch (orientation) { + case ScintillatorOrientation::horizontal: + return {globalPosition[1], globalPosition[2], globalPosition[0]}; + case ScintillatorOrientation::depth: + return {globalPosition[1], globalPosition[0], globalPosition[2]}; + } + case ldmx::HcalID::HcalSection::LEFT: + case ldmx::HcalID::HcalSection::RIGHT: + switch (orientation) { + case ScintillatorOrientation::vertical: + return {globalPosition[0], globalPosition[2], globalPosition[1]}; + case ScintillatorOrientation::depth: + return globalPosition; + } } - else { - auto zero_strip_v12_ = ps.getParameter>("zero_strip", {}); - auto num_strips_v12_ = ps.getParameter>("num_strips", {}); - auto half_total_width_v12_ = ps.getParameter>("half_total_width", {}); - - zero_strip_ = makeCanonicalLayeredParameter(zero_strip_v12_); - num_strips_ = makeCanonicalLayeredParameter(num_strips_v12_); - half_total_width_ = makeCanonicalLayeredParameter(half_total_width_v12_); +} + +HcalGeometry::ScintillatorOrientation +HcalGeometry::getScintillatorOrientation(const ldmx::HcalID id) const { + if (hasSide3DReadout()) { + // v14 or later detector + switch (id.section()) { + case ldmx::HcalID::HcalSection::TOP: + case ldmx::HcalID::HcalSection::BOTTOM: + // Odd layers are in z/depth direction, even are in the x/horizontal + // direction + return id.layer() % 2 == 0 ? ScintillatorOrientation::horizontal + : ScintillatorOrientation::depth; + + case ldmx::HcalID::HcalSection::LEFT: + case ldmx::HcalID::HcalSection::RIGHT: + // Odd layers are in the z/depth direction, even are in the y/vertical + // direction + return id.layer() % 2 == 0 ? 
ScintillatorOrientation::vertical + : ScintillatorOrientation::depth; + case ldmx::HcalID::HcalSection::BACK: + // Configurable + return id.layer() % 2 == back_horizontal_parity_ + ? ScintillatorOrientation::horizontal + : ScintillatorOrientation::vertical; + } // V14 or later detector } + if (isPrototype()) { - buildStripPositionMap(); + // The prototype only has the back section. However, the orientation + // depends on the configuration so we delegate to the + // back_horizontal_parity parameter + return id.layer() % 2 == back_horizontal_parity_ + ? ScintillatorOrientation::horizontal + : ScintillatorOrientation::vertical; + } // Prototype detector + // v13/v12 + switch (id.section()) { + // For the v13 side hcal, the bars in each section have the same + // orientation + case ldmx::HcalID::HcalSection::TOP: + case ldmx::HcalID::HcalSection::BOTTOM: + return ScintillatorOrientation::horizontal; + case ldmx::HcalID::HcalSection::LEFT: + case ldmx::HcalID::HcalSection::RIGHT: + return ScintillatorOrientation::vertical; + } // v13/v12 detector } void HcalGeometry::printPositionMap(int section) const { // Note that layer numbering starts at 1 rather than 0 @@ -89,7 +147,10 @@ void HcalGeometry::buildStripPositionMap() { ldmx::HcalID::HcalSection hcalsection = (ldmx::HcalID::HcalSection)section; - // the center of a layer: (layer-1) * (layer_thickness) + scint_thickness/2 + const ldmx::HcalID id{section, layer, strip}; + const auto orientation{getScintillatorOrientation(id)}; + // the center of a layer: (layer-1) * (layer_thickness) + + // scint_thickness/2 double layercenter = (layer - 1) * layer_thickness_.at(section) + 0.5 * scint_thickness_; @@ -97,13 +158,13 @@ void HcalGeometry::buildStripPositionMap() { double stripcenter = (strip + 0.5) * scint_width_; if (hcalsection == ldmx::HcalID::HcalSection::BACK) { - /** - For back Hcal: - - layers in z - - strips occupy thickness of scintillator in z (e.g. 20mm) - - strips orientation is in x(y) depending on horizontal parity - */ - // z position: zero-layer(z) + layer_z + scint_thickness + /** + For back Hcal: + - layers in z + - strips occupy thickness of scintillator in z (e.g. 20mm) + - strips orientation is in x(y) depending on back_horizontal parity + */ + // z position: zero-layer(z) + layer_z + scint_thickness / 2 z = zero_layer_.at(section) + layercenter; /** @@ -112,68 +173,69 @@ void HcalGeometry::buildStripPositionMap() { stripcenter will be large for +y(+x) and the half width of the strip needs to be subtracted The halfwidth of the scintillator is given by half_total_width_. - The x(y) position is set to the center of the strip (0). + The x(y) position is set to the center of the strip (0). */ - if (layerIsHorizontal(layer)) { + if (orientation == ScintillatorOrientation::horizontal) { y = stripcenter - getZeroStrip(section, layer); x = 0; } else { x = stripcenter - getZeroStrip(section, layer); y = 0; } - // std::cout << "back (section,layer,strip)" << section << " " << layer << " " << strip; - // std::cout << " (x,y,z) " << x << " " << y << " " << z << std::endl; } else { - /** - For side Hcal - - layers in y(x) - - all layers have strips in x(y) for top-bottom (left-right) sections - - all layers have strips occupying width of scintillator in z (e.g. 
50mm) - For 3D readout: - - odd layers have strips in z - - even layers have strips in x(y) for top-bottom (left-right) sections - - odd layers have strips occupying width of scintillator in x(y) - - even layers have strips occupying width of scintillator in z - */ - if(side_3d_readout_ && (layer%2==1)){ - // z position: zero-strip + half-width (center_strip) of strip - z = getZeroStrip(section, layer) + getHalfTotalWidth(section, layer); - } - else { - // z position: zero-strip(z) + strip_center(z) - z = getZeroStrip(section, layer) + stripcenter; - } - + /** + For side Hcal before 3D readout + - layers in y(x) + - all layers have strips in x(y) for top-bottom (left-right) sections + - all layers have strips occupying width of scintillator in z (e.g. + 50mm) + For 3D readout: + - odd layers have strips in z + - even layers have strips in x(y) for top-bottom (left-right) sections + - odd layers have strips occupying width of scintillator in x(y) + - even layers have strips occupying width of scintillator in z + */ + if (side_3d_readout_ && + orientation == ScintillatorOrientation::depth) { + // z position: zero-strip + half-width (center_strip) of strip + z = getZeroStrip(section, layer) + + getHalfTotalWidth(section, layer); + } else { + // z position: zero-strip(z) + strip_center(z) + z = getZeroStrip(section, layer) + stripcenter; + } + if (hcalsection == ldmx::HcalID::HcalSection::TOP or hcalsection == ldmx::HcalID::HcalSection::BOTTOM) { - y = zero_layer_.at(section) + layercenter; - x = getHalfTotalWidth(section, layer); - if(side_3d_readout_ && layer%2==1) - x = getZeroStrip(section, layer) + stripcenter; - if (hcalsection == ldmx::HcalID::HcalSection::BOTTOM) { - y *= -1; - x *= -1; - } + y = zero_layer_.at(section) + layercenter; + x = getHalfTotalWidth(section, layer); + if (side_3d_readout_ && + orientation == ScintillatorOrientation::horizontal) { + x = getZeroStrip(section, layer) + stripcenter; + } + if (hcalsection == ldmx::HcalID::HcalSection::BOTTOM) { + y *= -1; + x *= -1; + } } else { x = zero_layer_.at(section) + layercenter; y = getHalfTotalWidth(section, layer); - if(side_3d_readout_ && layer%2==1) - y = getZeroStrip(section, layer) + stripcenter; + if (side_3d_readout_ && + orientation == ScintillatorOrientation::vertical) { + y = getZeroStrip(section, layer) + stripcenter; + } if (hcalsection == ldmx::HcalID::HcalSection::RIGHT) { x *= -1; y *= -1; } } - - // std::cout << "side (section,layer,strip)" << section << " " << layer << " " << strip; - // std::cout << " (x,y,z) " << x << " " << y << " " << z << std::endl; } TVector3 pos; pos.SetXYZ(x, y, z); strip_position_map_[ldmx::HcalID(section, layer, strip)] = pos; - } // loop over strips - } // loop over layers - } // loop over sections -} // strip position map + } // loop over strips + } // loop over layers + } // loop over sections +} // strip position map -} // namespace ldmx +} // namespace ldmx diff --git a/DetDescr/test/DetectorIDTest.cxx b/DetDescr/test/DetectorIDTest.cxx index 0df59fb11..491da6442 100644 --- a/DetDescr/test/DetectorIDTest.cxx +++ b/DetDescr/test/DetectorIDTest.cxx @@ -2,7 +2,7 @@ * @file DetectorIDTest.cxx * @brief Test the operation of DetectorID class */ -#include "Framework/catch.hpp" //for TEST_CASE, REQUIRE, and other Catch2 macros +#include #include #include "DetDescr/DetectorID.h" //headers defining what we will be testing diff --git a/Dockerfile b/Dockerfile index af42ea665..9d55ef52b 100644 --- a/Dockerfile +++ b/Dockerfile @@ -3,17 +3,14 @@ # for the development image, look at the 
LDMX-Software/docker repo ############################################################################### -ARG DEV_TAG=v2.0 -FROM ldmx/dev:${DEV_TAG} +FROM ldmx/dev:v3.5.0 # install ldmx-sw into the container at /usr/local COPY . /code RUN mkdir /code/build &&\ - ./home/ldmx.sh /code/build cmake -DCMAKE_INSTALL_PREFIX=/usr/local .. &&\ - ./home/ldmx.sh /code/build make install &&\ + /etc/entry.sh /code/build cmake -DCMAKE_INSTALL_PREFIX=/usr/local .. &&\ + /etc/entry.sh /code/build make install &&\ rm -rf code &&\ ldconfig /usr/local/lib -COPY ./scripts/docker_entrypoint.sh /home/docker_entrypoint.sh -RUN chmod 755 /home/docker_entrypoint.sh -ENTRYPOINT ["/home/docker_entrypoint.sh"] +ENV LDMX_SW_INSTALL=/usr/local diff --git a/Ecal b/Ecal index bb668db60..1312dbcd5 160000 --- a/Ecal +++ b/Ecal @@ -1 +1 @@ -Subproject commit bb668db60852b86d85eab1bce910c64be02b16e2 +Subproject commit 1312dbcd5adc890988a02703927ae9a7a125ffb3 diff --git a/Framework b/Framework index 5ddc20eb5..f5ab60d3f 160000 --- a/Framework +++ b/Framework @@ -1 +1 @@ -Subproject commit 5ddc20eb5284da3a062ccfd64de620b6854fc1a1 +Subproject commit f5ab60d3fe92103e8febe87d2f8e654ded2b4904 diff --git a/Hcal b/Hcal index c1076e713..43b05f8fe 160000 --- a/Hcal +++ b/Hcal @@ -1 +1 @@ -Subproject commit c1076e713f3a1289f74a28118b19055c97aed1ce +Subproject commit 43b05f8fe377054d5618847d9716667a51318e54 diff --git a/Packing/src/Packing/RawDataFile/EventPacket.cxx b/Packing/src/Packing/RawDataFile/EventPacket.cxx index 51a3d2eb6..b9c3b7c25 100644 --- a/Packing/src/Packing/RawDataFile/EventPacket.cxx +++ b/Packing/src/Packing/RawDataFile/EventPacket.cxx @@ -46,6 +46,8 @@ utility::Reader& EventPacket::read(utility::Reader& r) { r.read(subsys_data_, num_subsys); r >> crc_; + + return r; } utility::Writer& EventPacket::write(utility::Writer& w) const { diff --git a/Packing/test/BinaryIO.cxx b/Packing/test/BinaryIO.cxx index 968492638..104bdbd27 100644 --- a/Packing/test/BinaryIO.cxx +++ b/Packing/test/BinaryIO.cxx @@ -1,8 +1,7 @@ +#include #include "TTree.h" -#include "Framework/catch.hpp" - #include "Framework/Configure/Parameters.h" #include "Framework/Event.h" #include "Framework/EventHeader.h" diff --git a/SimCore b/SimCore index d034a3938..2e40938e4 160000 --- a/SimCore +++ b/SimCore @@ -1 +1 @@ -Subproject commit d034a3938257e96c770933ec56bbb204aaaca8e4 +Subproject commit 2e40938e4fda7e00074a3661259e84b8a9f183aa diff --git a/Tools/include/Tools/AnalysisUtils.h b/Tools/include/Tools/AnalysisUtils.h index 77d21de3b..8a22a0e7c 100644 --- a/Tools/include/Tools/AnalysisUtils.h +++ b/Tools/include/Tools/AnalysisUtils.h @@ -21,13 +21,6 @@ namespace ldmx { class SimParticle; } -/* -struct TrackMaps { - std::unordered_map findable; - std::unordered_map loose; - std::unordered_map axial; -};*/ - namespace Analysis { /** @@ -38,7 +31,18 @@ namespace Analysis { * @return[out] Pointer to sim particle labeled as recoil electron (nullptr if * not found) */ -std::tuple getRecoil( +std::tuple +getRecoil(const std::map &particleMap); + +/** + * Helper function to getPNGamma. Checks if a particle has daughter particles + * produced by the photonNuclear process type that are present in the particle + * map. 
+ *
+ * */
+
+bool doesParticleHavePNDaughters(
+    const ldmx::SimParticle &gamma, const std::map &particleMap);
 /**
@@ -57,16 +61,10 @@ std::tuple getRecoil(
 * @return[out] Pointer to sim particle labeled as PN Gamma photon (nullptr if
 * not found)
 */
-const ldmx::SimParticle *getPNGamma(
-    const std::map &particleMap,
-    const ldmx::SimParticle *recoil, const float &energyThreshold);
-
-/**
- * Sort tracks depending on how findable they are.
- */
-// TrackMaps getFindableTrackMaps(const std::vector
-// &tracks);
+const ldmx::SimParticle *
+getPNGamma(const std::map &particleMap,
+           const ldmx::SimParticle *recoil, const float &energyThreshold);
-} // namespace Analysis
+} // namespace Analysis
-#endif // _ANALYSIS_UTILS_H_
+#endif // _ANALYSIS_UTILS_H_
diff --git a/Tools/src/Tools/AnalysisUtils.cxx b/Tools/src/Tools/AnalysisUtils.cxx
index ae2035eda..bcc515ebb 100644
--- a/Tools/src/Tools/AnalysisUtils.cxx
+++ b/Tools/src/Tools/AnalysisUtils.cxx
@@ -15,7 +15,6 @@
 //----------//
 // ldmx //
 //----------//
-//#include "Event/FindableTrackResult.h"
 #include "Framework/Exception/Exception.h"
 #include "SimCore/Event/SimParticle.h"
@@ -26,60 +25,63 @@ namespace Analysis {
-std::tuple getRecoil(
-    const std::map& particleMap) {
-  // The recoil electron always has a track ID of 1.
+std::tuple
+getRecoil(const std::map &particleMap) {
+  // The recoil electron is "produced" in the dark brem generation
+  for (const auto &[trackID, particle] : particleMap) {
+    if (particle.getPdgID() == 11 and
+        particle.getProcessType() ==
+            ldmx::SimParticle::ProcessType::eDarkBrem) {
+      return {trackID, &particle};
+    }
+  }
+  // only get here if recoil electron was not "produced" by dark brem
+  // in this case (bkgd), we interpret the primary electron as also the recoil
+  // electron
   return {1, &(particleMap.at(1))};
 }
-const ldmx::SimParticle* getPNGamma(
-    const std::map& particleMap,
-    const ldmx::SimParticle* recoil, const float& energyThreshold) {
-  // Get all of the daughter track IDs
-  auto daughterTrackIDs{recoil->getDaughters()};
-
-  auto pit = std::find_if(
-      daughterTrackIDs.begin(), daughterTrackIDs.end(),
-      [energyThreshold, particleMap](const int& id) {
-        // Get the SimParticle from the map
-        auto daughter{particleMap.at(id)};
-
-        // If the particle doesn't have any daughters, return false
-        if (daughter.getDaughters().size() == 0) return false;
-
-        // If the particle has daughters that were a result of a
-        // photo-nuclear reaction, and its energy is above threshold,
-        // then tag it as the PN gamma.
-        return (
-            (particleMap.at(daughter.getDaughters().front()).getProcessType() ==
-             ldmx::SimParticle::ProcessType::photonNuclear) &&
-            (daughter.getEnergy() >= energyThreshold));
-      });
-
-  // If a PN daughter was found, return it.
-  if (pit != daughterTrackIDs.end()) return &particleMap.at(*pit);
-
-  return nullptr;
+// Search the recoil electron's daughters for a photon
+// Check if the photon has daughters and if so, if they were produced by PN
+//
+
+bool doesParticleHavePNDaughters(
+    const ldmx::SimParticle &gamma,
+    const std::map &particleMap) {
+  for (auto daughterID : gamma.getDaughters()) {
+    if (particleMap.find(daughterID) != std::end(particleMap)) {
+      const auto daughter{particleMap.at(daughterID)};
+      const auto processType{daughter.getProcessType()};
+      if (processType == ldmx::SimParticle::ProcessType::photonNuclear) {
+        return true;
+      } // Was it PN?
+    } // Was it in the map?
+ } + + return false; } -/* -TrackMaps getFindableTrackMaps(const std::vector &tracks) { - - TrackMaps map; - - for (const FindableTrackResult &track : tracks ) { - if (track.is4sFindable() || track.is3s1aFindable() || -track.is2s2aFindable()) { map.findable[track.getParticleTrackID()] = &track; +const ldmx::SimParticle * +getPNGamma(const std::map &particleMap, + const ldmx::SimParticle *recoil, const float &energyThreshold) { + auto recoilDaughters{recoil->getDaughters()}; + for (auto recoilDaughterID : recoilDaughters) { + // Have we stored the recoil daughter? + if (particleMap.find(recoilDaughterID) != std::end(particleMap)) { + auto recoilDaughter{particleMap.at(recoilDaughterID)}; + // Is it a gamma? + if (recoilDaughter.getPdgID() == 22) { + // Does it have enough energy? + if (recoilDaughter.getEnergy() >= energyThreshold) { + // Are its daughters PN products? + if (doesParticleHavePNDaughters(recoilDaughter, particleMap)) { + return &particleMap.at(recoilDaughterID); + } } - - if (track.is2sFindable()) map.loose[track.getParticleTrackID()] = -&track; - - if (track.is2aFindable()) map.axial[track.getParticleTrackID()] = -&track; + } } + } + return nullptr; +} - return map; -}*/ - -} // namespace Analysis +} // namespace Analysis diff --git a/Tools/src/Tools/ONNXRuntime.cxx b/Tools/src/Tools/ONNXRuntime.cxx index 46aeb503b..92a73688e 100644 --- a/Tools/src/Tools/ONNXRuntime.cxx +++ b/Tools/src/Tools/ONNXRuntime.cxx @@ -12,6 +12,30 @@ namespace ldmx::Ort { using namespace ::Ort; +#if ORT_API_VERSION == 2 +// version used when first integrated onnx into ldmx-sw +// and version downloaded by cmake infrastructure +// only support x86_64 architectures +std::string get_input_name(std::unique_ptr& s, size_t i, AllocatorWithDefaultOptions a) { + return s->GetInputName(i, a); +} +std::string get_output_name(std::unique_ptr& s, size_t i, AllocatorWithDefaultOptions a) { + return s->GetOutputName(i, a); +} +#else +// latest version with prebuilds for both x86_64 and arm64 +// architectures but contains a slight API change +std::string get_input_name(std::unique_ptr& s, size_t i, AllocatorWithDefaultOptions a) { + return s->GetInputNameAllocated(i, a).get(); +} +std::string get_output_name(std::unique_ptr& s, size_t i, AllocatorWithDefaultOptions a) { + return s->GetOutputNameAllocated(i, a).get(); +} +#if ORT_API_VERSION != 15 +#pragma warning ("Untested ONNX version, not certain of API, assuming API version 15.") +#endif +#endif + Env ONNXRuntime::env_(ORT_LOGGING_LEVEL_WARNING, ""); ONNXRuntime::ONNXRuntime(const std::string& model_path, @@ -34,7 +58,7 @@ ONNXRuntime::ONNXRuntime(const std::string& model_path, for (size_t i = 0; i < num_input_nodes; i++) { // get input node names - std::string input_name(session_->GetInputName(i, allocator)); + std::string input_name(get_input_name(session_, i, allocator)); input_node_strings_[i] = input_name; input_node_names_[i] = input_node_strings_[i].c_str(); @@ -56,7 +80,7 @@ ONNXRuntime::ONNXRuntime(const std::string& model_path, for (size_t i = 0; i < num_output_nodes; i++) { // get output node names - std::string output_name(session_->GetOutputName(i, allocator)); + std::string output_name(get_output_name(session_, i, allocator)); output_node_strings_[i] = output_name; output_node_names_[i] = output_node_strings_[i].c_str(); diff --git a/Tracking b/Tracking index 328e6024a..f2c9c6566 160000 --- a/Tracking +++ b/Tracking @@ -1 +1 @@ -Subproject commit 328e6024ac5140f79271dc7217e0ad6c29ebc459 +Subproject commit 
f2c9c6566ae64fa4178f003b683925752a0b1ccc
diff --git a/Validation/.gitignore b/Validation/.gitignore
new file mode 100644
index 000000000..6ee179e73
--- /dev/null
+++ b/Validation/.gitignore
@@ -0,0 +1,2 @@
+build
+**egg-info
diff --git a/Validation/README.md b/Validation/README.md
new file mode 100644
index 000000000..a2299d284
--- /dev/null
+++ b/Validation/README.md
@@ -0,0 +1,59 @@
+# Validation
+
+Python package focused on comparing two or more "similar" LDMX event data files.
+
+## Installation
+Inside the container...
+```
+ldmx python3 -m pip install Validation/ --target install/python/ --no-cache
+```
+Outside the container, it is helpful to put the Validation module inside a virtual environment.
+This makes it easier to keep track of what you are doing.
+Below, I make two different virtual environments. `valid` has the Validation module files copied
+over (so it can be used even if ldmx-sw switches to a branch without the Validation files).
+`valid-ed` has an "editable" install of Validation so you can develop the Validation module.
+```
+python3 -m venv .venv/valid --prompt valid
+source .venv/valid/bin/activate
+python3 -m pip install Validation/
+# in new terminal
+python3 -m venv .venv/valid-ed --prompt valid-ed
+source .venv/valid-ed/bin/activate
+python3 -m pip install -e Validation/
+```
+Then if you are developing Validation you would `source /ldmx-sw/.venv/valid-ed/bin/activate`
+and if not `source /ldmx-sw/.venv/valid/bin/activate`.
+
+Other helpful options
+- Outside the container: `--user` may be required
+- Both: `--editable` may be helpful if developing Validation; it should be provided _before_ the path to Validation,
+  e.g. `python3 -m pip install --editable Validation/ --user`
+
+## Usage
+_Cannot_ be run from the ldmx-sw directory. `import Validation` prefers
+the local directory over the installed path, so it tries to
+load from the `ldmx-sw/Validation` directory.
+
+This could be fixed by renaming the package inside Validation.
+
+### CLI
+The Validation module is constructed to do some common tasks quickly on the command line.
+Printing out its help message shows how to run it and gives you the details on what
+parameters to provide.
+```
+python3 -m Validation -h
+```
+which should be run with `ldmx` if the module was installed in the container.
+
+### In Script
+Similar to the CLI, you can develop your own python script using Validation.
+Simply `import Validation` where you want to be using it.
+**Remember**: The plotting functions assume the user is in an interactive notebook
+unless the `out_dir` parameter is provided.
+
+### In Notebook
+Again, accessing this module post-installation is the same as for other modules: `import Validation`.
+This can help you develop plots that don't come pre-made within the Validation module.
+**If you are developing Validation and testing within a notebook**, you will need to restart
+the python kernel anytime you wish to test changes to the Validation module. This is necessary
+because Python keeps modules cached in memory during normal running.
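As a concrete illustration of the "In Script" usage described in the README above, here is a minimal sketch. The file paths and legend labels are placeholders; `Differ` and `File` are the classes added under `Validation/src/Validation/` in this diff, and the histogram key is the `EcalShowerFeatures` one used by `Validation.ecal`.
```
# minimal sketch: overlay one DQM histogram from two ldmx-sw runs
# (assumes two histogram files exist at the placeholder paths below)
from Validation import Differ, File

# label each file for the plot legend
reference = File('hists_trunk.root', hist_kwargs=dict(label='trunk'))
candidate = File('hists_my-branch.root', hist_kwargs=dict(label='my-branch'))

# first argument is the legend title, second is the extension for saved figures
d = Differ('ldmx-sw validation', '.pdf', reference, candidate)

# write <out_dir>/<histogram name>.pdf; omit out_dir to plt.show() interactively
d.plot1d('EcalShowerFeatures/EcalShowerFeatures_num_readout_hits',
         'N Readout Hits', out_dir='plots')
```
The same `Differ` can be pointed at event files instead, in which case `plot1d` takes a branch name (or a function of the loaded dataframe) rather than a histogram key.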
diff --git a/Validation/pyproject.toml b/Validation/pyproject.toml new file mode 100644 index 000000000..f05532b1c --- /dev/null +++ b/Validation/pyproject.toml @@ -0,0 +1,16 @@ +[build-system] +requires = ["setuptools>=43.0.0", "wheel"] +build-backend = "setuptools.build_meta" + +[project] +name = "Validation" +version = "0.0.1" +description = "LDMX Data Validation" +readme = "README.md" +requires-python = ">=3.7" +dependencies = [ + "mplhep", + "matplotlib", + "pandas", + "uproot" +] diff --git a/Validation/setup.py b/Validation/setup.py new file mode 100644 index 000000000..9f855282b --- /dev/null +++ b/Validation/setup.py @@ -0,0 +1,184 @@ +"""A setuptools based setup module. + +I copied this template from +https://github.com/pypa/sampleproject + +and followed the Python packaging guide +https://packaging.python.org/guides/distributing-packages-using-setuptools/ +""" + +# Always prefer setuptools over distutils +from setuptools import setup, find_packages +import pathlib + +here = pathlib.Path(__file__).parent.resolve() + +# Get the long description from the README file +long_description = (here / "README.md").read_text(encoding="utf-8") + +# Arguments marked as "Required" below must be included for upload to PyPI. +# Fields marked as "Optional" may be commented out. + +setup( + # This is the name of your project. The first time you publish this + # package, this name will be registered for you. It will determine how + # users can install this project, e.g.: + # + # $ pip install sampleproject + # + # And where it will live on PyPI: https://pypi.org/project/sampleproject/ + # + # There are some restrictions on what makes a valid project name + # specification here: + # https://packaging.python.org/specifications/core-metadata/#name + name="Validation", # Required + # Versions should comply with PEP 440: + # https://www.python.org/dev/peps/pep-0440/ + # + # For a discussion on single-sourcing the version across setup.py and the + # project code, see + # https://packaging.python.org/guides/single-sourcing-package-version/ + version="0.0.1", # Required + # This is a one-line description or tagline of what your project does. This + # corresponds to the "Summary" metadata field: + # https://packaging.python.org/specifications/core-metadata/#summary + description="LDMX Data Validation", # Optional + # This is an optional longer description of your project that represents + # the body of text which users will see when they visit PyPI. + # + # Often, this is the same as your README, so you can just read it in from + # that file directly (as we have already done above) + # + # This field corresponds to the "Description" metadata field: + # https://packaging.python.org/specifications/core-metadata/#description-optional + long_description=long_description, # Optional + # Denotes that our long_description is in Markdown; valid values are + # text/plain, text/x-rst, and text/markdown + # + # Optional if long_description is written in reStructuredText (rst) but + # required for plain-text or Markdown; if unspecified, "applications should + # attempt to render [the long_description] as text/x-rst; charset=UTF-8 and + # fall back to text/plain if it is not valid rst" (see link below) + # + # This field corresponds to the "Description-Content-Type" metadata field: + # https://packaging.python.org/specifications/core-metadata/#description-content-type-optional + long_description_content_type="text/markdown", # Optional (see note above) + # This should be a valid link to your project's main homepage. 
+ # + # This field corresponds to the "Home-Page" metadata field: + # https://packaging.python.org/specifications/core-metadata/#home-page-optional + url="https://github.com/LDMX-Software/ldmx-sw", # Optional + # This should be your name or the name of the organization which owns the + # project. + #author="A. Random Developer", # Optional + # This should be a valid email address corresponding to the author listed + # above. + #author_email="author@example.com", # Optional + # Classifiers help users find your project by categorizing it. + # + # For a list of valid classifiers, see https://pypi.org/classifiers/ + classifiers=[ # Optional + # How mature is this project? Common values are + # 3 - Alpha + # 4 - Beta + # 5 - Production/Stable + "Development Status :: 3 - Alpha", + # Indicate who your project is intended for + "Intended Audience :: Developers", + "Topic :: Software Development :: Build Tools", + # Pick your license as you wish + "License :: OSI Approved :: MIT License", + # Specify the Python versions you support here. In particular, ensure + # that you indicate you support Python 3. These classifiers are *not* + # checked by 'pip install'. See instead 'python_requires' below. + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.6", + "Programming Language :: Python :: 3.7", + "Programming Language :: Python :: 3.8", + "Programming Language :: Python :: 3.9", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3 :: Only", + ], + # This field adds keywords for your project which will appear on the + # project page. What does your project relate to? + # + # Note that this is a list of additional keywords, separated + # by commas, to be used to assist searching for the distribution in a + # larger catalog. + #keywords="", # Optional + # When your source code is in a subdirectory under the project root, e.g. + # `src/`, it is necessary to specify the `package_dir` argument. + package_dir={"": "src"}, # Optional + # You can just specify package directories manually here if your project is + # simple. Or you can use find_packages(). + # + # Alternatively, if you just want to distribute a single Python file, use + # the `py_modules` argument instead as follows, which will expect a file + # called `my_module.py` to exist: + # + # py_modules=["my_module"], + # + packages=find_packages(where="src"), # Required + # Specify which Python versions you support. In contrast to the + # 'Programming Language' classifiers above, 'pip install' will check this + # and refuse to install the project if the version does not match. See + # https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires + python_requires=">=3.6, <4", + # This field lists other packages that your project depends on to run. + # Any package you put here will be installed by pip when your project is + # installed, so they must be valid existing projects. + # + # For an analysis of "install_requires" vs pip's requirements files see: + # https://packaging.python.org/discussions/install-requires-vs-requirements/ + install_requires=["uproot", "pandas", "matplotlib", "mplhep"], # Optional + # List additional groups of dependencies here (e.g. development + # dependencies). Users will be able to install these using the "extras" + # syntax, for example: + # + # $ pip install sampleproject[dev] + # + # Similar to `install_requires` above, these must be valid existing + # projects. 
+ #extras_require={ # Optional + # "dev": ["check-manifest"], + # "test": ["coverage"], + #}, + # If there are data files included in your packages that need to be + # installed, specify them here. + #package_data={ # Optional + # "sample": ["package_data.dat"], + #}, + # Although 'package_data' is the preferred approach, in some case you may + # need to place data files outside of your packages. See: + # http://docs.python.org/distutils/setupscript.html#installing-additional-files + # + # In this case, 'data_file' will be installed into '/my_data' + #data_files=[("my_data", ["data/data_file"])], # Optional + # To provide executable scripts, use entry points in preference to the + # "scripts" keyword. Entry points provide cross-platform support and allow + # `pip` to create the appropriate form of executable for the target + # platform. + # + # For example, the following would provide a command called `sample` which + # executes the function `main` from this package when invoked: + #entry_points={ # Optional + # "console_scripts": [ + # "sample=sample:main", + # ], + #}, + # List additional URLs that are relevant to your project as a dict. + # + # This field corresponds to the "Project-URL" metadata fields: + # https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use + # + # Examples listed include a pattern for specifying where the package tracks + # issues, where the source is hosted, where to say thanks to the package + # maintainers, and where to support the project financially. The key is + # what's used to render the link text on PyPI. + #project_urls={ # Optional + # "Bug Reports": "https://github.com/pypa/sampleproject/issues", + # "Funding": "https://donate.pypi.org", + # "Say Thanks!": "http://saythanks.io/to/example", + # "Source": "https://github.com/pypa/sampleproject/", + #}, +) diff --git a/Validation/src/Validation/__init__.py b/Validation/src/Validation/__init__.py new file mode 100644 index 000000000..4ada6e544 --- /dev/null +++ b/Validation/src/Validation/__init__.py @@ -0,0 +1,8 @@ +"""LDMX Validation plotting and inspection""" + +from ._file import File +from ._differ import Differ +from . import ecal +from . import trigscint +from . import hcal +from . import photonuclear diff --git a/Validation/src/Validation/__main__.py b/Validation/src/Validation/__main__.py new file mode 100644 index 000000000..4f4237c70 --- /dev/null +++ b/Validation/src/Validation/__main__.py @@ -0,0 +1,107 @@ +"""CLI for comparison plots within LDMX Validation""" + +# standard +import argparse +import os +import re +import logging + +# external +import matplotlib +# this line allows us to run without an X server connected +# basically telling MPL that it will not open a window +matplotlib.use('Agg') + +import matplotlib.pyplot as plt +import mplhep +plt.style.use(mplhep.style.ROOT) + +# us +from ._differ import Differ +from ._file import File +from ._plotter import plotter + +# guard incase someone imports this somehow +if __name__ == '__main__' : + parser = argparse.ArgumentParser("python3 -m Validation", + description=""" + Make comparison plots between different files within a directory. + + The labels of different plots within the directory is controlled by + the parameter you choose. The parameters of a file are extracted from + the file name by splitting the filename into key-val pairs separated + by underscores (i.e. key1_val1_key2_val2_..._keyN_valN.root). If no + parameter is provided, then the first key/val is used. 
+ """ + ) + + parser.add_argument('data',help='directory of event and histogram files') + parser.add_argument('--log',help='logging level',choices=['info','debug','warn','error'], default='warn') + parser.add_argument('--label',help='label for grouping of data, defaults to data directory name') + parser.add_argument('--out-dir',help='directory to which to print plots. defaults to input data directory') + parser.add_argument('--systems',required=True, choices=plotter.__registry__.keys(), nargs='+', + help='list of plotters to run') + parser.add_argument('--output-type', help='File format to use to produce figures', default='pdf') + parser.add_argument('--param', + nargs='+', + help='parameter(s) in filename to use as file labels') + + arg = parser.parse_args() + + numeric_level = getattr(logging, arg.log.upper(), None) + if not isinstance(numeric_level, int) : + raise ValueError(f'Invalid log level: {arg.log}') + logging.basicConfig(level=numeric_level) + + logging.getLogger('matplotlib').setLevel(logging.ERROR) + + logging.debug(f'Parsed Arguments: {arg}') + + data = arg.data + if data.endswith('/') : + data = data[:-1] + + label = os.path.basename(data) + if arg.label is not None : + # update the raw string literal to intrepret new lines + label = arg.label.replace(r'\n','\n') + + out_dir = data + if arg.out_dir is not None : + out_dir = arg.out_dir + + if arg.output_type is not None: + output_type = arg.output_type + if output_type[0] != '.': + output_type = '.' + output_type + else: + output_type = '.pdf' + + logging.debug(f'Deduced Args: label = {label} out_dir = {out_dir}') + + root_files = [ File.from_path(os.path.join(data,f), legendlabel_parameter = arg.param) + for f in os.listdir(data) if f.endswith('.root') ] + + logging.debug(f'ROOT Files: {root_files}') + + hd = Differ(label, output_type, *[f for f in root_files if not f.is_events()]) + ed = Differ(label, output_type, *[f for f in root_files if f.is_events()]) + + logging.debug(f'histogram differ = {hd}') + logging.debug(f'event differ = {ed}') + + for syst in arg.systems : + logging.info(f'running {syst}') + h, e, plot = plotter.__registry__[syst] + if h and e : + logging.debug('both hist and event plotter') + plot(hd, ed, out_dir = out_dir) + elif h : + logging.debug('both hist-only plotter') + plot(hd, out_dir = out_dir) + elif e : + logging.debug('both event-only plotter') + plot(ed, out_dir = out_dir) + else : + logging.warn(f'Not running {syst} since it was not registered properly.') + diff --git a/Validation/src/Validation/_differ.py b/Validation/src/Validation/_differ.py new file mode 100644 index 000000000..54c39a013 --- /dev/null +++ b/Validation/src/Validation/_differ.py @@ -0,0 +1,163 @@ +"""Hold the two or more files we are comparing together in one class""" + +# standard modules +import os +import re + +# external dependencies +import matplotlib.pyplot as plt +import matplotlib +# us +from ._file import File + +class Differ : + """Differ allowing easy comparison of "similar" files + + The basic requirement of all files passed is that the columns + of data in the 'LDMX_Events' TTree are named exactly the same. + This is an easy requirement if the files are generated using + the same configuration script and/or the same installation of + ldmx-sw. 
+ + Parameters + ---------- + grp_name : str + Name to include in legend title to differentiate + this group of plots from another + output_type : str + The extension for the filetype that figures should be produced with in + non-interactive mode + args : list of tuples or Files + Each entry is a tuple (file_path, name, *args) where file_path + specifies the file to open and name is what should appear + in plot legends. + Alternatively, each entry can just be the already constructed File + + Example + ------- + Opening a differ is pretty quick and lightweight. + We do open the files with `uproot`. + + d = Differ('v3.2.0-alpha',('path/to/v12.root','v12'),('path/to/v14.root','v14')) + + Without any other options, the plotting with show the plot as if + we are in an interactive notebook. + + d.plot1d('EcalSimHits_valid/EcalSimHits_valid.edep_', 'Sim E Dep [MeV]') + + """ + + def __init__(self, grp_name, output_type, *args) : + def open_file(arg) : + if isinstance(arg, (list,tuple)) : + return File(*arg) + elif isinstance(arg, File) : + return arg + else : + raise KeyError(f'Argument provided {arg} is not a Validation.File or a tuple of arguments for its constructor') + + self.grp_name = grp_name + self.files = list(map(open_file, args)) + self.output_type = output_type + + def __repr__(self) : + """Short form representation of a Differ""" + return f'Differ ({self.grp_name}) {self.files}' + + def plot1d(self, column, xlabel, + ylabel = 'Count', + yscale = 'log', + ylim = (None,None), + out_dir = None, + file_name = None, + tick_labels = None, + legend_kw = dict(), + **hist_kwargs) : + """Plot a 1D histogram, overlaying the File entries + + We overlay the same 'column' of data of each File onto + the same figure, generating a legend with the title defined by + grp_name from the constructor. + + If out_dir is not provided, we assume we are in a notebook and + simply `plt.show()` the figure. If out_dir is not None (i.e. it + was defined), we assume we are in a non-interactive script and + write the figure to a PDF in the output file and then clear + the figure. 
+ + Parameters + ---------- + column : str or Callable + Determines the array of data from each File to histogram and plot + xlabel : str + Label of X axis + ylabel : str, optional + Label for Y axis (default: Count) + yscale : {'linear', 'log', 'symlog', 'logit', ...}, optional + Scale to use for y-axis (default: 'log') + ylim : 2-tuple + Limits to set for the y-axis (default: deduced by matplotlib) + out_dir : str + Directory in which to write the plotting file + tick_labels: list, optional + Tick labels for the x-axis + file_name : str + Name of file, no extension (default: column name with directory separators removed) + hist_kwargs : dict + All other key-word arguments are passed into each File.plot1d + """ + fig = plt.figure('differ',figsize=(11,8)) + ax = fig.subplots() + + for f in self.files : + weights, bins, patches = f.plot1d(ax, column, **hist_kwargs) + + ax.set_xlabel(xlabel) + ax.set_ylabel(ylabel) + ax.set_yscale(yscale) + ax.set_ylim(*ylim) + + if 'title' not in legend_kw : + legend_kw['title'] = self.grp_name + + if tick_labels is not None: + ax.set_xticks(bins) + ax.set_xticklabels(tick_labels) + ax.tick_params(axis='x', rotation=90) + + + ax.legend(**legend_kw) + + if out_dir is None : + plt.show() + else : + if file_name is None : + if isinstance(column, str) : + file_name = re.sub(r'^.*/','',column) + else : + # assume column is a function meaning the '__name__' + # parameter is defined by Python for us + file_name = column.__name__ + fig.savefig(os.path.join(out_dir,file_name)+ self.output_type, bbox_inches='tight') + fig.clf() + + def load(self, **kwargs) : + """Load all of the event data frames into memory + + The key-word arguments are used in each File's events call + to specify which branches (if not all of them) should be loaded + into memory and what manipulation (if any) to do. + """ + for f in self.files : + f.load(**kwargs) + + def manipulate(self, manipulation) : + """Manipulate all of the File data frames + + Parameters + ---------- + manipulation : Callable (e.g. 
a function) + Function operating on the data frame to manipuate it **in place** + """ + for f in self.files : + f.manipulate(manipulation) diff --git a/Validation/src/Validation/_file.py b/Validation/src/Validation/_file.py new file mode 100644 index 000000000..bd5b055df --- /dev/null +++ b/Validation/src/Validation/_file.py @@ -0,0 +1,215 @@ +"""Wrap an uproot file for some extra help plotting""" + +import uproot +import os +import logging + +class File : + """File entry in Differ object + + Parameters + ---------- + filepath : str or pathlib.Path + path specifying ROOT file to open for reading + legendlabel : str + name for labeling histograms plotted from this file + colmod : function with str argument returning str + modify an input column name to align with the columns in this file + can be used for example to change the pass name + hist_kwargs : dict + dictionary providing extra detail for the matplotlib hist call + helpful for specifying style options and other defaults for hists from this file + open_kwargs : dict + all other key-word arguments are passed to uproot.open + """ + log = logging.getLogger('File') + + def __init__(self, filepath, colmod = None, hist_kwargs = dict(), **open_kwargs) : + self.__file = uproot.open(filepath, **open_kwargs) + self.__colmod = colmod + self.__df = None + + if 'histtype' not in hist_kwargs : + hist_kwargs['histtype'] = 'step' + if 'linewidth' not in hist_kwargs : + hist_kwargs['linewidth'] = 2 + if 'bins' not in hist_kwargs : + hist_kwargs['bins'] = 'auto' + + self.__hist_kwargs = hist_kwargs + + def __repr__(self) : + """Represent this File in a short form""" + event = 'Events' + if not self.is_events() : + event = 'Histos' + return 'File { '+event+' labeled '+self.__hist_kwargs['label']+' }' + + def from_path(filepath, legendlabel_parameter = None) : + """Extract the legend-label for histograms from this file using the filepath + + This is separated from the contsructor because + + 1. It makes assumptions on how the file name is formatted + 2. It does not allow for modification of the keyword args in the constructor + + The parameters of a file are extracted from its name. + In general, we assume that the filename is of the form + + ____..._.root + + i.e. a mapping where the key/val pairs are separated by underscores. + + If the 'legendlabel_parameter' is not provided, we simply use the first key-val + pair in the file name as the legend-label. 
+ + Parameters + ---------- + filepath : str + Full path to the file to open + legendlabel_parameter : str, optional + key-name to use in legend-label for this File + """ + fn = os.path.basename(filepath).replace('.root','') + l = fn.split('_') + if len(l)%2 != 0 : + raise ValueError(f'The filename provided {fn} cannot be split into key_val pairs.') + file_params = { l[i] : l[i+1] for i in range(len(l)-1) if i%2 == 0 } + File.log.debug(f'Deduced File Parameters: {file_params}') + + if legendlabel_parameter is None : + legendlabel_parameter = [next(iter(file_params))] + ll = [file_params[l] for l in legendlabel_parameter if l in file_params] + if len(ll) < len(legendlabel_parameter): + missing_params = set(legendlabel_parameter) - set(file_params.keys()) + raise KeyError(f"{', '.join(missing_params)} not in deduced file parameters:\n{file_params}") + ll='_'.join(ll) + + File.log.debug(f'Deduced File Label: {ll}') + return File(filepath, hist_kwargs=dict(label=ll)) + + def keys(self, *args, **kwargs) : + """Callback into uproot keys + + Helpful for exploring the file when trying to decide + what to plot within a notebook + """ + return self.__file.keys(*args, **kwargs) + + def is_events(self) : + """Check if this file is an Events file + + We simply see if the 'LDMX_Events' object exists within + the file. If it does, we assume that it is an event file. + Otherwise, we assume that it is a histogram file. + """ + return 'LDMX_Events' in self.__file + + def events(self, **kwargs) : + """Callback for retrieving a full in-memory data frame of the events + + All key-word arguments are passed to the uproot.arrays method. + + We change the default 'library' to be pandas which can be overridden + by a user if desired. + """ + + if 'library' not in kwargs : + kwargs['library'] = 'pd' + return self.__file['LDMX_Events'].arrays(**kwargs) + + def load(self, manipulation = None, **kwargs) : + """Instead of giving the events data frame to the caller, + we store the dataframe here for later batch processing + + manipulation is a function operating on the loaded dataframe + which is there for people to rename columns, calculate new + columns, etc... + + All the kwargs are simply provided to events for selecting + the branches of LDMX_Events to load into memory. + """ + if not self.is_events() : + raise AttributeError('File is not an Events file and so the data cannot be loaded into an in-memory data frame') + + self.__df = self.events(**kwargs) + if manipulation is not None : + manipulation(self.__df) + + def manipulate(self, manipulation) : + """Apply the passed manipulation to the dataframe""" + manipulation(self.__df) + + def plot1d(self, ax, obj, **hist_kwargs) : + """Plot the input uproot object as a histogram on the input axes + + Provided the same interface for different plotting options, all + depending on how the input 'obj' is deduced. + + If 'obj' is not a str, we assume that is is a callable (e.g. a function) + that will take the in-memory dataframe (if it is loaded) or the uproot file + (if the dataframe isn't loaded) in order to calculate the array of values + to histogram. This is helpful for doing simple calculations or cuts that + this class does not implement. + + If 'obj' is a str, then we need to use it to deduce what we are plotting. + + If a in-memory dataframe exists, we check if 'obj' is a column in it first + and use that column of the dataframe as the values to histogram. 
+ + If no dataframe exists (or 'obj' is not in the dataframe), then we get + the object from the uproot file we have with 'obj' as the full in-file + path to the object (using the configured colmod function in order to + augment the name if necessary). + + Now, if the uproot object is a subclass of uproot's Histogram class, + we retrieve the bin edges from it, check that its dimension is one, + and plot the histogram using its values as the entry weights and the + loaded bin edges (the key-word arguments 'bins' and 'weights' are + overwritten by the values read in from the file). + + If the uproot object is not a subclass of uproot's Histogram, + then we assume that it is a branch and we extract a flattened + array of values from it using uproot.array and pandas. + + Parameters + ---------- + ax : matplotlib.Axes + axes on which to plot the histogram + obj : str or function + object to plot + hist_kwargs : dict + All other key-word arguments are passed to plt.hist + """ + + for k, v in self.__hist_kwargs.items() : + if k not in hist_kwargs : + hist_kwargs[k] = v + + if not isinstance(obj, str) : + if self.__df is None : + return ax.hist(obj(self.__file), **hist_kwargs) + else : + return ax.hist(obj(self.__df), **hist_kwargs) + + if self.__df is not None and obj in self.__df : + return ax.hist(self.__df[obj], **hist_kwargs) + + uproot_obj_path = obj + if self.__colmod is not None : + uproot_obj_path = self.__colmod(obj) + + uproot_obj = self.__file[uproot_obj_path] + + if issubclass(type(uproot_obj), uproot.behaviors.TH1.Histogram) : + edges = uproot_obj.axis('x').edges() + dim = len(edges.shape) + if dim > 1 : + raise KeyError(f'Attempted to do a 1D plot of a {dim} dimension histogram.') + # overwrite bins and weights with what the serialized histogram has + hist_kwargs['bins'] = edges + hist_kwargs['weights'] = uproot_obj.values() + return ax.hist((edges[1:]+edges[:-1])/2, **hist_kwargs) + else : + return ax.hist(uproot_obj.array(library='pd').values, **hist_kwargs) + diff --git a/Validation/src/Validation/_plotter.py b/Validation/src/Validation/_plotter.py new file mode 100644 index 000000000..1975bdcd0 --- /dev/null +++ b/Validation/src/Validation/_plotter.py @@ -0,0 +1,62 @@ +"""Decorator for registering plotters + +This is the wackiest python thing in this package and is +used to allow the CLI to have a list of all plotters in +submodules. In order for this to function, a plotting function +needs... + +1. to be in a module imported in __init__.py. This is required + so that the function is imported when the parent module is + imported +2. to be decorated by the 'plotter' decorator below. +""" + +def plotter(hist = False, event = True) : + """decorator for registering plotters + + There are three options for a plotter. + + 1. Plots from histogram files + 2. Plots from event files + 3. Plots from both at the same time + + (1) and (2) have the same function signature but will + be given a histogram-file or event-file Differ (respectively). + + (3) has a longer signature for accepting both histogram- and event- + file Differ objects at once. 
+
+    Examples
+    --------
+    Register a histogram-file plotter
+
+        @plotter(hist=True,event=False)
+        def my_hist_plotter(d, out_dir = None) :
+            # d will be a Differ with histogram-files
+
+    Register an event-file plotter
+
+        @plotter(hist=False,event=True)
+        def my_event_plotter(d, out_dir = None) :
+            # d will be a Differ with event-files
+
+    Register a plotter that can do both
+
+        @plotter(hist=True,event=True)
+        def plots_both(hd, ed, out_dir = None) :
+            # hd will be histogram-files and ed will be event-files
+
+    Attributes
+    ----------
+    __registry__ : dict
+        Dictionary of registered plotters
+    """
+    if not hist and not event :
+        raise ValueError('A plotter needs to handle histogram files, event files, or both')
+    def plotter_decorator(func) :
+        func_name = func.__module__.replace('Validation.','')+'.'+func.__name__
+        plotter.__registry__[func_name] = (hist, event, func)
+        return func
+    return plotter_decorator
+
+plotter.__registry__ = dict()
diff --git a/Validation/src/Validation/ecal.py b/Validation/src/Validation/ecal.py
new file mode 100644
index 000000000..e4eae9b79
--- /dev/null
+++ b/Validation/src/Validation/ecal.py
@@ -0,0 +1,74 @@
+"""Plotting of ECal-related validation plots"""
+
+from ._differ import Differ
+from ._plotter import plotter
+import logging
+
+log = logging.getLogger('ecal')
+
+@plotter(hist=True,event=False)
+def shower_feats(d : Differ, out_dir = None) :
+    """Plot ECal shower features from the already created DQM histograms
+
+    Parameters
+    ----------
+    d : Differ
+        Differ containing files that are not event files (presumably histogram files)
+    """
+
+    col, name = 'EcalShowerFeatures/EcalShowerFeatures_deepest_layer_hit', 'Deepest Layer Hit'
+    log.info(f'plotting {col}')
+    d.plot1d(col, name, out_dir = out_dir, legend_kw = dict(loc='upper left'))
+
+    features = [
+        ('EcalShowerFeatures/EcalShowerFeatures_num_readout_hits', 'N Readout Hits'),
+        ('EcalShowerFeatures/EcalShowerFeatures_summed_det', 'Total Rec Energy [MeV]'),
+        ('EcalShowerFeatures/EcalShowerFeatures_summed_iso', 'Total Isolated Energy [MeV]'),
+        ('EcalShowerFeatures/EcalShowerFeatures_summed_back', 'Total Back Energy [MeV]'),
+        ('EcalShowerFeatures/EcalShowerFeatures_max_cell_dep', 'Max Cell Dep [MeV]'),
+        ('EcalShowerFeatures/EcalShowerFeatures_shower_rms', 'Shower RMS [mm]'),
+        ('EcalShowerFeatures/EcalShowerFeatures_x_std', 'X Standard Deviation [mm]'),
+        ('EcalShowerFeatures/EcalShowerFeatures_y_std', 'Y Standard Deviation [mm]'),
+        ('EcalShowerFeatures/EcalShowerFeatures_avg_layer_hit', 'Avg Layer Hit'),
+        ('EcalShowerFeatures/EcalShowerFeatures_std_layer_hit', 'Std Dev Layer Hit')
+        ]
+    for col, name in features :
+        log.info(f'plotting {col}')
+        d.plot1d(col, name, out_dir = out_dir)
+
+@plotter(hist=False,event=True)
+def sim_hits(d : Differ, out_dir = None) :
+    """Plot ECal sim-hit validation plots from event files
+
+    Parameters
+    ----------
+    d : Differ
+        Differ containing files that are event files
+    """
+
+    # loading just the IDs into memory so that we can calculate the layer
+    # the hits are in and count how many sim hits there were per event
+    def rename_columns_and_calc_layer(data) :
+        data.rename(inplace=True,columns=lambda cn : cn.replace('EcalSimHits_valid.','').replace('_',''))
+        data['layer'] = (data['id'].values >> 17) & 0x3f
+    d.load(manipulation = rename_columns_and_calc_layer, filter_name = 'EcalSimHits_valid/EcalSimHits_valid.id_')
+
+    # plot number of sim hits
+    log.info('plotting num sim hits')
+    d.plot1d(lambda data : data.reset_index(level=1).index.value_counts(), 'Num Sim Hits',
+             ylabel='Events',
+             range=(0,200), file_name = 'ecal_num_sim_hits', out_dir = out_dir)
+    branches = [
+        ('layer', 'Sim Layer Hit',
+         dict(bins=34, density=True)),
+        ('LDMX_Events/EcalSimHits_valid/EcalSimHits_valid.edep_', 'Sim Energy Dep [MeV]',
+         dict(range=(0,50),bins=50, density=True)),
+        ('LDMX_Events/EcalSimHits_valid/EcalSimHits_valid.time_', 'Sim Hit Time [ns]',
+         dict(range=(0,100000),bins=100,density=True)),
+        ('LDMX_Events/EcalSimHits_valid/EcalSimHits_valid.x_', 'Sim Hit x [mm]',
+         dict(range=(-300,300),bins=60, density=True)),
+        ('LDMX_Events/EcalSimHits_valid/EcalSimHits_valid.y_', 'Sim Hit y [mm]',
+         dict(range=(-300,300),bins=60, density=True)),
+        ]
+    for col, name, kw in branches :
+        log.info(f'plotting {col}')
+        d.plot1d(col, name, ylabel = 'Fraction Sim Hits', out_dir = out_dir, **kw)
diff --git a/Validation/src/Validation/hcal.py b/Validation/src/Validation/hcal.py
new file mode 100644
index 000000000..8ab24733e
--- /dev/null
+++ b/Validation/src/Validation/hcal.py
@@ -0,0 +1,64 @@
+
+from ._differ import Differ
+from ._plotter import plotter
+import logging
+
+log = logging.getLogger('hcal')
+
+@plotter(hist=True, event=False)
+def dqm(d: Differ, out_dir=None):
+    sections = ['back', 'top', 'bottom', 'left', 'right']
+    histogram_name_format = 'hcal_dqm_{0}/hcal_dqm_{0}_{1}'
+    histograms = [
+        ('pe', 'Reconstructed PE per hit ({})'),
+        ('hit_time', 'HCal hit time ({}) [ns]'),
+        ('sim_hit_time', 'HCal hit time ({}) [ns]'),
+        ("layer", "Layer number ({})"),
+        ("sim_layer", "Layer number ({})"),
+        ("energy", "Reconstructed hit energy in the HCal ({})"),
+        ("total_energy", "Total reconstructed energy in the HCal ({}) [MeV]"),
+        ("sim_energy", "Total energy deposited in the HCal ({}) [MeV]"),
+        ("sim_energy_per_bar", "Total energy deposited per bar in the HCal ({}) [MeV]"),
+        ("sim_total_energy", "Total energy deposited in the HCal ({}) [MeV]"),
+        ("total_pe", "Total photoelectrons in the HCal ({})"),
+        ('max_pe', "Maximum photoelectrons in any HCal bar ({})"),
+        ("hit_multiplicity", "HCal hit multiplicity ({})"),
+        ("sim_hit_multiplicity", "HCal hit multiplicity ({})"),
+        ("sim_num_bars_hit", "HCal hit multiplicity ({})"),
+        ("vetoable_hit_multiplicity", "Multiplicity of vetoable hits (> 8PE) ({})"),
+        ('max_pe_time', "Max PE hit time ({}) [ns]",),
+        ('along_x', 'Reconstructed hit position along horizontal bars [mm]'),
+        ('along_y', 'Reconstructed hit position along vertical bars [mm]' ),
+        ('along_z', 'Reconstructed hit position along z-oriented bars [mm]'),
+        ('sim_along_x', 'Reconstructed hit position along horizontal bars [mm]'),
+        ('sim_along_y', 'Reconstructed hit position along vertical bars [mm]' ),
+        ('sim_along_z', 'Reconstructed hit position along z-oriented bars [mm]'),
+    ]
+
+    log.info("Making the efficiency histogram...")
+    d.plot1d('HcalInefficiencyAnalyzer/HcalInefficiencyAnalyzer_efficiency',
+             'Hcal part involved in veto',
+             'Efficiency',
+             tick_labels=['', 'Back', 'Top', 'Bottom',
+                          'Right', 'Left', 'Any',
+                          'Both', 'Back only', 'Side only',
+                          'Neither', '', ''],
+             out_dir=out_dir,
+             yscale='linear',
+    )
+
+    for section in sections:
+        for histogram, title in histograms:
+            name = histogram_name_format.format(section, histogram)
+            title = title.format(section.capitalize())
+            log.info(f"Making the {name} histogram...")
+            d.plot1d(name, title, out_dir=out_dir, density=True, ylabel='Event rate')
+        log.info(f"Making the inefficiency figure for {section}")
+        d.plot1d(f'HcalInefficiencyAnalyzer/HcalInefficiencyAnalyzer_inefficiency_{section}',
+                 'Layer',
+                 'Inefficiency (5PE)',
+                 out_dir=out_dir,
+                 density=True)
+
+
+
diff --git a/Validation/src/Validation/photonuclear.py b/Validation/src/Validation/photonuclear.py
new file mode 100644
index 000000000..b43e612cf
--- /dev/null
+++ b/Validation/src/Validation/photonuclear.py
@@ -0,0 +1,81 @@
+
+
+from ._differ import Differ
+from ._plotter import plotter
+import logging
+
+log = logging.getLogger('photonuclear')
+
+@plotter(hist=True, event=False)
+def pndqm(d: Differ, out_dir=None):
+    event_type_labels = ['', 'Nothing hard', 'n', 'nn', '≥ 3n', 'π', 'ππ',
+                         'π₀', 'πA', 'π2A', 'ππA', 'π₀A',
+                         'π₀2A', 'π₀πA', 'p', 'pp', 'pn', 'K_L⁰X', 'KX',
+                         'K_S⁰X', 'exotics', 'multi-body', '', '', '']
+
+    compact_event_type_labels = ['', 'n', 'K^{±}X', 'K⁰', 'nn', 'soft', 'other', '','']
+    neutron_event_type_labels = ['', '', 'nn', 'pn', 'π^+n', 'π⁰n', '', '']
+
+    d.plot1d("PN/PN_event_type", "Event category (200 MeV cut)",
+             tick_labels=event_type_labels,
+             out_dir=out_dir,
+             density=True
+             )
+    d.plot1d("PN/PN_event_type_500mev", "Event category (500 MeV cut)",
+             tick_labels=event_type_labels,
+             out_dir=out_dir,
+             density=True
+             )
+    d.plot1d("PN/PN_event_type_2000mev", "Event category (2000 MeV cut)",
+             tick_labels=event_type_labels,
+             out_dir=out_dir,
+             density=True
+
+             )
+    d.plot1d("PN/PN_event_type_compact", "",
+             tick_labels=compact_event_type_labels,
+             out_dir=out_dir)
+    d.plot1d("PN/PN_event_type_compact_500mev", "",
+             tick_labels=compact_event_type_labels,
+             out_dir=out_dir)
+    d.plot1d("PN/PN_event_type_compact_2000mev", "",
+             tick_labels=compact_event_type_labels,
+             out_dir=out_dir)
+    d.plot1d("PN/PN_1n_event_type", "",
+             tick_labels=neutron_event_type_labels,
+             out_dir=out_dir)
+    d.plot1d("PN/PN_pn_particle_mult", "Photo-nuclear Multiplicity", out_dir=out_dir)
+    d.plot1d("PN/PN_pn_neutron_mult", "Photo-nuclear Neutron Multiplicity", out_dir=out_dir)
+    d.plot1d("PN/PN_pn_gamma_energy", "γ Energy (MeV)", out_dir=out_dir)
+    d.plot1d("PN/PN_pn_total_ke" , "Total Kinetic Energy of Photo-Nuclear Products (MeV)", out_dir=out_dir)
+    d.plot1d("PN/PN_pn_total_neutron_ke" , "Total Kinetic Energy of Photo-Nuclear Neutrons (MeV)", out_dir=out_dir)
+    d.plot1d("PN/PN_1n_neutron_energy", "Neutron Energy (MeV)", out_dir=out_dir)
+    d.plot1d("PN/PN_1n_energy_diff", "E(γ_{PN}) - E(n) (MeV)", out_dir=out_dir)
+    d.plot1d("PN/PN_1n_energy_frac", "E(n)/E(γ_{PN}) (MeV)", out_dir=out_dir)
+    d.plot1d("PN/PN_2n_n2_energy", "Energy of second hardest neutron (MeV)", out_dir=out_dir)
+    d.plot1d("PN/PN_2n_energy_frac", "E(n)/E(γ_{PN}) (MeV)", out_dir=out_dir)
+    d.plot1d("PN/PN_2n_energy_other", "E_{other} (MeV)", out_dir=out_dir)
+    d.plot1d("PN/PN_1kp_energy", "Charged Kaon Energy (MeV)", out_dir=out_dir)
+    d.plot1d("PN/PN_1kp_energy_diff", "E(γ_{PN}) - E(K±) (MeV)", out_dir=out_dir)
+    d.plot1d("PN/PN_1kp_energy_frac", "E(K±)/E(γ_{PN}) (MeV)", out_dir=out_dir)
+    d.plot1d("PN/PN_1k0_energy", "K0 Energy (MeV)", out_dir=out_dir)
+    d.plot1d("PN/PN_1k0_energy_diff", "E(γ_{PN}) - E(K0) (MeV)", out_dir=out_dir)
+    d.plot1d("PN/PN_1k0_energy_frac", "E(K0)/E(γ_{PN}) (MeV)", out_dir=out_dir)
+
+    d.plot1d("PN/PN_recoil_vertex_x", "Recoil e^{-} Vertex - x (mm)", out_dir=out_dir)
+    d.plot1d("PN/PN_recoil_vertex_y", "Recoil e^{-} Vertex - y (mm)", out_dir=out_dir)
+    d.plot1d("PN/PN_recoil_vertex_z", "Recoil e^{-} Vertex - z (mm)", out_dir=out_dir)
+
+    d.plot1d("PN/PN_pn_gamma_int_z", "γ Interaction Vertex (mm)", out_dir=out_dir)
+    d.plot1d("PN/PN_pn_gamma_vertex_z", "γ Vertex (mm)", out_dir=out_dir)
+    d.plot1d("PN/PN_pn_gamma_vertex_x", "γ Vertex (mm)", out_dir=out_dir)
+    d.plot1d("PN/PN_pn_gamma_vertex_y", "γ Vertex (mm)", out_dir=out_dir)
+
+    d.plot1d("PN/PN_hardest_ke", "Kinetic Energy Hardest Photo-nuclear Particle (MeV)", out_dir=out_dir)
+    d.plot1d("PN/PN_hardest_theta", "#theta of Hardest Photo-nuclear Particle (Degrees)", out_dir=out_dir)
+    d.plot1d("PN/PN_hardest_p_ke", "Kinetic Energy Hardest Photo-nuclear Proton (MeV)", out_dir=out_dir)
+    d.plot1d("PN/PN_hardest_p_theta", "#theta of Hardest Photo-nuclear Proton (Degrees)", out_dir=out_dir)
+    d.plot1d("PN/PN_hardest_n_ke", "Kinetic Energy Hardest Photo-nuclear Neutron (MeV)", out_dir=out_dir)
+    d.plot1d("PN/PN_hardest_n_theta", "#theta of Hardest Photo-nuclear Neutron (Degrees)", out_dir=out_dir)
+    d.plot1d("PN/PN_hardest_pi_ke", "Kinetic Energy Hardest Photo-nuclear #pi (MeV)", out_dir=out_dir)
+    d.plot1d("PN/PN_hardest_pi_theta", "#theta of Hardest Photo-nuclear #pi (Degrees)", out_dir=out_dir)
diff --git a/Validation/src/Validation/trigscint.py b/Validation/src/Validation/trigscint.py
new file mode 100644
index 000000000..51a8b7c70
--- /dev/null
+++ b/Validation/src/Validation/trigscint.py
@@ -0,0 +1,60 @@
+"""Plotting of TrigScint-related validation plots"""
+
+from ._differ import Differ
+from ._plotter import plotter
+
+@plotter(hist=True,event=False)
+def dqm(d : Differ, out_dir = None) :
+    """Plot TrigScint-related validation plots
+
+    Parameters
+    ----------
+    d : Differ
+        Differ containing files that are not event files (presumably histogram files)
+    """
+
+    collections=["Sim", "Digi", "Cluster"]
+    pads = ["Pad1", "Pad2", "Pad3"]
+    tColl="TrigScintTracks"
+
+    track_hists = [
+        ('centroid', 'Track centroid [in channel nb]'),
+        ('n_tracks', 'Track multiplicity'),
+        ('n_clusters', 'Track cluster multiplicity'),
+        ('beamEfrac', 'Beam electron energy fraction'),
+        ('residual', 'Track residual [in channel nb]'),
+        ('x', 'Track x [mm]'),
+        ('y', 'Track y [mm]')
+        ]
+    for member, name in track_hists :
+        d.plot1d(f'{tColl}/{tColl}_{member}', name, out_dir = out_dir)
+
+    for pad in pads :
+        for coll in collections :
+            shared_members = [ ('x', 'x [mm]'), ('y', 'y [mm]'), ('z', 'z [mm]'),
+                               ('n_hits', 'Hit multiplicity') ]
+            for member, name in shared_members :
+                d.plot1d(f'TrigScint{coll}{pad}/TrigScint{coll}{pad}_{member}', f'{coll} {name}',
+                         out_dir = out_dir)
+        special_members = [
+            (f'TrigScintSim{pad}/TrigScintSim{pad}_hit_time', 'Simhit time [ns]'),
+            (f'TrigScintDigi{pad}/TrigScintDigi{pad}_total_pe', 'Total PE in event'),
+            (f'TrigScintDigi{pad}/TrigScintDigi{pad}_pe', 'Total PE in bars'),
+            (f'TrigScintSim{pad}/TrigScintSim{pad}_id', 'Channel ID'),
+            (f'TrigScintDigi{pad}/TrigScintDigi{pad}_id', 'Channel ID'),
+            (f'TrigScintDigi{pad}/TrigScintDigi{pad}_hit_time', 'Digi hit time [ns]'),
+            (f'TrigScintDigi{pad}/TrigScintDigi{pad}_id_noise', 'ID of noise hits'),
+            (f'TrigScintDigi{pad}/TrigScintDigi{pad}_pe_noise', 'PE in noise hits'),
+            (f'TrigScintDigi{pad}/TrigScintDigi{pad}_n_hits_noise', 'Number of noise hits'),
+            (f'TrigScintCluster{pad}/TrigScintCluster{pad}_centroid', 'Cluster centroid [in channel nb]'),
+            (f'TrigScintCluster{pad}/TrigScintCluster{pad}_total_pe', 'Cluster total PE in event'),
+            (f'TrigScintCluster{pad}/TrigScintCluster{pad}_n_clusters', 'Cluster multiplicity'),
+            (f'TrigScintCluster{pad}/TrigScintCluster{pad}_seed', 'Cluster seed [in channel nb]'),
+            (f'TrigScintCluster{pad}/TrigScintCluster{pad}_cluster_time', 'Cluster time [ns]'),
+            (f'TrigScintCluster{pad}/TrigScintCluster{pad}_beamEfrac', 'Beam electron energy fraction')
+            # not implemented but should be
+            #(f'TrigScintDigi{pad}/TrigScintDigi{pad}_beamEfrac', 'Beam electron energy fraction')
+            ]
+        for member, name in special_members :
+            d.plot1d(member, name, out_dir = out_dir)
+
diff --git a/cmake b/cmake
index 6b8a07a16..9a6c6a487 160000
--- a/cmake
+++ b/cmake
@@ -1 +1 @@
-Subproject commit 6b8a07a16e82304d01de7072929d93cf2ecbd51a
+Subproject commit 9a6c6a4875505e9c80662db67bd8dfb09e38636d
diff --git a/scripts/docker_entrypoint.sh b/scripts/docker_entrypoint.sh
deleted file mode 100644
index 52a3c309c..000000000
--- a/scripts/docker_entrypoint.sh
+++ /dev/null
@@ -1,49 +0,0 @@
-#!/bin/bash
-
-set -e
-
-###############################################################################
-# Entry point for the ldmx production container
-# The basic idea is that we want to go into the container,
-# setup the ldmx-sw working environment, and then
-# run the application with any provided arguments.
-#
-# We mount and run inside the present working directory.
-###############################################################################
-
-## Bash environment script for use **within** the docker container
-## Assuming the following environment variables are already defined by Dockerfile:
-#   XercesC_DIR - install of xerces-c
-#   ONNX_DIR    - install of onnx runtime
-#   ROOTSYS     - install of root
-#   G4DIR       - install of Geant4
-
-source $ROOTSYS/bin/thisroot.sh
-source $G4DIR/bin/geant4.sh
-
-# add ldmx-sw and ldmx-analysis installs to the various paths
-export LDMX_SW_INSTALL=/usr/local/
-export LD_LIBRARY_PATH=$LDMX_SW_INSTALL/lib:$LD_LIBRARY_PATH
-export PYTHONPATH=$LDMX_SW_INSTALL/python:$PYTHONPATH
-export PATH=$LDMX_SW_INSTALL/bin:$PATH
-
-# add externals installed along side ldmx-sw
-# TODO this for loop might be very slow... might want to hard-code the externals path
-for _external_path in $LDMX_SW_INSTALL/external/*/lib
-do
-    export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:$_external_path
-done
-
-# go to first argument
-cd "$1"
-
-# run the rest of the arguments depending on the command
-if [[ "$2" =~ .*".py" ]]
-then
-    # command ends in '.py'
-    # assume that we are running the ldmx application
-    fire "${@:2}"
-else
-    # otherwise just run everything like normal
-    eval "${@:2}"
-fi
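
For reference, the filename convention parsed by the File helper above can be illustrated with a short, hypothetical example; the sample name is made up, only the key/value splitting mirrors the code in this diff:

    # a file named 'sample_ecalpn_geometry_v14.root' is split on '_' into key/value pairs
    fn = 'sample_ecalpn_geometry_v14'
    l = fn.split('_')                                                   # ['sample', 'ecalpn', 'geometry', 'v14']
    file_params = { l[i] : l[i+1] for i in range(len(l)-1) if i%2 == 0 }
    # -> {'sample': 'ecalpn', 'geometry': 'v14'}
    # with the default legendlabel_parameter the legend label is the first value ('ecalpn');
    # passing legendlabel_parameter = ['sample', 'geometry'] would give 'ecalpn_v14'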
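
A new plotter module would follow the same pattern as the ecal.py, hcal.py, photonuclear.py, and trigscint.py files added above. The sketch below is hypothetical: the module name (recoil.py) and the histogram paths are placeholders, and the module would still need to be imported from Validation/__init__.py for the @plotter registration to take effect:

    """Hypothetical plotting of recoil-tracker validation histograms"""

    from ._differ import Differ
    from ._plotter import plotter
    import logging

    log = logging.getLogger('recoil')

    @plotter(hist=True, event=False)
    def dqm(d : Differ, out_dir = None) :
        # placeholder histogram names; real DQM paths would go here
        histograms = [
            ('RecoilDQM/RecoilDQM_n_hits', 'Recoil Hit Multiplicity'),
            ('RecoilDQM/RecoilDQM_edep', 'Recoil Energy Dep [MeV]'),
            ]
        for path, name in histograms :
            log.info(f'plotting {path}')
            d.plot1d(path, name, out_dir = out_dir)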
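
The registry filled by the decorator is what ties these modules together: each entry maps a 'module.function' name to the (hist, event, func) tuple stored in _plotter.py. How the CLI consumes the registry is not part of this diff, but a dispatch loop consistent with that structure could look like the following sketch, where hist_differ and event_differ are assumed to be already-constructed Differ objects:

    from Validation._plotter import plotter

    for name, (hist, event, func) in plotter.__registry__.items() :
        if hist and event :
            func(hist_differ, event_differ, out_dir = 'plots')
        elif hist :
            func(hist_differ, out_dir = 'plots')
        else :
            func(event_differ, out_dir = 'plots')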