/**
 * \file GEMSegmentAlgorithm.cc
 *  based on CSCSegAlgoST.cc
 *
 *  \authors: Piet Verwilligen, Jason Lee
 */

#include "GEMSegmentAlgorithm.h"
#include "MuonSegFit.h"

#include "FWCore/ParameterSet/interface/ParameterSet.h"
#include "FWCore/MessageLogger/interface/MessageLogger.h"
#include "DataFormats/GeometryVector/interface/GlobalPoint.h"
#include "DataFormats/Math/interface/deltaPhi.h"
#include "CommonTools/Statistics/interface/ChiSquaredProbability.h"

#include <algorithm>
#include <cmath>
#include <iomanip>  // for std::setw used in the debug printout below
#include <iostream>
#include <string>

/* Constructor
 *
 */
GEMSegmentAlgorithm::GEMSegmentAlgorithm(const edm::ParameterSet& ps)
    : GEMSegmentAlgorithmBase(ps), myName("GEMSegmentAlgorithm") {
  minHitsPerSegment = ps.getParameter<unsigned int>("minHitsPerSegment");
  preClustering = ps.getParameter<bool>("preClustering");
  dXclusBoxMax = ps.getParameter<double>("dXclusBoxMax");
  dYclusBoxMax = ps.getParameter<double>("dYclusBoxMax");
  preClustering_useChaining = ps.getParameter<bool>("preClusteringUseChaining");
  dPhiChainBoxMax = ps.getParameter<double>("dPhiChainBoxMax");
  dEtaChainBoxMax = ps.getParameter<double>("dEtaChainBoxMax");
  maxRecHitsInCluster = ps.getParameter<int>("maxRecHitsInCluster");
  clusterOnlySameBXRecHits = ps.getParameter<bool>("clusterOnlySameBXRecHits");

  // maybe to be used in the future ???
  // Pruning                = ps.getParameter<bool>("Pruning");
  // BrutePruning           = ps.getParameter<bool>("BrutePruning");
  // maxRecHitsInCluster is the maximal number of hits in a precluster that is being processed.
  // This cut is intended to remove messy events. Currently nothing is returned if there are
  // more than maxRecHitsInCluster hits. It could be useful to return an estimate of the
  // cluster position, which is available.
  // maxRecHitsInCluster    = ps.getParameter<int>("maxRecHitsInCluster");
  // onlyBestSegment        = ps.getParameter<bool>("onlyBestSegment");

  // CSC uses pruning to remove clearly bad hits, using as much info from the rechits as possible: charge, position, timing, ...
  // In fits with bad chi^2 they look for the worst hit (the hit with an abnormally large residual);
  // if a worst hit is found, they refit without it and keep the refit if it is considerably better than the original fit.
}
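
// Illustrative sketch (editorial addition, not part of the original file): the parameters
// read above would normally come from a CMSSW configuration. A hedged example of how
// defaults could be declared with edm::ParameterSetDescription is shown below; the
// numerical values are placeholders, not the official defaults.
//
//   edm::ParameterSetDescription desc;
//   desc.add<unsigned int>("minHitsPerSegment", 2);     // placeholder value
//   desc.add<bool>("preClustering", true);              // placeholder value
//   desc.add<double>("dXclusBoxMax", 1.);               // placeholder value
//   desc.add<double>("dYclusBoxMax", 5.);               // placeholder value
//   desc.add<bool>("preClusteringUseChaining", true);   // placeholder value
//   desc.add<double>("dPhiChainBoxMax", 0.02);          // placeholder value
//   desc.add<double>("dEtaChainBoxMax", 0.05);          // placeholder value
//   desc.add<int>("maxRecHitsInCluster", 24);           // placeholder value
//   desc.add<bool>("clusterOnlySameBXRecHits", true);   // placeholder value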

/* Destructor
 *
 */
GEMSegmentAlgorithm::~GEMSegmentAlgorithm() {}

std::vector<GEMSegment> GEMSegmentAlgorithm::run(const GEMEnsemble& ensemble, const EnsembleHitContainer& rechits) {
  // pre-cluster rechits and loop over all sub clusters separately
  std::vector<GEMSegment> segments_temp;
  std::vector<GEMSegment> segments;
  ProtoSegments rechits_clusters;  // this is a collection of groups of rechits

  if (preClustering) {
    // run a pre-clusterer on the given rechits to split obviously separated segment seeds:
    if (preClustering_useChaining) {
      // it uses X,Y,Z information; there are no configurable parameters used;
      // the X, Y, Z "cuts" are just (much) wider than reasonable high pt segments
      edm::LogVerbatim("GEMSegmentAlgorithm") << "[GEMSegmentAlgorithm::run] preClustering :: use Chaining";
      rechits_clusters = this->chainHits(ensemble, rechits);
    } else {
      // it uses X,Y information + configurable parameters
      edm::LogVerbatim("GEMSegmentAlgorithm") << "[GEMSegmentAlgorithm::run] Clustering";
      rechits_clusters = this->clusterHits(ensemble, rechits);
    }
    // loop over the found clusters:
    edm::LogVerbatim("GEMSegmentAlgorithm") << "[GEMSegmentAlgorithm::run] Loop over clusters and build segments";
    for (auto sub_rechits = rechits_clusters.begin(); sub_rechits != rechits_clusters.end(); ++sub_rechits) {
      // clear the buffer for the subset of segments:
      segments_temp.clear();
      // build the subset of segments:
      this->buildSegments(ensemble, (*sub_rechits), segments_temp);
      // add the found subset of segments to the collection of all segments in this chamber:
      segments.insert(segments.end(), segments_temp.begin(), segments_temp.end());
    }

    return segments;
  } else {
    this->buildSegments(ensemble, rechits, segments);
    return segments;
  }
}
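
// Illustrative usage sketch (editorial addition, not part of the original file):
// run() takes a GEMEnsemble, i.e. a pair of the GEMSuperChamber pointer and a map from
// raw GEMDetId to the corresponding GEMEtaPartition pointers, plus the rechits of that
// chamber. Assuming such objects are at hand (the exact typedefs live in
// GEMSegmentAlgorithmBase.h; the variable names below are assumptions), a caller might
// look like:
//
//   GEMSegmentAlgorithm::GEMEnsemble ensemble(superChamber, partitionMap);
//   GEMSegmentAlgorithm::EnsembleHitContainer rechits = {&rh1, &rh2, &rh3};  // hypothetical hits
//   std::vector<GEMSegment> segs = algo.run(ensemble, rechits);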

// ********************************************************************;
GEMSegmentAlgorithm::ProtoSegments GEMSegmentAlgorithm::clusterHits(const GEMEnsemble& ensemble,
                                                                    const EnsembleHitContainer& rechits) {
  // think how to implement BX requirement here

  ProtoSegments rechits_clusters;  // this is a collection of groups of rechits

  float dXclus_box = 0.0;
  float dYclus_box = 0.0;

  ProtoSegments seeds;
  seeds.reserve(rechits.size());

  std::vector<float> running_meanX;
  running_meanX.reserve(rechits.size());
  std::vector<float> running_meanY;
  running_meanY.reserve(rechits.size());

  std::vector<float> seed_minX;
  seed_minX.reserve(rechits.size());
  std::vector<float> seed_maxX;
  seed_maxX.reserve(rechits.size());
  std::vector<float> seed_minY;
  seed_minY.reserve(rechits.size());
  std::vector<float> seed_maxY;
  seed_maxY.reserve(rechits.size());

  // split rechits into subvectors and return vector of vectors:
  // Loop over rechits
  // Create one seed per hit
  for (unsigned int i = 0; i < rechits.size(); ++i) {
    seeds.push_back(EnsembleHitContainer(1, rechits[i]));

    GEMDetId rhID = rechits[i]->gemId();
    const GEMEtaPartition* rhEP = (ensemble.second.find(rhID.rawId()))->second;
    if (!rhEP)
      throw cms::Exception("GEMEtaPartition not found")
          << "Corresponding GEMEtaPartition to GEMDetId: " << rhID << " not found in the GEMEnsemble";
    const GEMSuperChamber* rhCH = ensemble.first;
    LocalPoint rhLP_inEtaPartFrame = rechits[i]->localPosition();
    GlobalPoint rhGP_inCMSFrame = rhEP->toGlobal(rhLP_inEtaPartFrame);
    LocalPoint rhLP_inChamberFrame = rhCH->toLocal(rhGP_inCMSFrame);

    running_meanX.push_back(rhLP_inChamberFrame.x());
    running_meanY.push_back(rhLP_inChamberFrame.y());

    // set min/max X and Y for box containing the hits in the precluster:
    seed_minX.push_back(rhLP_inChamberFrame.x());
    seed_maxX.push_back(rhLP_inChamberFrame.x());
    seed_minY.push_back(rhLP_inChamberFrame.y());
    seed_maxY.push_back(rhLP_inChamberFrame.y());
  }
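
  // Editorial note (not part of the original file): each rechit's localPosition() is
  // defined in its own GEMEtaPartition frame; the toGlobal/toLocal chain above
  // transports every hit into the common GEMSuperChamber frame, so that the running
  // means and bounding boxes of different seeds can be compared directly below.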

  // merge clusters that are too close
  // measure distance between final "running mean"
  for (size_t NNN = 0; NNN < seeds.size(); ++NNN) {
    for (size_t MMM = NNN + 1; MMM < seeds.size(); ++MMM) {
      if (running_meanX[MMM] == running_max || running_meanX[NNN] == running_max) {
        LogDebug("GEMSegmentAlgorithm") << "[GEMSegmentAlgorithm::clusterHits]: ALARM! Skipping used seeds, this "
                                           "should not happen - inform developers!";
        continue;  //skip seeds that have been used
      }

      // calculate cut criteria for simple running mean distance cut:
      //dXclus = fabs(running_meanX[NNN] - running_meanX[MMM]);
      //dYclus = fabs(running_meanY[NNN] - running_meanY[MMM]);
      // calculate minimal distance between precluster boxes containing the hits:
      if (running_meanX[NNN] > running_meanX[MMM])
        dXclus_box = seed_minX[NNN] - seed_maxX[MMM];
      else
        dXclus_box = seed_minX[MMM] - seed_maxX[NNN];
      if (running_meanY[NNN] > running_meanY[MMM])
        dYclus_box = seed_minY[NNN] - seed_maxY[MMM];
      else
        dYclus_box = seed_minY[MMM] - seed_maxY[NNN];

      if (dXclus_box < dXclusBoxMax && dYclus_box < dYclusBoxMax) {
        // merge clusters!
        // merge by adding seed NNN to seed MMM and erasing seed NNN

        // calculate running mean for the merged seed:
        if (seeds[NNN].size() + seeds[MMM].size() != 0) {
          running_meanX[MMM] = (running_meanX[NNN] * seeds[NNN].size() + running_meanX[MMM] * seeds[MMM].size()) /
                               (seeds[NNN].size() + seeds[MMM].size());
          running_meanY[MMM] = (running_meanY[NNN] * seeds[NNN].size() + running_meanY[MMM] * seeds[MMM].size()) /
                               (seeds[NNN].size() + seeds[MMM].size());
        }

        // update min/max X and Y for box containing the hits in the merged cluster:
        if (seed_minX[NNN] < seed_minX[MMM])
          seed_minX[MMM] = seed_minX[NNN];
        if (seed_maxX[NNN] > seed_maxX[MMM])
          seed_maxX[MMM] = seed_maxX[NNN];
        if (seed_minY[NNN] < seed_minY[MMM])
          seed_minY[MMM] = seed_minY[NNN];
        if (seed_maxY[NNN] > seed_maxY[MMM])
          seed_maxY[MMM] = seed_maxY[NNN];

        // add seed NNN to MMM (lower to larger number)
        seeds[MMM].insert(seeds[MMM].end(), seeds[NNN].begin(), seeds[NNN].end());

        // mark seed NNN as used (at the moment just set running mean to 999999.)
        running_meanX[NNN] = running_max;
        running_meanY[NNN] = running_max;
        // we have merged seed NNN into the higher seed MMM - need to continue with
        // the next seed (NNN+1)
        break;
      }
    }
  }
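
  // Editorial worked example (not from the original file), illustrating the weighted
  // running-mean update above with hypothetical numbers: if seed NNN holds 1 hit with
  // running_meanX = 2.0 cm and seed MMM holds 2 hits with running_meanX = 5.0 cm, the
  // merged mean becomes
  //   (2.0 * 1 + 5.0 * 2) / (1 + 2) = 4.0 cm,
  // the bounding box of MMM is widened to the extremes of the two boxes, and seed NNN
  // is retired by setting its running means to running_max.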

  // hand over the final seeds to the output
  // would be more elegant if we could do the above step while
  // erasing the merged ones, rather than just marking them as used
  for (size_t NNN = 0; NNN < seeds.size(); ++NNN) {
    if (running_meanX[NNN] == running_max)
      continue;  //skip seeds that have been marked as used up in merging
    rechits_clusters.push_back(seeds[NNN]);
  }

  return rechits_clusters;
}

GEMSegmentAlgorithm::ProtoSegments GEMSegmentAlgorithm::chainHits(const GEMEnsemble& ensemble,
                                                                  const EnsembleHitContainer& rechits) {
  ProtoSegments rechits_chains;
  ProtoSegments seeds;
  seeds.reserve(rechits.size());
  std::vector<bool> usedCluster(rechits.size(), false);

  // split rechits into subvectors and return vector of vectors:
  // Loop over rechits
  // Create one seed per hit
  for (unsigned int i = 0; i < rechits.size(); ++i)
    seeds.push_back(EnsembleHitContainer(1, rechits[i]));

  // merge chains that are too close ("touch" each other)
  for (size_t NNN = 0; NNN < seeds.size(); ++NNN) {
    for (size_t MMM = NNN + 1; MMM < seeds.size(); ++MMM) {
      if (usedCluster[MMM] || usedCluster[NNN]) {
        continue;
      }
      // it all depends on the way we define "good";
      // try not to "cluster" the hits but to "chain" them;
      // it does the clustering but also does a better job
      // for inclined tracks (not clustering them together;
      // crossed tracks would still be clustered together)
      // 22.12.09: In fact it is not much different
      // from the "clustering", we just introduce another
      // variable in the game - Z. And it makes sense
      // to re-introduce Y (or actually the wire group number)
      // in a similar way as for the strip number - see
      // the code below.
      bool goodToMerge = isGoodToMerge(ensemble, seeds[NNN], seeds[MMM]);
      if (goodToMerge) {
        // merge chains!
        // merge by adding seed NNN to seed MMM and erasing seed NNN

        // add seed NNN to MMM (lower to larger number)
        seeds[MMM].insert(seeds[MMM].end(), seeds[NNN].begin(), seeds[NNN].end());

        // mark seed NNN as used
        usedCluster[NNN] = true;
        // we have merged seed NNN into the higher seed MMM - need to continue with
        // the next seed (NNN+1)
        break;
      }
    }
  }

  // hand over the final seeds to the output
  // would be more elegant if we could do the above step while
  // erasing the merged ones, rather than just marking them as used
  for (size_t NNN = 0; NNN < seeds.size(); ++NNN) {
    if (usedCluster[NNN])
      continue;  //skip seeds that have been marked as used up in merging
    rechits_chains.push_back(seeds[NNN]);
  }

  //***************************************************************

  return rechits_chains;
}

bool GEMSegmentAlgorithm::isGoodToMerge(const GEMEnsemble& ensemble,
                                        const EnsembleHitContainer& newChain,
                                        const EnsembleHitContainer& oldChain) {
  bool phiRequirementOK = false;  // once it is true in the loop, it is ok to merge
  bool etaRequirementOK = false;  // once it is true in the loop, it is ok to merge
  bool bxRequirementOK = false;   // once it is true in the loop, it is ok to merge

  for (size_t iRH_new = 0; iRH_new < newChain.size(); ++iRH_new) {
    int layer_new = (newChain[iRH_new]->gemId().station() - 1) * 2 + newChain[iRH_new]->gemId().layer();

    const GEMEtaPartition* rhEP = (ensemble.second.find(newChain[iRH_new]->gemId().rawId()))->second;
    GlobalPoint pos_new = rhEP->toGlobal(newChain[iRH_new]->localPosition());

    for (size_t iRH_old = 0; iRH_old < oldChain.size(); ++iRH_old) {
      int layer_old = (oldChain[iRH_old]->gemId().station() - 1) * 2 + oldChain[iRH_old]->gemId().layer();
      // Layers - hits on the same layer are not allowed: merge only if abs(layer_new - layer_old) > 0, reject if they are equal
      if (layer_new == layer_old)
        return false;

      const GEMEtaPartition* oldrhEP = (ensemble.second.find(oldChain[iRH_old]->gemId().rawId()))->second;
      GlobalPoint pos_old = oldrhEP->toGlobal(oldChain[iRH_old]->localPosition());

      // Eta & Phi - to be chained, two hits also need to be "close" in phi and eta
      if (phiRequirementOK == false)
        phiRequirementOK = std::abs(reco::deltaPhi(float(pos_new.phi()), float(pos_old.phi()))) < dPhiChainBoxMax;
      if (etaRequirementOK == false)
        etaRequirementOK = std::abs(pos_new.eta() - pos_old.eta()) < dEtaChainBoxMax;
      // and they should have a time difference compatible with the hypothesis
      // that the rechits originate from the same particle, but were detected in different layers
      if (bxRequirementOK == false) {
        if (!clusterOnlySameBXRecHits) {
          bxRequirementOK = true;
        } else {
          if (newChain[iRH_new]->BunchX() == oldChain[iRH_old]->BunchX())
            bxRequirementOK = true;
        }
      }

      if (phiRequirementOK && etaRequirementOK && bxRequirementOK)
        return true;
    }
  }
  return false;
}
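
// Editorial note with an illustrative example (not from the original file): the phi
// requirement above uses reco::deltaPhi, which wraps the difference into (-pi, pi], so
// hits on opposite sides of the phi = +/-pi boundary are still treated as close. For
// instance, with hypothetical values
//   pos_new.phi() = +3.13 rad and pos_old.phi() = -3.13 rad,
// a naive difference would be 6.26 rad, while
//   reco::deltaPhi(3.13f, -3.13f) is about -0.023 rad,
// which would pass a dPhiChainBoxMax cut of, say, 0.03 rad (placeholder number).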

void GEMSegmentAlgorithm::buildSegments(const GEMEnsemble& ensemble,
                                        const EnsembleHitContainer& rechits,
                                        std::vector<GEMSegment>& gemsegs) {
  if (rechits.size() < minHitsPerSegment)
    return;

  MuonSegFit::MuonRecHitContainer muonRecHits;
  proto_segment.clear();

  // select hits from the ensemble and convert them to the chamber frame
  const GEMSuperChamber* suCh = ensemble.first;
  for (auto rh = rechits.begin(); rh != rechits.end(); rh++) {
    proto_segment.push_back(*rh);

    // for segFit - using local point in chamber frame
    const GEMEtaPartition* thePartition = (ensemble.second.find((*rh)->gemId()))->second;
    GlobalPoint gp = thePartition->toGlobal((*rh)->localPosition());
    const LocalPoint lp = suCh->toLocal(gp);

    GEMRecHit* newRH = (*rh)->clone();
    newRH->setPosition(lp);
    MuonSegFit::MuonRecHitPtr trkRecHit(newRH);
    muonRecHits.push_back(trkRecHit);
  }

#ifdef EDM_ML_DEBUG  // the lines below are compiled only in debug mode
  edm::LogVerbatim("GEMSegmentAlgorithm")
      << "[GEMSegmentAlgorithm::buildSegments] will now try to fit a GEMSegment from collection of " << rechits.size()
      << " GEM RecHits";
  for (auto rh = rechits.begin(); rh != rechits.end(); ++rh) {
    auto gemid = (*rh)->gemId();
    auto rhLP = (*rh)->localPosition();
    edm::LogVerbatim("GEMSegmentAlgorithm")
        << "[RecHit :: Loc x = " << std::showpos << std::setw(9) << rhLP.x() << " Loc y = " << std::showpos
        << std::setw(9) << rhLP.y() << " BX = " << std::showpos << (*rh)->BunchX() << " -- " << gemid.rawId() << " = "
        << gemid << " ]";
  }
#endif

  // The actual fit on all hits of the vector of the selected Tracking RecHits:
  sfit_ = std::make_unique<MuonSegFit>(muonRecHits);
  bool goodfit = sfit_->fit();
  edm::LogVerbatim("GEMSegmentAlgorithm")
      << "[GEMSegmentAlgorithm::buildSegments] GEMSegment fit done :: fit is good = " << goodfit;

  // quit function if fit was not OK
  if (!goodfit) {
    for (auto rh : muonRecHits)
      rh.reset();
    return;
  }
  // obtain all information necessary to make the segment:
  LocalPoint protoIntercept = sfit_->intercept();
  LocalVector protoDirection = sfit_->localdir();
  AlgebraicSymMatrix protoErrors = sfit_->covarianceMatrix();
  double protoChi2 = sfit_->chi2();
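
  // Editorial sketch (not part of the original file): ChiSquaredProbability.h is
  // included at the top of this file, so the fit quality could also be expressed as a
  // probability. A hedged example, assuming MuonSegFit also provides ndof() alongside
  // chi2() (an assumption, check MuonSegFit.h before using):
  //
  //   double protoChi2Prob = ChiSquaredProbability(protoChi2, sfit_->ndof());
  //   edm::LogVerbatim("GEMSegmentAlgorithm") << "fit chi2 probability = " << protoChi2Prob;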

  // Calculate the bunch crossing of the GEM Segment
  float bx = 0.0;
  for (auto rh = rechits.begin(); rh != rechits.end(); ++rh) {
    bx += (*rh)->BunchX();
  }
  if (!rechits.empty())
    bx = bx * 1.0 / (rechits.size());

  // Calculate the central value and uncertainty of the segment time
  // if we want to study impact of 2-3ns time resolution on GEM Segment
  // (if there will be TDCs in readout and not just BX determination)
  // then implement tof() method for rechits and use this here
  /*
  float averageTime=0.;
  for (auto rh=rechits.begin(); rh!=rechits.end(); ++rh){
    const GEMEtaPartition* thePartition = (ensemble.second.find((*rh)->gemId()))->second;
    GlobalPoint pos = thePartition->toGlobal((*rh)->localPosition());
    float tof = pos.mag() * 0.01 / 0.2997925 + 25.0*(*rh)->BunchX();
    averageTime += tof;
  }
  if(rechits.size() != 0) averageTime=averageTime/(rechits.size());
  float timeUncrt=0.;
  for (auto rh=rechits.begin(); rh!=rechits.end(); ++rh){
    const GEMEtaPartition* thePartition = (ensemble.second.find((*rh)->gemId()))->second;
    GlobalPoint pos = thePartition->toGlobal((*rh)->localPosition());
    float tof = pos.mag() * 0.01 / 0.2997925 + 25.0*(*rh)->BunchX();
    timeUncrt += pow(tof-averageTime,2);
  }
  if(rechits.size() != 0) timeUncrt=timeUncrt/(rechits.size());
  timeUncrt = sqrt(timeUncrt);
  */

  // save all information inside GEMSegment
  edm::LogVerbatim("GEMSegmentAlgorithm")
      << "[GEMSegmentAlgorithm::buildSegments] will now wrap fit info in GEMSegment dataformat";
  GEMSegment tmp(proto_segment, protoIntercept, protoDirection, protoErrors, protoChi2, bx);
  // GEMSegment tmp(proto_segment, protoIntercept, protoDirection, protoErrors, protoChi2, averageTime, timeUncrt);

  edm::LogVerbatim("GEMSegmentAlgorithm")
      << "[GEMSegmentAlgorithm::buildSegments] GEMSegment made in " << tmp.gemDetId();
  edm::LogVerbatim("GEMSegmentAlgorithm") << "[GEMSegmentAlgorithm::buildSegments] " << tmp;

  for (auto rh : muonRecHits)
    rh.reset();
  gemsegs.push_back(tmp);
}
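
// Editorial usage sketch (not part of the original file): downstream code typically
// reads the produced segments through the GEMSegment interface. Assuming the usual
// accessors on GEMSegment (localPosition, localDirection, chi2, nRecHits, bunchX -
// check DataFormats/GEMRecHit/interface/GEMSegment.h before relying on them), a
// consumer might do something like:
//
//   for (const GEMSegment& seg : gemsegs) {
//     edm::LogVerbatim("GEMSegmentAlgorithm")
//         << "segment in " << seg.gemDetId() << " with " << seg.nRecHits()
//         << " hits, chi2 = " << seg.chi2() << ", BX = " << seg.bunchX();
//   }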