Back to home page

Project CMSSW displayed by LXR

 
 

    


File indexing completed on 2023-03-17 11:23:29

0001 #include "RecoVertex/PrimaryVertexProducer/interface/DAClusterizerInZ.h"
0002 #include "FWCore/MessageLogger/interface/MessageLogger.h"
0003 #include "DataFormats/GeometryCommonDetAlgo/interface/Measurement1D.h"
0004 #include "RecoVertex/VertexPrimitives/interface/VertexException.h"
0005 
0006 using namespace std;
0007 
namespace {

  // Strict-weak ordering of tracks by their z coordinate at the beam line.
  // Used only to sort a local copy of the track list for the debug printout
  // in DAClusterizerInZ::dump().
  bool recTrackLessZ1(const DAClusterizerInZ::track_t& tk1, const DAClusterizerInZ::track_t& tk2) {
    return tk1.z < tk2.z;
  }
}  // namespace
0014 
0015 vector<DAClusterizerInZ::track_t> DAClusterizerInZ::fill(const vector<reco::TransientTrack>& tracks) const {
0016   // prepare track data for clustering
0017   vector<track_t> tks;
0018   for (vector<reco::TransientTrack>::const_iterator it = tracks.begin(); it != tracks.end(); it++) {
0019     track_t t;
0020     t.z = ((*it).stateAtBeamLine().trackStateAtPCA()).position().z();
0021     double tantheta = tan(((*it).stateAtBeamLine().trackStateAtPCA()).momentum().theta());
0022     double phi = ((*it).stateAtBeamLine().trackStateAtPCA()).momentum().phi();
0023     //  get the beam-spot
0024     reco::BeamSpot beamspot = (it->stateAtBeamLine()).beamSpot();
0025     t.dz2 = pow((*it).track().dzError(), 2)  // track errror
0026             + (pow(beamspot.BeamWidthX() * cos(phi), 2) + pow(beamspot.BeamWidthY() * sin(phi), 2)) /
0027                   pow(tantheta, 2)  // beam-width induced
0028             + pow(vertexSize_, 2);  // intrinsic vertex size, safer for outliers and short lived decays
0029     if (d0CutOff_ > 0) {
0030       Measurement1D IP = (*it).stateAtBeamLine().transverseImpactParameter();       // error constains beamspot
0031       t.pi = 1. / (1. + exp(pow(IP.value() / IP.error(), 2) - pow(d0CutOff_, 2)));  // reduce weight for high ip tracks
0032     } else {
0033       t.pi = 1.;
0034     }
0035     t.tt = &(*it);
0036     t.Z = 1.;
0037     tks.push_back(t);
0038   }
0039   return tks;
0040 }
0041 
0042 double DAClusterizerInZ::Eik(const track_t& t, const vertex_t& k) const { return pow(t.z - k.z, 2) / t.dz2; }
0043 
// Single annealing (EM) iteration at inverse temperature beta, without an
// outlier/noise cluster: recompute the track-to-vertex assignment
// probabilities, then move every vertex prototype to its weighted mean z and
// renormalize the vertex weights pk.
double DAClusterizerInZ::update(double beta, vector<track_t>& tks, vector<vertex_t>& y) const {
  //update weights and vertex positions
  // mass constrained annealing without noise
  // returns the squared sum of changes of vertex positions

  unsigned int nt = tks.size();

  //initialize sums
  double sumpi = 0;
  for (vector<vertex_t>::iterator k = y.begin(); k != y.end(); k++) {
    k->se = 0;   // sum of assignment probabilities (numerator of the pk update)
    k->sw = 0;   // sum of weights
    k->swz = 0;  // weighted sum of track z positions
    k->swE = 0;  // weighted sum of assignment energies (critical temperature estimate)
    k->Tc = 0;
  }

  // loop over tracks
  for (unsigned int i = 0; i < nt; i++) {
    // update pik and Zi
    double Zi = 0.;  // partition function for track i (normalization over vertices)
    for (vector<vertex_t>::iterator k = y.begin(); k != y.end(); k++) {
      k->ei = exp(-beta * Eik(tks[i], *k));  // cache exponential for one track at a time
      Zi += k->pk * k->ei;
    }
    tks[i].Z = Zi;

    // normalization for pk
    if (tks[i].Z > 0) {
      sumpi += tks[i].pi;
      // accumulate weighted z and weights for vertex update
      for (vector<vertex_t>::iterator k = y.begin(); k != y.end(); k++) {
        k->se += tks[i].pi * k->ei / Zi;
        double w = k->pk * tks[i].pi * k->ei / Zi / tks[i].dz2;
        k->sw += w;
        k->swz += w * tks[i].z;
        k->swE += w * Eik(tks[i], *k);
      }
    } else {
      // track too far from all vertices (Zi underflowed); it still counts in
      // the pk normalization so the vertex weights stay mass-constrained
      sumpi += tks[i].pi;
    }

  }  // end of track loop

  // now update z and pk
  double delta = 0;  // accumulated squared displacement of the prototypes
  for (vector<vertex_t>::iterator k = y.begin(); k != y.end(); k++) {
    if (k->sw > 0) {
      double znew = k->swz / k->sw;
      delta += pow(k->z - znew, 2);
      k->z = znew;
      k->Tc = 2 * k->swE / k->sw;  // critical temperature of this vertex
    } else {
      edm::LogInfo("sumw") << "invalid sum of weights in fit: " << k->sw << endl;
      if (verbose_) {
        cout << " a cluster melted away ?  pk=" << k->pk << " sumw=" << k->sw << endl;
      }
      k->Tc = -1;  // flag: no valid critical temperature for an empty cluster
    }

    k->pk = k->pk * k->se / sumpi;  // mass-constrained vertex weight update
  }

  // return how much the prototypes moved
  return delta;
}
0110 
// Single annealing iteration at inverse temperature beta WITH an outlier
// (noise) term: rho0 * exp(-beta * dzCutOff_^2) enters every track's
// partition function, so far-away tracks are absorbed by the noise cluster
// instead of pulling on the vertices.  Unlike the no-noise overload above,
// the vertex weights pk are NOT renormalized here.
double DAClusterizerInZ::update(double beta, vector<track_t>& tks, vector<vertex_t>& y, double& rho0) const {
  // MVF style, no more vertex weights, update tracks weights and vertex positions, with noise
  // returns the squared sum of changes of vertex positions

  unsigned int nt = tks.size();

  //initialize sums
  for (vector<vertex_t>::iterator k = y.begin(); k != y.end(); k++) {
    k->se = 0;   // sum of assignment probabilities
    k->sw = 0;   // sum of weights
    k->swz = 0;  // weighted sum of track z positions
    k->swE = 0;  // weighted sum of assignment energies
    k->Tc = 0;
  }

  // loop over tracks
  for (unsigned int i = 0; i < nt; i++) {
    // update pik and Zi
    double Zi = rho0 * exp(-beta * dzCutOff_ * dzCutOff_);  // cut-off
    for (vector<vertex_t>::iterator k = y.begin(); k != y.end(); k++) {
      k->ei = exp(-beta * Eik(tks[i], *k));  // cache exponential for one track at a time
      Zi += k->pk * k->ei;
    }
    tks[i].Z = Zi;

    // normalization
    if (tks[i].Z > 0) {
      // accumulate weighted z and weights for vertex update
      for (vector<vertex_t>::iterator k = y.begin(); k != y.end(); k++) {
        k->se += tks[i].pi * k->ei / Zi;
        double w = k->pk * tks[i].pi * k->ei / Zi / tks[i].dz2;
        k->sw += w;
        k->swz += w * tks[i].z;
        k->swE += w * Eik(tks[i], *k);
      }
    }

  }  // end of track loop

  // now update z
  double delta = 0;  // accumulated squared displacement of the prototypes
  for (vector<vertex_t>::iterator k = y.begin(); k != y.end(); k++) {
    if (k->sw > 0) {
      double znew = k->swz / k->sw;
      delta += pow(k->z - znew, 2);
      k->z = znew;
      k->Tc = 2 * k->swE / k->sw;  // critical temperature of this vertex
    } else {
      edm::LogInfo("sumw") << "invalid sum of weights in fit: " << k->sw << endl;
      if (verbose_) {
        cout << " a cluster melted away ?  pk=" << k->pk << " sumw=" << k->sw << endl;
      }
      k->Tc = 0;
    }
  }

  // return how much the prototypes moved
  return delta;
}
0170 
// Merge the first adjacent pair of prototypes closer than 10 um in z,
// combining their weights (position is the pk-weighted mean).  At most one
// pair is merged per call; the caller loops until this returns false.
// NOTE(review): the nt parameter is unused; it appears to be kept only for
// signature compatibility with callers — confirm before removing.
bool DAClusterizerInZ::merge(vector<vertex_t>& y, int nt) const {
  // merge clusters that collapsed or never separated, return true if vertices were merged, false otherwise

  if (y.size() < 2)
    return false;

  for (vector<vertex_t>::iterator k = y.begin(); (k + 1) != y.end(); k++) {
    if (fabs((k + 1)->z - k->z) < 1.e-3) {  // with fabs if only called after freeze-out (splitAll() at highter T)
      double rho = k->pk + (k + 1)->pk;
      if (rho > 0) {
        // weighted mean position of the merged pair
        k->z = (k->pk * k->z + (k + 1)->z * (k + 1)->pk) / rho;
      } else {
        // both weights vanished: fall back to the unweighted midpoint
        k->z = 0.5 * (k->z + (k + 1)->z);
      }
      k->pk = rho;

      y.erase(k + 1);
      return true;
    }
  }

  return false;
}
0194 
// Temperature-aware merge: adjacent prototypes closer than 20 um are merged
// only if the estimated critical temperature of the merged vertex is below
// the current temperature (Tc * beta < 1), i.e. the pair would not re-split.
// At most one pair is merged per call.
bool DAClusterizerInZ::merge(vector<vertex_t>& y, double& beta) const {
  // merge clusters that collapsed or never separated,
  // only merge if the estimated critical temperature of the merged vertex is below the current temperature
  // return true if vertices were merged, false otherwise
  if (y.size() < 2)
    return false;

  for (vector<vertex_t>::iterator k = y.begin(); (k + 1) != y.end(); k++) {
    if (fabs((k + 1)->z - k->z) < 2.e-3) {
      double rho = k->pk + (k + 1)->pk;
      // combined swE: the cross term removes the energy of the pair's separation
      // NOTE(review): if both pk are zero, rho == 0 makes this 0/0 = NaN, so the
      // Tc*beta<1 test below is false and the pair is (silently) never merged —
      // confirm this degenerate case cannot matter before changing it.
      double swE = k->swE + (k + 1)->swE - k->pk * (k + 1)->pk / rho * pow((k + 1)->z - k->z, 2);
      double Tc = 2 * swE / (k->sw + (k + 1)->sw);

      if (Tc * beta < 1) {  // merged vertex would be stable at the current temperature
        if (rho > 0) {
          // weighted mean position of the merged pair
          k->z = (k->pk * k->z + (k + 1)->z * (k + 1)->pk) / rho;
        } else {
          k->z = 0.5 * (k->z + (k + 1)->z);
        }
        k->pk = rho;
        k->sw += (k + 1)->sw;
        k->swE = swE;
        k->Tc = Tc;
        y.erase(k + 1);
        return true;
      }
    }
  }

  return false;
}
0226 
// Outlier-rejection step: find the prototype with fewer than two "unique"
// tracks (tracks assigned to it with probability close to the maximum
// possible given the noise term) and the smallest total assignment, and
// remove it.  At most one prototype is erased per call; the caller loops.
bool DAClusterizerInZ::purge(vector<vertex_t>& y, vector<track_t>& tks, double& rho0, const double beta) const {
  // eliminate clusters with only one significant/unique track
  if (y.size() < 2)
    return false;

  unsigned int nt = tks.size();
  double sumpmin = nt;  // upper bound: no vertex can collect more than nt probability
  vector<vertex_t>::iterator k0 = y.end();  // candidate for elimination (end = none)
  for (vector<vertex_t>::iterator k = y.begin(); k != y.end(); k++) {
    int nUnique = 0;  // tracks essentially exclusive to this vertex
    double sump = 0;  // total assignment probability collected by this vertex
    // highest achievable assignment probability, given the noise term
    double pmax = k->pk / (k->pk + rho0 * exp(-beta * dzCutOff_ * dzCutOff_));
    for (unsigned int i = 0; i < nt; i++) {
      if (tks[i].Z > 0) {
        double p = k->pk * exp(-beta * Eik(tks[i], *k)) / tks[i].Z;
        sump += p;
        if ((p > 0.9 * pmax) && (tks[i].pi > 0)) {
          nUnique++;
        }
      }
    }

    // remember the weakest vertex with fewer than two unique tracks
    if ((nUnique < 2) && (sump < sumpmin)) {
      sumpmin = sump;
      k0 = k;
    }
  }

  if (k0 != y.end()) {
    if (verbose_) {
      cout << "eliminating prototype at " << k0->z << " with sump=" << sumpmin << endl;
    }
    //rho0+=k0->pk;
    y.erase(k0);
    return true;
  } else {
    return false;
  }
}
0266 
// Estimate the starting inverse temperature for the annealing: fit each
// prototype at T=infinity (weighted mean of all tracks), compute its critical
// temperature, and return a beta slightly above the first critical point
// (aligned to the cooling schedule) so that the first split happens after at
// least one cooling step.
double DAClusterizerInZ::beta0(double betamax, vector<track_t>& tks, vector<vertex_t>& y) const {
  double T0 = 0;  // max Tc for beta=0
  // estimate critical temperature from beta=0 (T=inf)
  unsigned int nt = tks.size();

  for (vector<vertex_t>::iterator k = y.begin(); k != y.end(); k++) {
    // vertex fit at T=inf
    double sumwz = 0;
    double sumw = 0;
    for (unsigned int i = 0; i < nt; i++) {
      double w = tks[i].pi / tks[i].dz2;  // resolution- and ip-weighted
      sumwz += w * tks[i].z;
      sumw += w;
    }
    k->z = sumwz / sumw;

    // estimate Tcrit, eventually do this in the same loop
    double a = 0, b = 0;
    for (unsigned int i = 0; i < nt; i++) {
      double dx = tks[i].z - (k->z);
      double w = tks[i].pi / tks[i].dz2;
      a += w * pow(dx, 2) / tks[i].dz2;
      b += w;
    }
    double Tc = 2. * a / b;  // the critical temperature of this vertex
    if (Tc > T0)
      T0 = Tc;
  }  // vertex loop (normally there should be only one vertex at beta=0)

  if (T0 > 1. / betamax) {
    // snap to the cooling schedule: the largest beta on the ladder below 1/T0
    return betamax / pow(coolingFactor_, int(log(T0 * betamax) / log(coolingFactor_)) - 1);
  } else {
    // ensure at least one annealing step
    return betamax / coolingFactor_;
  }
}
0303 
// Split every critical prototype (beta * Tc > 1) into two subclusters placed
// at the weighted mean z of the tracks on either side of it.  Vertices are
// processed in order of decreasing Tc to avoid left-right biases, and a split
// is only performed if the two subclusters end up significantly separated.
// Requires that update() was called just before at the same beta (relies on
// tks[i].Z and y[ik].Tc being current).
bool DAClusterizerInZ::split(double beta, vector<track_t>& tks, vector<vertex_t>& y, double threshold) const {
  // split only critical vertices (Tc >~ T=1/beta   <==>   beta*Tc>~1)
  // an update must have been made just before doing this (same beta, no merging)
  // returns true if at least one cluster was split

  double epsilon = 1e-3;  // split all single vertices by 10 um
  bool split = false;

  // avoid left-right biases by splitting highest Tc first

  // (Tc, index) pairs of the critical vertices, sorted by descending Tc
  std::vector<std::pair<double, unsigned int> > critical;
  for (unsigned int ik = 0; ik < y.size(); ik++) {
    if (beta * y[ik].Tc > 1.) {
      critical.push_back(make_pair(y[ik].Tc, ik));
    }
  }
  stable_sort(critical.begin(), critical.end(), std::greater<std::pair<double, unsigned int> >());

  for (unsigned int ic = 0; ic < critical.size(); ic++) {
    unsigned int ik = critical[ic].second;
    // estimate subcluster positions and weight
    double p1 = 0, z1 = 0, w1 = 0;  // left subcluster: probability, weighted z, weight
    double p2 = 0, z2 = 0, w2 = 0;  // right subcluster
    //double sumpi=0;
    for (unsigned int i = 0; i < tks.size(); i++) {
      if (tks[i].Z > 0) {
        //sumpi+=tks[i].pi;
        double p = y[ik].pk * exp(-beta * Eik(tks[i], y[ik])) / tks[i].Z * tks[i].pi;
        double w = p / tks[i].dz2;
        if (tks[i].z < y[ik].z) {
          p1 += p;
          z1 += w * tks[i].z;
          w1 += w;
        } else {
          p2 += p;
          z2 += w * tks[i].z;
          w2 += w;
        }
      }
    }
    // normalize; if a side is empty, place its subcluster epsilon away
    if (w1 > 0) {
      z1 = z1 / w1;
    } else {
      z1 = y[ik].z - epsilon;
    }
    if (w2 > 0) {
      z2 = z2 / w2;
    } else {
      z2 = y[ik].z + epsilon;
    }

    // reduce split size if there is not enough room
    if ((ik > 0) && (y[ik - 1].z >= z1)) {
      z1 = 0.5 * (y[ik].z + y[ik - 1].z);
    }
    if ((ik + 1 < y.size()) && (y[ik + 1].z <= z2)) {
      z2 = 0.5 * (y[ik].z + y[ik + 1].z);
    }

    // split if the new subclusters are significantly separated
    if ((z2 - z1) > epsilon) {
      split = true;
      vertex_t vnew;
      // the parent's weight pk is shared in proportion to the collected probability
      vnew.pk = p1 * y[ik].pk / (p1 + p2);
      y[ik].pk = p2 * y[ik].pk / (p1 + p2);
      vnew.z = z1;
      y[ik].z = z2;
      y.insert(y.begin() + ik, vnew);

      // adjust remaining pointers
      // (the insertion shifted every vertex at index > ik by one)
      for (unsigned int jc = ic; jc < critical.size(); jc++) {
        if (critical[jc].second > ik) {
          critical[jc].second++;
        }
      }
    }
  }

  //  stable_sort(y.begin(), y.end(), clusterLessZ);
  return split;
}
0385 
// Legacy splitting scheme (used when useTc_ is false): unconditionally split
// every isolated prototype into a +/- epsilon pair; prototypes that are not
// isolated are just nudged apart by epsilon.  The input list is assumed
// ordered in z; results are rebuilt into y1 and copied back.
void DAClusterizerInZ::splitAll(vector<vertex_t>& y) const {
  double epsilon = 1e-3;      // split all single vertices by 10 um
  double zsep = 2 * epsilon;  // split vertices that are isolated by at least zsep (vertices that haven't collapsed)
  vector<vertex_t> y1;

  for (vector<vertex_t>::iterator k = y.begin(); k != y.end(); k++) {
    if (((k == y.begin()) || (k - 1)->z < k->z - zsep) && (((k + 1) == y.end()) || (k + 1)->z > k->z + zsep)) {
      // isolated prototype, split
      vertex_t vnew;
      vnew.z = k->z - epsilon;
      (*k).z = k->z + epsilon;
      vnew.pk = 0.5 * (*k).pk;  // weight is shared equally between the halves
      (*k).pk = 0.5 * (*k).pk;
      y1.push_back(vnew);
      y1.push_back(*k);

    } else if (y1.empty() || (y1.back().z < k->z - zsep)) {
      // not isolated but far enough from the previously emitted vertex: keep as-is
      y1.push_back(*k);
    } else {
      // crowded: push the previous vertex left and this one right by epsilon
      y1.back().z -= epsilon;
      k->z += epsilon;
      y1.push_back(*k);
    }
  }  // vertex loop

  y = y1;
}
0413 
0414 DAClusterizerInZ::DAClusterizerInZ(const edm::ParameterSet& conf) {
0415   // some defaults to avoid uninitialized variables
0416   verbose_ = conf.getUntrackedParameter<bool>("verbose", false);
0417   useTc_ = true;
0418   betamax_ = 0.1;
0419   betastop_ = 1.0;
0420   coolingFactor_ = 0.8;
0421   maxIterations_ = 100;
0422   vertexSize_ = 0.05;  // 0.5 mm
0423   dzCutOff_ = 4.0;     // Adaptive Fitter uses 3.0 but that appears to be a bit tight here sometimes
0424 
0425   // configure
0426 
0427   double Tmin = conf.getParameter<double>("Tmin");
0428   vertexSize_ = conf.getParameter<double>("vertexSize");
0429   coolingFactor_ = conf.getParameter<double>("coolingFactor");
0430   d0CutOff_ = conf.getParameter<double>("d0CutOff");
0431   dzCutOff_ = conf.getParameter<double>("dzCutOff");
0432   maxIterations_ = 100;
0433   if (Tmin == 0) {
0434     cout << "DAClusterizerInZ: invalid Tmin" << Tmin << "  reset do default " << 1. / betamax_ << endl;
0435   } else {
0436     betamax_ = 1. / Tmin;
0437   }
0438 
0439   // for testing, negative cooling factor: revert to old splitting scheme
0440   if (coolingFactor_ < 0) {
0441     coolingFactor_ = -coolingFactor_;
0442     useTc_ = false;
0443   }
0444 }
0445 
// Debug printout of the current annealing state: vertex positions, critical
// temperatures and weights, and (at verbosity > 0) a per-track table with the
// assignment probabilities to every vertex plus the total energy E and free
// energy F.  Pure diagnostics; has no effect on the clustering.
void DAClusterizerInZ::dump(const double beta,
                            const vector<vertex_t>& y,
                            const vector<track_t>& tks0,
                            int verbosity) const {
  // copy and sort for nicer printout
  vector<track_t> tks;
  for (vector<track_t>::const_iterator t = tks0.begin(); t != tks0.end(); t++) {
    tks.push_back(*t);
  }
  stable_sort(tks.begin(), tks.end(), recTrackLessZ1);

  cout << "-----DAClusterizerInZ::dump ----" << endl;
  cout << "beta=" << beta << "   betamax= " << betamax_ << endl;
  cout << "                                                               z= ";
  cout.precision(4);
  for (vector<vertex_t>::const_iterator k = y.begin(); k != y.end(); k++) {
    cout << setw(8) << fixed << k->z;
  }
  cout << endl << "T=" << setw(15) << 1. / beta << "                                             Tc= ";
  for (vector<vertex_t>::const_iterator k = y.begin(); k != y.end(); k++) {
    cout << setw(8) << fixed << k->Tc;
  }

  cout << endl << "                                                               pk=";
  for (vector<vertex_t>::const_iterator k = y.begin(); k != y.end(); k++) {
    cout << setw(8) << setprecision(3) << fixed << k->pk;
  }
  cout << endl;

  if (verbosity > 0) {
    double E = 0, F = 0;  // energy and free energy accumulated over the track table
    cout << endl;
    cout << "----       z +/- dz                ip +/-dip       pt    phi  eta    weights  ----" << endl;
    cout.precision(4);
    for (unsigned int i = 0; i < tks.size(); i++) {
      if (tks[i].Z > 0) {
        F -= log(tks[i].Z) / beta;  // free-energy contribution of track i
      }
      double tz = tks[i].z;
      cout << setw(3) << i << ")" << setw(8) << fixed << setprecision(4) << tz << " +/-" << setw(6) << sqrt(tks[i].dz2);

      // '*' flags high-purity tracks
      if (tks[i].tt->track().quality(reco::TrackBase::highPurity)) {
        cout << " *";
      } else {
        cout << "  ";
      }
      // '+'/'-' flags a valid hit in the first pixel barrel layer
      if (tks[i].tt->track().hitPattern().hasValidHitInPixelLayer(PixelSubdetector::SubDetector::PixelBarrel, 1)) {
        cout << "+";
      } else {
        cout << "-";
      }
      // compact hit-pattern summary: pixel barrel/endcap layers, strip layers, lost outer hits
      cout << setw(1)
           << tks[i]
                  .tt->track()
                  .hitPattern()
                  .pixelBarrelLayersWithMeasurement();  // see DataFormats/TrackReco/interface/HitPattern.h
      cout << setw(1) << tks[i].tt->track().hitPattern().pixelEndcapLayersWithMeasurement();
      cout << setw(1) << hex
           << tks[i].tt->track().hitPattern().trackerLayersWithMeasurement() -
                  tks[i].tt->track().hitPattern().pixelLayersWithMeasurement()
           << dec;
      cout << "=" << setw(1) << hex
           << tks[i].tt->track().hitPattern().numberOfLostHits(reco::HitPattern::MISSING_OUTER_HITS) << dec;

      Measurement1D IP = tks[i].tt->stateAtBeamLine().transverseImpactParameter();
      cout << setw(8) << IP.value() << "+/-" << setw(6) << IP.error();
      // signed pt (pt * charge), then phi and eta
      cout << " " << setw(6) << setprecision(2) << tks[i].tt->track().pt() * tks[i].tt->track().charge();
      cout << " " << setw(5) << setprecision(2) << tks[i].tt->track().phi() << " " << setw(5) << setprecision(2)
           << tks[i].tt->track().eta();

      // one column per vertex: assignment probability of this track
      for (vector<vertex_t>::const_iterator k = y.begin(); k != y.end(); k++) {
        if ((tks[i].pi > 0) && (tks[i].Z > 0)) {
          //double p=pik(beta,tks[i],*k);
          double p = k->pk * exp(-beta * Eik(tks[i], *k)) / tks[i].Z;
          if (p > 0.0001) {
            cout << setw(8) << setprecision(3) << p;
          } else {
            cout << "    .   ";
          }
          E += p * Eik(tks[i], *k);
        } else {
          cout << "        ";
        }
      }
      cout << endl;
    }
    cout << endl << "T=" << 1 / beta << " E=" << E << " n=" << y.size() << "  F= " << F << endl << "----------" << endl;
  }
}
0535 
// Main deterministic-annealing driver.  Starting from a single prototype at
// infinite temperature, it cools (beta grows) while iterating update() to
// convergence, splitting critical prototypes and merging collapsed ones.
// After freeze-out it switches on outlier rejection (rho0), purges
// insignificant vertices while cooling further to betastop_, and finally
// assigns each track to the vertex where its assignment probability exceeds
// 0.5.  Returns one TransientVertex per surviving prototype (with a dummy
// position error).
vector<TransientVertex> DAClusterizerInZ::vertices(const vector<reco::TransientTrack>& tracks,
                                                   const int verbosity) const {
  vector<track_t> tks = fill(tracks);
  unsigned int nt = tracks.size();
  double rho0 = 0.0;  // start with no outlier rejection

  vector<TransientVertex> clusters;
  if (tks.empty())
    return clusters;

  vector<vertex_t> y;  // the vertex prototypes

  // initialize:single vertex at infinite temperature
  vertex_t vstart;
  vstart.z = 0.;
  vstart.pk = 1.;
  y.push_back(vstart);
  int niter = 0;  // number of iterations

  // estimate first critical temperature
  double beta = beta0(betamax_, tks, y);
  // iterate to convergence at the starting temperature
  niter = 0;
  while ((update(beta, tks, y) > 1.e-6) && (niter++ < maxIterations_)) {
  }

  // annealing loop, stop when T<Tmin  (i.e. beta>1/Tmin)
  while (beta < betamax_) {
    if (useTc_) {
      // Tc-based scheme: merge collapsed pairs, split critical vertices, then cool
      update(beta, tks, y);
      while (merge(y, beta)) {
        update(beta, tks, y);
      }
      split(beta, tks, y, 1.);
      beta = beta / coolingFactor_;
    } else {
      // legacy scheme: cool first, then split everything
      beta = beta / coolingFactor_;
      splitAll(y);
    }

    // make sure we are not too far from equilibrium before cooling further
    niter = 0;
    while ((update(beta, tks, y) > 1.e-6) && (niter++ < maxIterations_)) {
    }
  }

  if (useTc_) {
    // last round of splitting, make sure no critical clusters are left
    update(beta, tks, y);
    while (merge(y, beta)) {
      update(beta, tks, y);
    }
    unsigned int ntry = 0;
    while (split(beta, tks, y, 1.) && (ntry++ < 10)) {
      niter = 0;
      while ((update(beta, tks, y) > 1.e-6) && (niter++ < maxIterations_)) {
      }
      merge(y, beta);
      update(beta, tks, y);
    }
  } else {
    // merge collapsed clusters
    while (merge(y, beta)) {
      update(beta, tks, y);
    }
    if (verbose_) {
      cout << "dump after 1st merging " << endl;
      dump(beta, y, tks, 2);
    }
  }

  // switch on outlier rejection
  rho0 = 1. / nt;
  // reset all vertex weights to 1 before the noise term takes over
  for (vector<vertex_t>::iterator k = y.begin(); k != y.end(); k++) {
    k->pk = 1.;
  }  // democratic
  niter = 0;
  while ((update(beta, tks, y, rho0) > 1.e-8) && (niter++ < maxIterations_)) {
  }
  if (verbose_) {
    cout << "rho0=" << rho0 << " niter=" << niter << endl;
    dump(beta, y, tks, 2);
  }

  // merge again  (some cluster split by outliers collapse here)
  while (merge(y, tks.size())) {
  }
  if (verbose_) {
    cout << "dump after 2nd merging " << endl;
    dump(beta, y, tks, 2);
  }

  // continue from freeze-out to Tstop (=1) without splitting, eliminate insignificant vertices
  while (beta <= betastop_) {
    while (purge(y, tks, rho0, beta)) {
      niter = 0;
      while ((update(beta, tks, y, rho0) > 1.e-6) && (niter++ < maxIterations_)) {
      }
    }
    beta /= coolingFactor_;
    niter = 0;
    while ((update(beta, tks, y, rho0) > 1.e-6) && (niter++ < maxIterations_)) {
    }
  }

  //   // new, one last round of cleaning at T=Tstop
  //   while(purge(y,tks,rho0, beta)){
  //     niter=0; while((update(beta, tks,y,rho0) > 1.e-6)  && (niter++ < maxIterations_)){  }
  //   }

  if (verbose_) {
    cout << "Final result, rho0=" << rho0 << endl;
    dump(beta, y, tks, 2);
  }

  // select significant tracks and use a TransientVertex as a container
  GlobalError dummyError;

  // ensure correct normalization of probabilities, should make double assginment reasonably impossible
  for (unsigned int i = 0; i < nt; i++) {
    // recompute each track's partition function with the final prototypes
    tks[i].Z = rho0 * exp(-beta * dzCutOff_ * dzCutOff_);
    for (vector<vertex_t>::iterator k = y.begin(); k != y.end(); k++) {
      tks[i].Z += k->pk * exp(-beta * Eik(tks[i], *k));
    }
  }

  for (vector<vertex_t>::iterator k = y.begin(); k != y.end(); k++) {
    GlobalPoint pos(0, 0, k->z);
    vector<reco::TransientTrack> vertexTracks;
    for (unsigned int i = 0; i < nt; i++) {
      if (tks[i].Z > 0) {
        double p = k->pk * exp(-beta * Eik(tks[i], *k)) / tks[i].Z;
        if ((tks[i].pi > 0) && (p > 0.5)) {
          vertexTracks.push_back(*(tks[i].tt));
          tks[i].Z = 0;
        }  // setting Z=0 excludes double assignment
      }
    }
    TransientVertex v(pos, dummyError, vertexTracks, 5);
    clusters.push_back(v);
  }

  return clusters;
}
0679 
0680 vector<vector<reco::TransientTrack> > DAClusterizerInZ::clusterize(const vector<reco::TransientTrack>& tracks) const {
0681   if (verbose_) {
0682     cout << "###################################################" << endl;
0683     cout << "# DAClusterizerInZ::clusterize   nt=" << tracks.size() << endl;
0684     cout << "###################################################" << endl;
0685   }
0686 
0687   vector<vector<reco::TransientTrack> > clusters;
0688   vector<TransientVertex> pv = vertices(tracks);
0689 
0690   if (verbose_) {
0691     cout << "# DAClusterizerInZ::clusterize   pv.size=" << pv.size() << endl;
0692   }
0693   if (pv.empty()) {
0694     return clusters;
0695   }
0696 
0697   // fill into clusters and merge
0698   vector<reco::TransientTrack> aCluster = pv.begin()->originalTracks();
0699 
0700   for (vector<TransientVertex>::iterator k = pv.begin() + 1; k != pv.end(); k++) {
0701     if (fabs(k->position().z() - (k - 1)->position().z()) > (2 * vertexSize_)) {
0702       // close a cluster
0703       clusters.push_back(aCluster);
0704       aCluster.clear();
0705     }
0706     for (unsigned int i = 0; i < k->originalTracks().size(); i++) {
0707       aCluster.push_back(k->originalTracks().at(i));
0708     }
0709   }
0710   clusters.push_back(aCluster);
0711 
0712   return clusters;
0713 }