Back to home page

Project CMSSW displayed by LXR

 
 

    


File indexing completed on 2024-04-06 12:26:23

0001 #ifndef RecoLocalTracker_SiPixelRecHits_plugins_gpuPixelRecHits_h
0002 #define RecoLocalTracker_SiPixelRecHits_plugins_gpuPixelRecHits_h
0003 
#include <algorithm>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <limits>

#include "CUDADataFormats/BeamSpot/interface/BeamSpotCUDA.h"
#include "CUDADataFormats/SiPixelCluster/interface/gpuClusteringConstants.h"
#include "CUDADataFormats/SiPixelDigi/interface/SiPixelDigisCUDA.h"
#include "CUDADataFormats/TrackingRecHit/interface/TrackingRecHitsUtilities.h"
#include "DataFormats/Math/interface/approx_atan2.h"
#include "HeterogeneousCore/CUDAUtilities/interface/cuda_assert.h"
#include "RecoLocalTracker/SiPixelRecHits/interface/pixelCPEforGPU.h"
0015 
0016 //#define GPU_DEBUG
0017 
0018 namespace gpuPixelRecHits {
0019 
0020   template <typename TrackerTraits>
0021   __global__ void getHits(pixelCPEforGPU::ParamsOnGPUT<TrackerTraits> const* __restrict__ cpeParams,
0022                           BeamSpotPOD const* __restrict__ bs,
0023                           SiPixelDigisSoA::ConstView digis,
0024                           int numElements,
0025                           SiPixelClustersCUDASOAConstView clusters,
0026                           TrackingRecHitSoAView<TrackerTraits> hits) {
0027     // FIXME
0028     // the compiler seems NOT to optimize loads from views (even in a simple test case)
0029     // The whole gimnastic here of copying or not is a pure heuristic exercise that seems to produce the fastest code with the above signature
0030     // not using views (passing a gazzilion of array pointers) seems to produce the fastest code (but it is harder to mantain)
0031 
0032     assert(cpeParams);
0033 
0034     // copy average geometry corrected by beamspot . FIXME (move it somewhere else???)
0035     if (0 == blockIdx.x) {
0036       auto& agc = hits.averageGeometry();
0037       auto const& ag = cpeParams->averageGeometry();
0038       auto nLadders = TrackerTraits::numberOfLaddersInBarrel;
0039 
0040       for (int il = threadIdx.x, nl = nLadders; il < nl; il += blockDim.x) {
0041         agc.ladderZ[il] = ag.ladderZ[il] - bs->z;
0042         agc.ladderX[il] = ag.ladderX[il] - bs->x;
0043         agc.ladderY[il] = ag.ladderY[il] - bs->y;
0044         agc.ladderR[il] = sqrt(agc.ladderX[il] * agc.ladderX[il] + agc.ladderY[il] * agc.ladderY[il]);
0045         agc.ladderMinZ[il] = ag.ladderMinZ[il] - bs->z;
0046         agc.ladderMaxZ[il] = ag.ladderMaxZ[il] - bs->z;
0047       }
0048 
0049       if (0 == threadIdx.x) {
0050         agc.endCapZ[0] = ag.endCapZ[0] - bs->z;
0051         agc.endCapZ[1] = ag.endCapZ[1] - bs->z;
0052       }
0053     }
0054 
0055     // to be moved in common namespace...
0056     using gpuClustering::invalidModuleId;
0057     constexpr int32_t MaxHitsInIter = pixelCPEforGPU::MaxHitsInIter;
0058 
0059     using ClusParams = pixelCPEforGPU::ClusParams;
0060 
0061     // as usual one block per module
0062     __shared__ ClusParams clusParams;
0063 
0064     auto me = clusters[blockIdx.x].moduleId();
0065     int nclus = clusters[me].clusInModule();
0066 
0067     if (0 == nclus)
0068       return;
0069 #ifdef GPU_DEBUG
0070     if (threadIdx.x == 0) {
0071       auto k = clusters[1 + blockIdx.x].moduleStart();
0072       while (digis[k].moduleId() == invalidModuleId)
0073         ++k;
0074       assert(digis[k].moduleId() == me);
0075     }
0076 
0077     if (me % 100 == 1)
0078       if (threadIdx.x == 0)
0079         printf("hitbuilder: %d clusters in module %d. will write at %d\n", nclus, me, clusters[me].clusModuleStart());
0080 #endif
0081 
0082     for (int startClus = 0, endClus = nclus; startClus < endClus; startClus += MaxHitsInIter) {
0083       int nClusInIter = std::min(MaxHitsInIter, endClus - startClus);
0084       int lastClus = startClus + nClusInIter;
0085       assert(nClusInIter <= nclus);
0086       assert(nClusInIter > 0);
0087       assert(lastClus <= nclus);
0088 
0089       assert(nclus > MaxHitsInIter || (0 == startClus && nClusInIter == nclus && lastClus == nclus));
0090 
0091       // init
0092       for (int ic = threadIdx.x; ic < nClusInIter; ic += blockDim.x) {
0093         clusParams.minRow[ic] = std::numeric_limits<uint32_t>::max();
0094         clusParams.maxRow[ic] = 0;
0095         clusParams.minCol[ic] = std::numeric_limits<uint32_t>::max();
0096         clusParams.maxCol[ic] = 0;
0097         clusParams.charge[ic] = 0;
0098         clusParams.q_f_X[ic] = 0;
0099         clusParams.q_l_X[ic] = 0;
0100         clusParams.q_f_Y[ic] = 0;
0101         clusParams.q_l_Y[ic] = 0;
0102       }
0103 
0104       __syncthreads();
0105 
0106       // one thread per "digi"
0107       auto first = clusters[1 + blockIdx.x].moduleStart() + threadIdx.x;
0108       for (int i = first; i < numElements; i += blockDim.x) {
0109         auto id = digis[i].moduleId();
0110         if (id == invalidModuleId)
0111           continue;  // not valid
0112         if (id != me)
0113           break;  // end of module
0114         auto cl = digis[i].clus();
0115         if (cl < startClus || cl >= lastClus)
0116           continue;
0117         cl -= startClus;
0118         assert(cl >= 0);
0119         assert(cl < MaxHitsInIter);
0120         auto x = digis[i].xx();
0121         auto y = digis[i].yy();
0122         atomicMin(&clusParams.minRow[cl], x);
0123         atomicMax(&clusParams.maxRow[cl], x);
0124         atomicMin(&clusParams.minCol[cl], y);
0125         atomicMax(&clusParams.maxCol[cl], y);
0126       }
0127 
0128       __syncthreads();
0129 
0130       auto pixmx = cpeParams->detParams(me).pixmx;
0131       for (int i = first; i < numElements; i += blockDim.x) {
0132         auto id = digis[i].moduleId();
0133         if (id == invalidModuleId)
0134           continue;  // not valid
0135         if (id != me)
0136           break;  // end of module
0137         auto cl = digis[i].clus();
0138         if (cl < startClus || cl >= lastClus)
0139           continue;
0140         cl -= startClus;
0141         assert(cl >= 0);
0142         assert(cl < MaxHitsInIter);
0143         auto x = digis[i].xx();
0144         auto y = digis[i].yy();
0145         auto ch = digis[i].adc();
0146         atomicAdd(&clusParams.charge[cl], ch);
0147         ch = std::min(ch, pixmx);
0148         if (clusParams.minRow[cl] == x)
0149           atomicAdd(&clusParams.q_f_X[cl], ch);
0150         if (clusParams.maxRow[cl] == x)
0151           atomicAdd(&clusParams.q_l_X[cl], ch);
0152         if (clusParams.minCol[cl] == y)
0153           atomicAdd(&clusParams.q_f_Y[cl], ch);
0154         if (clusParams.maxCol[cl] == y)
0155           atomicAdd(&clusParams.q_l_Y[cl], ch);
0156       }
0157 
0158       __syncthreads();
0159 
0160       // next one cluster per thread...
0161 
0162       first = clusters[me].clusModuleStart() + startClus;
0163       for (int ic = threadIdx.x; ic < nClusInIter; ic += blockDim.x) {
0164         auto h = first + ic;  // output index in global memory
0165 
0166         assert(h < hits.nHits());
0167         assert(h < clusters[me + 1].clusModuleStart());
0168 
0169         pixelCPEforGPU::position<TrackerTraits>(cpeParams->commonParams(), cpeParams->detParams(me), clusParams, ic);
0170 
0171         pixelCPEforGPU::errorFromDB<TrackerTraits>(cpeParams->commonParams(), cpeParams->detParams(me), clusParams, ic);
0172 
0173         // store it
0174         hits[h].chargeAndStatus().charge = clusParams.charge[ic];
0175         hits[h].chargeAndStatus().status = clusParams.status[ic];
0176         hits[h].detectorIndex() = me;
0177 
0178         float xl, yl;
0179         hits[h].xLocal() = xl = clusParams.xpos[ic];
0180         hits[h].yLocal() = yl = clusParams.ypos[ic];
0181 
0182         hits[h].clusterSizeX() = clusParams.xsize[ic];
0183         hits[h].clusterSizeY() = clusParams.ysize[ic];
0184 
0185         hits[h].xerrLocal() = clusParams.xerr[ic] * clusParams.xerr[ic] + cpeParams->detParams(me).apeXX;
0186         hits[h].yerrLocal() = clusParams.yerr[ic] * clusParams.yerr[ic] + cpeParams->detParams(me).apeYY;
0187 
0188         // keep it local for computations
0189         float xg, yg, zg;
0190         // to global and compute phi...
0191         cpeParams->detParams(me).frame.toGlobal(xl, yl, xg, yg, zg);
0192         // here correct for the beamspot...
0193         xg -= bs->x;
0194         yg -= bs->y;
0195         zg -= bs->z;
0196 
0197         hits[h].xGlobal() = xg;
0198         hits[h].yGlobal() = yg;
0199         hits[h].zGlobal() = zg;
0200 
0201         hits[h].rGlobal() = std::sqrt(xg * xg + yg * yg);
0202         hits[h].iphi() = unsafe_atan2s<7>(yg, xg);
0203       }
0204       __syncthreads();
0205     }  // end loop on batches
0206   }
0207 
0208 }  // namespace gpuPixelRecHits
0209 
0210 #endif  // RecoLocalTracker_SiPixelRecHits_plugins_gpuPixelRecHits_h