Back to home page

Project CMSSW displayed by LXR

 
 

    


File indexing completed on 2021-06-10 02:54:15

0001 #ifndef HeterogeneousCore_CUDAUtilities_interface_VecArray_h
0002 #define HeterogeneousCore_CUDAUtilities_interface_VecArray_h
0003 
0004 //
0005 // Author: Felice Pantaleo, CERN
0006 //
0007 
0008 #include <new>
0008 #include <utility>
0008 
0008 #include "HeterogeneousCore/CUDAUtilities/interface/cudaCompat.h"
0009 
0010 namespace cms {
0011   namespace cuda {
0012 
0013     template <class T, int maxSize>
0014     class VecArray {
0015     public:
0016       using self = VecArray<T, maxSize>;
0017       using value_t = T;
0018 
0019       inline constexpr int push_back_unsafe(const T &element) {
0020         auto previousSize = m_size;
0021         m_size++;
0022         if (previousSize < maxSize) {
0023           m_data[previousSize] = element;
0024           return previousSize;
0025         } else {
0026           --m_size;
0027           return -1;
0028         }
0029       }
0030 
0031       template <class... Ts>
0032       constexpr int emplace_back_unsafe(Ts &&...args) {
0033         auto previousSize = m_size;
0034         m_size++;
0035         if (previousSize < maxSize) {
0036           (new (&m_data[previousSize]) T(std::forward<Ts>(args)...));
0037           return previousSize;
0038         } else {
0039           --m_size;
0040           return -1;
0041         }
0042       }
0043 
0044       inline constexpr T &back() const {
0045         if (m_size > 0) {
0046           return m_data[m_size - 1];
0047         } else
0048           return T();  //undefined behaviour
0049       }
0050 
0051       // thread-safe version of the vector, when used in a CUDA kernel
0052       __device__ int push_back(const T &element) {
0053         auto previousSize = atomicAdd(&m_size, 1);
0054         if (previousSize < maxSize) {
0055           m_data[previousSize] = element;
0056           return previousSize;
0057         } else {
0058           atomicSub(&m_size, 1);
0059           return -1;
0060         }
0061       }
0062 
0063       template <class... Ts>
0064       __device__ int emplace_back(Ts &&...args) {
0065         auto previousSize = atomicAdd(&m_size, 1);
0066         if (previousSize < maxSize) {
0067           (new (&m_data[previousSize]) T(std::forward<Ts>(args)...));
0068           return previousSize;
0069         } else {
0070           atomicSub(&m_size, 1);
0071           return -1;
0072         }
0073       }
0074 
0075       inline constexpr T pop_back() {
0076         if (m_size > 0) {
0077           auto previousSize = m_size--;
0078           return m_data[previousSize - 1];
0079         } else
0080           return T();
0081       }
0082 
0083       inline constexpr T const *begin() const { return m_data; }
0084       inline constexpr T const *end() const { return m_data + m_size; }
0085       inline constexpr T *begin() { return m_data; }
0086       inline constexpr T *end() { return m_data + m_size; }
0087       inline constexpr int size() const { return m_size; }
0088       inline constexpr T &operator[](int i) { return m_data[i]; }
0089       inline constexpr const T &operator[](int i) const { return m_data[i]; }
0090       inline constexpr void reset() { m_size = 0; }
0091       inline static constexpr int capacity() { return maxSize; }
0092       inline constexpr T const *data() const { return m_data; }
0093       inline constexpr void resize(int size) { m_size = size; }
0094       inline constexpr bool empty() const { return 0 == m_size; }
0095       inline constexpr bool full() const { return maxSize == m_size; }
0096 
0097     private:
0098       T m_data[maxSize];
0099 
0100       int m_size;
0101     };
0102 
0103   }  // namespace cuda
0104 }  // namespace cms
0105 
0106 #endif  // HeterogeneousCore_CUDAUtilities_interface_VecArray_h