/*
 * Tests the visible device interface.
 * For more info,
 * https://github.com/tensorflow/tensorflow/blob/3bc73f5e2ac437b1d9d559751af789c8c965a7f9/tensorflow/core/framework/device_attributes.proto
 * https://stackoverflow.com/questions/74110853/how-to-check-if-tensorflow-is-using-the-cpu-with-the-c-api
 *
 * Author: Davide Valsecchi
 */

#include <stdexcept>
#include <cppunit/extensions/HelperMacros.h>

#include "PhysicsTools/TensorFlow/interface/TensorFlow.h"
#include "tensorflow/core/framework/device_attributes.pb.h"

#include "testBaseCUDA.h"

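// Test fixture: testBaseCUDA is expected to provide dataPath_ and to run the
// script returned by pyScript() in order to create the graph file used below.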
class testVisibleDevicesCUDA : public testBaseCUDA {
  CPPUNIT_TEST_SUITE(testVisibleDevicesCUDA);
  CPPUNIT_TEST(test);
  CPPUNIT_TEST_SUITE_END();

public:
  std::string pyScript() const override;
  void test() override;
};

CPPUNIT_TEST_SUITE_REGISTRATION(testVisibleDevicesCUDA);

std::string testVisibleDevicesCUDA::pyScript() const { return "createconstantgraph.py"; }

void testVisibleDevicesCUDA::test() {
  if (!cms::cudatest::testDevices())
    return;

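  // create a service registry (with no services yet) and keep it active for the scope of this test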
  std::vector<edm::ParameterSet> psets;
  edm::ServiceToken serviceToken = edm::ServiceRegistry::createSet(psets);
  edm::ServiceRegistry::Operate operate(serviceToken);

  // Set up the CUDAService: configure the plugin manager and declare the services in an inline configuration
  edmplugin::PluginManager::configure(edmplugin::standard::config());

  std::string const config = R"_(import FWCore.ParameterSet.Config as cms
process = cms.Process('Test')
process.add_(cms.Service('ResourceInformationService'))
process.add_(cms.Service('CUDAService'))
)_";
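  // parse the configuration and start the services so they become accessible through edm::Service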
  std::unique_ptr<edm::ParameterSet> params;
  edm::makeParameterSets(config, params);
  edm::ServiceToken tempToken(edm::ServiceRegistry::createServicesFromConfig(std::move(params)));
  edm::ServiceRegistry::Operate operate2(tempToken);
  edm::Service<CUDAInterface> cuda;
  std::cout << "CUDA service enabled: " << cuda->enabled() << std::endl;

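  // request the CUDA backend: the session created below is expected to be placed on the GPU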
  std::cout << "Testing CUDA backend" << std::endl;
  tensorflow::Backend backend = tensorflow::Backend::cuda;
  tensorflow::Options options{backend};

  // load the graph
  std::string pbFile = dataPath_ + "/constantgraph.pb";
  tensorflow::GraphDef* graphDef = tensorflow::loadGraphDef(pbFile);
  CPPUNIT_ASSERT(graphDef != nullptr);

  // create a new session and add the graphDef
  tensorflow::Session* session = tensorflow::createSession(graphDef, options);
  CPPUNIT_ASSERT(session != nullptr);

  // check that creating a session without a graph throws
  CPPUNIT_ASSERT_THROW(tensorflow::createSession(nullptr, options), cms::Exception);

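  // ListDevices fills the vector with one DeviceAttributes entry per device visible to the session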
  std::vector<tensorflow::DeviceAttributes> response;
  tensorflow::Status status = session->ListDevices(&response);
  CPPUNIT_ASSERT(status.ok());

  // With the CUDA backend two devices are expected: the CPU and one GPU.
  // Print the name and type of each device so this can be verified in the log.
  for (unsigned int i = 0; i < response.size(); ++i) {
    std::cout << i << " " << response[i].name() << " type: " << response[i].device_type() << std::endl;
  }
  std::cout << "Available devices: " << response.size() << std::endl;
  CPPUNIT_ASSERT(response.size() == 2);

  // cleanup
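  // closeSession deletes the session and resets the pointer, hence the nullptr check below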
  CPPUNIT_ASSERT(tensorflow::closeSession(session));
  CPPUNIT_ASSERT(session == nullptr);
  delete graphDef;
}