/*
 * Tests visible device interface
 * For more info,
 * https://github.com/tensorflow/tensorflow/blob/3bc73f5e2ac437b1d9d559751af789c8c965a7f9/tensorflow/core/framework/device_attributes.proto
 * https://stackoverflow.com/questions/74110853/how-to-check-if-tensorflow-is-using-the-cpu-with-the-c-api
 *
 * Author: Davide Valsecchi
 */

#include <stdexcept>
#include <cppunit/extensions/HelperMacros.h>

#include "PhysicsTools/TensorFlow/interface/TensorFlow.h"
#include "tensorflow/core/framework/device_attributes.pb.h"

#include "testBaseCUDA.h"

class testVisibleDevicesCUDA : public testBaseCUDA {
  CPPUNIT_TEST_SUITE(testVisibleDevicesCUDA);
  CPPUNIT_TEST(test);
  CPPUNIT_TEST_SUITE_END();

public:
  std::string pyScript() const override;
  void test() override;
};

CPPUNIT_TEST_SUITE_REGISTRATION(testVisibleDevicesCUDA);

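// python script (run by the test base class) that creates the constant graph consumed below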
std::string testVisibleDevicesCUDA::pyScript() const { return "createconstantgraph.py"; }

void testVisibleDevicesCUDA::test() {
  if (!cms::cudatest::testDevices())
    return;

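  // create an empty set of services and make it active so that edm::Service can be used below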
  std::vector<edm::ParameterSet> psets;
  edm::ServiceToken serviceToken = edm::ServiceRegistry::createSet(psets);
  edm::ServiceRegistry::Operate operate(serviceToken);

  // Setup the CUDA Service
  edmplugin::PluginManager::configure(edmplugin::standard::config());

  std::string const config = R"_(import FWCore.ParameterSet.Config as cms
process = cms.Process('Test')
process.add_(cms.Service('ResourceInformationService'))
process.add_(cms.Service('CUDAService'))
)_";
  std::unique_ptr<edm::ParameterSet> params;
  edm::makeParameterSets(config, params);
  edm::ServiceToken tempToken(edm::ServiceRegistry::createServicesFromConfig(std::move(params)));
  edm::ServiceRegistry::Operate operate2(tempToken);
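  // retrieve the CUDAService (through its CUDAInterface base) and report whether it is enabled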
  edm::Service<CUDAInterface> cuda;
  std::cout << "CUDA service enabled: " << cuda->enabled() << std::endl;

  std::cout << "Testing CUDA backend" << std::endl;
  tensorflow::Backend backend = tensorflow::Backend::cuda;
  tensorflow::Options options{backend};
  tensorflow::setLogging("0");

  // load the graph
  std::string pbFile = dataPath_ + "/constantgraph.pb";
  tensorflow::setLogging();
  tensorflow::GraphDef* graphDef = tensorflow::loadGraphDef(pbFile);
  CPPUNIT_ASSERT(graphDef != nullptr);

  // create a new session and add the graphDef
  tensorflow::Session* session = tensorflow::createSession(graphDef, options);
  CPPUNIT_ASSERT(session != nullptr);

  // check for exception
  CPPUNIT_ASSERT_THROW(tensorflow::createSession(nullptr, options), cms::Exception);

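  // list the devices that TensorFlow can see in this session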
  std::vector<tensorflow::DeviceAttributes> response;
  tensorflow::Status status = session->ListDevices(&response);
  CPPUNIT_ASSERT(status.ok());

  // With the CUDA backend enabled we expect two visible devices: the CPU and one GPU.
  // The loop below prints each device name and type so this can be verified in the log.
  for (unsigned int i = 0; i < response.size(); ++i) {
    std::cout << i << " " << response[i].name() << " type: " << response[i].device_type() << std::endl;
  }
  std::cout << "Available devices: " << response.size() << std::endl;
  CPPUNIT_ASSERT(response.size() == 2);

  // cleanup
  CPPUNIT_ASSERT(tensorflow::closeSession(session));
  CPPUNIT_ASSERT(session == nullptr);
  delete graphDef;
}