#!/bin/bash
set -e
set -x
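# set -e aborts the script on the first failing command, so every `[ ... ]` check below
# acts as an assertion; set -x echoes each command, which helps when a check fails.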

# This is mainly to make sure nothing crashes. The output is checked for sanity, but the checks are not complete.

# 1. Run a very simple configuration with all module types.
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=alltypes.root numberEventsInRun=100 numberEventsInLuminosityBlock=20 nEvents=100
# we'd actually expect 99, but the MEs booked by legacy modules have JOB scope and cannot be saved to DQMIO.
[ 98 = $(dqmiolistmes.py alltypes.root -r 1 | wc -l) ]
[ 84 = $(dqmiolistmes.py alltypes.root -r 1 -l 1 | wc -l) ]
# this is deeply related to what the analyzers actually do.
# again, the legacy modules' output is not saved.
# most run histos (4 modules * 9 types) fill on every event and should have 100 entries.
# the scalar MEs should have the last lumi number (5) (5 float + 5 int)
# testonefilllumi should also have 5 entries in the histograms (9 more)
# the "fillrun" module should have one entry in the histograms (9 total) and 0 in the scalars (2 total)
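# (dqmiodumpentries.py --summary groups MEs by their number of entries -- or by their
# value, for scalar MEs -- and counts how many MEs share it, hence the "value: count"
# pairs checked below.)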

[ "0: 1, 0.0: 1, 1: 12, 100: 60, 5: 18, 5.0: 6" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py alltypes.root -r 1 --summary)" ]
# per lumi we see 20 in most histograms (4*9), and the current lumi number in the scalars (6 modules * 2).
# the two filllumi modules should have one entry in each of the lumi histograms (2*9 total)
[ "1: 30, 1.0: 6, 20: 48" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py alltypes.root -r 1 -l 1 --summary)" ]
[ "1: 24, 2: 6, 2.0: 6, 20: 48" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py alltypes.root -r 1 -l 2 --summary)" ]
[ "1: 24, 20: 48, 3: 6, 3.0: 6" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py alltypes.root -r 1 -l 3 --summary)" ]
[ "1: 24, 20: 48, 4: 6, 4.0: 6" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py alltypes.root -r 1 -l 4 --summary)" ]
[ "1: 24, 20: 48, 5: 6, 5.0: 6" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py alltypes.root -r 1 -l 5 --summary)" ]
# just make sure we are not off by one
[ "" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py alltypes.root -r 1 -l 6 --summary)" ]


# 2. Run multi-threaded. First we make a baseline file without legacy modules, since they might not work.
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=nolegacy.root    numberEventsInRun=1000 numberEventsInLuminosityBlock=200 nEvents=1000 nolegacy=True
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=nolegacy-mt.root numberEventsInRun=1000 numberEventsInLuminosityBlock=200 nEvents=1000 nolegacy=True nThreads=10
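# These files are checked against the same expected summaries in the loop after step 3,
# so multi-threaded running must not change any entry counts.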


# 3. Try enabling concurrent lumis.
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=nolegacy-cl.root numberEventsInRun=1000 numberEventsInLuminosityBlock=200 nEvents=1000 nolegacy=True nThreads=10 nConcurrent=10

# same math as above, just with a few fewer modules and more events.
for f in nolegacy.root nolegacy-mt.root nolegacy-cl.root
do
  [ "0: 1, 0.0: 1, 1: 12, 1000: 48, 5: 4, 5.0: 4" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py $f -r 1 --summary)" ]
  [ "1: 2, 1.0: 2, 200: 24" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py $f -r 1 -l 1 --summary)" ]
  [ "2: 2, 2.0: 2, 200: 24" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py $f -r 1 -l 2 --summary)" ]
  [ "200: 24, 3: 2, 3.0: 2" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py $f -r 1 -l 3 --summary)" ]
  [ "200: 24, 4: 2, 4.0: 2" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py $f -r 1 -l 4 --summary)" ]
  [ "200: 24, 5: 2, 5.0: 2" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py $f -r 1 -l 5 --summary)" ]
  [ "" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py $f -r 1 -l 6 --summary)" ]
done

# 4. Try crossing a run boundary.
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=multirun.root numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=1200
dqmiodumpmetadata.py multirun.root | grep -q '4 runs, 12 lumisections'
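# (1200 events at 300 per run and 100 per lumi give exactly 4 runs and 12 lumisections.)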


# 5. Now, make some chopped up files to try harvesting.
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=part1.root numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=50               # 1st half of 1st lumi
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=part2.root numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=50 firstEvent=50 # 2nd half of 1st lumi
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=part3.root numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=200 firstEvent=100 firstLuminosityBlock=2 # lumi 2 and 3
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=part4.root numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=900 firstRun=2   # 3 more runs

cmsRun ${SCRAM_TEST_PATH}/run_harvesters_cfg.py inputFiles=part1.root inputFiles=part2.root inputFiles=part3.root inputFiles=part4.root outfile=merged.root nomodules=True
dqmiodumpmetadata.py merged.root | grep -q '4 runs, 12 lumisections'

#dumproot() { root2sqlite.py -o $1.sqlite $1 ; echo '.dump' | sqlite3 $1.sqlite > $1.sqldump ; rm $1.sqlite ; }
#dumproot multirun.root
#dumproot merged.root
rootlist ()
{  python3 -c '
import uproot
for k in uproot.open("'"$1"'").keys(): print(k)'
}
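# Example usage (file produced in step 7 below):
#   rootlist DQM_V0001_R000000001__Harvesting__DQMTests__DQMIO.root
# Note that uproot's keys() also lists (nested) directories, which is why the expected
# counts below include folders as well as objects.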

# we need to exclude MEs filled on run and lumi boundaries, since the split job *does* see a different number of begin/end run/lumi transitions.
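# cmp exits non-zero as soon as the dumps differ, which fails the test via set -e; the
# process substitutions just feed the two (filtered) dumps to it as files.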
cmp <(${SCRAM_TEST_PATH}/dqmiodumpentries.py multirun.root -r 1 | grep -vE 'fillrun|filllumi') <(${SCRAM_TEST_PATH}/dqmiodumpentries.py merged.root -r 1 | grep -vE 'fillrun|filllumi')
cmp <(${SCRAM_TEST_PATH}/dqmiodumpentries.py multirun.root -r 3) <(${SCRAM_TEST_PATH}/dqmiodumpentries.py merged.root -r 3)
cmp <(${SCRAM_TEST_PATH}/dqmiodumpentries.py multirun.root -r 1 -l 1 | grep -v filllumi) <(${SCRAM_TEST_PATH}/dqmiodumpentries.py merged.root -r 1 -l 1 | grep -v filllumi)
cmp <(${SCRAM_TEST_PATH}/dqmiodumpentries.py multirun.root -r 1 -l 2) <(${SCRAM_TEST_PATH}/dqmiodumpentries.py merged.root -r 1 -l 2)

# 6. A load test.
#( if [[ `uname -m` != aarch64 ]] ; then ulimit -v 4000000 ; fi # limit available virtual memory
  cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=huge.root numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=600 nThreads=10 nConcurrent=2 howmany=1000 nolegacy=True
#)


# 7. Try writing a TDirectory file.
cmsRun ${SCRAM_TEST_PATH}/run_harvesters_cfg.py inputFiles=alltypes.root nomodules=True legacyoutput=True reScope=JOB
# this number is rather messy: we have 66 per-lumi objects (harvested), 66 per-run objects (no legacy output), one folder for each set of 11,
# plus some higher-level folders and the ProvInfo hierarchy created by the FileSaver.
[ 212 = $(rootlist DQM_V0001_R000000001__Harvesting__DQMTests__DQMIO.root | wc -l) ]

cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py numberEventsInRun=100 numberEventsInLuminosityBlock=20 nEvents=100 legacyoutput=True
# we expect only the (per-job) legacy histograms here: 3*11 objects in 3 folders, plus 9 more for ProvInfo and higher-level folders.
[ 54 = $(rootlist DQM_V0001_R000000001__EmptySource__DQMTests__DQMIO.root | wc -l) ]

# 8. Try writing ProtoBuf files.
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=1200 protobufoutput=True

cmsRun ${SCRAM_TEST_PATH}/run_harvesters_cfg.py inputFiles=./run000001 outfile=pbdata.root nomodules=True protobufinput=True
[ 140 = $(dqmiolistmes.py pbdata.root -r 1 | wc -l) ]
[ 84 = $(dqmiolistmes.py pbdata.root -r 1 -l 1 | wc -l) ]

# this will potentially mess up statistics (we should only fastHadd *within* a lumisection, not *across*), but should technically work.
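# (adding the per-lumi .pb files sums MEs across lumisections, so per-lumi statistics
# are not meaningful afterwards; the set of MEs, and hence the counts below, should be
# unaffected)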
fastHadd add -o streamDQMHistograms.pb run000001/run000001_ls*_streamDQMHistograms.pb
# the output format is different from the harvesting above: this is a TDirectory file that is not DQM-formatted.
fastHadd convert -o streamDQMHistograms.root streamDQMHistograms.pb
# here we expect all (incl. legacy) MEs (99+66), plus folders (14 + 4 higher-level)
[ 244 = $(rootlist streamDQMHistograms.root | wc -l) ]


# 9. Try writing online files. These are really TDirectory files, but written via a different module.
# Note that this does not really need to support multiple runs, but it appears it does.
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=1200 onlineoutput=True
# here we expect full per-run output (99 objects), no per-lumi MEs, plus folders (9 + 10 higher-level).
[ 160 = $(rootlist DQM_V0001_UNKNOWN_R000000001.root | wc -l) ]
[ 160 = $(rootlist DQM_V0001_UNKNOWN_R000000002.root | wc -l) ]
[ 160 = $(rootlist DQM_V0001_UNKNOWN_R000000003.root | wc -l) ]
[ 160 = $(rootlist DQM_V0001_UNKNOWN_R000000004.root | wc -l) ]


# 10. Try running some harvesting modules and check if their output makes it out.
# Note that we pass the files out of order here; the DQMIO input should sort them.
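# The harvesters record the transitions they see in string MEs (<harvestingsummary>,
# <runsummary>); the greps below check for the expected sequence of begin/end calls.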
cmsRun ${SCRAM_TEST_PATH}/run_harvesters_cfg.py inputFiles=part1.root inputFiles=part3.root inputFiles=part2.root legacyoutput=True
[ 1 = $(rootlist DQM_V0001_R000000001__Harvesting__DQMTests__DQMIO.root | grep  -c '<harvestingsummary>s=beginRun(1) endLumi(1,1) endLumi(1,2) endLumi(1,3) endRun(1) endJob() </harvestingsummary>') ]
# The legacy harvester can only do per-run harvesting.
[ 2 = $(rootlist DQM_V0001_R000000001__Harvesting__DQMTests__DQMIO.root | grep  -c '<runsummary>s=beginRun(1) endLumi(1,1) endLumi(1,2) endLumi(1,3) endRun(1) </runsummary>') ]

# 11. Try MEtoEDM and EDMtoME.
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=metoedm.root numberEventsInRun=100 numberEventsInLuminosityBlock=20 nEvents=100 metoedmoutput=True
cmsRun ${SCRAM_TEST_PATH}/run_harvesters_cfg.py outfile=edmtome.root inputFiles=metoedm.root nomodules=True metoedminput=True
[ 91 = $(dqmiolistmes.py edmtome.root -r 1 | wc -l) ]
[ 78 = $(dqmiolistmes.py edmtome.root -r 1 -l 1 | wc -l) ]
# again, no legacy module (run) output here due to JOB scope for legacy modules
[ "0: 1, 0.0: 1, 1: 11, 100: 55, 5: 17, 5.0: 6" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py edmtome.root -r 1 --summary)" ]
[ "1: 28, 1.0: 6, 20: 44" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py edmtome.root -r 1 -l 1 --summary)" ]
[ "1: 22, 2: 6, 2.0: 6, 20: 44" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py edmtome.root -r 1 -l 2 --summary)" ]
[ "1: 22, 20: 44, 3: 6, 3.0: 6" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py edmtome.root -r 1 -l 3 --summary)" ]
[ "1: 22, 20: 44, 4: 6, 4.0: 6" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py edmtome.root -r 1 -l 4 --summary)" ]
[ "1: 22, 20: 44, 5: 6, 5.0: 6" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py edmtome.root -r 1 -l 5 --summary)" ]
[ "" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py edmtome.root -r 1 -l 6 --summary)" ]

cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=part1_metoedm.root metoedmoutput=True numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=50               # 1st half of 1st lumi
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=part2_metoedm.root metoedmoutput=True numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=50 firstEvent=50 # 2nd half of 1st lumi
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=part3_metoedm.root metoedmoutput=True numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=200 firstEvent=100 firstLuminosityBlock=2 # lumi 2 and 3
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=part4_metoedm.root metoedmoutput=True numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=900 firstRun=2   # 3 more runs

cmsRun ${SCRAM_TEST_PATH}/run_harvesters_cfg.py inputFiles=part1_metoedm.root inputFiles=part2_metoedm.root inputFiles=part3_metoedm.root inputFiles=part4_metoedm.root outfile=metoedm_merged.root nomodules=True metoedminput=True
dqmiodumpmetadata.py metoedm_merged.root | grep -q '4 runs, 12 lumisections'

# 12. Sanity checks.
# these runs will mess up some of the files created earlier; disable them when debugging.
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=empty.root nEvents=0
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=empty.root howmany=0
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=empty.root howmany=0 legacyoutput=True
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=empty.root howmany=0 protobufoutput=True
# nLumisections might be a bit buggy (off by one) in EDM, but is fine here.
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=noevents.root processingMode='RunsAndLumis' nLumisections=20
[ 98 = $(dqmiolistmes.py noevents.root -r 1 | wc -l) ]
[ 84 = $(dqmiolistmes.py noevents.root -r 1 -l 1 | wc -l) ]
[ 98 = $(dqmiolistmes.py noevents.root -r 2 | wc -l) ]
[ 84 = $(dqmiolistmes.py noevents.root -r 2 -l 2 | wc -l) ]