#!/bin/bash
set -e
set -x

# This is mainly to make sure nothing crashes. Checking the output for sanity is attempted, but the checks are not exhaustive.

# 1. Run a very simple configuration with all module types.
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=alltypes.root numberEventsInRun=100 numberEventsInLuminosityBlock=20 nEvents=100
# actually we'd expect 99, but the MEs of legacy modules are booked with JOB scope and cannot be saved to DQMIO.
[ 78 = $(dqmiolistmes.py alltypes.root -r 1 | wc -l) ]
[ 78 = $(dqmiolistmes.py alltypes.root -r 1 -l 1 | wc -l) ]
# this is deeply related to what the analyzers actually do.
# again, the legacy modules' output is not saved.
# most run histos (4 modules * 9 types) fill on every event and should have 100 entries.
# the scalar MEs should have the last lumi number (5) (5 float + 5 int)
# testonefilllumi also should have 5 entries in the histograms (9 more)
# the "fillrun" module should have one entry in the histograms (9 total) and 0 in the scalars (2 total)

[ "0: 1, 0.0: 1, 1: 11, 100: 33, 200: 11, 5: 16, 5.0: 5" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py alltypes.root -r 1 --summary)" ]
# per lumi we see 20 in most histograms (4*9), and the current lumi number in the scalars (6 modules * 2).
# the two filllumi modules should have one entry in each of the lumi histograms (2*9 total)

[ "1: 28, 1.0: 6, 20: 44" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py alltypes.root -r 1 -l 1 --summary)" ]
[ "1: 22, 2: 6, 2.0: 6, 20: 44" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py alltypes.root -r 1 -l 2 --summary)" ]
[ "1: 22, 20: 44, 3: 6, 3.0: 6" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py alltypes.root -r 1 -l 3 --summary)" ]
[ "1: 22, 20: 44, 4: 6, 4.0: 6" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py alltypes.root -r 1 -l 4 --summary)" ]
[ "1: 22, 20: 44, 5: 6, 5.0: 6" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py alltypes.root -r 1 -l 5 --summary)" ]
# just make sure we are not off by one
[ "" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py alltypes.root -r 1 -l 6 --summary)" ]
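# Debugging aid (not part of the checks): if one of the --summary assertions above fails,
# dumping the per-ME entry counts without --summary shows which histogram is off.
#${SCRAM_TEST_PATH}/dqmiodumpentries.py alltypes.root -r 1
#${SCRAM_TEST_PATH}/dqmiodumpentries.py alltypes.root -r 1 -l 1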


# 2. Run multi-threaded. First we make a baseline file without legacy modules, since they might not work.
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=nolegacy.root    numberEventsInRun=1000 numberEventsInLuminosityBlock=200 nEvents=1000 nolegacy=True
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=nolegacy-mt.root numberEventsInRun=1000 numberEventsInLuminosityBlock=200 nEvents=1000 nolegacy=True nThreads=10


# 3. Try enabling concurrent lumis.
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=nolegacy-cl.root numberEventsInRun=1000 numberEventsInLuminosityBlock=200 nEvents=1000 nolegacy=True nThreads=10 nConcurrent=10

# same math as above, just a few fewer modules, and more events.
for f in nolegacy.root nolegacy-mt.root nolegacy-cl.root
do
  [ "0: 1, 0.0: 1, 1: 11, 1000: 22, 2000: 11, 5: 3, 5.0: 3" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py $f -r 1 --summary)" ]
  [ "1: 2, 1.0: 2, 200: 22" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py $f -r 1 -l 1 --summary)" ]
  [ "2: 2, 2.0: 2, 200: 22" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py $f -r 1 -l 2 --summary)" ]
  [ "200: 22, 3: 2, 3.0: 2" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py $f -r 1 -l 3 --summary)" ]
  [ "200: 22, 4: 2, 4.0: 2" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py $f -r 1 -l 4 --summary)" ]
  [ "200: 22, 5: 2, 5.0: 2" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py $f -r 1 -l 5 --summary)" ]
  [ "" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py $f -r 1 -l 6 --summary)" ]
done


# 4. Try crossing a run boundary.
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=multirun.root numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=1200
dqmiodumpmetadata.py multirun.root | grep -q '4 runs, 12 lumisections'


# 5. Now, make some chopped up files to try harvesting.
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=part1.root numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=50               # 1st half of 1st lumi
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=part2.root numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=50 firstEvent=50 # 2nd half of 1st lumi
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=part3.root numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=200 firstEvent=100 firstLuminosityBlock=2 # lumi 2 and 3
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=part4.root numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=900 firstRun=2   # 3 more runs

cmsRun ${SCRAM_TEST_PATH}/run_harvesters_cfg.py inputFiles=part1.root inputFiles=part2.root inputFiles=part3.root inputFiles=part4.root outfile=merged.root nomodules=True
dqmiodumpmetadata.py merged.root | grep -q '4 runs, 12 lumisections'

#dumproot() { root2sqlite.py -o $1.sqlite $1 ; echo '.dump' | sqlite3 $1.sqlite > $1.sqldump ; rm $1.sqlite ; }
#dumproot multirun.root
#dumproot merged.root
rootlist () {
  python3 -c '
import uproot
for k in uproot.open("'"$1"'").keys(): print(k)'
}
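# rootlist prints one ROOT key per line, so the object counts below are simple line counts over
# its output; e.g. (debugging aid, not part of the checks):
#rootlist merged.root | wc -l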

# we need to exclude MEs filled on run and lumi boundaries, since the split job *does* see a different number of begin/end run/lumi transitions.
cmp <(${SCRAM_TEST_PATH}/dqmiodumpentries.py multirun.root -r 1 | grep -vE 'fillrun|filllumi') <(${SCRAM_TEST_PATH}/dqmiodumpentries.py merged.root -r 1 | grep -vE 'fillrun|filllumi')
cmp <(${SCRAM_TEST_PATH}/dqmiodumpentries.py multirun.root -r 3) <(${SCRAM_TEST_PATH}/dqmiodumpentries.py merged.root -r 3)
cmp <(${SCRAM_TEST_PATH}/dqmiodumpentries.py multirun.root -r 1 -l 1 | grep -v filllumi) <(${SCRAM_TEST_PATH}/dqmiodumpentries.py merged.root -r 1 -l 1 | grep -v filllumi)
cmp <(${SCRAM_TEST_PATH}/dqmiodumpentries.py multirun.root -r 1 -l 2) <(${SCRAM_TEST_PATH}/dqmiodumpentries.py merged.root -r 1 -l 2)
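# Debugging aid (not part of the checks): if one of the cmp checks above fails, diffing the same
# dumps directly shows which MEs disagree.
#diff <(${SCRAM_TEST_PATH}/dqmiodumpentries.py multirun.root -r 1) <(${SCRAM_TEST_PATH}/dqmiodumpentries.py merged.root -r 1)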

# 6. A load test.
#( if [[ `uname -m` != aarch64 ]] ; then ulimit -v 4000000 ; fi # limit available virtual memory
  cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=huge.root numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=600 nThreads=10 nConcurrent=2 howmany=1000 nolegacy=True
#)


# 7. Try writing a TDirectory file.
cmsRun ${SCRAM_TEST_PATH}/run_harvesters_cfg.py inputFiles=alltypes.root nomodules=True legacyoutput=True reScope=JOB
# this number is rather messy: we have 66 per-lumi objects (harvested), 66 per-run objects (no legacy output), one folder for each set of 11,
# plus some higher-level folders and the ProvInfo hierarchy created by the FileSaver.
[ 185 = $(rootlist DQM_V0001_R000000001__Harvesting__DQMTests__DQMIO.root | wc -l) ]

cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py numberEventsInRun=100 numberEventsInLuminosityBlock=20 nEvents=100 legacyoutput=True
# we expect only the (per-job) legacy histograms here: 3*11 objects in 3 folders, plus 9 more for ProvInfo and higher-level folders.
[ 51 = $(rootlist DQM_V0001_R000000001__EmptySource__DQMTests__DQMIO.root | wc -l) ]
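# Debugging aid (not part of the checks): listing the keys directly shows how the object and
# folder counts above come together, including the ProvInfo entries.
#rootlist DQM_V0001_R000000001__EmptySource__DQMTests__DQMIO.root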

# 8. Try writing ProtoBuf files.
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=1200 protobufoutput=True

cmsRun ${SCRAM_TEST_PATH}/run_harvesters_cfg.py inputFiles=./run000001 outfile=pbdata.root nomodules=True protobufinput=True
[ 117 = $(dqmiolistmes.py pbdata.root -r 1 | wc -l) ]
[ 78 = $(dqmiolistmes.py pbdata.root -r 1 -l 1 | wc -l) ]

# this will potentially mess up statistics (we should only fastHadd *within* a lumisection, not *across*), but should technically work.
fastHadd add -o streamDQMHistograms.pb run000001/run000001_ls*_streamDQMHistograms.pb
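# A statistics-preserving merge would instead add only the files of a single lumisection, e.g.
# (commented out; the exact padding of the ls number in the file name is an assumption):
#fastHadd add -o streamDQMHistograms_ls1.pb run000001/run000001_ls0001_streamDQMHistograms.pb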
# the output format is different from the harvesting above: this is a TDirectory file without DQM formatting.
fastHadd convert -o streamDQMHistograms.root streamDQMHistograms.pb
# here we expect all (incl. legacy) MEs (99+66), plus folders (14 + 4 higher-level)
[ 214 = $(rootlist streamDQMHistograms.root | wc -l) ]


# 9. Try writing online files. These are really TDirectory files, but written via a different module.
# Note that this does not really need to support multiple runs, but it appears it does.
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=1200 onlineoutput=True
# here we expect full per-run output (99 objects), no per-lumi MEs, plus folders (9 + 10 higher-level).
[ 136 = $(rootlist DQM_V0001_UNKNOWN_R000000001.root | wc -l) ]
[ 136 = $(rootlist DQM_V0001_UNKNOWN_R000000002.root | wc -l) ]
[ 136 = $(rootlist DQM_V0001_UNKNOWN_R000000003.root | wc -l) ]
[ 136 = $(rootlist DQM_V0001_UNKNOWN_R000000004.root | wc -l) ]


# 10. Try running some harvesting modules and check if their output makes it out.
# Note that we pass the files out of order here; the DQMIO input should sort them.
cmsRun ${SCRAM_TEST_PATH}/run_harvesters_cfg.py inputFiles=part1.root inputFiles=part3.root inputFiles=part2.root legacyoutput=True
[ 1 = $(rootlist DQM_V0001_R000000001__Harvesting__DQMTests__DQMIO.root | grep -c '<harvestingsummary>s=beginRun(1) endLumi(1,1) endLumi(1,2) endLumi(1,3) endRun(1) endJob() </harvestingsummary>') ]
# The legacy harvester can only do per-run harvesting.
[ 2 = $(rootlist DQM_V0001_R000000001__Harvesting__DQMTests__DQMIO.root | grep -c '<runsummary>s=beginRun(1) endLumi(1,1) endLumi(1,2) endLumi(1,3) endRun(1) </runsummary>') ]

# 11. Try MEtoEDM and EDMtoME.
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=metoedm.root numberEventsInRun=100 numberEventsInLuminosityBlock=20 nEvents=100 metoedmoutput=True
cmsRun ${SCRAM_TEST_PATH}/run_harvesters_cfg.py outfile=edmtome.root inputFiles=metoedm.root nomodules=True metoedminput=True
[ 72 = $(dqmiolistmes.py edmtome.root -r 1 | wc -l) ]
[ 72 = $(dqmiolistmes.py edmtome.root -r 1 -l 1 | wc -l) ]
# again, no legacy module (run) output here, due to the JOB scope of legacy modules
[ "0: 1, 0.0: 1, 1: 10, 100: 30, 200: 10, 5: 15, 5.0: 5" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py edmtome.root -r 1 --summary)" ]
[ "1: 26, 1.0: 6, 20: 40" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py edmtome.root -r 1 -l 1 --summary)" ]
[ "1: 20, 2: 6, 2.0: 6, 20: 40" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py edmtome.root -r 1 -l 2 --summary)" ]
[ "1: 20, 20: 40, 3: 6, 3.0: 6" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py edmtome.root -r 1 -l 3 --summary)" ]
[ "1: 20, 20: 40, 4: 6, 4.0: 6" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py edmtome.root -r 1 -l 4 --summary)" ]
[ "1: 20, 20: 40, 5: 6, 5.0: 6" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py edmtome.root -r 1 -l 5 --summary)" ]
[ "" = "$(${SCRAM_TEST_PATH}/dqmiodumpentries.py edmtome.root -r 1 -l 6 --summary)" ]

cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=part1_metoedm.root metoedmoutput=True numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=50               # 1st half of 1st lumi
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=part2_metoedm.root metoedmoutput=True numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=50 firstEvent=50 # 2nd half of 1st lumi
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=part3_metoedm.root metoedmoutput=True numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=200 firstEvent=100 firstLuminosityBlock=2 # lumi 2 and 3
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=part4_metoedm.root metoedmoutput=True numberEventsInRun=300 numberEventsInLuminosityBlock=100 nEvents=900 firstRun=2   # 3 more runs

cmsRun ${SCRAM_TEST_PATH}/run_harvesters_cfg.py inputFiles=part1_metoedm.root inputFiles=part2_metoedm.root inputFiles=part3_metoedm.root inputFiles=part4_metoedm.root outfile=metoedm_merged.root nomodules=True metoedminput=True
dqmiodumpmetadata.py metoedm_merged.root | grep -q '4 runs, 12 lumisections'

# 12. Sanity checks.
# this will mess up some of the files created earlier; disable it when debugging.
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=empty.root nEvents=0
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=empty.root howmany=0
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=empty.root howmany=0 legacyoutput=True
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=empty.root howmany=0 protobufoutput=True
# nLumisections might be a bit buggy (off by one) in EDM, but is fine here.
cmsRun ${SCRAM_TEST_PATH}/run_analyzers_cfg.py outfile=noevents.root processingMode='RunsAndLumis' nLumisections=20
[ 78 = $(dqmiolistmes.py noevents.root -r 1 | wc -l) ]
[ 78 = $(dqmiolistmes.py noevents.root -r 1 -l 1 | wc -l) ]
[ 78 = $(dqmiolistmes.py noevents.root -r 2 | wc -l) ]
[ 78 = $(dqmiolistmes.py noevents.root -r 2 -l 2 | wc -l) ]