#! /usr/bin/env python3
# -*- coding: utf-8 -*-
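
# Dump the streams, primary datasets and paths defined in an HLT configuration,
# together with their prescale values and L1 seeds.
# The configuration is read from the file given on the command line, or from
# standard input; with the --csv option the output is formatted as CSV instead
# of plain text.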

from __future__ import print_function
import sys, re, os
import operator
from importlib.machinery import SourceFileLoader
import types
import tempfile
import FWCore.ParameterSet.Config as cms

mode = 'text'
try:
  if sys.argv[1] == '--csv':
    mode = 'csv'
    del sys.argv[1]
except IndexError:
  pass

# parse the HLT configuration from standard input or from the given file
try:
  configname = sys.argv[1]
  loader = SourceFileLoader("pycfg", configname)
  hlt = types.ModuleType(loader.name)
  loader.exec_module(hlt)
except:
  # fall back to reading the configuration from standard input via a temporary file
  with tempfile.NamedTemporaryFile(dir="./", delete=False, suffix=".py") as temp_file:
    temp_file.write(bytes(sys.stdin.read(), 'utf-8'))
    configname = temp_file.name
    sys.argv.append(configname)  # VarParsing expects a command line like "cmsRun config.py"
  loader = SourceFileLoader("pycfg", configname)
  hlt = types.ModuleType(loader.name)
  loader.exec_module(hlt)
  os.remove(configname)


if 'process' in hlt.__dict__:
  process = hlt.process
elif 'fragment' in hlt.__dict__:
  process = hlt.fragment
else:
  sys.stderr.write("Error: the input is not a valid HLT configuration\n")
  sys.exit(1)

# read global prescale service
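# 'prescale' maps each path name to its list of prescale values, one value per column named in 'prescaleNames'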
prescale = dict()
prescaleNames = [ '' ]
columns = 1
if 'PrescaleService' in process.__dict__:
  prescaleNames = process.PrescaleService.lvl1Labels.value()
  columns = len(prescaleNames)
  for entry in process.PrescaleService.prescaleTable:
    prescale[entry.pathName.value()] = entry.prescales.value()


# search a path for a single module with a certain name
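# note: 'enter'/'leave' implement the visitor interface used by Path.visit();
# the search stops as soon as a module labelled 'barrier' is encountered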
class SearchModuleByName(object):
  def __init__(self, target, barrier = None):
    self.target  = target
    self.barrier = barrier
    self.found   = [ ]
    self.stop    = False

  def enter(self, node):
    if self.stop:
      return

    if isinstance(node, cms._Module):
      if node.label_() == self.barrier:
        self.stop = True
        return
      if node.label_() == self.target:
        self.found.append(node)

  def leave(self, node):
    pass


# search a path for a single module of a certain type
class SearchModuleByType(object):
  def __init__(self, target, barrier = None):
    self.target  = target
    self.barrier = barrier
    self.found   = [ ]
    self.stop    = False

  def enter(self, node):
    if self.stop:
      return

    if isinstance(node, cms._Module):
      if node.label_() == self.barrier:
        self.stop = True
        return
      if node.type_() == self.target:
        self.found.append(node)

  def leave(self, node):
    pass


# search a path for a "dumb" prescaler
class SearchDumbPrescale(SearchModuleByType):
  def __init__(self, barrier = None):
    super(SearchDumbPrescale, self).__init__('HLTPrescaler', barrier)


# search a path for a "smart" prescaler
class SearchSmartPrescale(SearchModuleByType):
  def __init__(self, barrier = None):
    super(SearchSmartPrescale, self).__init__('HLTHighLevelDev', barrier)


# search a path for a new-style "smart" prescaler (TriggerResultsFilter)
class SearchNewSmartPrescale(SearchModuleByType):
  def __init__(self, barrier = None):
    super(SearchNewSmartPrescale, self).__init__('TriggerResultsFilter', barrier)


# extract the L1 seed for a given path
def getL1Seed(path):
  searchSeed = SearchModuleByType('HLTL1TSeed')
  path.visit(searchSeed)
  if searchSeed.found:
    return [ seed.L1SeedsLogicalExpression.value() for seed in searchSeed.found ]
  else:
    return [ ]


# prepare a description of the L1 seed for a given path
def getL1SeedDescription(path):
  seeds = getL1Seed(path)
  if len(seeds) == 0:
    seedDesc = '(none)'
  elif len(seeds) == 1:
    seedDesc = seeds[0]
  else:
    seedDesc = '(' + ') AND ('.join(seeds) + ')'

  return seedDesc

# get the BPTX coincidence information for the given path
def getBPTXMatching(path):
  searchSeed  = SearchModuleByName('hltL1sL1BPTX')
  searchPlus  = SearchModuleByName('hltL1sL1BPTXPlusOnly')
  searchMinus = SearchModuleByName('hltL1sL1BPTXMinusOnly')
  searchZero  = SearchModuleByName('hltL1sZeroBias')
  searchBPTX  = SearchModuleByName('hltBPTXCoincidence')
  path.visit(searchSeed)
  path.visit(searchPlus)
  path.visit(searchMinus)
  path.visit(searchZero)
  path.visit(searchBPTX)
  if searchSeed.found or searchPlus.found or searchMinus.found or searchZero.found:
    bptx = 2
  elif searchBPTX.found:
    bptx = 1
  else:
    bptx = 0
  return bptx

# get the BPTX coincidence information for the given path, formatted as a character
def getBPTXMatchingDescription(path):
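  # one character per BPTX code: ' ' = no BPTX requirement, '~' = BPTX coincidence filter, '=' = L1 BPTX (or zero bias) seed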
  code = r' ~='
  bptx = getBPTXMatching(path)
  return code[bptx]


# get the list of prescale factors for a path in a given endpath
def getPrescales(name, out, end):
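  # the path's global prescale columns are multiplied by the EndPath's own prescale
  # columns if the output path contains an HLTPrescaler, and scaled or zeroed according
  # to any smart prescaler (HLTHighLevelDev or TriggerResultsFilter) in the output path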
  # look for a global prescale for the given path
  if name in prescale:
    pre = prescale[name]
  else:
    pre = [1] * columns

  # check for a valid EndPath
  if out and end:
    endp = process.endpaths[end]

    # look for a local dumb prescaler in the output path
    dumb = SearchDumbPrescale(out)
    endp.visit(dumb)
    if dumb.found and end in prescale:
      pre = list(map(operator.mul, pre, prescale[end]))

    # look for an old-style local smart prescaler in the output path
    smart = SearchSmartPrescale(out)
    endp.visit(smart)
    # FIXME wildcards are not supported yet
    for found in smart.found:
      if name in found.HLTPaths.value():
        index = found.HLTPaths.value().index(name)
        scale = found.HLTPathsPrescales.value()[index] * found.HLTOverallPrescale.value()
        pre = [ scale * p for p in pre ]
      else:
        pre = [ 0 ] * columns

    # look for a new-style local smart prescaler in the output path
    smart = SearchNewSmartPrescale(out)
    endp.visit(smart)
    # FIXME wildcards are not supported yet
    # FIXME arbitrary expressions are not supported yet, only "HLT_Xxx" and "HLT_Xxx / N"
    match_pre = re.compile(r'%s\s*/\s*(\d+)' % name)
    for found in smart.found:
      scale = 0
      for condition in found.triggerConditions.value():
        if name == condition:
          scale = 1
        elif match_pre.match(condition):
          scale = int(match_pre.match(condition).groups()[0])
      # apply the smart prescale to all columns
      pre = [ scale * p for p in pre ]

  return pre


# format the prescale factors for a path in a given endpath
def getPrescalesDescription(name, out, end):
  pre = getPrescales(name, out, end)
  if mode == 'text':
    return ''.join('  %6d' % p for p in pre)
  elif mode == 'csv':
    return ', '.join('%s' % p for p in pre)
  else:
    return 'n/a'

# get the names of the prescale columns
def getPrescaleNames():
  if mode == 'text':
    return ''.join('  %6s' % p for p in prescaleNames)
  elif mode == 'csv':
    return ', '.join('%s' % p for p in prescaleNames)
  else:
    return 'n/a'


# format the information about a path associated to a specific endpath
def dumpPath(stream, dataset, name, out, end):
  if name not in process.paths:
    return '        %-*s*** missing ***' % (length, name)

  path = process.paths[name]

  # look for prescales
  preDesc = getPrescalesDescription(name, out, end)

  # look for BPTX coincidence in the given path
  bptxDesc = getBPTXMatchingDescription(path)

  # look for L1 seed
  seedDesc = getL1SeedDescription(path)

  if mode == 'text':
    return '      %s %-*s%s    %s' % (bptxDesc, length, name, preDesc, seedDesc)
  elif mode == 'csv':
    return '%s, %s, %s, %s, %s' % (stream, dataset, name, preDesc, seedDesc)
  else:
    return 'n/a'


def getEndPath(output):
  # look for the EndPath with the corresponding output module
  out = ''
  for o in process.endpaths.values():
    searchOut = SearchModuleByName(output)
    o.visit(searchOut)
    if searchOut.found:
      out = o.label_()
      break
  return out


def dumpHeader():
  if mode == 'csv':
    print('stream, dataset, path, %s, L1 trigger' % getPrescaleNames())


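# print the composition of a stream: its primary datasets and their paths (with
# prescales and L1 seeds), plus any paths that appear in the output module's
# SelectEvents but are not assigned to a dataset, and any paths assigned to a
# dataset but missing from the output module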
def dumpStream(stream):
  assigned = set()
  allpaths = set()

  if mode == 'text':
    print('stream', stream)
  out = 'hltOutput%s' % stream
  end = getEndPath(out)
  if end:
    output = getattr(process, out)
    allpaths = set( path for path in output.SelectEvents.SelectEvents )

  pds = sorted( process.streams.__dict__[stream] )
  for pd in pds:
    if mode == 'text':
      print('    dataset', pd)
    if pd in process.datasets.__dict__:
      paths = sorted( path for path in process.datasets.__dict__[pd] )
      assigned.update( paths )
      for path in paths:
        print(dumpPath(stream, pd, path, out, end))
    else:
      if mode == 'text':
        print('        *** not found ***')

  unassigned = allpaths - assigned
  if unassigned:
    if mode == 'text':
      print('    *** unassigned paths ***')
    for path in sorted(unassigned):
      print(dumpPath(stream, '(unassigned)', path, out, end))

  if not end:
    print('    *** corresponding EndPath not found ***')
  else:
    missing = assigned - allpaths
    if missing:
      if mode == 'text':
        print('    *** paths missing from the EndPath\'s output module ***')
      for path in sorted(missing):
        print(dumpPath(stream, '(missing)', path, out, end))


# read the list of streams
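# (the stream names are the parameter labels of the process.streams PSet, read here
#  through the name-mangled private attribute of the _Parameterizable base class)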
streams = process.streams._Parameterizable__parameterNames
streams.sort()

# figure the longest path name
length   = 32
length_p = max(len(p) for p in process.paths)
length_d = max(len(p) for d in process.datasets.__dict__ if not d.startswith('_') for p in process.datasets.__dict__[d])
length = max(length_p, length_d, length) + 4

dumpHeader()
for stream in streams:
  dumpStream(stream)