#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""hltDumpStream: print information on streams, primary datasets, triggers and (HLT) prescales of a HLT menu
examples:
# run hltDumpStream on HLT menu used in run 123456 (output in CSV format)
hltConfigFromDB --runNumber 123456 | hltDumpStream --mode csv
# run hltDumpStream on local cfg file hlt.py (output in text format)
hltDumpStream hlt.py --mode text
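# run hltDumpStream on local cfg file hlt.py, showing only the prescale columns whose name matches a pattern
# (the column pattern "2p0E34*" below is just an illustrative placeholder)
hltDumpStream hlt.py --prescale-columns "2p0E34*"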
"""
import os
import sys
import argparse
import re
import fnmatch
import operator
from importlib.machinery import SourceFileLoader
import types
import tempfile
import FWCore.ParameterSet.Config as cms
def getPathSeeds(path):
'''list of strings, each corresponding to one of the seeds of the trigger
'''
seeds = []
pathModList = path.expandAndClone()._seq._collection
for pathMod in pathModList:
# skip ignored EDFilters
if pathMod.isOperation() and pathMod.decoration() == '-':
continue
if pathMod.type_() == 'HLTPrescaler':
break
elif pathMod.type_() == 'HLTL1TSeed':
seeds.append(pathMod.L1SeedsLogicalExpression.value())
elif pathMod.type_() == 'TriggerResultsFilter':
trigConds = pathMod.triggerConditions
if len(trigConds) == 1:
seeds.append(trigConds[0])
else:
seeds.append('(' + ') OR ('.join(trigConds) + ')')
return seeds
def getPathSeedsDescription(path):
'''string representing the logical expression of the trigger's seeding
'''
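    # illustrative example (hypothetical seed names): seeds ['L1_SingleMu22', 'L1_HTT280 OR L1_HTT320']
    # would be rendered as '(L1_SingleMu22) AND (L1_HTT280 OR L1_HTT320)'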
seeds = getPathSeeds(path)
if len(seeds) == 0:
seedDesc = '(none)'
elif len(seeds) == 1:
seedDesc = seeds[0]
else:
seedDesc = '(' + ') AND ('.join(seeds) + ')'
return seedDesc
def getBPTXMatching(path):
'''integer representing the BPTX coincidence information for the given path
'''
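    # return values: 2 = path uses a BPTX/ZeroBias L1 seed, 1 = path requires the BPTX-coincidence filter, 0 = no BPTX requirement
    # (note: this function relies on the module-level "process" defined in the __main__ block below)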
bptxDict = {
'hltL1sL1BPTX': False,
'hltL1sL1BPTXPlusOnly': False,
'hltL1sL1BPTXMinusOnly': False,
'hltL1sZeroBias': False,
'hltBPTXCoincidence': False,
}
for bptxMod in bptxDict:
if hasattr(process, bptxMod):
bptxDict[bptxMod] = path.contains(getattr(process, bptxMod))
if bptxDict['hltL1sL1BPTX'] or bptxDict['hltL1sL1BPTXPlusOnly'] or \
bptxDict['hltL1sL1BPTXMinusOnly'] or bptxDict['hltL1sZeroBias']:
bptx = 2
elif bptxDict['hltBPTXCoincidence']:
bptx = 1
else:
bptx = 0
return bptx
def getBPTXMatchingDescription(path):
'''string representing the BPTX coincidence information for the given path
'''
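    # map the BPTX code to a one-character label: 0 -> ' ', 1 -> '~', 2 -> '='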
return ' ~='[getBPTXMatching(path)]
def getStreamsInfoFromProcess(process, prescaleValues, numberOfPrescaleColumns):
'''extract the info on streams, datasets and triggers:
the return type is a list of dictionaries, one per stream
each stream dict contains the following entries (key: value)
- name: name of the stream
- path: cms.EndPath associated to the stream
- prescales: list of prescale values (1 per PS column) applied to the EndPath associated to the stream
- smartPrescales: dictionary (path:ps) of smart-prescales applied at stream level
- datasets: list of dataset dictionaries (see below)
every dataset dictionary contains the following entries (key: value)
- name: name of the dataset
- path: cms.Path associated to the dataset, i.e. the "DatasetPath" (path=None if there is no DatasetPath)
- prescales: list of prescale values (1 per PS column) applied to the DatasetPath (a list of 1s if there is no DatasetPath)
        - smartPrescales: dictionary (path:ps) of smart-prescales applied at dataset level (all values equal to 1 if there is no DatasetPath)
- triggers: list of trigger dictionaries (see below)
every trigger dictionary contains the following entries (key: value)
- name: name of the high-level trigger
- path: cms.Path associated to the trigger
- prescales: list of prescale values (1 per PS column) applied to the Path
- bptx: string expressing the BPTX condition used by the trigger (if any)
- seed: string expressing the logical expression seeding the trigger
        (this is typically based on L1-Trigger algorithms,
but in general it can also include selections on other HLT Paths, incl. DatasetPaths);
seed modules are required to precede the HLTPrescaler module of the trigger (if any)
'''
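    # illustrative (hypothetical) element of the returned list:
    # {'name': 'PhysicsExample', 'path': <cms.EndPath>, 'prescales': [1, 1], 'smartPrescales': {'HLT_ExamplePath_v1': 1}, 'datasets': [...]}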
# get hold of Schedule if present
procSchedule = process.schedule_()
if procSchedule == None:
# older HLT configs used "HLTSchedule", which appears under '_Process__partialschedules'
partialScheduleNames = list(process._Process__partialschedules.keys())
if len(partialScheduleNames) == 1:
procSchedule = getattr(process, partialScheduleNames[0])
elif len(partialScheduleNames) > 1:
raise RuntimeError('ERROR -- the process holds multiple partial schedules: '+str(process._Process__partialschedules))
# find list of streams:
    # - stream XYZ exists if the configuration contains an EndPath that contains an OutputModule named hltOutputXYZ
# - if more than one such EndPath exists, it is an error (unsupported configuration)
# - if the configuration contains a cms.Schedule, the EndPath is required to be part of the cms.Schedule
# - reliance on the PSet "streams" is minimised:
# - the latter PSet, if present, should be a superset of the streams found in the configuration
# (this is the case for HLT configs containing only a subset of the triggers of a menu, e.g. "hltGetConfiguration --paths [..]")
streams = []
# regex to find smart-prescale value
smartPS_recomp = re.compile(r'(.*)\s*/\s*(\d+)')
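    # e.g. the trigger condition 'HLT_ExamplePath_v1 / 5' (hypothetical name) yields
    # the path expression 'HLT_ExamplePath_v1' (after stripping) and the smart-prescale value 5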
# find valid streams based on OutputModules
for outModName in process.outputModules_():
# find stream OutputModules by name: "hltOutput*"
if not outModName.startswith('hltOutput'):
continue
streamName = outModName[len('hltOutput'):]
# find EndPath containing stream OutputModule
streamPath = None
outputModule = process.outputModules_()[outModName]
for efPath in process.endpaths_().values():
if procSchedule != None:
# skip EndPath if not included in the schedule
# (schedule.contains does not seem to be enough, so the index of the EndPath in the schedule is searched;
# if the EndPath is not in the schedule, this search leads to a runtime-error)
try: procSchedule.index(efPath)
except: continue
if efPath.contains(outputModule):
# if streamPath is already set, throw a runtime error:
# no more than one EndPath can hold a given OutputModule in valid HLT configurations
if streamPath != None:
errMsg = 'ERROR -- output module "'+outModName+'" is associated to'
errMsg += ' more than one EndPath: '+str([streamPath.label(), efPath.label()])
raise RuntimeError(errMsg)
streamPath = efPath
# OutputModule not included in any active EndPath
if streamPath == None:
continue
# fill info of all datasets assigned to this stream
datasetsOfStream = []
datasetNames = []
usesDatasetPaths = False
        pathsOfStream = sorted(set(outputModule.SelectEvents.SelectEvents))
datasetPathsOfStream = [foo for foo in pathsOfStream if foo.startswith('Dataset_')]
if len(datasetPathsOfStream) == 0:
# no DatasetPaths found (older HLT menus, before Run 3), fall back on the "streams" PSet
datasetNames = sorted(list(set(getattr(process.streams, streamName))))
elif len(datasetPathsOfStream) == len(pathsOfStream):
# HLT menus with OutputModules selecting on DatasetPaths (HLT menus of 2022 and later)
datasetNames = [foo[len('Dataset_'):] for foo in datasetPathsOfStream]
usesDatasetPaths = True
else:
# only a subset of the Paths in the OutputModule's SelectEvents are DatasetPaths:
# this is not expected to happen in valid HLT configurations
errMsg = 'ERROR -- output module "'+outModName+'" has a mixture of DatasetPaths and non-DatasetPaths'
errMsg += ' in its SelectEvents.SelectEvents PSet: '+str(outputModule.SelectEvents.SelectEvents)
raise RuntimeError(errMsg)
for datasetName in datasetNames:
datasetPath = None
datasetPrescales = [1]*numberOfPrescaleColumns
datasetSmartPrescales = {}
if not usesDatasetPaths:
# for non-DatasetPaths, fall back on "datasets" PSet
pathNames = sorted(list(set(getattr(process.datasets, datasetName))))
for foo in pathNames:
datasetSmartPrescales[foo] = 1
else:
datasetPathName = 'Dataset_'+datasetName
                if datasetPathName in prescaleValues:
                    datasetPrescales = prescaleValues[datasetPathName]
if not hasattr(process, datasetPathName):
msgErr = 'ERROR -- process does not have DatasetPath "'+datasetPathName+'" required by stream "'+streamName+'"'
raise RuntimeError(msgErr)
datasetPath = getattr(process, datasetPathName)
datasetSmartPrescales = {}
# find smart-PS module of DatasetPath
# (used to extract both trigger name and smart-PS values)
datasetPathSmartPSModule = None
for pathMod in datasetPath.expandAndClone()._seq._collection:
# check only EDFilters
if not isinstance(pathMod, cms.EDFilter):
continue
if pathMod.type_() == 'TriggerResultsFilter':
if datasetPathSmartPSModule == None:
datasetPathSmartPSModule = pathMod
else:
msgErr = 'ERROR -- found more than one smart-PS module associated to DatasetPath "'+datasetPathName+'":'
                            msgErr += ' '+str([datasetPathSmartPSModule.label(), pathMod.label()])
raise RuntimeError(msgErr)
if datasetPathSmartPSModule == None:
raise RuntimeError('ERROR -- DatasetPath "'+datasetPathName+'" does not contain any smart-PS module (type: TriggerResultsFilter)')
# get list of Paths, and their smartPSs, from TriggerResultsFilter module of the Dataset Path
# (by construction, the smart-PS module inside a DatasetPath does not use wildcards, but only the exact names of the Paths)
for triggerCond in datasetPathSmartPSModule.triggerConditions:
smartPS_match = smartPS_recomp.match(triggerCond)
if smartPS_match:
smartPS_key = smartPS_match.group(1).strip()
smartPS_val = int(smartPS_match.group(2))
else:
smartPS_key = triggerCond
smartPS_val = 1
if smartPS_key in datasetSmartPrescales:
msgErr = 'ERROR -- multiple smart-PS for trigger expression "'+smartPS_key+'" in module "'+datasetPathSmartPSModule.label()+'"'
raise RuntimeError(msgErr)
datasetSmartPrescales[smartPS_key] = smartPS_val
pathNames = sorted(datasetSmartPrescales.keys())
# info on triggers assigned to a dataset
triggersOfDataset = []
for pathName in pathNames:
if not hasattr(process, pathName):
msgErr = 'ERROR -- process does not have Path "'+pathName+'" required by dataset "'+datasetName+'"'
raise RuntimeError(msgErr)
path = getattr(process, pathName)
                # seed of HLT path (typically, a selection on L1-Trigger algos)
seedStr = getPathSeedsDescription(path)
# look for BPTX coincidence in the given path
bptxStr = getBPTXMatchingDescription(path)
# global prescales of the trigger
pathPrescales = prescaleValues[pathName] if pathName in prescaleValues else [1]*numberOfPrescaleColumns
# trigger information
triggersOfDataset += [{
'name': pathName,
'path': path,
'seed': seedStr,
'BPTX': bptxStr,
'prescales': pathPrescales,
}]
# order triggers alphabetically by name
triggersOfDataset.sort(key = lambda x: x['name'])
datasetsOfStream += [{
'name': datasetName,
'path': datasetPath,
'prescales': datasetPrescales,
'smartPrescales': datasetSmartPrescales,
'triggers': triggersOfDataset,
}]
# order datasets alphabetically by name
datasetsOfStream.sort(key = lambda x: x['name'])
# fill smart-PS applied at stream level
# (in Run-2 HLT menus, smart-PSs are applied to standard Paths in the stream EndPath)
streamSmartPrescales = {} # dictionary of smart-PSs applied at stream level
smartPSModuleName = None # smart-PS module in the stream EndPath
# build list of Paths that can have a smart-PS at stream level:
# this includes the Paths of all datasets in the stream, and the DatasetPaths (if present)
pathNamesForStreamSmartPS = []
for dset in datasetsOfStream:
pathNamesForStreamSmartPS += [foo['name'] for foo in dset['triggers']]
if dset['path'] != None:
pathNamesForStreamSmartPS += [dset['path'].label()]
pathNamesForStreamSmartPS = sorted(list(set(pathNamesForStreamSmartPS)))
# default to 1 (correct value if smart-PS module is absent)
for foo in pathNamesForStreamSmartPS:
streamSmartPrescales[foo] = 1
# loop on modules preceding the OutputModule in the EndPath
# to extract smart prescales at stream level, requiring no more than one smart-PS module
streamPathExpanded = streamPath.expandAndClone()
for modIdx in range(streamPathExpanded.index(outputModule)):
mod = streamPathExpanded._seq._collection[modIdx]
# find smart-PS modules by type
if mod.type_() not in ['TriggerResultsFilter', 'HLTHighLevelDev']:
continue
if smartPSModuleName != None:
msgErr = 'ERROR -- found more than one smart-PS module associated to stream "'+streamName+'":'
                msgErr += ' '+str([smartPSModuleName, mod.label()])
raise RuntimeError(msgErr)
else:
smartPSModuleName = mod.label()
# smart-PS module is present: start by setting to None, and extract smart-PSs
for foo in pathNamesForStreamSmartPS:
streamSmartPrescales[foo] = None
# "TriggerResultsFilter": latest type of smart-prescale module
if mod.type_() == 'TriggerResultsFilter':
for triggerCond in mod.triggerConditions:
smartPS_match = smartPS_recomp.match(triggerCond)
if smartPS_match:
smartPS_key = smartPS_match.group(1).strip()
smartPS_val = int(smartPS_match.group(2))
else:
smartPS_key = triggerCond
smartPS_val = 1
for pathName in streamSmartPrescales:
if fnmatch.fnmatch(pathName, smartPS_key):
if streamSmartPrescales[pathName] == None:
streamSmartPrescales[pathName] = smartPS_val
else:
msgWarn = 'WARNING -- multiple matches for smart-PS of Path "'+pathName+'" in stream "'+streamName+'"'
msgWarn += ', the lowest smart-PS value will be used (ignoring zero).'
print(msgWarn, file=sys.stderr)
streamSmartPrescales[pathName] = min(streamSmartPrescales[pathName], max(1, smartPS_val))
# "HLTHighLevelDev": old type of smart-prescale module
else:
for idx,triggerExpr in enumerate(mod.HLTPaths.value()):
smartPS_key = triggerExpr
smartPS_val = mod.HLTPathsPrescales.value()[idx] * mod.HLTOverallPrescale.value()
for pathName in streamSmartPrescales:
if fnmatch.fnmatch(pathName, smartPS_key):
if streamSmartPrescales[pathName] == None:
streamSmartPrescales[pathName] = smartPS_val
else:
msgWarn = 'WARNING -- multiple matches for smart-PS of Path "'+pathName+'" in stream "'+streamName+'"'
msgWarn += ', the lowest smart-PS value will be used (ignoring zero).'
print(msgWarn, file=sys.stderr)
streamSmartPrescales[pathName] = min(streamSmartPrescales[pathName], max(1, smartPS_val))
# if a smart-PS is still None, there is a smart-PS module,
# but no match for a given Path, so set its smart-PS to zero
for foo in pathNamesForStreamSmartPS:
if streamSmartPrescales[foo] == None:
msgWarn = 'WARNING -- smart-PS of Path "'+foo+'" in stream "'+streamName+'" not found, will be set to zero.'
print(msgWarn, file=sys.stderr)
streamSmartPrescales[foo] = 0
streams += [{
'name': streamName,
'path': streamPath,
'prescales': prescaleValues[streamPath.label()] if streamPath.label() in prescaleValues else [1]*numberOfPrescaleColumns,
'datasets': datasetsOfStream,
'smartPrescales': streamSmartPrescales,
}]
# order streams alphabetically by name
streams.sort(key = lambda x: x['name'])
return streams
def printHeader(mode, selectedPrescaleColumns):
'''print to stdout the header of the hltDumpStream output
'''
if mode == 'text':
if len(selectedPrescaleColumns) > 0:
print('-----------------')
print('Prescale Columns:')
print('-----------------')
for psColIdx,psColName in selectedPrescaleColumns:
print(f'{psColIdx: >4d} : {psColName}')
print('-----------------\n')
elif mode == 'csv':
psColNamesStr = ', '.join(foo[1] for foo in selectedPrescaleColumns)
if psColNamesStr: psColNamesStr += ', '
print('stream, dataset, path, ' + psColNamesStr + 'seed')
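        # e.g. with two selected prescale columns named "ColA" and "ColB" (hypothetical names),
        # the header line reads: stream, dataset, path, ColA, ColB, seed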
def printStreamInfo(mode, stream, selectedPrescaleColumns, pathStrSize=100, showPrescaleValues=True, showSeedNames=True):
'''print to stdout the information on triggers and datasets of a given stream
'''
streamName = stream['name']
if mode == 'text':
print('stream', streamName)
for dataset in stream['datasets']:
datasetName = dataset['name']
if mode == 'text':
print(' dataset', datasetName)
datasetPathName = None
if dataset['path'] != None:
datasetPathName = dataset['path'].label()
datasetSmartPrescaleInStream = 1
if datasetPathName != None:
datasetSmartPrescaleInStream = stream['smartPrescales'][datasetPathName]
for trigger in dataset['triggers']:
triggerName = trigger['name']
triggerSeedDesc = trigger['seed']
triggerBPTXDesc = trigger['BPTX']
triggerSmartPrescaleInDataset = dataset['smartPrescales'][triggerName]
triggerSmartPrescaleInStream = stream['smartPrescales'][triggerName]
triggerSmartPrescale = triggerSmartPrescaleInDataset * triggerSmartPrescaleInStream * datasetSmartPrescaleInStream
prescales = []
for psColIdx,_ in selectedPrescaleColumns:
triggerPrescale = trigger['prescales'][psColIdx]
datasetPrescale = dataset['prescales'][psColIdx]
streamPrescale = stream['prescales'][psColIdx]
prescales.append(triggerSmartPrescale * triggerPrescale * datasetPrescale * streamPrescale)
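            # each printed prescale is the product, per column, of the Path, DatasetPath and stream-EndPath prescales
            # and of the smart prescales (trigger-in-dataset, trigger-in-stream, DatasetPath-in-stream)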
triggerStr = ''
if mode == 'text':
triggerStr += ' %s %-*s' % (triggerBPTXDesc, pathStrSize, triggerName)
if showPrescaleValues:
triggerStr += '%s' % (''.join(' %6d' % p for p in prescales))
if showSeedNames:
triggerStr += ' %s' % (triggerSeedDesc)
elif mode == 'csv':
triggerStr += '%s, %s, %s' % (streamName, datasetName, triggerName)
if showPrescaleValues:
prescaleStr = '%s' % (', '.join('%s' % p for p in prescales))
if prescaleStr: prescaleStr = ', '+prescaleStr
triggerStr += prescaleStr
if showSeedNames:
triggerStr += ', %s' % (triggerSeedDesc)
print(triggerStr)
if mode == 'text':
print('---')
##
## main
##
if __name__ == '__main__':
### args
parser = argparse.ArgumentParser(
prog = './'+os.path.basename(__file__),
formatter_class = argparse.RawDescriptionHelpFormatter,
description = __doc__,
argument_default = argparse.SUPPRESS,
)
# menu: name of ConfDB config, or local cmsRun cfg file, or stdin
parser.add_argument('menu',
nargs = '?',
metavar = 'MENU',
default = None,
help = 'path to local cmsRun cfg file (if not specified, stdin is used)' )
# mode: format of output file
parser.add_argument('-m', '--mode',
dest = 'mode',
action = 'store',
metavar = 'MODE',
default = 'text',
choices = ['text', 'csv'],
        help = 'format of the output (must be "text" or "csv") [default: "text"]' )
# prescale-columns: select prescale columns via regular expressions
group = parser.add_mutually_exclusive_group()
group.add_argument('-p', '--prescale-columns',
dest = 'ps_columns',
metavar = 'COLUMN',
nargs = '+',
default = ['*'],
help = 'list of patterns, passed to fnmatch, to select prescale columns [default: ["*"]]')
group.add_argument('--no-prescales',
dest = 'ps_columns',
action = 'store_const',
const = [],
help = 'do not show information on prescales')
# no-seeds: do not show information on trigger seeds
parser.add_argument('--no-seeds',
dest = 'no_seeds',
action = 'store_true',
default = False,
        help = 'do not show the seed of an HLT path [default: False]' )
# parse command line arguments and options
opts = parser.parse_args()
# parse the HLT configuration from a local cfg file, or from standard input
if opts.menu != None:
loader = SourceFileLoader("pycfg", opts.menu)
hlt = types.ModuleType(loader.name)
loader.exec_module(hlt)
else:
with tempfile.NamedTemporaryFile(dir = "./", delete = False, suffix = ".py") as temp_file:
temp_file.write(bytes(sys.stdin.read(), 'utf-8'))
configname = temp_file.name
        # append the cfg-file name to sys.argv: cfg files using VarParsing expect an invocation like "python cfg.py" or "cmsRun cfg.py"
sys.argv.append(configname)
loader = SourceFileLoader("pycfg", configname)
hlt = types.ModuleType(loader.name)
loader.exec_module(hlt)
os.remove(configname)
# get hold of cms.Process object
if 'process' in hlt.__dict__:
process = hlt.process
elif 'fragment' in hlt.__dict__:
process = hlt.fragment
else:
        sys.stderr.write("Error: the input is not a valid HLT configuration\n")
sys.exit(1)
# Info from PrescaleService:
# values of all (global, i.e. non-smart) HLT prescales, and names of selected prescale columns
# - prescaleValues: dictionary of (key) name of Path to (value) list of Path's prescales
# - selectedPSColumns: list of tuples, each holding (index, name) of selected prescale column
psValues = dict()
numPSColumns = 0
selectedPSColumns = []
showPSValues = True
if opts.ps_columns:
if hasattr(process, 'PrescaleService') and process.PrescaleService.type_() == 'PrescaleService':
# number of prescale columns in the configuration
numPSColumns = len(process.PrescaleService.lvl1Labels)
# keep PS values for all columns
for entry in process.PrescaleService.prescaleTable:
psValues[entry.pathName.value()] = entry.prescales.value()
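            # psValues now maps each Path name to its list of per-column prescales,
            # e.g. (hypothetical) 'HLT_ExamplePath_v1' -> [1, 10, 100]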
# find index and name of selected PS columns
for psColIdx,psColName in enumerate(process.PrescaleService.lvl1Labels):
for psColPattern in opts.ps_columns:
if fnmatch.fnmatch(psColName, psColPattern):
selectedPSColumns.append((psColIdx, psColName))
break
if not selectedPSColumns:
print('WARNING -- selected zero prescale columns: information on prescales will not be shown.', file=sys.stderr)
showPSValues = False
else:
print('WARNING -- PrescaleService module not found: information on prescales will not be shown.', file=sys.stderr)
showPSValues = False
# show seed expression of every HLT Path
showSeeds = not opts.no_seeds
# extract streams' information from cms.Process
streams = getStreamsInfoFromProcess(process, psValues, numPSColumns)
# print header information
printHeader(opts.mode, selectedPSColumns)
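    # width of the trigger-name column in text mode: longest Path name (DatasetPaths excluded), at least 16 characters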
maxPathNameLength = max([16] + [len(foo) for foo in process.paths_() if not foo.startswith('Dataset_')])
# print information of every stream
for stream_i in streams:
printStreamInfo(opts.mode, stream_i, selectedPSColumns, maxPathNameLength, showPSValues, showSeeds)