
"""
_Scenario_

Standard cmsRun Process building interface used for data processing
for a particular data scenario.

A scenario is a macro-data-taking setting such as cosmic running,
beam halo running, or particular validation tests.

This class defines the interfaces used by the Tier 0 and Tier 1
processing to wrap calls to ConfigBuilder in order to retrieve all the
configurations for the various types of jobs.

"""

import FWCore.ParameterSet.Config as cms
from Configuration.DataProcessing.Merge import mergeProcess
from Configuration.DataProcessing.Repack import repackProcess


from Configuration.Applications.ConfigBuilder import ConfigBuilder, Options, defaultOptions

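# Concrete scenarios are expected to subclass Scenario and override the
# methods below. A minimal usage sketch; getScenario is the usual lookup
# helper in this package, but the scenario name and keyword arguments
# shown here are illustrative assumptions:
#
#   from Configuration.DataProcessing.GetScenario import getScenario
#
#   scenario = getScenario("cosmics")
#   process = scenario.promptReco("GLOBALTAG::All",
#                                 writeTiers=["RECO", "AOD"])
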
class Scenario(object):
    """
    _Scenario_

    Base class for all data processing scenario implementations.

    """
    def __init__(self):
        # Concrete scenarios typically override this with the era
        # modifier(s) appropriate to their data-taking setting.
        self.eras = cms.Modifier()


    def promptReco(self, globalTag, **options):
        """
        _promptReco_

        Given a skeleton process object and references
        to the output modules for the products it produces,
        install the standard reco sequences and event content for this
        scenario.

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for promptReco"
        raise NotImplementedError(msg)

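    # A minimal sketch of how a concrete subclass might implement
    # promptReco with the ConfigBuilder imported above. The step list,
    # scenario name and event content are illustrative assumptions, not
    # the canonical Tier 0 configuration:
    #
    #   def promptReco(self, globalTag, **args):
    #       options = Options()
    #       options.__dict__.update(defaultOptions.__dict__)
    #       options.scenario = "pp"
    #       options.step = "RAW2DIGI,L1Reco,RECO,DQM"
    #       options.conditions = globalTag
    #       options.eventcontent = "RECO"
    #       process = cms.Process("RECO", self.eras)
    #       process.source = cms.Source("PoolSource",
    #                                   fileNames=cms.untracked.vstring())
    #       cb = ConfigBuilder(options, process=process, with_output=True)
    #       cb.prepare()
    #       return process
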
    def expressProcessing(self, globalTag, **options):
        """
        _expressProcessing_

        Build an express processing configuration for this scenario.

        Express processing runs conversion, reco and alca reco on each
        streamer file in the express stream and writes out RAW, RECO and
        a combined ALCA file that gets mergepacked in a later step.

        writeTiers is the list of tiers to write out, not including ALCA.

        datasets is the list of datasets to split into for each tier
        written out. Should always be one dataset.

        alcaDataset - if set, the combined ALCA file is written out with
        no dataset splitting; it gets assigned straight to the dataset
        provided.

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for expressProcessing"
        raise NotImplementedError(msg)

    def visualizationProcessing(self, globalTag, **options):
        """
        _visualizationProcessing_

        Build a configuration for the visualization processing for this
        scenario.

        Visualization processing runs unpacking and reco on streamer
        files; it is equipped to run on the online cluster and writes
        RECO or FEVT files.

        writeTiers is the list of tiers to write out.

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for visualizationProcessing"
        raise NotImplementedError(msg)

    def alcaSkim(self, skims, **options):
        """
        _alcaSkim_

        Given a skeleton process, install the skim splitting for the
        given skims.

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for alcaSkim"
        raise NotImplementedError(msg)

    def alcaReco(self, *skims, **options):
        """
        _alcaReco_

        Given a skeleton process, install the skim production for the
        given skims.

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for alcaReco"
        raise NotImplementedError(msg)

    def dqmHarvesting(self, datasetName, runNumber, globalTag, **options):
        """
        _dqmHarvesting_

        Build a DQM Harvesting configuration.

        Arguments:

        datasetName - aka workflow name for DQMServer, this is the name
        of the dataset containing the harvested run
        runNumber - the run being harvested
        globalTag - the global tag being used
        inputFiles - the list of LFNs being harvested

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for dqmHarvesting"
        raise NotImplementedError(msg)

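    # A sketch of a possible dqmHarvesting override; the HARVESTING step
    # string and option names follow common cmsDriver usage but are
    # assumptions rather than a verified Tier 0 recipe:
    #
    #   def dqmHarvesting(self, datasetName, runNumber, globalTag, **args):
    #       options = defaultOptions
    #       options.scenario = "pp"
    #       options.step = "HARVESTING:dqmHarvesting"
    #       options.name = "EDMtoMEConvert"
    #       options.conditions = globalTag
    #       process = cms.Process("HARVESTING", self.eras)
    #       process.source = cms.Source("PoolSource",
    #                                   fileNames=cms.untracked.vstring())
    #       cb = ConfigBuilder(options, process=process)
    #       cb.prepare()
    #       return process
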

    def alcaHarvesting(self, globalTag, datasetName, **options):
        """
        _alcaHarvesting_

        Build an AlCa Harvesting configuration.

        Arguments:

        globalTag - the global tag being used
        inputFiles - the list of LFNs being harvested

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for alcaHarvesting"
        raise NotImplementedError(msg)

    def skimming(self, skims, globalTag, **options):
        """
        _skimming_

        Given a process, install the sequences for Tier 1 skimming
        and the appropriate output modules.

        """
        msg = "Scenario Implementation %s\n" % self.__class__.__name__
        msg += "Does not contain an implementation for skimming"
        raise NotImplementedError(msg)

    def merge(self, *inputFiles, **options):
        """
        _merge_

        Builds a merge configuration by delegating to mergeProcess.

        """
        return mergeProcess(*inputFiles, **options)

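    # Usage sketch for merge; the keyword names (output_file, output_lfn)
    # are assumptions about the option schema accepted by
    # Configuration.DataProcessing.Merge.mergeProcess:
    #
    #   process = Scenario().merge(
    #       "/store/data/Run2023/file1.root",
    #       "/store/data/Run2023/file2.root",
    #       output_file="Merged.root",
    #       output_lfn="/store/data/Run2023/Merged.root")
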

    def repack(self, **options):
        """
        _repack_

        Builds a repack configuration by delegating to repackProcess.

        """
        return repackProcess(**options)

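    # repack likewise delegates everything to repackProcess; the accepted
    # keyword options (e.g. which output datasets to write) are defined by
    # Configuration.DataProcessing.Repack, not by this class.
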
    def dropOutputModule(self, processRef, moduleName):
        """
        _dropOutputModule_

        Util to prune an unwanted output module.

        """
        # Remove the module from the Process' private output module registry.
        del processRef._Process__outputmodules[moduleName]
        return
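
# Usage sketch for dropOutputModule; the "RECOoutput" label is an
# illustrative assumption:
#
#   process = cms.Process("RECO")
#   process.RECOoutput = cms.OutputModule("PoolOutputModule",
#       fileName=cms.untracked.string("reco.root"))
#   Scenario().dropOutputModule(process, "RECOoutput")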