[CRAB]

jobtype = cmssw
#scheduler = remoteGlidein
scheduler = caf
### NOTE: just set the name of the server (pi, lnl, etc.)
### and CRAB will submit the jobs to that server...
#server_name = bari
#
[CMSSW]

### The data you want to access (to be found on DBS)

#dbs_url = http://cmsdbsprod.cern.ch/cms_dbs_ph_analysis_01/servlet/DBSServlet

datasetpath=/ZeroBias1/Run2015A-PromptReco-v1/RECO


pycfg_params= globalTag=GR_P_V56
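## pycfg_params entries are handed to the pset as space-separated
## command-line arguments; a sketch with one extra, hypothetical
## parameter (maxEvents is an assumption, not necessarily defined by this pset):
#pycfg_params= globalTag=GR_P_V56 maxEvents=100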


pset=DPGAnalysis/SiStripTools/test/bsvsbpix_cfg.py


lumi_mask=/afs/cern.ch/cms/CAF/CMSCOMM/COMM_DQM/certification/Collisions15/13TeV/DCSOnly/json_DCSONLY.txt
#runselection=247710
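## runselection also accepts comma-separated run numbers and run ranges;
## a sketch with hypothetical run numbers:
#runselection=247710,247920-247940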

total_number_of_lumis = -1
lumis_per_job = 10
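## alternative splitting by events rather than lumis (sketch with
## assumed values; with a lumi_mask the lumi-based splitting above
## is the appropriate choice):
#total_number_of_events = -1
#events_per_job = 100000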


### The output files (comma separated list)
#output_file =
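## e.g., to retrieve two files (hypothetical names, as produced by the pset):
#output_file = histos.root, ntuple.root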

[USER]

### OUTPUT files Management
## output back into UI
return_data = 1

### To use a specific name for the UI directory where CRAB will create the jobs to submit (with full path).
### The default directory will be "crab_0_<date>_<time>"
ui_working_dir = /afs/cern.ch/work/v/venturia/crab/bsvsbpix_zerobias1_run2015A_DCSOnly_20150615_v2

### To specify the UI directory where to store the CMS executable output
### FULL path is mandatory. By default <ui_working_dir>/res is used.
#outputdir= /full/path/yourOutDir

### To specify the UI directory where to store the stderr, stdout and .BrokerInfo of submitted jobs
### FULL path is mandatory. By default <ui_working_dir>/res is used.
#logdir= /full/path/yourLogDir

### OUTPUT files INTO A SE
copy_data = 0

### if you want to copy data to an "official CMS site"
### you have to specify the site name as it is registered (e.g. in SiteDB)
#storage_element = T2_IT_Bari
### the user_remote_dir will be created under the SE mountpoint
### in the case of publication this directory is not considered
#user_remote_dir = name_directory_you_want

### if you want to copy your data to the CAF
#storage_element = T2_CH_CAF
### the user_remote_dir will be created under the SE mountpoint
### in the case of publication this directory is not considered
#user_remote_dir = express_2010_132421

### if you want to copy your data to your area in castor at CERN,
### or to a site that is not an official CMS site, you have to specify the complete name of the SE
storage_element=srm-cms.cern.ch
### this directory is the mountpoint of the SE
#storage_path=/srm/managerv2?SFN=/castor/cern.ch
storage_path=/castor/cern.ch
### directory or directory tree under the mountpoint
#user_remote_dir = /user/v/venturia/skims/express_2010_132421_132422_3


### To publish the produced output in a local instance of DBS set publish_data = 1
publish_data=0
### Specify the dataset name. The full path will be <primarydataset>/<publish_data_name>/USER
publish_data_name = name_you_prefer
### Specify the URL of the DBS instance where CRAB has to publish the output files
#dbs_url_for_publication = https://cmsdbsprod.cern.ch:8443/cms_dbs_caf_analysis_01_writer/servlet/DBSServlet
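## worked example: with the datasetpath above and publish_data_name =
## mySkim (a placeholder), the published dataset would be
## /ZeroBias1/mySkim/USER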

### To specify additional files to be put in the InputSandBox
### write the full path if the files are not in the current directory
### (wildcard * is allowed): comma separated list
#additional_input_files = file1, file2, /full/path/file3

## if using a CRAB server:
#thresholdLevel = 100
#eMail = your@Email.address

[GRID]
#
## RB/WMS management:
rb = CERN

## Black and White Lists management:
## By Storage
#se_black_list = T0,T1
#se_black_list = T0
#se_white_list =
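## e.g., to run only where the data is hosted at CERN (the site name is
## just an example, not a recommendation):
#se_white_list = T2_CH_CERN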

## By ComputingElement
#ce_black_list =
#ce_white_list =

[CAF]
#
queue=cmscaf1nd
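## other CAF LSF queues with different wall-clock limits are assumed
## to exist (e.g. cmscaf1nh for short jobs, cmscaf1nw for week-long ones):
#queue=cmscaf1nh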

[CONDORG]

# Set this to condor to override the batchsystem defined in gridcat.
#batchsystem = condor

# Specify additional condor_g requirements
# use this requirement to run on CMS-dedicated hardware
# globus_rsl = (condor_submit=(requirements 'ClusterName == \"CMS\" && (Arch == \"INTEL\" || Arch == \"X86_64\")'))
# use this requirement to run on the new hardware
#globus_rsl = (condor_submit=(requirements 'regexp(\"cms-*\",Machine)'))