diff --git a/src/python/CRABClient/ClientMapping.py b/src/python/CRABClient/ClientMapping.py index be0cd83be..d42de3b3c 100644 --- a/src/python/CRABClient/ClientMapping.py +++ b/src/python/CRABClient/ClientMapping.py @@ -20,7 +20,7 @@ 'savelogsflag' : {'default': False, 'config': ['General.transferLogs'], 'type': 'BooleanType', 'required': False}, 'faillimit' : {'default': None, 'config': ['General.failureLimit'], 'type': 'IntType', 'required': False}, 'inputdata' : {'default': None, 'config': ['Data.inputDataset', - 'Data.primaryDataset'], 'type': 'StringType', 'required': False}, + 'Data.outputPrimaryDataset'], 'type': 'StringType', 'required': False}, 'nonvaliddata' : {'default': False, 'config': ['Data.allowNonValidInputDataset'], 'type': 'BooleanType', 'required': False}, 'userfiles' : {'default': None, 'config': ['Data.userInputFiles'], 'type': 'ListType', 'required': False}, 'dbsurl' : {'default': 'global', 'config': ['Data.inputDBS'], 'type': 'StringType', 'required': False}, @@ -84,6 +84,7 @@ 'JobType.maxjobruntime' : {'newParam' : 'JobType.maxJobRuntimeMin', 'version' : None}, 'JobType.allowNonProductionCMSSW' : {'newParam' : 'JobType.allowUndistributedCMSSW', 'version' : 'v3.3.16'}, 'Data.secondaryDataset' : {'newParam' : 'Data.secondaryInputDataset', 'version' : 'v3.3.1511'}, + 'Data.primaryDataset' : {'newParam' : 'Data.outputPrimaryDataset', 'version' : 'v3.3.1511'}, } diff --git a/src/python/CRABClient/JobType/Analysis.py b/src/python/CRABClient/JobType/Analysis.py index 36f408a79..0bf50c318 100644 --- a/src/python/CRABClient/JobType/Analysis.py +++ b/src/python/CRABClient/JobType/Analysis.py @@ -190,12 +190,12 @@ def run(self, filecacheurl = None): self.logger.warning(msg) configArguments['userfiles'] = set(userFilesList) ## Get the user-specified primary dataset name. 
- primaryDataset = getattr(self.config.Data, 'primaryDataset', 'CRAB_UserFiles') + outputPrimaryDataset = getattr(self.config.Data, 'outputPrimaryDataset', 'CRAB_UserFiles') # Normalizes "foo/bar" and "/foo/bar" to "/foo/bar" - primaryDataset = "/" + os.path.join(*primaryDataset.split("/")) - if not re.match("/%(primDS)s.*" % (lfnParts), primaryDataset): - self.logger.warning("Invalid primary dataset name %s; publication may fail." % (primaryDataset)) - configArguments['inputdata'] = primaryDataset + outputPrimaryDataset = "/" + os.path.join(*outputPrimaryDataset.split("/")) + if not re.match("/%(primDS)s.*" % (lfnParts), outputPrimaryDataset): + self.logger.warning("Invalid primary dataset name %s; publication may fail." % (outputPrimaryDataset)) + configArguments['inputdata'] = outputPrimaryDataset lumi_mask_name = getattr(self.config.Data, 'lumiMask', None) lumi_list = None @@ -265,9 +265,10 @@ def validateConfig(self, config): ## When running over an input dataset, we don't accept that the user specifies a ## primary dataset name, because the primary dataset name will already be extracted ## from the input dataset name. - if getattr(config.Data, 'inputDataset', None) and getattr(config.Data, 'primaryDataset', None): - msg = "Invalid CRAB configuration: Analysis job type with input dataset does not accept a primary dataset name to be specified." - msg += "\nSuggestion: Remove the parameter Data.primaryDataset." + if getattr(config.Data, 'inputDataset', None) and getattr(config.Data, 'outputPrimaryDataset', None): + msg = "Invalid CRAB configuration: Analysis job type with input dataset does not accept an output primary dataset name to be specified," + msg += " because the latter will be extracted from the former." + msg += "\nSuggestion: Remove the parameter Data.outputPrimaryDataset." return False, msg ## When running over user input files, make sure the splitting mode is 'FileBased'. 
diff --git a/src/python/CRABClient/JobType/PrivateMC.py b/src/python/CRABClient/JobType/PrivateMC.py index 135e40535..7dc439ea9 100644 --- a/src/python/CRABClient/JobType/PrivateMC.py +++ b/src/python/CRABClient/JobType/PrivateMC.py @@ -43,12 +43,12 @@ def run(self, *args, **kwargs): self.logger.warning(msg) ## Get the user-specified primary dataset name. - primaryDataset = getattr(self.config.Data, 'primaryDataset', 'CRAB_PrivateMC') + outputPrimaryDataset = getattr(self.config.Data, 'outputPrimaryDataset', 'CRAB_PrivateMC') # Normalizes "foo/bar" and "/foo/bar" to "/foo/bar" - primaryDataset = "/" + os.path.join(*primaryDataset.split("/")) - if not re.match("/%(primDS)s.*" % (lfnParts), primaryDataset): - self.logger.warning("Invalid primary dataset name %s; publication may fail." % (primaryDataset)) - configArguments['inputdata'] = primaryDataset + outputPrimaryDataset = "/" + os.path.join(*outputPrimaryDataset.split("/")) + if not re.match("/%(primDS)s.*" % (lfnParts), outputPrimaryDataset): + self.logger.warning("Invalid primary dataset name %s; publication may fail." % (outputPrimaryDataset)) + configArguments['inputdata'] = outputPrimaryDataset return tarFilename, configArguments @@ -69,8 +69,8 @@ def validateConfig(self, config): ## If publication is True, check that there is a primary dataset name specified. if getattr(config.Data, 'publication', getParamDefaultValue('Data.publication')): - if not hasattr(config.Data, 'primaryDataset'): - msg = "Invalid CRAB configuration: Parameter Data.primaryDataset not specified." + if not hasattr(config.Data, 'outputPrimaryDataset'): + msg = "Invalid CRAB configuration: Parameter Data.outputPrimaryDataset not specified." msg += "\nMC generation job type requires this parameter for publication." return False, msg