| sentence1 (string, lengths 52 to 3.87M) | sentence2 (string, lengths 1 to 47.2k) | label (1 class) |
|---|---|---|
def listDataTiers(self, data_tier_name=""):
"""
List data tier(s)
"""
if not isinstance(data_tier_name, basestring) :
dbsExceptionHandler('dbsException-invalid-input',
'data_tier_name given is not valid : %s' % data_tier_name)
else:
try:
data_tier_name = str(data_tier_name)
except:
dbsExceptionHandler('dbsException-invalid-input',
'data_tier_name given is not valid : %s' % data_tier_name)
conn = self.dbi.connection()
try:
result = self.dataTier.execute(conn, data_tier_name.upper())
return result
finally:
if conn:
conn.close()
|
List data tier(s)
|
entailment
|
def execute(self, conn, site_name= "", transaction = False):
"""
Lists all sites types if site_name is not provided.
"""
sql = self.sql
if site_name == "":
result = self.dbi.processData(sql, conn=conn, transaction=transaction)
else:
sql += "WHERE S.SITE_NAME = :site_name"
binds = { "site_name" : site_name }
result = self.dbi.processData(sql, binds, conn, transaction)
return self.formatDict(result)
|
Lists all sites types if site_name is not provided.
|
entailment
|
def execute(self, conn, logical_file_name={}, transaction=False):
"""
simple execute
"""
if not conn:
dbsExceptionHandler("dbsException-db-conn-failed", "Oracle/FileBuffer/DeleteFiles. Expects db connection from upper layer.")
self.dbi.processData(self.sql, logical_file_name, conn, transaction, returnCursor=True)
|
simple execute
|
entailment
|
def listDataType(self, dataType="", dataset=""):
"""
List data-type/primary-ds-type
"""
conn = self.dbi.connection()
try:
if dataset and dataType:
dbsExceptionHandler('dbsException-invalid-input',
"DBSDataType/listDataType. Data Type can be only searched by data_type or by dataset, not both.")
else:
result = self.dataType.execute(conn, dataType, dataset)
return result
finally:
if conn:
conn.close()
|
List data-type/primary-ds-type
|
entailment
|
def getBlocks(self):
"""
Get the blocks that need to be migrated
"""
try:
conn = self.dbi.connection()
result = self.buflistblks.execute(conn)
return result
finally:
if conn:
conn.close()
|
Get the blocks that need to be migrated
|
entailment
|
def getBufferedFiles(self, block_id):
"""
Get some files from the insert buffer
"""
try:
conn = self.dbi.connection()
result = self.buflist.execute(conn, block_id)
return result
finally:
if conn:
conn.close()
|
Get some files from the insert buffer
|
entailment
|
def execute(self, conn, data_tier_name='', transaction = False, cache=None):
"""
returns id for a given datatier name
"""
if cache:
ret=cache.get("DATA_TIERS")
if not ret==None:
return ret
sql = self.sql
binds={}
if data_tier_name:
op = ('=', 'like')['%' in data_tier_name]
sql += "WHERE DT.DATA_TIER_NAME %s :datatier" %op
binds = {"datatier":data_tier_name}
result = self.dbi.processData(sql, binds, conn, transaction)
plist = self.formatDict(result)
return plist
|
returns id for a given datatier name
|
entailment
|
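The op = ('=', 'like')['%' in data_tier_name] line above is the idiom this codebase uses to switch between an exact match and a LIKE match depending on whether the caller passed a wildcard. A minimal standalone sketch of that idiom; the SELECT text below is illustrative, not the real DAO statement:

def build_datatier_filter(base_sql, data_tier_name=""):
    """Append a WHERE clause only when a tier name is given; use LIKE when the value contains a wildcard."""
    sql = base_sql
    binds = {}
    if data_tier_name:
        # '%' anywhere in the value means the caller wants pattern matching
        op = ('=', 'like')['%' in data_tier_name]
        sql += " WHERE DT.DATA_TIER_NAME %s :datatier" % op
        binds = {"datatier": data_tier_name.upper()}
    return sql, binds

# exact match vs. wildcard match
print(build_datatier_filter("SELECT DT.DATA_TIER_ID FROM DATA_TIERS DT", "RAW"))
print(build_datatier_filter("SELECT DT.DATA_TIER_ID FROM DATA_TIERS DT", "%SIM%"))

Indexing a two-element tuple with the boolean works because False is 0 and True is 1.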
def execute(self, conn, migration_url="", migration_input="", create_by="", migration_request_id="", transaction=False):
"""
Lists the oldest request queued
"""
binds = {}
result = self.dbi.processData(self.sql, binds, conn, transaction)
result = self.formatDict(result)
if len(result) == 0 :
return []
if result[0]["migration_request_id"] in ('', None) :
return []
return result
|
Lists the oldest request queued
|
entailment
|
def listProcessingEras(self, processing_version=''):
"""
Returns all processing eras in dbs
"""
conn = self.dbi.connection()
try:
result = self.pelst.execute(conn, processing_version)
return result
finally:
if conn:
conn.close()
|
Returns all processing eras in dbs
|
entailment
|
def insertProcessingEra(self, businput):
"""
Input dictionary has to have the following keys:
processing_version, creation_date, create_by, description
it builds the correct dictionary for dao input and executes the dao
"""
conn = self.dbi.connection()
tran = conn.begin()
try:
businput["processing_era_id"] = self.sm.increment(conn, "SEQ_PE", tran)
businput["processing_version"] = businput["processing_version"]
self.pein.execute(conn, businput, tran)
tran.commit()
tran = None
except KeyError as ke:
dbsExceptionHandler('dbsException-invalid-input',
"Invalid input:" + ke.args[0])
except Exception as ex:
if (str(ex).lower().find("unique constraint") != -1 or
str(ex).lower().find("duplicate") != -1):
# already exist
self.logger.warning("DBSProcessingEra/insertProcessingEras. " +
"Unique constraint violation being ignored...")
self.logger.warning(ex)
else:
if tran:
tran.rollback()
tran = None
raise
finally:
if tran:
tran.rollback()
if conn:
conn.close()
|
Input dictionary has to have the following keys:
processing_version, creation_date, create_by, description
it builds the correct dictionary for dao input and executes the dao
|
entailment
|
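insertProcessingEra above follows the insert pattern used throughout these business objects: allocate an id from an Oracle sequence, insert inside a transaction, and treat a unique-constraint or duplicate error as "already exists" instead of failing. A schematic sketch of that control flow, with hypothetical dao and seq_mgr arguments standing in for self.pein and self.sm:

def insert_ignoring_duplicates(conn, dao, seq_mgr, businput, logger):
    """Insert one row; swallow unique-constraint/duplicate errors, re-raise anything else."""
    tran = conn.begin()
    try:
        # allocate the primary key from a sequence, then insert inside the transaction
        businput["processing_era_id"] = seq_mgr.increment(conn, "SEQ_PE", tran)
        dao.execute(conn, businput, tran)
        tran.commit()
        tran = None
    except Exception as ex:
        msg = str(ex).lower()
        if "unique constraint" in msg or "duplicate" in msg:
            logger.warning("row already exists, ignoring: %s", ex)
        else:
            raise
    finally:
        if tran:
            tran.rollback()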
def listPhysicsGroups(self, physics_group_name=""):
"""
Returns all physics groups if physics group names are not passed.
"""
if not isinstance(physics_group_name, basestring):
dbsExceptionHandler('dbsException-invalid-input',
'physics group name given is not valid : %s' %
physics_group_name)
else:
try:
physics_group_name = str(physics_group_name)
except:
dbsExceptionHandler('dbsException-invalid-input',
'physics group name given is not valid : %s' %
physics_group_name)
conn = self.dbi.connection()
try:
result = self.pglist.execute(conn, physics_group_name)
return result
finally:
if conn:
conn.close()
|
Returns all physics groups if physics group names are not passed.
|
entailment
|
def create_token_generator(input_list):
"""SQL Generator to select from list of values in Oracle"""
###Generator trick from http://betteratoracle.com/posts/20-how-do-i-bind-a-variable-in-list
###The maximum length of the comma separated list is 4000 characters, therefore we need to split the list
###ORA-01460: unimplemented or unreasonable conversion requested will thrown if list is larger
oracle_limit = 4000
grp_list = []
if type(input_list[0]) == int :
input_str = ','.join(map(str, input_list))
else:
input_str = ','.join(input_list)
if len(input_str) >= oracle_limit:
index = 0
while True:
begin, end = index, index+oracle_limit
if end > len(input_str):
end = len(input_str)
grp_list.append(input_str[begin:end])
break
else:
index = input_str.rfind(',', begin, end)
if index == -1:
break
grp_list.append(input_str[begin:index])
index += 1 #to remove the leading comma
else:
grp_list.append(input_str)
token_generator = """
WITH TOKEN_GENERATOR AS (
"""
binds = {}
for index, chunk in enumerate(grp_list):
if index:
token_generator += """
UNION ALL
"""
bind = "token_%s" % index
token_generator += """SELECT REGEXP_SUBSTR(:{bind}, '[^,]+', 1, LEVEL) token
FROM DUAL
CONNECT BY LEVEL <= LENGTH(:{bind}) - LENGTH(REPLACE(:{bind}, ',', '')) + 1
""".format(bind=bind)
binds.update({bind: chunk})
token_generator += ")"
return token_generator, binds
|
SQL Generator to select from list of values in Oracle
|
entailment
|
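create_token_generator above exists because Oracle limits literal IN-lists and rejects bound comma-separated strings longer than about 4000 characters, so the list is passed as one or more bind strings and split back into rows by REGEXP_SUBSTR inside a WITH clause. A hedged usage sketch, assuming the function above is in scope (e.g. defined in the same module); the FILES/LOGICAL_FILE_NAME query text is illustrative rather than an actual DBS DAO:

lfns = ["/store/data/example_%d.root" % i for i in range(5)]
token_sql, binds = create_token_generator(lfns)

# The generated WITH clause is prepended to the main query, and the main query
# then selects from the TOKEN_GENERATOR inline view instead of using a huge
# IN (:b1, :b2, ...) list.
sql = token_sql + """
SELECT F.LOGICAL_FILE_NAME
FROM FILES F
WHERE F.LOGICAL_FILE_NAME IN (SELECT TOKEN FROM TOKEN_GENERATOR)
"""
# binds maps token_0, token_1, ... to comma-separated chunks of the input list
# result = dbi.processData(sql, binds, conn, transaction)
print(sql)
print(binds)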
def getServices(self):
"""
        Simple method that returns a list of all the known DBS instances, i.e. instances known to this registry
"""
try:
conn = self.dbi.connection()
result = self.serviceslist.execute(conn)
return result
except Exception as ex:
msg = (("%s DBSServicesRegistry/getServices." +
" %s\n. Exception trace: \n %s") %
(DBSEXCEPTIONS['dbsException-3'], ex,
traceback.format_exc()))
self.logger.exception(msg )
raise Exception ("dbsException-3", msg )
finally:
conn.close()
|
Simple method that returns a list of all the known DBS instances, i.e. instances known to this registry
|
entailment
|
def addService(self):
"""
Add a service to service registry
"""
conn = self.dbi.connection()
tran = conn.begin()
try:
body = request.body.read()
service = cjson.decode(body)
addthis = {}
addthis['service_id'] = self.sm.increment(conn, "SEQ_RS", tran)
addthis['name'] = service.get('NAME', '')
if addthis['name'] == '':
msg = (("%s DBSServicesRegistry/addServices." +
" Service Must be Named\n") %
DBSEXCEPTIONS['dbsException-3'])
raise Exception("dbsException-3", msg)
addthis['type'] = service.get('TYPE', 'GENERIC')
addthis['location'] = service.get('LOCATION', 'HYPERSPACE')
addthis['status'] = service.get('STATUS', 'UNKNOWN')
addthis['admin'] = service.get('ADMIN', 'UNADMINISTRATED')
addthis['uri'] = service.get('URI', '')
if addthis['uri'] == '':
msg = (("%s DBSServicesRegistry/addServices." +
" Service URI must be provided.\n") %
DBSEXCEPTIONS['dbsException-3'])
self.logger.exception(msg)
raise Exception("dbsException-3", msg)
addthis['db'] = service.get('DB', 'NO_DATABASE')
addthis['version'] = service.get('VERSION', 'UNKNOWN' )
addthis['last_contact'] = dbsUtils().getTime()
addthis['comments'] = service.get('COMMENTS', 'NO COMMENTS')
addthis['alias'] = service.get('ALIAS', 'No Alias')
self.servicesadd.execute(conn, addthis, tran)
tran.commit()
except exceptions.IntegrityError as ex:
if (str(ex).find("unique constraint") != -1 or
str(ex).lower().find("duplicate") != -1) :
#Update the service instead
try:
self.servicesupdate.execute(conn, addthis, tran)
tran.commit()
except Exception as ex:
msg = (("%s DBSServiceRegistry/addServices." +
" %s\n. Exception trace: \n %s") %
(DBSEXCEPTIONS['dbsException-3'], ex,
traceback.format_exc()))
self.logger.exception(msg )
raise Exception ("dbsException-3", msg )
except Exception as ex:
tran.rollback()
msg = (("%s DBSServiceRegistry/addServices." +
" %s\n. Exception trace: \n %s") %
(DBSEXCEPTIONS['dbsException-3'], ex,
traceback.format_exc()))
self.logger.exception(msg )
raise Exception ("dbsException-3", msg )
finally:
conn.close()
|
Add a service to service registry
|
entailment
|
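addService above decodes a JSON request body and falls back to defaults for every key except NAME and URI, which must be present. A sketch of the kind of payload it expects; the values and the example URL are made up, while the upper-case keys come directly from the service.get(...) calls above:

import json

payload = {
    "NAME": "DBSReader",      # required, otherwise "Service Must be Named"
    "URI": "https://example.invalid/dbs/prod/global/DBSReader",  # required
    "TYPE": "GENERIC",        # the remaining keys are optional; defaults shown in addService
    "LOCATION": "HYPERSPACE",
    "STATUS": "UNKNOWN",
    "ADMIN": "UNADMINISTRATED",
    "DB": "NO_DATABASE",
    "VERSION": "UNKNOWN",
    "COMMENTS": "NO COMMENTS",
    "ALIAS": "No Alias",
}
body = json.dumps(payload)    # the server side decodes this with cjson.decode(body)
print(body)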
def execute(self, conn, daoinput, transaction = False):
"""
required keys:
migration_status, migration_request_id
"""
if not conn:
dbsExceptionHandler("dbsException-failed-connect2host", "Oracle/MigrationRequests/UpdateRequestStatus. Expects db connection from upper layer.",
self.logger.exception)
if daoinput['migration_status'] == 1:
sql = self.sql2
elif daoinput['migration_status'] == 2:
sql = self.sql + " and MIGRATION_STATUS = 1 "
elif daoinput['migration_status'] == 3:
sql = self.sql3 + " and MIGRATION_STATUS = 1 "
else:
dbsExceptionHandler("dbsException-conflict-data", "Oracle/MigrationRequest/UpdateRequestStatus. Expected migration status to be 1, 2 or 3",
self.logger.exception)
result = self.dbi.processData(sql, daoinput, conn, transaction)
|
required keys:
migration_status, migration_request_id
|
entailment
|
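The branching above encodes which UPDATE statement is allowed for each requested migration status: a request for status 1 uses sql2 unconditionally, while requests for status 2 or 3 append "and MIGRATION_STATUS = 1" so the row is only changed if it is still in status 1. A small sketch of that guard, where sql, sql2 and sql3 are placeholders for the DAO's real statements, and reading 1/2/3 as in-progress/done/failed is an assumption consistent with the retry logic elsewhere in this section:

def pick_update_sql(migration_status, sql, sql2, sql3):
    """Return the UPDATE statement for a requested status transition, or None for an invalid status."""
    if migration_status == 1:
        return sql2                                 # move the request into status 1
    if migration_status == 2:
        return sql + " and MIGRATION_STATUS = 1 "   # only allowed from status 1
    if migration_status == 3:
        return sql3 + " and MIGRATION_STATUS = 1 "  # only allowed from status 1
    return None                                     # the caller raises dbsException-conflict-data

print(pick_update_sql(2, "UPDATE ... WHERE MIGRATION_REQUEST_ID=:migration_request_id",
                      "UPDATE ...", "UPDATE ..."))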
def execute(self, conn, migration_url="", migration_input="", create_by="", migration_request_id="", oldest= False, transaction=False):
"""
Lists all requests if pattern is not provided.
"""
sql = self.sql
binds = {}
if migration_request_id:
sql += " WHERE MR.MIGRATION_REQUEST_ID=:migration_request_id"
binds['migration_request_id']=migration_request_id
elif oldest:
#FIXME: Need to write the sql.YG
#current_date = dbsUtils().getTime()
            #we require waiting times of:
            #retry_count=0 is 1 minute
            #retry_count=1 is 2 minutes
            #retry_count=2 is 4 minutes
sql += """
WHERE MR.MIGRATION_STATUS=0
or (MR.migration_status=3 and MR.retry_count=0 and MR.last_modification_date <= :current_date-60)
or (MR.migration_status=3 and MR.retry_count=1 and MR.last_modification_date <= :current_date-120)
or (MR.migration_status=3 and MR.retry_count=2 and MR.last_modification_date <= :current_date-240)
ORDER BY MR.creation_date
"""
binds['current_date'] = dbsUtils().getTime()
#print "time= " + str(binds['current_date'])
else:
if migration_url or migration_input or create_by:
sql += " WHERE "
if migration_url:
sql += " MR.MIGRATION_URL=:migration_url"
binds['migration_url']=migration_url
if migration_input:
if migration_url:
sql += " AND "
op = ("=", "like")["%" in migration_input]
sql += " MR.MIGRATION_INPUT %s :migration_input" % op
binds['migration_input']=migration_input
if create_by:
if migration_url or migration_input:
sql += " AND "
sql += " MR.CREATE_BY=:create_by" %create_by
binds['create_by']=create_by
cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
result = []
for c in cursors:
result.extend(self.formatCursor(c, size=100))
return result
|
Lists all requests if pattern is not provided.
|
entailment
|
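The "oldest" branch above implements a simple retry backoff for failed migrations: a request with migration_status=3 only becomes eligible again 60, 120 or 240 seconds after its last modification, depending on retry_count, while status 0 requests are always eligible. A self-contained sketch of that eligibility test, assuming last_modification_date is a unix timestamp as returned by dbsUtils().getTime():

import time

def is_eligible(req, now=None):
    """Mirror the WHERE clause: new requests always, failed ones after a retry_count-dependent wait."""
    now = now if now is not None else int(time.time())
    waits = {0: 60, 1: 120, 2: 240}   # seconds to wait before retry, per retry_count
    if req["migration_status"] == 0:
        return True
    if req["migration_status"] == 3 and req["retry_count"] in waits:
        return req["last_modification_date"] <= now - waits[req["retry_count"]]
    return False

print(is_eligible({"migration_status": 3, "retry_count": 1,
                   "last_modification_date": int(time.time()) - 300}))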
def execute(self, conn, logical_file_name, transaction=False):
"""
simple execute
"""
if not conn:
dbsExceptionHandler("dbsException-db-conn-failed", "Oracle/FileBuffer/DeleteDupicates. Expects db connection from upper layer.")
print(self.sql)
self.dbi.processData(self.sql, logical_file_name, conn, transaction)
|
simple execute
|
entailment
|
def execute(self, conn, block_name="", transaction = False):
"""
block: /a/b/c#d
"""
if not conn:
dbsExceptionHandler("dbsException-failed-connect2host", "Oracle/BlockParent/List. Expects db connection from upper layer.", self.logger.exception)
sql = self.sql
if isinstance(block_name, basestring):
binds = {'block_name' :block_name}
elif type(block_name) is list:
binds = [{'block_name':x} for x in block_name]
else:
msg = "Oracle/BlockParent/List. Block_name must be provided either as a string or as a list."
dbsExceptionHandler('dbsException-invalid-input', msg, self.logger.exception)
result = self.dbi.processData(sql, binds, conn, transaction)
return self.formatDict(result)
|
block: /a/b/c#d
|
entailment
|
def execute(self, conn, origin_site_name="", dataset="", block_name="", transaction = False):
"""
origin_site_name: T1_US_FNAL_Buffer
dataset: /a/b/c
block_name: /a/b/c#d
"""
if not conn:
dbsExceptionHandler("dbsException-db-conn-failed",
"Oracle/Block/List. Expects db connection from upper layer.", self.logger.exception)
binds = {}
if origin_site_name:
wheresql = 'WHERE B.ORIGIN_SITE_NAME = :origin_site_name'
binds.update(origin_site_name=origin_site_name)
if dataset:
if 'wheresql' in locals():
wheresql += ' AND DS.DATASET = :dataset'
else:
wheresql = 'WHERE DS.DATASET = :dataset'
binds.update(dataset=dataset)
if block_name:
if 'wheresql' in locals():
wheresql += ' AND B.BLOCK_NAME = :block_name'
else:
wheresql = 'WHERE B.BLOCK_NAME = :block_name'
binds.update(block_name=block_name)
sql = '{sql} {wheresql}'.format(sql=self.sql, wheresql=wheresql)
cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
result = []
for cursor in cursors:
result.extend(self.formatCursor(cursor, size=100))
return result
|
origin_site_name: T1_US_FNAL_Buffer
dataset: /a/b/c
block_name: /a/b/c#d
|
entailment
|
def listPrimaryDatasets(self, primary_ds_name="", primary_ds_type=""):
"""
        Returns all primary datasets if primary_ds_name or primary_ds_type are not passed.
"""
conn = self.dbi.connection()
try:
result = self.primdslist.execute(conn, primary_ds_name, primary_ds_type)
            return result
finally:
if conn:
conn.close()
|
Returns all primary datasets if primary_ds_name or primary_ds_type are not passed.
|
entailment
|
def listPrimaryDSTypes(self, primary_ds_type="", dataset=""):
"""
Returns all primary dataset types if dataset or primary_ds_type are not passed.
"""
conn = self.dbi.connection()
try:
result = self.primdstypeList.execute(conn, primary_ds_type, dataset)
            return result
finally:
if conn:
conn.close()
|
Returns all primary dataset types if dataset or primary_ds_type are not passed.
|
entailment
|
def insertPrimaryDataset(self, businput):
"""
Input dictionary has to have the following keys:
primary_ds_name, primary_ds_type, creation_date, create_by.
it builds the correct dictionary for dao input and executes the dao
"""
conn = self.dbi.connection()
tran = conn.begin()
#checking for required fields
if "primary_ds_name" not in businput:
dbsExceptionHandler("dbsException-invalid-input",
" DBSPrimaryDataset/insertPrimaryDataset. " +
"Primary dataset Name is required for insertPrimaryDataset.")
try:
businput["primary_ds_type_id"] = (self.primdstypeList.execute(conn, businput["primary_ds_type"]
))[0]["primary_ds_type_id"]
del businput["primary_ds_type"]
businput["primary_ds_id"] = self.sm.increment(conn, "SEQ_PDS")
self.primdsin.execute(conn, businput, tran)
tran.commit()
tran = None
except KeyError as ke:
dbsExceptionHandler("dbsException-invalid-input",
" DBSPrimaryDataset/insertPrimaryDataset. Missing: %s" % ke)
self.logger.warning(" DBSPrimaryDataset/insertPrimaryDataset. Missing: %s" % ke)
except IndexError as ie:
dbsExceptionHandler("dbsException-missing-data",
" DBSPrimaryDataset/insertPrimaryDataset. %s" % ie)
self.logger.warning(" DBSPrimaryDataset/insertPrimaryDataset. Missing: %s" % ie)
except Exception as ex:
if (str(ex).lower().find("unique constraint") != -1 or
str(ex).lower().find("duplicate") != -1):
self.logger.warning("DBSPrimaryDataset/insertPrimaryDataset:" +
" Unique constraint violation being ignored...")
self.logger.warning(ex)
else:
if tran:
tran.rollback()
if conn: conn.close()
raise
finally:
if tran:
tran.rollback()
if conn:
conn.close()
|
Input dictionary has to have the following keys:
primary_ds_name, primary_ds_type, creation_date, create_by.
it builds the correct dictionary for dao input and executes the dao
|
entailment
|
def execute(self, conn, name='', transaction = False):
"""
returns id for a given physics group name
"""
binds={}
if name:
op = ('=', 'like')['%' in name]
sql = self.sql + " WHERE pg.physics_group_name %s :physicsgroup" % (op)
binds = {"physicsgroup": name}
else:
sql = self.sql
self.logger.debug(sql)
result = self.dbi.processData(sql, binds, conn, transaction)
plist = self.formatDict(result)
self.logger.debug(plist)
if len(plist) < 1: return []
return plist
|
returns id for a given physics group name
|
entailment
|
def insertOutputConfig(self, businput):
"""
Method to insert the Output Config.
app_name, release_version, pset_hash, global_tag and output_module_label are
required.
args:
businput(dic): input dictionary.
Updated Oct 12, 2011
"""
if not ("app_name" in businput and "release_version" in businput\
and "pset_hash" in businput and "output_module_label" in businput
and "global_tag" in businput):
dbsExceptionHandler('dbsException-invalid-input', "business/DBSOutputConfig/insertOutputConfig require:\
app_name, release_version, pset_hash, output_module_label and global_tag")
conn = self.dbi.connection()
tran = conn.begin()
try:
# Proceed with o/p module insertion
businput['scenario'] = businput.get("scenario", None)
businput['pset_name'] = businput.get("pset_name", None)
self.outmodin.execute(conn, businput, tran)
tran.commit()
tran = None
except SQLAlchemyIntegrityError as ex:
if str(ex).find("unique constraint") != -1 or str(ex).lower().find("duplicate") != -1:
#if the validation is due to a unique constrain break in OUTPUT_MODULE_CONFIGS
if str(ex).find("TUC_OMC_1") != -1: pass
#otherwise, try again
else:
try:
self.outmodin.execute(conn, businput, tran)
tran.commit()
tran = None
except SQLAlchemyIntegrityError as ex1:
if str(ex1).find("unique constraint") != -1 and str(ex1).find("TUC_OMC_1") != -1: pass
except Exception as e1:
if tran:
tran.rollback()
tran = None
raise
else:
raise
except Exception as e:
if tran:
tran.rollback()
raise
finally:
if tran:
tran.rollback()
if conn:
conn.close()
|
Method to insert the Output Config.
app_name, release_version, pset_hash, global_tag and output_module_label are
required.
args:
businput(dic): input dictionary.
Updated Oct 12, 2011
|
entailment
|
def getHelp(self, call=""):
"""
API to get a list of supported REST APIs. In the case a particular API is specified,
the docstring of that API is displayed.
:param call: call to get detailed information about (Optional)
:type call: str
:return: List of APIs or detailed information about a specific call (parameters and docstring)
:rtype: List of strings or a dictionary containing params and doc keys depending on the input parameter
"""
if call:
params = self.methods['GET'][call]['args']
doc = self.methods['GET'][call]['call'].__doc__
return dict(params=params, doc=doc)
else:
return self.methods['GET'].keys()
|
API to get a list of supported REST APIs. In the case a particular API is specified,
the docstring of that API is displayed.
:param call: call to get detailed information about (Optional)
:type call: str
:return: List of APIs or detailed information about a specific call (parameters and docstring)
:rtype: List of strings or a dictionary containing params and doc keys depending on the input parameter
|
entailment
|
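getHelp above assumes a registry of the form self.methods['GET'][api_name] = {'args': [...], 'call': handler}, which is how the surrounding REST model appears to store its handlers. A minimal sketch of the same lookup against a hand-built registry; the registry contents here are illustrative:

def get_help(methods, call=""):
    """Return the list of GET APIs, or the args/docstring of one API if 'call' is given."""
    if call:
        entry = methods['GET'][call]
        return dict(params=entry['args'], doc=entry['call'].__doc__)
    return list(methods['GET'].keys())

def _list_datatiers(data_tier_name=""):
    """List data tiers known to DBS."""

methods = {'GET': {'datatiers': {'args': ['data_tier_name'], 'call': _list_datatiers}}}
print(get_help(methods))                # ['datatiers']
print(get_help(methods, 'datatiers'))   # {'params': ['data_tier_name'], 'doc': '...'}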
def listPrimaryDatasets(self, primary_ds_name="", primary_ds_type=""):
"""
API to list primary datasets
:param primary_ds_type: List primary datasets with primary dataset type (Optional)
:type primary_ds_type: str
:param primary_ds_name: List that primary dataset (Optional)
:type primary_ds_name: str
:returns: List of dictionaries containing the following keys (primary_ds_type_id, data_type)
:rtype: list of dicts
:returns: List of dictionaries containing the following keys (create_by, primary_ds_type, primary_ds_id, primary_ds_name, creation_date)
:rtype: list of dicts
"""
primary_ds_name = primary_ds_name.replace("*", "%")
primary_ds_type = primary_ds_type.replace("*", "%")
try:
return self.dbsPrimaryDataset.listPrimaryDatasets(primary_ds_name, primary_ds_type)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
except Exception as ex:
sError = "DBSReaderModel/listPrimaryDatasets. %s\n Exception trace: \n %s." \
% (ex, traceback.format_exc() )
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to list primary datasets
:param primary_ds_type: List primary datasets with primary dataset type (Optional)
:type primary_ds_type: str
:param primary_ds_name: List that primary dataset (Optional)
:type primary_ds_name: str
:returns: List of dictionaries containing the following keys (primary_ds_type_id, data_type)
:rtype: list of dicts
:returns: List of dictionaries containing the following keys (create_by, primary_ds_type, primary_ds_id, primary_ds_name, creation_date)
:rtype: list of dicts
|
entailment
|
def listPrimaryDsTypes(self, primary_ds_type="", dataset=""):
"""
API to list primary dataset types
:param primary_ds_type: List that primary dataset type (Optional)
:type primary_ds_type: str
:param dataset: List the primary dataset type for that dataset (Optional)
:type dataset: str
:returns: List of dictionaries containing the following keys (primary_ds_type_id, data_type)
:rtype: list of dicts
"""
if primary_ds_type:
primary_ds_type = primary_ds_type.replace("*", "%")
if dataset:
dataset = dataset.replace("*", "%")
try:
return self.dbsPrimaryDataset.listPrimaryDSTypes(primary_ds_type, dataset)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
except Exception as ex:
sError = "DBSReaderModel/listPrimaryDsTypes. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to list primary dataset types
:param primary_ds_type: List that primary dataset type (Optional)
:type primary_ds_type: str
:param dataset: List the primary dataset type for that dataset (Optional)
:type dataset: str
:returns: List of dictionaries containing the following keys (primary_ds_type_id, data_type)
:rtype: list of dicts
|
entailment
|
def listDatasets(self, dataset="", parent_dataset="", is_dataset_valid=1,
release_version="", pset_hash="", app_name="", output_module_label="", global_tag="",
processing_version=0, acquisition_era_name="", run_num=-1,
physics_group_name="", logical_file_name="", primary_ds_name="", primary_ds_type="",
processed_ds_name='', data_tier_name="", dataset_access_type="VALID", prep_id='', create_by="", last_modified_by="",
min_cdate='0', max_cdate='0', min_ldate='0', max_ldate='0', cdate='0',
ldate='0', detail=False, dataset_id=-1):
"""
API to list dataset(s) in DBS
* You can use ANY combination of these parameters in this API
* In absence of parameters, all valid datasets known to the DBS instance will be returned
:param dataset: Full dataset (path) of the dataset.
:type dataset: str
:param parent_dataset: Full dataset (path) of the dataset
:type parent_dataset: str
:param release_version: cmssw version
:type release_version: str
:param pset_hash: pset hash
:type pset_hash: str
:param app_name: Application name (generally it is cmsRun)
:type app_name: str
:param output_module_label: output_module_label
:type output_module_label: str
:param global_tag: global_tag
:type global_tag: str
:param processing_version: Processing Version
:type processing_version: str
:param acquisition_era_name: Acquisition Era
:type acquisition_era_name: str
        :param run_num: Specify a specific run number or range. Possible formats are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...]. run_num=1 is not allowed.
:type run_num: int,list,str
:param physics_group_name: List only dataset having physics_group_name attribute
:type physics_group_name: str
:param logical_file_name: List dataset containing the logical_file_name
:type logical_file_name: str
:param primary_ds_name: Primary Dataset Name
:type primary_ds_name: str
:param primary_ds_type: Primary Dataset Type (Type of data, MC/DATA)
:type primary_ds_type: str
:param processed_ds_name: List datasets having this processed dataset name
:type processed_ds_name: str
:param data_tier_name: Data Tier
:type data_tier_name: str
:param dataset_access_type: Dataset Access Type ( PRODUCTION, DEPRECATED etc.)
:type dataset_access_type: str
:param prep_id: prep_id
:type prep_id: str
:param create_by: Creator of the dataset
:type create_by: str
:param last_modified_by: Last modifier of the dataset
:type last_modified_by: str
:param min_cdate: Lower limit for the creation date (unixtime) (Optional)
:type min_cdate: int, str
:param max_cdate: Upper limit for the creation date (unixtime) (Optional)
:type max_cdate: int, str
:param min_ldate: Lower limit for the last modification date (unixtime) (Optional)
:type min_ldate: int, str
:param max_ldate: Upper limit for the last modification date (unixtime) (Optional)
:type max_ldate: int, str
:param cdate: creation date (unixtime) (Optional)
:type cdate: int, str
:param ldate: last modification date (unixtime) (Optional)
:type ldate: int, str
:param detail: List all details of a dataset
:type detail: bool
:param dataset_id: dataset table primary key used by CMS Computing Analytics.
:type dataset_id: int, long, str
        :returns: List of dictionaries containing the following keys (dataset). If the detail option is used, the dictionary contains the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type)
:rtype: list of dicts
"""
dataset = dataset.replace("*", "%")
parent_dataset = parent_dataset.replace("*", "%")
release_version = release_version.replace("*", "%")
pset_hash = pset_hash.replace("*", "%")
app_name = app_name.replace("*", "%")
output_module_label = output_module_label.replace("*", "%")
global_tag = global_tag.replace("*", "%")
logical_file_name = logical_file_name.replace("*", "%")
physics_group_name = physics_group_name.replace("*", "%")
primary_ds_name = primary_ds_name.replace("*", "%")
primary_ds_type = primary_ds_type.replace("*", "%")
data_tier_name = data_tier_name.replace("*", "%")
dataset_access_type = dataset_access_type.replace("*", "%")
processed_ds_name = processed_ds_name.replace("*", "%")
acquisition_era_name = acquisition_era_name.replace("*", "%")
#processing_version = processing_version.replace("*", "%")
        #create_by and last_modified_by have to be fully spelled out; no wildcards are allowed.
        #We get them from the request header, so they can be either an HN account name or a DN.
        #This depends on how a user's account is set up.
        #
        # In the next release we will require that dataset contains no wildcards.
        # DBS will reject wildcard searches on the dataset name in the listDatasets call.
        # One should separate the dataset into primary, processed and datatier parts if any wildcard is used.
        # YG Oct 26, 2016
        # Some users were overwhelmed by the API change, so we split the wildcarded dataset on the server instead of in the client.
        # YG Dec. 9 2016
        #
        # run_num=1 caused a full table scan and CERN DBS reported that some of the queries ran for more than 50 hours.
        # We will disable all the run_num=1 calls in DBS. Run_num=1 will be OK when logical_file_name is given.
# YG Jan. 15 2019
#
if (run_num != -1 and logical_file_name ==''):
for r in parseRunRange(run_num):
if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
if r == 1 or r == '1':
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.",
self.logger.exception)
elif isinstance(r, run_tuple):
if r[0] == r[1]:
dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.",
self.logger.exception)
elif r[0] <= 1 <= r[1]:
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.",
self.logger.exception)
if( dataset and ( dataset == "/%/%/%" or dataset== "/%" or dataset == "/%/%" ) ):
dataset=''
elif( dataset and ( dataset.find('%') != -1 ) ) :
junk, primary_ds_name, processed_ds_name, data_tier_name = dataset.split('/')
dataset = ''
if ( primary_ds_name == '%' ):
primary_ds_name = ''
if( processed_ds_name == '%' ):
processed_ds_name = ''
if ( data_tier_name == '%' ):
data_tier_name = ''
try:
dataset_id = int(dataset_id)
except:
dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input for dataset_id that has to be an int.",
self.logger.exception, 'dataset_id has to be an int.')
if create_by.find('*')!=-1 or create_by.find('%')!=-1 or last_modified_by.find('*')!=-1\
or last_modified_by.find('%')!=-1:
dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input for create_by or last_modified_by.\
No wildcard allowed.", self.logger.exception, 'No wildcards allowed for create_by or last_modified_by')
try:
if isinstance(min_cdate, basestring) and ('*' in min_cdate or '%' in min_cdate):
min_cdate = 0
else:
try:
min_cdate = int(min_cdate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for min_cdate")
if isinstance(max_cdate, basestring) and ('*' in max_cdate or '%' in max_cdate):
max_cdate = 0
else:
try:
max_cdate = int(max_cdate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for max_cdate")
if isinstance(min_ldate, basestring) and ('*' in min_ldate or '%' in min_ldate):
min_ldate = 0
else:
try:
min_ldate = int(min_ldate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for min_ldate")
if isinstance(max_ldate, basestring) and ('*' in max_ldate or '%' in max_ldate):
max_ldate = 0
else:
try:
max_ldate = int(max_ldate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for max_ldate")
if isinstance(cdate, basestring) and ('*' in cdate or '%' in cdate):
cdate = 0
else:
try:
cdate = int(cdate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for cdate")
if isinstance(ldate, basestring) and ('*' in ldate or '%' in ldate):
ldate = 0
else:
try:
ldate = int(ldate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for ldate")
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listDatasets. %s \n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
detail = detail in (True, 1, "True", "1", 'true')
try:
return self.dbsDataset.listDatasets(dataset, parent_dataset, is_dataset_valid, release_version, pset_hash,
app_name, output_module_label, global_tag, processing_version, acquisition_era_name,
run_num, physics_group_name, logical_file_name, primary_ds_name, primary_ds_type, processed_ds_name,
data_tier_name, dataset_access_type, prep_id, create_by, last_modified_by,
min_cdate, max_cdate, min_ldate, max_ldate, cdate, ldate, detail, dataset_id)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listdatasets. %s.\n Exception trace: \n %s" % (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to list dataset(s) in DBS
* You can use ANY combination of these parameters in this API
* In absence of parameters, all valid datasets known to the DBS instance will be returned
:param dataset: Full dataset (path) of the dataset.
:type dataset: str
:param parent_dataset: Full dataset (path) of the dataset
:type parent_dataset: str
:param release_version: cmssw version
:type release_version: str
:param pset_hash: pset hash
:type pset_hash: str
:param app_name: Application name (generally it is cmsRun)
:type app_name: str
:param output_module_label: output_module_label
:type output_module_label: str
:param global_tag: global_tag
:type global_tag: str
:param processing_version: Processing Version
:type processing_version: str
:param acquisition_era_name: Acquisition Era
:type acquisition_era_name: str
:param run_num: Specify a specific run number or range. Possible formats are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...]. run_num=1 is not allowed.
:type run_num: int,list,str
:param physics_group_name: List only dataset having physics_group_name attribute
:type physics_group_name: str
:param logical_file_name: List dataset containing the logical_file_name
:type logical_file_name: str
:param primary_ds_name: Primary Dataset Name
:type primary_ds_name: str
:param primary_ds_type: Primary Dataset Type (Type of data, MC/DATA)
:type primary_ds_type: str
:param processed_ds_name: List datasets having this processed dataset name
:type processed_ds_name: str
:param data_tier_name: Data Tier
:type data_tier_name: str
:param dataset_access_type: Dataset Access Type ( PRODUCTION, DEPRECATED etc.)
:type dataset_access_type: str
:param prep_id: prep_id
:type prep_id: str
:param create_by: Creator of the dataset
:type create_by: str
:param last_modified_by: Last modifier of the dataset
:type last_modified_by: str
:param min_cdate: Lower limit for the creation date (unixtime) (Optional)
:type min_cdate: int, str
:param max_cdate: Upper limit for the creation date (unixtime) (Optional)
:type max_cdate: int, str
:param min_ldate: Lower limit for the last modification date (unixtime) (Optional)
:type min_ldate: int, str
:param max_ldate: Upper limit for the last modification date (unixtime) (Optional)
:type max_ldate: int, str
:param cdate: creation date (unixtime) (Optional)
:type cdate: int, str
:param ldate: last modification date (unixtime) (Optional)
:type ldate: int, str
:param detail: List all details of a dataset
:type detail: bool
:param dataset_id: dataset table primary key used by CMS Computing Analytics.
:type dataset_id: int, long, str
:returns: List of dictionaries containing the following keys (dataset). If the detail option is used, the dictionary contains the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type)
:rtype: list of dicts
|
entailment
|
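When listDatasets above receives a wildcarded dataset path (after '*' has been mapped to '%'), it splits the path into primary_ds_name, processed_ds_name and data_tier_name, clears the full dataset, and drops any component that is just '%'. A standalone sketch of only that splitting step:

def split_wildcard_dataset(dataset):
    """Return (dataset, primary, processed, tier) after the wildcard handling used in listDatasets."""
    primary = processed = tier = ''
    if dataset in ("/%/%/%", "/%", "/%/%"):
        dataset = ''                     # fully wildcarded path means no dataset constraint at all
    elif dataset and '%' in dataset:
        _, primary, processed, tier = dataset.split('/')
        dataset = ''
        primary = '' if primary == '%' else primary
        processed = '' if processed == '%' else processed
        tier = '' if tier == '%' else tier
    return dataset, primary, processed, tier

print(split_wildcard_dataset("/ZMM%/Summer11%/GEN-SIM"))   # ('', 'ZMM%', 'Summer11%', 'GEN-SIM')
print(split_wildcard_dataset("/%/%/%"))                    # ('', '', '', '')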
def listDatasetArray(self):
"""
API to list datasets in DBS. To be called by datasetlist url with post call.
:param dataset: list of datasets [dataset1,dataset2,..,dataset n] (must have either a list of dataset or dataset_id), Max length 1000.
:type dataset: list
:param dataset_id: list of dataset ids [dataset_id1,dataset_id2,..,dataset_idn, "dsid_min-dsid_max"] ((must have either a list of dataset or dataset_id)
:type dataset_id: list
:param dataset_access_type: List only datasets with that dataset access type (Optional)
:type dataset_access_type: str
:param detail: brief list or detailed list 1/0
:type detail: bool
        :returns: List of dictionaries containing the following keys (dataset). If the detail option is used, the dictionary contains the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type)
:rtype: list of dicts
"""
ret = []
try :
body = request.body.read()
if body:
data = cjson.decode(body)
data = validateJSONInputNoCopy("dataset", data, read=True)
                #Because CMSWEB has a 300 second response time, we have to limit the array size to make sure that
                #the API can finish within 300 seconds.
# YG Nov-05-2015
max_array_size = 1000
if ( 'dataset' in data.keys() and isinstance(data['dataset'], list) and len(data['dataset'])>max_array_size)\
or ('dataset_id' in data.keys() and isinstance(data['dataset_id'], list) and len(data['dataset_id'])>max_array_size):
dbsExceptionHandler("dbsException-invalid-input",
"The Max list length supported in listDatasetArray is %s." %max_array_size, self.logger.exception)
ret = self.dbsDataset.listDatasetArray(data)
except cjson.DecodeError as De:
dbsExceptionHandler('dbsException-invalid-input2', "Invalid input", self.logger.exception, str(De))
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except HTTPError as he:
raise he
except Exception as ex:
sError = "DBSReaderModel/listDatasetArray. %s \n Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
for item in ret:
yield item
|
API to list datasets in DBS. To be called by datasetlist url with post call.
:param dataset: list of datasets [dataset1,dataset2,..,dataset n] (must have either a list of dataset or dataset_id), Max length 1000.
:type dataset: list
:param dataset_id: list of dataset ids [dataset_id1,dataset_id2,..,dataset_idn, "dsid_min-dsid_max"] ((must have either a list of dataset or dataset_id)
:type dataset_id: list
:param dataset_access_type: List only datasets with that dataset access type (Optional)
:type dataset_access_type: str
:param detail: brief list or detailed list 1/0
:type detail: bool
:returns: List of dictionaries containing the following keys (dataset). If the detail option is used, the dictionary contains the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type)
:rtype: list of dicts
|
entailment
|
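listDatasetArray above reads a JSON body from a POST request and rejects dataset or dataset_id lists longer than 1000 entries so the call can finish within CMSWEB's 300 second limit. A sketch of the sort of payload the body is expected to decode to; the dataset names are invented and the keys follow the docstring above:

import json

payload = {
    "dataset": [
        "/PrimaryA/ProcessedA-v1/AOD",
        "/PrimaryB/ProcessedB-v2/MINIAOD",
    ],                              # or "dataset_id": [1234, 5678, "100-200"] instead
    "dataset_access_type": "VALID",
    "detail": 1,
}
body = json.dumps(payload)          # the server decodes this with cjson.decode(body)
assert len(payload["dataset"]) <= 1000   # lists longer than 1000 are rejected server-side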
def listDataTiers(self, data_tier_name=""):
"""
API to list data tiers known to DBS.
:param data_tier_name: List details on that data tier (Optional)
:type data_tier_name: str
:returns: List of dictionaries containing the following keys (data_tier_id, data_tier_name, create_by, creation_date)
"""
data_tier_name = data_tier_name.replace("*", "%")
try:
conn = self.dbi.connection()
return self.dbsDataTierListDAO.execute(conn, data_tier_name.upper())
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
except ValueError as ve:
dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input Data", self.logger.exception, ve.message)
except TypeError as te:
dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input DataType", self.logger.exception, te.message)
except NameError as ne:
dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input Searching Key", self.logger.exception, ne.message)
except Exception as ex:
sError = "DBSReaderModel/listDataTiers. %s\n. Exception trace: \n %s" \
% ( ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
finally:
if conn:
conn.close()
|
API to list data tiers known to DBS.
:param data_tier_name: List details on that data tier (Optional)
:type data_tier_name: str
:returns: List of dictionaries containing the following keys (data_tier_id, data_tier_name, create_by, creation_date)
|
entailment
|
def listBlocks(self, dataset="", block_name="", data_tier_name="", origin_site_name="",
logical_file_name="",run_num=-1, min_cdate='0', max_cdate='0',
min_ldate='0', max_ldate='0', cdate='0', ldate='0', open_for_writing=-1, detail=False):
"""
        API to list a block in DBS. At least one of the parameters block_name, dataset, data_tier_name or
        logical_file_name is required. If data_tier_name is provided, min_cdate and max_cdate have to be specified and
        the difference in time has to be less than 31 days.
:param block_name: name of the block
:type block_name: str
:param dataset: dataset
:type dataset: str
:param data_tier_name: data tier
:type data_tier_name: str
:param logical_file_name: Logical File Name
:type logical_file_name: str
:param origin_site_name: Origin Site Name (Optional)
:type origin_site_name: str
        :param open_for_writing: Open for Writing (Optional)
:type open_for_writing: int (0 or 1)
        :param run_num: run number(s) (Optional). Possible formats are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...].
:type run_num: int, list of runs or list of run ranges
:param min_cdate: Lower limit for the creation date (unixtime) (Optional)
:type min_cdate: int, str
:param max_cdate: Upper limit for the creation date (unixtime) (Optional)
:type max_cdate: int, str
:param min_ldate: Lower limit for the last modification date (unixtime) (Optional)
:type min_ldate: int, str
:param max_ldate: Upper limit for the last modification date (unixtime) (Optional)
:type max_ldate: int, str
:param cdate: creation date (unixtime) (Optional)
:type cdate: int, str
:param ldate: last modification date (unixtime) (Optional)
:type ldate: int, str
:param detail: Get detailed information of a block (Optional)
:type detail: bool
:returns: List of dictionaries containing following keys (block_name). If option detail is used the dictionaries contain the following keys (block_id, create_by, creation_date, open_for_writing, last_modified_by, dataset, block_name, file_count, origin_site_name, last_modification_date, dataset_id and block_size)
:rtype: list of dicts
"""
        # run_num=1 caused a full table scan and CERN DBS reported that some of the queries ran for more than 50 hours.
        # We will disable all the run_num=1 calls in DBS. Run_num=1 will be OK when logical_file_name is given.
# YG Jan. 15 2019
#
if (run_num != -1 and logical_file_name ==''):
for r in parseRunRange(run_num):
if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
if r == 1 or r == '1':
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.",
self.logger.exception)
elif isinstance(r, run_tuple):
if r[0] == r[1]:
dbsExceptionHandler("dbsException-invalid-input", "DBS run range must be apart at least by 1.",
self.logger.exception)
elif r[0] <= 1 <= r[1]:
dbsExceptionHandler("dbsException-invalid-input2", "Run_num=1 is not a valid input.",
self.logger.exception)
dataset = dataset.replace("*", "%")
block_name = block_name.replace("*", "%")
logical_file_name = logical_file_name.replace("*", "%")
origin_site_name = origin_site_name.replace("*", "%")
#
if isinstance(min_cdate, basestring) and ('*' in min_cdate or '%' in min_cdate):
min_cdate = 0
else:
try:
min_cdate = int(min_cdate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for min_cdate")
#
if isinstance(max_cdate, basestring) and ('*' in max_cdate or '%' in max_cdate):
max_cdate = 0
else:
try:
max_cdate = int(max_cdate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for max_cdate")
#
if isinstance(min_ldate, basestring) and ('*' in min_ldate or '%' in min_ldate):
min_ldate = 0
else:
try:
min_ldate = int(min_ldate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for max_cdate")
#
if isinstance(max_ldate, basestring) and ('*' in max_ldate or '%' in max_ldate):
max_ldate = 0
else:
try:
max_ldate = int(max_ldate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for max_ldate")
#
if isinstance(cdate, basestring) and ('*' in cdate or '%' in cdate):
cdate = 0
else:
try:
cdate = int(cdate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for cdate")
#
        if isinstance(ldate, basestring) and ('*' in ldate or '%' in ldate):
ldate = 0
else:
try:
ldate = int(ldate)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid input for ldate")
#
detail = detail in (True, 1, "True", "1", 'true')
try:
b= self.dbsBlock.listBlocks(dataset, block_name, data_tier_name, origin_site_name, logical_file_name,
run_num, min_cdate, max_cdate, min_ldate, max_ldate, cdate, ldate, open_for_writing, detail)
#for item in b:
#yield item
return b
except HTTPError:
raise
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listBlocks. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to list a block in DBS. At least one of the parameters block_name, dataset, data_tier_name or
logical_file_name is required. If data_tier_name is provided, min_cdate and max_cdate have to be specified and
the difference in time has to be less than 31 days.
:param block_name: name of the block
:type block_name: str
:param dataset: dataset
:type dataset: str
:param data_tier_name: data tier
:type data_tier_name: str
:param logical_file_name: Logical File Name
:type logical_file_name: str
:param origin_site_name: Origin Site Name (Optional)
:type origin_site_name: str
:param open_for_writing: Open for Writing (Optional)
:type open_for_writing: int (0 or 1)
:param run_num: run number(s) (Optional). Possible formats are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...].
:type run_num: int, list of runs or list of run ranges
:param min_cdate: Lower limit for the creation date (unixtime) (Optional)
:type min_cdate: int, str
:param max_cdate: Upper limit for the creation date (unixtime) (Optional)
:type max_cdate: int, str
:param min_ldate: Lower limit for the last modification date (unixtime) (Optional)
:type min_ldate: int, str
:param max_ldate: Upper limit for the last modification date (unixtime) (Optional)
:type max_ldate: int, str
:param cdate: creation date (unixtime) (Optional)
:type cdate: int, str
:param ldate: last modification date (unixtime) (Optional)
:type ldate: int, str
:param detail: Get detailed information of a block (Optional)
:type detail: bool
:returns: List of dictionaries containing following keys (block_name). If option detail is used the dictionaries contain the following keys (block_id, create_by, creation_date, open_for_writing, last_modified_by, dataset, block_name, file_count, origin_site_name, last_modification_date, dataset_id and block_size)
:rtype: list of dicts
|
entailment
|
def listBlockOrigin(self, origin_site_name="", dataset="", block_name=""):
"""
API to list blocks first generated in origin_site_name.
:param origin_site_name: Origin Site Name (Optional, No wildcards)
:type origin_site_name: str
:param dataset: dataset ( No wildcards, either dataset or block name needed)
:type dataset: str
:param block_name:
:type block_name: str
:returns: List of dictionaries containing the following keys (create_by, creation_date, open_for_writing, last_modified_by, dataset, block_name, file_count, origin_site_name, last_modification_date, block_size)
:rtype: list of dicts
"""
try:
return self.dbsBlock.listBlocksOrigin(origin_site_name, dataset, block_name)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listBlocks. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'],
self.logger.exception, sError)
|
API to list blocks first generated in origin_site_name.
:param origin_site_name: Origin Site Name (Optional, No wildcards)
:type origin_site_name: str
:param dataset: dataset ( No wildcards, either dataset or block name needed)
:type dataset: str
:param block_name:
:type block_name: str
:returns: List of dictionaries containing the following keys (create_by, creation_date, open_for_writing, last_modified_by, dataset, block_name, file_count, origin_site_name, last_modification_date, block_size)
:rtype: list of dicts
|
entailment
|
def listBlocksParents(self):
"""
API to list block parents of multiple blocks. To be called by blockparents url with post call.
        :param block_names: list of block names [block_name1, block_name2, ...] (Required). Max length 1000.
:type block_names: list
"""
try :
body = request.body.read()
data = cjson.decode(body)
data = validateJSONInputNoCopy("block", data, read=True)
            #Because CMSWEB has a 300 second response time, we have to limit the array size to make sure that
            #the API can finish within 300 seconds.
# YG Nov-05-2015
max_array_size = 1000
if ( 'block_names' in data.keys() and isinstance(data['block_names'], list) and len(data['block_names'])>max_array_size):
dbsExceptionHandler("dbsException-invalid-input",
"The Max list length supported in listBlocksParents is %s." %max_array_size, self.logger.exception)
return self.dbsBlock.listBlockParents(data["block_name"])
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except cjson.DecodeError as de:
sError = "DBSReaderModel/listBlockParents. %s\n. Exception trace: \n %s" \
% (de, traceback.format_exc())
msg = "DBSReaderModel/listBlockParents. %s" % de
dbsExceptionHandler('dbsException-invalid-input2', msg, self.logger.exception, sError)
except HTTPError as he:
raise he
except Exception as ex:
sError = "DBSReaderModel/listBlockParents. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to list block parents of multiple blocks. To be called by blockparents url with post call.
:param block_names: list of block names [block_name1, block_name2, ...] (Required). Max length 1000.
:type block_names: list
|
entailment
|
def listBlockChildren(self, block_name=""):
"""
API to list block children.
        :param block_name: name of the block whose children need to be found (Required)
:type block_name: str
:returns: List of dictionaries containing following keys (block_name)
:rtype: list of dicts
"""
block_name = block_name.replace("*", "%")
try:
return self.dbsBlock.listBlockChildren(block_name)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listBlockChildren. %s\n. Exception trace: \n %s" % (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to list block children.
:param block_name: name of the block whose children need to be found (Required)
:type block_name: str
:returns: List of dictionaries containing following keys (block_name)
:rtype: list of dicts
|
entailment
|
def listBlockSummaries(self, block_name="", dataset="", detail=False):
"""
API that returns summary information like total size and total number of events in a dataset or a list of blocks
:param block_name: list block summaries for block_name(s)
:type block_name: str, list
:param dataset: list block summaries for all blocks in dataset
:type dataset: str
:param detail: list summary by block names if detail=True, default=False
:type detail: str, bool
:returns: list of dicts containing total block_sizes, file_counts and event_counts of dataset or blocks provided
"""
if bool(dataset)+bool(block_name)!=1:
dbsExceptionHandler("dbsException-invalid-input2",
dbsExceptionCode["dbsException-invalid-input2"],
self.logger.exception,
"Dataset or block_names must be specified at a time.")
if block_name and isinstance(block_name, basestring):
try:
block_name = [str(block_name)]
except:
dbsExceptionHandler("dbsException-invalid-input", "Invalid block_name for listBlockSummaries. ")
for this_block_name in block_name:
if re.search("[*, %]", this_block_name):
dbsExceptionHandler("dbsException-invalid-input2",
dbsExceptionCode["dbsException-invalid-input2"],
self.logger.exception,
"No wildcards are allowed in block_name list")
if re.search("[*, %]", dataset):
dbsExceptionHandler("dbsException-invalid-input2",
dbsExceptionCode["dbsException-invalid-input2"],
self.logger.exception,
"No wildcards are allowed in dataset")
data = []
try:
with self.dbi.connection() as conn:
data = self.dbsBlockSummaryListDAO.execute(conn, block_name, dataset, detail)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listBlockSummaries. %s\n. Exception trace: \n %s" % (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error',
dbsExceptionCode['dbsException-server-error'],
self.logger.exception,
sError)
for item in data:
yield item
|
API that returns summary information like total size and total number of events in a dataset or a list of blocks
:param block_name: list block summaries for block_name(s)
:type block_name: str, list
:param dataset: list block summaries for all blocks in dataset
:type dataset: str
:param detail: list summary by block names if detail=True, default=False
:type detail: str, bool
:returns: list of dicts containing total block_sizes, file_counts and event_counts of dataset or blocks provided
|
entailment
|
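The bool(dataset)+bool(block_name)!=1 test in listBlockSummaries above is a compact way of requiring exactly one of the two selectors. A minimal illustration of that check:

def exactly_one_selector(dataset="", block_name=""):
    """True only when exactly one of dataset / block_name is non-empty."""
    return bool(dataset) + bool(block_name) == 1

print(exactly_one_selector(dataset="/a/b/c"))                         # True
print(exactly_one_selector(block_name="/a/b/c#d"))                    # True
print(exactly_one_selector())                                         # False -> invalid input
print(exactly_one_selector(dataset="/a/b/c", block_name="/a/b/c#d"))  # False -> invalid input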
def listFiles(self, dataset = "", block_name = "", logical_file_name = "",
release_version="", pset_hash="", app_name="", output_module_label="",
run_num=-1, origin_site_name="", lumi_list="", detail=False, validFileOnly=0, sumOverLumi=0):
"""
API to list files in DBS. Either non-wildcarded logical_file_name, non-wildcarded dataset or non-wildcarded block_name is required.
        The combination of a non-wildcarded dataset or block_name with a wildcarded logical_file_name is supported.
* For lumi_list the following two json formats are supported:
- [a1, a2, a3,]
- [[a,b], [c, d],]
        * lumi_list can be either a list of lumi section numbers as [a1, a2, a3,] or a list of lumi section ranges as [[a,b], [c, d],]. They cannot be mixed.
        * If lumi_list is provided, only a single run_num is allowed.
* When lfn list is present, no run or lumi list is allowed.
        * There are five dataset access types: VALID, INVALID, PRODUCTION, DEPRECATED and DELETED.
        * One file status: IS_FILE_VALID: 1 or 0.
        * When a dataset is INVALID/DEPRECATED/DELETED, DBS will consider all the files under it invalid, no matter what value is_file_valid has.
          In general, when the dataset is in one of INVALID/DEPRECATED/DELETED, is_file_valid should all be marked as 0, but some old DBS2 data was not.
        * When a dataset is VALID/PRODUCTION, by default is_file_valid is all 1. But if an individual file is invalid, then that file's is_file_valid is set to 0.
        * DBS uses this logic in its APIs that have the validFileOnly variable.
:param logical_file_name: logical_file_name of the file
:type logical_file_name: str
:param dataset: dataset
:type dataset: str
:param block_name: block name
:type block_name: str
:param release_version: release version
:type release_version: str
:param pset_hash: parameter set hash
:type pset_hash: str
:param app_name: Name of the application
:type app_name: str
:param output_module_label: name of the used output module
:type output_module_label: str
        :param run_num: run, run ranges, and run list. Possible formats are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...].
:type run_num: int, list, string
:param origin_site_name: site where the file was created
:type origin_site_name: str
:param lumi_list: List containing luminosity sections
:type lumi_list: list
:param detail: Get detailed information about a file
:type detail: bool
        :param validFileOnly: default=0, return all the files. When =1, only return files with is_file_valid=1 or dataset_access_type=PRODUCTION or VALID
:type validFileOnly: int
:param sumOverLumi: default=0 event_count is the event_count/file. When sumOverLumi=1 and run_num is specified, the event_count is sum of the event_count/lumi for that run; When sumOverLumi = 1, no other input can be a list, for example no run_num list, lumi list or lfn list.
:type sumOverLumi: int
:returns: List of dictionaries containing the following keys (logical_file_name). If detail parameter is true, the dictionaries contain the following keys (check_sum, branch_hash_id, adler32, block_id, event_count, file_type, create_by, logical_file_name, creation_date, last_modified_by, dataset, block_name, file_id, file_size, last_modification_date, dataset_id, file_type_id, auto_cross_section, md5, is_file_valid)
:rtype: list of dicts
"""
logical_file_name = logical_file_name.replace("*", "%")
release_version = release_version.replace("*", "%")
pset_hash = pset_hash.replace("*", "%")
app_name = app_name.replace("*", "%")
block_name = block_name.replace("*", "%")
origin_site_name = origin_site_name.replace("*", "%")
dataset = dataset.replace("*", "%")
#
# run_num=1 caused full table scan and CERN DBS reported some of the queries ran more than 50 hours
        # We will disable all the run_num=1 calls in DBS. Run_num=1 will be OK when logical_file_name is given.
# YG Jan. 15 2019
#
if (run_num != -1 and logical_file_name ==''):
for r in parseRunRange(run_num):
if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
if r == 1 or r == '1':
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.",
self.logger.exception)
elif isinstance(r, run_tuple):
if r[0] == r[1]:
dbsExceptionHandler("dbsException-invalid-input", "DBS run range must be apart at least by 1.",
self.logger.exception)
elif r[0] <= 1 <= r[1]:
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.",
self.logger.exception)
if lumi_list:
if run_num ==-1 or not run_num :
dbsExceptionHandler("dbsException-invalid-input", "When lumi_list is given, require a single run_num.", self.logger.exception)
elif sumOverLumi == 1:
dbsExceptionHandler("dbsException-invalid-input", "lumi_list and sumOverLumi=1 cannot be set at the same time becaue nesting of WITH clause within WITH clause not supported yet by Oracle. ", self.logger.exception)
else:
try:
lumi_list = self.dbsUtils2.decodeLumiIntervals(lumi_list)
except Exception as de:
dbsExceptionHandler("dbsException-invalid-input", "Invalid lumi_list input: "+ str(de), self.logger.exception)
else:
if not isinstance(run_num, list):
if run_num ==1 or run_num == '1':
dbsExceptionHandler("dbsException-invalid-input", "files API does not supprt run_num=1 when no lumi.", self.logger.exception)
else:
if 1 in run_num or '1' in run_num :
dbsExceptionHandler("dbsException-invalid-input", "files API does not supprt run_num=1 when no lumi.", self.logger.exception)
if int(sumOverLumi) == 1 and (isinstance(run_num, list) or isinstance(logical_file_name, list)):
dbsExceptionHandler("dbsException-invalid-input", "When sumOverLumi=1, no lfn list or run_num list allowed becaue nesting of WITH clause within WITH clause not supported yet by Oracle. ", self.logger.exception)
detail = detail in (True, 1, "True", "1", 'true')
output_module_label = output_module_label.replace("*", "%")
try:
result = self.dbsFile.listFiles(dataset, block_name, logical_file_name, release_version, pset_hash, app_name,
output_module_label, run_num, origin_site_name, lumi_list, detail,
validFileOnly, sumOverLumi)
for item in result:
yield item
except HTTPError as he:
raise he
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listFiles. %s \n Exception trace: \n %s" % (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', ex.message,
self.logger.exception, sError)
|
API to list files in DBS. Either non-wildcarded logical_file_name, non-wildcarded dataset or non-wildcarded block_name is required.
The combination of a non-wildcarded dataset or block_name with a wildcarded logical_file_name is supported.
* For lumi_list the following two json formats are supported:
    - [a1, a2, a3,]
    - [[a,b], [c, d],]
* lumi_list can be either a list of lumi section numbers as [a1, a2, a3,] or a list of lumi section ranges as [[a,b], [c, d],]. They cannot be mixed.
* If lumi_list is provided, only a single run_num is allowed.
* When an lfn list is present, no run or lumi list is allowed.
* There are five dataset access types: VALID, INVALID, PRODUCTION, DEPRECATED and DELETED.
* One file status: IS_FILE_VALID: 1 or 0.
* When a dataset is INVALID/DEPRECATED/DELETED, DBS considers all the files under it invalid, no matter what value is_file_valid has.
  In general, when the dataset is INVALID/DEPRECATED/DELETED, is_file_valid should all be marked as 0, but some old DBS2 data was not.
* When a dataset is VALID/PRODUCTION, by default is_file_valid is all 1. But if an individual file is invalid, then the file's is_file_valid is set to 0.
* DBS uses this logic in its APIs that have the validFileOnly variable.
:param logical_file_name: logical_file_name of the file
:type logical_file_name: str
:param dataset: dataset
:type dataset: str
:param block_name: block name
:type block_name: str
:param release_version: release version
:type release_version: str
:param pset_hash: parameter set hash
:type pset_hash: str
:param app_name: Name of the application
:type app_name: str
:param output_module_label: name of the used output module
:type output_module_label: str
:param run_num: run, run ranges, and run list. Possible formats are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...].
:type run_num: int, list, string
:param origin_site_name: site where the file was created
:type origin_site_name: str
:param lumi_list: List containing luminosity sections
:type lumi_list: list
:param detail: Get detailed information about a file
:type detail: bool
:param validFileOnly: default=0, return all the files. When =1, only return files with is_file_valid=1 or dataset_access_type=PRODUCTION or VALID
:type validFileOnly: int
:param sumOverLumi: default=0 event_count is the event_count/file. When sumOverLumi=1 and run_num is specified, the event_count is sum of the event_count/lumi for that run; When sumOverLumi = 1, no other input can be a list, for example no run_num list, lumi list or lfn list.
:type sumOverLumi: int
:returns: List of dictionaries containing the following keys (logical_file_name). If detail parameter is true, the dictionaries contain the following keys (check_sum, branch_hash_id, adler32, block_id, event_count, file_type, create_by, logical_file_name, creation_date, last_modified_by, dataset, block_name, file_id, file_size, last_modification_date, dataset_id, file_type_id, auto_cross_section, md5, is_file_valid)
:rtype: list of dicts
|
entailment
|
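The listFiles docstring above accepts lumi_list in two shapes. The sketch below only illustrates how those two shapes could be normalized into [start, end] pairs; the server actually delegates this to self.dbsUtils2.decodeLumiIntervals(), whose real implementation is not shown here.

# Illustrative normalizer for the two accepted lumi_list shapes; mixing them is rejected.
def normalize_lumi_list(lumi_list):
    if all(isinstance(x, int) for x in lumi_list):
        return [[x, x] for x in lumi_list]          # [a1, a2, a3] form
    if all(isinstance(x, (list, tuple)) and len(x) == 2 for x in lumi_list):
        return [list(x) for x in lumi_list]         # [[a, b], [c, d]] form
    raise ValueError("lumi_list cannot mix plain numbers and ranges")

print(normalize_lumi_list([1, 2, 5]))            # [[1, 1], [2, 2], [5, 5]]
print(normalize_lumi_list([[1, 20], [30, 40]]))  # [[1, 20], [30, 40]]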
def listFileArray(self):
"""
API to list files in DBS. Either non-wildcarded logical_file_name, non-wildcarded dataset,
non-wildcarded block_name or non-wildcarded lfn list is required.
        The combination of a non-wildcarded dataset or block_name with a wildcarded logical_file_name is supported.
        * For lumi_list the following two json formats are supported:
            - [a1, a2, a3,]
            - [[a,b], [c, d],]
        * lumi_list can be either a list of lumi section numbers as [a1, a2, a3,] or a list of lumi section ranges as [[a,b], [c, d],]. They cannot be mixed.
        * If lumi_list is provided, only a single run_num is allowed.
* When lfn list is present, no run or lumi list is allowed.
* When run_num =1 is present, logical_file_name should be present too.
:param logical_file_name: logical_file_name of the file
:type logical_file_name: str, list
:param dataset: dataset
:type dataset: str
:param block_name: block name
:type block_name: str
:param release_version: release version
:type release_version: str
:param pset_hash: parameter set hash
:type pset_hash: str
:param app_name: Name of the application
:type app_name: str
:param output_module_label: name of the used output module
:type output_module_label: str
        :param run_num: run, run ranges, and run list. Possible formats are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...]. Max length 1000.
:type run_num: int, list, string
:param origin_site_name: site where the file was created
:type origin_site_name: str
:param lumi_list: List containing luminosity sections. Max length 1000.
:type lumi_list: list
:param detail: Get detailed information about a file
:type detail: bool
:param validFileOnly: default=0 return all the files. when =1, only return files with is_file_valid=1 or dataset_access_type=PRODUCTION or VALID
:type validFileOnly: int
:param sumOverLumi: default=0 event_count is the event_count/file, when=1 and run_num is specified, the event_count is sum of the event_count/lumi for that run; When sumOverLumi = 1, no other input can be a list, for example no run_num list, lumi list or lfn list.
:type sumOverLumi: int
:returns: List of dictionaries containing the following keys (logical_file_name). If detail parameter is true, the dictionaries contain the following keys (check_sum, branch_hash_id, adler32, block_id, event_count, file_type, create_by, logical_file_name, creation_date, last_modified_by, dataset, block_name, file_id, file_size, last_modification_date, dataset_id, file_type_id, auto_cross_section, md5, is_file_valid)
:rtype: list of dicts
"""
ret = []
try :
body = request.body.read()
if body:
data = cjson.decode(body)
data = validateJSONInputNoCopy("files", data, True)
if 'sumOverLumi' in data and data['sumOverLumi'] ==1:
if ('logical_file_name' in data and isinstance(data['logical_file_name'], list)) \
or ('run_num' in data and isinstance(data['run_num'], list)):
dbsExceptionHandler("dbsException-invalid-input",
"When sumOverLumi=1, no input can be a list becaue nesting of WITH clause within WITH clause not supported yet by Oracle. ", self.logger.exception)
if 'lumi_list' in data and data['lumi_list']:
if 'sumOverLumi' in data and data['sumOverLumi'] ==1:
dbsExceptionHandler("dbsException-invalid-input",
"When lumi_list is given, sumOverLumi must set to 0 becaue nesting of WITH clause within WITH clause not supported yet by Oracle.", self.logger.exception)
data['lumi_list'] = self.dbsUtils2.decodeLumiIntervals(data['lumi_list'])
if 'run_num' not in data.keys() or not data['run_num'] or data['run_num'] ==-1 :
dbsExceptionHandler("dbsException-invalid-input",
"When lumi_list is given, require a single run_num.", self.logger.exception)
#check if run_num =1 w/o lfn
if ('logical_file_name' not in data or not data['logical_file_name']) and 'run_num' in data:
if isinstance(data['run_num'], list):
if 1 in data['run_num'] or '1' in data['run_num']:
                            dbsExceptionHandler("dbsException-invalid-input",
                                'files API does not support run_num=1 without logical_file_name.', self.logger.exception)
else:
if data['run_num'] == 1 or data['run_num'] == '1':
                            dbsExceptionHandler("dbsException-invalid-input",
                                'files API does not support run_num=1 without logical_file_name.', self.logger.exception)
                #Because CMSWEB has a 300-second response time, we have to limit the array size to make sure that
                #the API can be finished in 300 seconds. See github issues #465 for tests' results.
# YG May-20-2015
max_array_size = 1000
if ( 'run_num' in data.keys() and isinstance(data['run_num'], list) and len(data['run_num'])>max_array_size)\
or ('lumi_list' in data.keys() and isinstance(data['lumi_list'], list) and len(data['lumi_list'])>max_array_size)\
or ('logical_file_name' in data.keys() and isinstance(data['logical_file_name'], list) and len(data['logical_file_name'])>max_array_size):
dbsExceptionHandler("dbsException-invalid-input",
"The Max list length supported in listFileArray is %s." %max_array_size, self.logger.exception)
#
ret = self.dbsFile.listFiles(input_body=data)
except cjson.DecodeError as De:
dbsExceptionHandler('dbsException-invalid-input2', "Invalid input", self.logger.exception, str(De))
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except HTTPError as he:
raise he
except Exception as ex:
sError = "DBSReaderModel/listFileArray. %s \n Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', ex.message, self.logger.exception, sError)
for item in ret:
yield item
|
API to list files in DBS. Either non-wildcarded logical_file_name, non-wildcarded dataset,
non-wildcarded block_name or non-wildcarded lfn list is required.
The combination of a non-wildcarded dataset or block_name with a wildcarded logical_file_name is supported.
* For lumi_list the following two json formats are supported:
    - [a1, a2, a3,]
    - [[a,b], [c, d],]
* lumi_list can be either a list of lumi section numbers as [a1, a2, a3,] or a list of lumi section ranges as [[a,b], [c, d],]. They cannot be mixed.
* If lumi_list is provided, only a single run_num is allowed.
* When lfn list is present, no run or lumi list is allowed.
* When run_num =1 is present, logical_file_name should be present too.
:param logical_file_name: logical_file_name of the file
:type logical_file_name: str, list
:param dataset: dataset
:type dataset: str
:param block_name: block name
:type block_name: str
:param release_version: release version
:type release_version: str
:param pset_hash: parameter set hash
:type pset_hash: str
:param app_name: Name of the application
:type app_name: str
:param output_module_label: name of the used output module
:type output_module_label: str
:param run_num: run, run ranges, and run list. Possible formats are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...]. Max length 1000.
:type run_num: int, list, string
:param origin_site_name: site where the file was created
:type origin_site_name: str
:param lumi_list: List containing luminosity sections. Max length 1000.
:type lumi_list: list
:param detail: Get detailed information about a file
:type detail: bool
:param validFileOnly: default=0 return all the files. when =1, only return files with is_file_valid=1 or dataset_access_type=PRODUCTION or VALID
:type validFileOnly: int
:param sumOverLumi: default=0 event_count is the event_count/file, when=1 and run_num is specified, the event_count is sum of the event_count/lumi for that run; When sumOverLumi = 1, no other input can be a list, for example no run_num list, lumi list or lfn list.
:type sumOverLumi: int
:returns: List of dictionaries containing the following keys (logical_file_name). If detail parameter is true, the dictionaries contain the following keys (check_sum, branch_hash_id, adler32, block_id, event_count, file_type, create_by, logical_file_name, creation_date, last_modified_by, dataset, block_name, file_id, file_size, last_modification_date, dataset_id, file_type_id, auto_cross_section, md5, is_file_valid)
:rtype: list of dicts
|
entailment
|
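Because the POST body for listFileArray caps every list at 1000 entries, a large LFN list has to be split client-side before it is submitted. The chunking helper below is a standalone sketch of that; it is not the official DBS client.

# Sketch: split a long LFN list into bodies that respect max_array_size=1000.
import json

def chunks(items, size=1000):
    for i in range(0, len(items), size):
        yield items[i:i + size]

def build_bodies(lfns, detail=False):
    for batch in chunks(lfns, 1000):
        yield json.dumps({"logical_file_name": batch, "detail": detail})

# Example: 1500 fake LFNs produce two request bodies.
fake_lfns = ["/store/data/file_%d.root" % i for i in range(1500)]
print(sum(1 for _ in build_bodies(fake_lfns)))   # -> 2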
def listFileSummaries(self, block_name='', dataset='', run_num=-1, validFileOnly=0, sumOverLumi=0):
"""
API to list number of files, event counts and number of lumis in a given block or dataset.
        If the optional run_num is given, the outputs are:
        * The number of files which have data (lumis) for that run number;
        * The total number of events in those files;
        * The total number of lumis for that run_number. Note that in general this is different from the total
          number of lumis in those files, since lumis are filtered by the run_number they belong to, while events
          are only counted as a total per file in the data before run 3. However, when sumOverLumi=1, events are counted per lumi for that run_num
          when event_count/lumi is filled. If sumOverLumi=1, but event_count/lumi is not filled for any of the lumis in the block or
          dataset, then the API will return NULL for num_event.
        * The total number of blocks that have the run_num;
Either block_name or dataset name is required. No wild-cards are allowed
:param block_name: Block name
:type block_name: str
:param dataset: Dataset name
:type dataset: str
        :param run_num: Run number (Optional). Possible formats are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...]. run_num=1 is for MC data and causes an almost full table scan, so run_num=1 will cause an input error.
:type run_num: int, str, list
:param validFileOnly: default = 0. when = 1, only dataset_access_type = valid or production and is_file_valid=1 counted.
:type validFileOnly: int
:param sumOverLumi: default = 0. when = 1 count event_num by event_count/lumi.
:type sumOverLumi: int
:returns: List of dictionaries containing the following keys (num_files, num_lumi, num_block, num_event, file_size)
:rtype: list of dicts
"""
# run_num=1 caused full table scan and CERN DBS reported some of the queries ran more than 50 hours
        # We will disable all the run_num=1 calls in DBS.
# YG Jan. 16 2019
#
if (run_num != -1) :
for r in parseRunRange(run_num):
if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
if r == 1 or r == '1':
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.",
self.logger.exception)
elif isinstance(r, run_tuple):
if r[0] == r[1]:
dbsExceptionHandler('dbsException-invalid-input', "DBS run range must be apart at least by 1.",
self.logger.exception)
elif r[0] <= 1 <= r[1]:
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.",
self.logger.exception)
try:
r = self.dbsFile.listFileSummary(block_name, dataset, run_num, validFileOnly=validFileOnly, sumOverLumi=sumOverLumi)
for item in r:
yield item
except HTTPError as he:
raise he
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listFileSummaries. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', ex.message, self.logger.exception, sError)
|
API to list number of files, event counts and number of lumis in a given block or dataset.
If the optional run_num is given, the outputs are:
* The number of files which have data (lumis) for that run number;
* The total number of events in those files;
* The total number of lumis for that run_number. Note that in general this is different from the total
  number of lumis in those files, since lumis are filtered by the run_number they belong to, while events
  are only counted as a total per file in the data before run 3. However, when sumOverLumi=1, events are counted per lumi for that run_num
  when event_count/lumi is filled. If sumOverLumi=1, but event_count/lumi is not filled for any of the lumis in the block or
  dataset, then the API will return NULL for num_event.
* The total number of blocks that have the run_num;
Either block_name or dataset name is required. No wild-cards are allowed
:param block_name: Block name
:type block_name: str
:param dataset: Dataset name
:type dataset: str
:param run_num: Run number (Optional). Possible formats are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...]. run_num=1 is for MC data and causes an almost full table scan, so run_num=1 will cause an input error.
:type run_num: int, str, list
:param validFileOnly: default = 0. when = 1, only dataset_access_type = valid or production and is_file_valid=1 counted.
:type validFileOnly: int
:param sumOverLumi: default = 0. when = 1 count event_num by event_count/lumi.
:type sumOverLumi: int
:returns: List of dictionaries containing the following keys (num_files, num_lumi, num_block, num_event, file_size)
:rtype: list of dicts
|
entailment
|
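Several reader APIs above guard against run_num=1 and against degenerate run ranges. parseRunRange() and run_tuple are DBS internals that are not shown in this file, so the standalone sketch below restates the same rules for ints, 'min-max' strings and lists of either.

# Illustrative restatement of the run_num guard: a range must span at least
# two runs and run 1 is rejected outright.
def validate_run_input(run_num):
    runs = run_num if isinstance(run_num, list) else [run_num]
    for r in runs:
        if isinstance(r, str) and '-' in r:
            lo, hi = (int(x) for x in r.split('-', 1))
            if lo == hi:
                raise ValueError("DBS run range must be apart at least by 1.")
            if lo <= 1 <= hi:
                raise ValueError("Run_num=1 is not a valid input.")
        elif int(r) == 1:
            raise ValueError("Run_num=1 is not a valid input.")

validate_run_input([2, '100-200'])      # passes
# validate_run_input('1-5')             # would raise: range contains run 1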
def listDatasetParents(self, dataset=''):
"""
        API to list a dataset's parents in DBS.
:param dataset: dataset (Required)
:type dataset: str
:returns: List of dictionaries containing the following keys (this_dataset, parent_dataset_id, parent_dataset)
:rtype: list of dicts
"""
try:
return self.dbsDataset.listDatasetParents(dataset)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listDatasetParents. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to list a dataset's parents in DBS.
:param dataset: dataset (Required)
:type dataset: str
:returns: List of dictionaries containing the following keys (this_dataset, parent_dataset_id, parent_dataset)
:rtype: list of dicts
|
entailment
|
def listOutputConfigs(self, dataset="", logical_file_name="",
release_version="", pset_hash="", app_name="",
output_module_label="", block_id=0, global_tag=''):
"""
API to list OutputConfigs in DBS.
* You can use any combination of these parameters in this API
* All parameters are optional, if you do not provide any parameter, all configs will be listed from DBS
:param dataset: Full dataset (path) of the dataset
:type dataset: str
:param logical_file_name: logical_file_name of the file
:type logical_file_name: str
:param release_version: cmssw version
:type release_version: str
:param pset_hash: pset hash
:type pset_hash: str
:param app_name: Application name (generally it is cmsRun)
:type app_name: str
:param output_module_label: output_module_label
:type output_module_label: str
:param block_id: ID of the block
:type block_id: int
:param global_tag: Global Tag
:type global_tag: str
:returns: List of dictionaries containing the following keys (app_name, output_module_label, create_by, pset_hash, creation_date, release_version, global_tag, pset_name)
:rtype: list of dicts
"""
release_version = release_version.replace("*", "%")
pset_hash = pset_hash.replace("*", "%")
app_name = app_name.replace("*", "%")
output_module_label = output_module_label.replace("*", "%")
try:
return self.dbsOutputConfig.listOutputConfigs(dataset,
logical_file_name, release_version, pset_hash, app_name,
output_module_label, block_id, global_tag)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listOutputConfigs. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to list OutputConfigs in DBS.
* You can use any combination of these parameters in this API
* All parameters are optional, if you do not provide any parameter, all configs will be listed from DBS
:param dataset: Full dataset (path) of the dataset
:type dataset: str
:param logical_file_name: logical_file_name of the file
:type logical_file_name: str
:param release_version: cmssw version
:type release_version: str
:param pset_hash: pset hash
:type pset_hash: str
:param app_name: Application name (generally it is cmsRun)
:type app_name: str
:param output_module_label: output_module_label
:type output_module_label: str
:param block_id: ID of the block
:type block_id: int
:param global_tag: Global Tag
:type global_tag: str
:returns: List of dictionaries containing the following keys (app_name, output_module_label, create_by, pset_hash, creation_date, release_version, global_tag, pset_name)
:rtype: list of dicts
|
entailment
|
def listFileParents(self, logical_file_name='', block_id=0, block_name=''):
"""
API to list file parents
:param logical_file_name: logical_file_name of file (Required)
:type logical_file_name: str, list
        :param block_id: ID of the block whose files should be listed
        :type block_id: int, str
        :param block_name: Name of the block whose files should be listed
:type block_name: int, str
:returns: List of dictionaries containing the following keys (parent_logical_file_name, logical_file_name)
:rtype: list of dicts
"""
try:
r = self.dbsFile.listFileParents(logical_file_name, block_id, block_name)
for item in r:
yield item
except HTTPError as he:
raise he
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listFileParents. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', ex.message, self.logger.exception, sError)
|
API to list file parents
:param logical_file_name: logical_file_name of file (Required)
:type logical_file_name: str, list
:param block_id: ID of the block whose files should be listed
:type block_id: int, str
:param block_name: Name of the block whose files should be listed
:type block_name: int, str
:returns: List of dictionaries containing the following keys (parent_logical_file_name, logical_file_name)
:rtype: list of dicts
|
entailment
|
def listFileParentsByLumi(self):
"""
        IMPORTANT: This is a ***WMAgent*** special case API. It is not for others.
        API to list File Parentage for a given block with or w/o a list of LFNs. It is used with the POST method of the fileparents call.
        Using the child_lfn_list will significantly affect the API running speed.
        :param block_name: This is the child's block name
        :type block_name: str
        :param logical_file_name: a list of child LFNs. Max length 1000.
        :type logical_file_name: list of str
:returns: List of dictionaries containing the following keys:[{child_parent_id_list: [(cid1, pid1), (cid2, pid2), ... (cidn, pidn)]}]
:rtype: list of dicts
"""
try :
body = request.body.read()
if body:
data = cjson.decode(body)
data = validateJSONInputNoCopy('file_parent_lumi', data, read=True)
else:
data = {}
            #Because CMSWEB has a 300-second response time, we have to limit the array size to make sure that
            #the API can be finished in 300 seconds.
            max_array_size = 1000
            if ('logical_file_name' in data.keys() and isinstance(data['logical_file_name'], list) and len(data['logical_file_name'])>max_array_size):
                dbsExceptionHandler("dbsException-invalid-input",
                                    "The Max list length supported in listFileParentsByLumi is %s." %max_array_size, self.logger.exception)
lfn = []
if "block_name" not in data.keys():
dbsExceptionHandler('dbsException-invalid-input', "block_name is required for fileparentsbylumi")
else:
if "logical_file_name" in data.keys():
lfn = data["logical_file_name"]
result = self.dbsFile.listFileParentsByLumi(block_name=data["block_name"], logical_file_name=lfn)
for r in result:
yield r
except cjson.DecodeError as De:
dbsExceptionHandler('dbsException-invalid-input2', "Invalid input", self.logger.exception, str(De))
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except HTTPError as he:
raise he
except Exception as ex:
sError = "DBSReaderModel/listFileParentsByLumi. %s \n Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', ex.message, self.logger.exception, sError)
|
IMPORTANT: This is a ***WMAgent*** special case API. It is not for others.
API to list File Parentage for a given block with or w/o a list of LFNs. It is used with the POST method of the fileparents call.
Using the child_lfn_list will significantly affect the API running speed.
:param block_name: This is the child's block name
:type block_name: str
:param logical_file_name: a list of child LFNs. Max length 1000.
:type logical_file_name: list of str
:returns: List of dictionaries containing the following keys:[{child_parent_id_list: [(cid1, pid1), (cid2, pid2), ... (cidn, pidn)]}]
:rtype: list of dicts
|
entailment
|
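A sketch of the POST body this call expects, based only on the docstring: block_name is required, logical_file_name is an optional list capped at 1000 entries. The block and LFN values below are placeholders, not real data.

import json

body = {
    "block_name": "/Primary/Processed-v1/TIER#abcd-1234",   # placeholder block name
    "logical_file_name": [                                  # optional, at most 1000 entries
        "/store/data/child_file_1.root",
        "/store/data/child_file_2.root",
    ],
}
payload = json.dumps(body)
print(payload)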
def listFileChildren(self, logical_file_name='', block_name='', block_id=0):
"""
        API to list file children. One of the parameters is mandatory.
:param logical_file_name: logical_file_name of file (Required)
:type logical_file_name: str, list
:param block_name: block_name
:type block_name: str
:param block_id: block_id
:type block_id: str, int
:returns: List of dictionaries containing the following keys (child_logical_file_name, logical_file_name)
:rtype: List of dicts
"""
if isinstance(logical_file_name, list):
for f in logical_file_name:
if '*' in f or '%' in f:
dbsExceptionHandler("dbsException-invalid-input2", dbsExceptionCode["dbsException-invalid-input2"], self.logger.exception, "No \
wildcard allow in LFN list" )
try:
return self.dbsFile.listFileChildren(logical_file_name, block_name, block_id)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listFileChildren. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to list file children. One of the parameters is mandatory.
:param logical_file_name: logical_file_name of file (Required)
:type logical_file_name: str, list
:param block_name: block_name
:type block_name: str
:param block_id: block_id
:type block_id: str, int
:returns: List of dictionaries containing the following keys (child_logical_file_name, logical_file_name)
:rtype: List of dicts
|
entailment
|
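A minimal client-side counterpart to the wildcard guard in listFileChildren: reject any '*' or '%' in an LFN list before calling the API. This helper is illustrative only.

def assert_no_wildcards(lfns):
    bad = [f for f in lfns if '*' in f or '%' in f]
    if bad:
        raise ValueError("No wildcards are allowed in the LFN list: %s" % bad)

assert_no_wildcards(["/store/data/a.root", "/store/data/b.root"])  # fine
# assert_no_wildcards(["/store/data/*.root"])                      # raises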
def listFileLumis(self, logical_file_name="", block_name="", run_num=-1, validFileOnly=0):
"""
API to list Lumi for files. Either logical_file_name or block_name is required. No wild card support in this API
:param block_name: Name of the block
:type block_name: str
:param logical_file_name: logical_file_name of file
:type logical_file_name: str, list
        :param run_num: List lumi sections for a given run number (Optional). Possible formats are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...]. run_num=1 is for MC data and causes an almost full table scan, so run_num=1 will cause an input error.
:type run_num: int, str, or list
:returns: List of dictionaries containing the following keys (lumi_section_num, logical_file_name, run_num, event_count)
:rtype: list of dicts
:param validFileOnly: optional valid file flag. Default = 0 (include all files)
        :type validFileOnly: int or str
"""
# run_num=1 caused full table scan and CERN DBS reported some of the queries ran more than 50 hours
        # We will disable all the run_num=1 calls in DBS. Run_num=1 will be OK when logical_file_name is given.
# YG Jan. 16 2019
if (run_num != -1 and logical_file_name ==''):
for r in parseRunRange(run_num):
if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
if r == 1 or r == '1':
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.",
self.logger.exception)
elif isinstance(r, run_tuple):
if r[0] == r[1]:
dbsExceptionHandler("dbsException-invalid-input", "DBS run range must be apart at least by 1.",self.logger.exception)
elif r[0] <= 1 <= r[1]:
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.",
self.logger.exception)
try:
return self.dbsFile.listFileLumis(logical_file_name, block_name, run_num, validFileOnly )
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listFileLumis. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to list Lumi for files. Either logical_file_name or block_name is required. No wild card support in this API
:param block_name: Name of the block
:type block_name: str
:param logical_file_name: logical_file_name of file
:type logical_file_name: str, list
:param run_num: List lumi sections for a given run number (Optional). Possible formats are: run_num, 'run_min-run_max' or ['run_min-run_max', run1, run2, ...]. run_num=1 is for MC data and causes an almost full table scan, so run_num=1 will cause an input error.
:type run_num: int, str, or list
:returns: List of dictionaries containing the following keys (lumi_section_num, logical_file_name, run_num, event_count)
:rtype: list of dicts
:param validFileOnly: optional valid file flag. Default = 0 (include all files)
:type validFileOnly: int or str
|
entailment
|
def listRuns(self, run_num=-1, logical_file_name="", block_name="", dataset=""):
"""
API to list all runs in DBS. At least one parameter is mandatory.
:param logical_file_name: List all runs in the file
:type logical_file_name: str
:param block_name: List all runs in the block
:type block_name: str
:param dataset: List all runs in that dataset
:type dataset: str
:param run_num: List all runs
:type run_num: int, string or list
"""
# run_num=1 caused full table scan and CERN DBS reported some of the queries ran more than 50 hours
        # We will disable all the run_num=1 calls in DBS. Run_num=1 will be OK when logical_file_name is given.
# YG Jan. 16 2019
if (run_num != -1 and logical_file_name ==''):
for r in parseRunRange(run_num):
if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
if r == 1 or r == '1':
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.",
self.logger.exception)
elif isinstance(r, run_tuple):
if r[0] == r[1]:
dbsExceptionHandler("dbsException-invalid-input", "DBS run range must be apart at least by 1.",
self.logger.exception)
elif r[0] <= 1 <= r[1]:
dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input.",
self.logger.exception)
if run_num==-1 and not logical_file_name and not dataset and not block_name:
dbsExceptionHandler("dbsException-invalid-input",
"run_num, logical_file_name, block_name or dataset parameter is mandatory",
self.logger.exception)
try:
if logical_file_name:
logical_file_name = logical_file_name.replace("*", "%")
if block_name:
block_name = block_name.replace("*", "%")
if dataset:
dataset = dataset.replace("*", "%")
return self.dbsRun.listRuns(run_num, logical_file_name, block_name, dataset)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listRun. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to list all runs in DBS. At least one parameter is mandatory.
:param logical_file_name: List all runs in the file
:type logical_file_name: str
:param block_name: List all runs in the block
:type block_name: str
:param dataset: List all runs in that dataset
:type dataset: str
:param run_num: List all runs
:type run_num: int, string or list
|
entailment
|
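Several of the reader APIs above (listFiles, listRuns, listOutputConfigs, listAcquisitionEras, ...) translate shell-style '*' wildcards into SQL LIKE '%' patterns before querying. A standalone sketch of that convention:

def to_sql_pattern(value):
    # Empty or None values pass through unchanged, mirroring the guards above.
    return value.replace("*", "%") if value else value

print(to_sql_pattern("/QCD*/Run2012*/AOD"))   # -> /QCD%/Run2012%/AOD
print(to_sql_pattern(""))                     # empty stays empty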
def listDataTypes(self, datatype="", dataset=""):
"""
API to list data types known to dbs (when no parameter supplied).
:param dataset: Returns data type (of primary dataset) of the dataset (Optional)
:type dataset: str
:param datatype: List specific data type
:type datatype: str
:returns: List of dictionaries containing the following keys (primary_ds_type_id, data_type)
:rtype: list of dicts
"""
try:
return self.dbsDataType.listDataType(dataType=datatype, dataset=dataset)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listDataTypes. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to list data types known to dbs (when no parameter supplied).
:param dataset: Returns data type (of primary dataset) of the dataset (Optional)
:type dataset: str
:param datatype: List specific data type
:type datatype: str
:returns: List of dictionaries containing the following keys (primary_ds_type_id, data_type)
:rtype: list of dicts
|
entailment
|
def dumpBlock(self, block_name):
"""
        API to list all information related to the block_name
:param block_name: Name of block to be dumped (Required)
:type block_name: str
"""
try:
return self.dbsBlock.dumpBlock(block_name)
except HTTPError as he:
raise he
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/dumpBlock. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', ex.message, self.logger.exception, sError)
|
API to list all information related to the block_name
:param block_name: Name of block to be dumped (Required)
:type block_name: str
|
entailment
|
def listAcquisitionEras(self, acquisition_era_name=''):
"""
API to list all Acquisition Eras in DBS.
:param acquisition_era_name: Acquisition era name (Optional, wild cards allowed)
:type acquisition_era_name: str
:returns: List of dictionaries containing following keys (description, end_date, acquisition_era_name, create_by, creation_date and start_date)
:rtype: list of dicts
"""
try:
acquisition_era_name = acquisition_era_name.replace('*', '%')
return self.dbsAcqEra.listAcquisitionEras(acquisition_era_name)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listAcquisitionEras. %s\n. Exception trace: \n %s" % (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to list all Acquisition Eras in DBS.
:param acquisition_era_name: Acquisition era name (Optional, wild cards allowed)
:type acquisition_era_name: str
:returns: List of dictionaries containing following keys (description, end_date, acquisition_era_name, create_by, creation_date and start_date)
:rtype: list of dicts
|
entailment
|
def listProcessingEras(self, processing_version=0):
"""
API to list all Processing Eras in DBS.
:param processing_version: Processing Version (Optional). If provided just this processing_version will be listed
:type processing_version: str
:returns: List of dictionaries containing the following keys (create_by, processing_version, description, creation_date)
:rtype: list of dicts
"""
try:
#processing_version = processing_version.replace("*", "%")
return self.dbsProcEra.listProcessingEras(processing_version)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listProcessingEras. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to list all Processing Eras in DBS.
:param processing_version: Processing Version (Optional). If provided just this processing_version will be listed
:type processing_version: str
:returns: List of dictionaries containing the following keys (create_by, processing_version, description, creation_date)
:rtype: list of dicts
|
entailment
|
def listReleaseVersions(self, release_version='', dataset='', logical_file_name=''):
"""
API to list all release versions in DBS
:param release_version: List only that release version
:type release_version: str
:param dataset: List release version of the specified dataset
:type dataset: str
:param logical_file_name: List release version of the logical file name
:type logical_file_name: str
:returns: List of dictionaries containing following keys (release_version)
:rtype: list of dicts
"""
if release_version:
release_version = release_version.replace("*", "%")
try:
return self.dbsReleaseVersion.listReleaseVersions(release_version, dataset, logical_file_name )
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listReleaseVersions. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to list all release versions in DBS
:param release_version: List only that release version
:type release_version: str
:param dataset: List release version of the specified dataset
:type dataset: str
:param logical_file_name: List release version of the logical file name
:type logical_file_name: str
:returns: List of dictionaries containing following keys (release_version)
:rtype: list of dicts
|
entailment
|
def listDatasetAccessTypes(self, dataset_access_type=''):
"""
API to list dataset access types.
:param dataset_access_type: List that dataset access type (Optional)
:type dataset_access_type: str
:returns: List of dictionary containing the following key (dataset_access_type).
:rtype: List of dicts
"""
if dataset_access_type:
dataset_access_type = dataset_access_type.replace("*", "%")
try:
return self.dbsDatasetAccessType.listDatasetAccessTypes(dataset_access_type)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listDatasetAccessTypes. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to list dataset access types.
:param dataset_access_type: List that dataset access type (Optional)
:type dataset_access_type: str
:returns: List of dictionary containing the following key (dataset_access_type).
:rtype: List of dicts
|
entailment
|
def listPhysicsGroups(self, physics_group_name=''):
"""
API to list all physics groups.
:param physics_group_name: List that specific physics group (Optional)
:type physics_group_name: basestring
:returns: List of dictionaries containing the following key (physics_group_name)
:rtype: list of dicts
"""
if physics_group_name:
physics_group_name = physics_group_name.replace('*', '%')
try:
return self.dbsPhysicsGroup.listPhysicsGroups(physics_group_name)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listPhysicsGroups. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to list all physics groups.
:param physics_group_name: List that specific physics group (Optional)
:type physics_group_name: basestring
:returns: List of dictionaries containing the following key (physics_group_name)
:rtype: list of dicts
|
entailment
|
def listRunSummaries(self, dataset="", run_num=-1):
"""
API to list run summaries, like the maximal lumisection in a run.
:param dataset: dataset name (Optional)
:type dataset: str
:param run_num: Run number (Required)
:type run_num: str, long, int
:rtype: list containing a dictionary with key max_lumi
"""
if run_num==-1:
dbsExceptionHandler("dbsException-invalid-input",
"The run_num parameter is mandatory",
self.logger.exception)
if re.search('[*,%]', dataset):
dbsExceptionHandler("dbsException-invalid-input",
"No wildcards are allowed in dataset",
self.logger.exception)
        # run_num=1 caused full table scan and CERN DBS reported some of the queries ran more than 50 hours
        # We will disable all the run_num=1 calls in DBS. Run_num=1 will be OK when dataset is given in this API.
        # YG Jan. 16 2019
        if ((run_num == 1 or run_num == '1') and dataset==''):
            dbsExceptionHandler("dbsException-invalid-input", "Run_num=1 is not a valid input when no dataset is present.",
                                self.logger.exception)
conn = None
try:
conn = self.dbi.connection()
return self.dbsRunSummaryListDAO.execute(conn, dataset, run_num)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSReaderModel/listRunSummaries. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'],
self.logger.exception, sError)
finally:
if conn:
conn.close()
|
API to list run summaries, like the maximal lumisection in a run.
:param dataset: dataset name (Optional)
:type dataset: str
:param run_num: Run number (Required)
:type run_num: str, long, int
:rtype: list containing a dictionary with key max_lumi
|
entailment
|
def list():
""" List all events """
entries = lambder.list_events()
for e in entries:
click.echo(str(e))
|
List all events
|
entailment
|
def add(name, function_name, cron):
""" Create an event """
lambder.add_event(name=name, function_name=function_name, cron=cron)
|
Create an event
|
entailment
|
def load(file):
""" Load events from a json file """
with open(file, 'r') as f:
contents = f.read()
lambder.load_events(contents)
|
Load events from a json file
|
entailment
|
def functions(context):
""" Manage AWS Lambda functions """
# find lambder.json in CWD
config_file = "./lambder.json"
if os.path.isfile(config_file):
context.obj = FunctionConfig(config_file)
pass
|
Manage AWS Lambda functions
|
entailment
|
def list():
""" List lambder functions """
functions = lambder.list_functions()
output = json.dumps(
functions,
sort_keys=True,
indent=4,
separators=(',', ':')
)
click.echo(output)
|
List lambder functions
|
entailment
|
def new(
name,
bucket,
timeout,
memory,
description,
subnet_ids,
security_group_ids
):
""" Create a new lambda project """
config = {}
if timeout:
config['timeout'] = timeout
if memory:
config['memory'] = memory
if description:
config['description'] = description
if subnet_ids:
config['subnet_ids'] = subnet_ids
if security_group_ids:
config['security_group_ids'] = security_group_ids
lambder.create_project(name, bucket, config)
|
Create a new lambda project
|
entailment
|
def deploy(
config,
name,
bucket,
timeout,
memory,
description,
subnet_ids,
security_group_ids
):
""" Deploy/Update a function from a project directory """
# options should override config if it is there
myname = name or config.name
mybucket = bucket or config.bucket
mytimeout = timeout or config.timeout
mymemory = memory or config.memory
mydescription = description or config.description
mysubnet_ids = subnet_ids or config.subnet_ids
mysecurity_group_ids = security_group_ids or config.security_group_ids
vpc_config = {}
if mysubnet_ids and mysecurity_group_ids:
vpc_config = {
'SubnetIds': mysubnet_ids.split(','),
'SecurityGroupIds': mysecurity_group_ids.split(',')
}
click.echo('Deploying {} to {}'.format(myname, mybucket))
lambder.deploy_function(
myname,
mybucket,
mytimeout,
mymemory,
mydescription,
vpc_config
)
|
Deploy/Update a function from a project directory
|
entailment
|
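The deploy() command above resolves each setting by letting an explicit CLI option win over the value from lambder.json, and only builds a VPC config dict when both subnet and security-group IDs are present. The helpers below are a standalone sketch of those two rules; FunctionConfig and lambder belong to the surrounding project and are not reproduced here.

def resolve(option_value, config_value):
    # CLI option wins; otherwise fall back to the config file value.
    return option_value or config_value

def build_vpc_config(subnet_ids, security_group_ids):
    # Both comma-separated strings must be present to produce a VPC config.
    if subnet_ids and security_group_ids:
        return {
            'SubnetIds': subnet_ids.split(','),
            'SecurityGroupIds': security_group_ids.split(','),
        }
    return {}

print(resolve(None, 'my-function'))                    # falls back to config
print(build_vpc_config('subnet-1,subnet-2', 'sg-1'))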
def rm(config, name, bucket):
""" Delete lambda function, role, and zipfile """
# options should override config if it is there
myname = name or config.name
mybucket = bucket or config.bucket
click.echo('Deleting {} from {}'.format(myname, mybucket))
lambder.delete_function(myname, mybucket)
|
Delete lambda function, role, and zipfile
|
entailment
|
def invoke(config, name, input):
""" Invoke function in AWS """
# options should override config if it is there
myname = name or config.name
click.echo('Invoking ' + myname)
output = lambder.invoke_function(myname, input)
click.echo(output)
|
Invoke function in AWS
|
entailment
|
def putBlock(self, blockcontent, migration=False):
"""
        Insert the data in several steps and commit when each step finishes or roll back if there is a problem.
"""
#YG
try:
#1 insert configuration
self.logger.debug("insert configuration")
configList = self.insertOutputModuleConfig(
blockcontent['dataset_conf_list'], migration)
#2 insert dataset
self.logger.debug("insert dataset")
datasetId = self.insertDataset(blockcontent, configList, migration)
#3 insert block & files
self.logger.debug("insert block & files.")
self.insertBlockFile(blockcontent, datasetId, migration)
except KeyError as ex:
dbsExceptionHandler("dbsException-invalid-input2", "DBSBlockInsert/putBlock: \
KeyError exception: %s. " %ex.args[0], self.logger.exception,
"DBSBlockInsert/putBlock: KeyError exception: %s. " %ex.args[0] )
except Exception as ex:
raise
|
Insert the data in several steps and commit when each step finishes or roll back if there is a problem.
|
entailment
|
def insertOutputModuleConfig(self, remoteConfig, migration=False):
"""
Insert Release version, application, parameter set hashes and the map(output module config).
"""
otptIdList = []
missingList = []
conn = self.dbi.connection()
try:
for c in remoteConfig:
cfgid = self.otptModCfgid.execute(conn, app = c["app_name"],
release_version = c["release_version"],
pset_hash = c["pset_hash"],
output_label = c["output_module_label"],
global_tag=c['global_tag'])
if cfgid <= 0 :
missingList.append(c)
else:
key = (c['app_name'] + ':' + c['release_version'] + ':' +
c['pset_hash'] + ':' +
c['output_module_label'] + ':' + c['global_tag'])
self.datasetCache['conf'][key] = cfgid
otptIdList.append(cfgid)
#print "About to set cfgid: %s" % str(cfgid)
except KeyError as ex:
if conn:conn.close()
dbsExceptionHandler("dbsException-invalid-input2", "DBSBlockInsert/insertOutputModuleConfig: \
KeyError exception: %s. " %ex.args[0], self.logger.exception,
"DBSBlockInsert/insertOutputModuleConfig: KeyError exception: %s. " %ex.args[0] )
except Exception as ex:
if conn:conn.close()
raise
if len(missingList)==0:
if conn:conn.close()
return otptIdList
#Now insert the missing configs
try:
#tran = conn.begin()
for m in missingList:
# Start a new transaction
# This is to see if we can get better results
# by committing early if we're submitting
# multiple blocks with similar features
tran = conn.begin()
#Now insert the config
# Sort out the mess
# We're having some problems with different threads
# committing different pieces at the same time
# This makes the output module config ID wrong
# Trying to catch this via exception handling on duplication
# Start a new transaction
#global_tag is now required. YG 03/08/2011
try:
cfgid = 0
if not migration:
m['create_by'] = dbsUtils().getCreateBy()
m['creation_date'] = dbsUtils().getTime()
configObj = {"release_version": m["release_version"],
"pset_hash": m["pset_hash"], "pset_name":m.get('pset_name', None),
"app_name": m["app_name"],
'output_module_label' : m['output_module_label'],
'global_tag' : m['global_tag'],
'scenario' : m.get('scenario', None),
'creation_date' : m['creation_date'],
'create_by':m['create_by']
}
self.otptModCfgin.execute(conn, configObj, tran)
tran.commit()
tran = None
except KeyError as ex:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler("dbsException-invalid-input2", "DBSBlockInsert/insertOutputModuleConfig: \
KeyError exception: %s. " %ex.args[0],
self.logger.exception,
"DBSBlockInsert/insertOutputModuleConfig: KeyError exception: %s. " %ex.args[0])
except exceptions.IntegrityError as ex:
#Another job inserted it just 1/100000 second earlier than
#you!! YG 11/17/2010
if str(ex).find("ORA-00001") != -1 or str(ex).lower().find("duplicate") !=-1:
if str(ex).find("TUC_OMC_1") != -1:
#the config is already in db, get the ID later
pass
else:
                            #reinsert it if one, two or three of the three attributes (version, hash and app) were inserted
                            #just 1/100000 second earlier.
try:
self.otptModCfgin.execute(conn, configObj, tran)
tran.commit()
tran = None
except exceptions.IntegrityError as ex:
if (str(ex).find("ORA-00001") != -1 and str(ex).find("TUC_OMC_1"))\
or str(ex).lower().find("duplicate") != -1:
pass
else:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler('dbsException-invalid-input2',
'Invalid data when insert Configure. ',
self.logger.exception,
'Invalid data when insert Configure. '+ str(ex))
elif str(ex).find("ORA-01400") > -1:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler("dbsException-missing-data", "Missing data when inserting Configure. ",
self.logger.exception, str(ex))
else:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler('dbsException-invalid-input2',
'Invalid data when insert Configure. ',
self.logger.exception,
'Invalid data when insert Configure. '+ str(ex))
                except Exception as ex3:
if tran:tran.rollback()
if conn:conn.close()
raise ex3
cfgid = self.otptModCfgid.execute(conn,
app = m["app_name"],
release_version = m["release_version"],
pset_hash = m["pset_hash"],
output_label = m["output_module_label"],
global_tag=m['global_tag'])
otptIdList.append(cfgid)
key = (m['app_name'] + ':' + m['release_version'] + ':' +
m['pset_hash'] + ':' +m['output_module_label'] + ':' +
m['global_tag'])
self.datasetCache['conf'][key] = cfgid
finally:
if tran:tran.rollback()
if conn:conn.close()
return otptIdList
|
Insert Release version, application, parameter set hashes and the map(output module config).
|
entailment
|
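insertOutputModuleConfig above follows an "insert, and on a unique-constraint violation fall back to looking the row up" pattern to tolerate concurrent inserts of the same config. The sketch below is a generic illustration of that pattern only: sqlite3 stands in for Oracle, and the real DAO objects (self.otptModCfgin, self.otptModCfgid) and the ORA-00001 string matching are specific to the actual code.

import sqlite3

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE cfg (id INTEGER PRIMARY KEY, key TEXT UNIQUE)")

def insert_or_get(conn, key):
    try:
        cur = conn.execute("INSERT INTO cfg (key) VALUES (?)", (key,))
        conn.commit()
        return cur.lastrowid
    except sqlite3.IntegrityError:
        # Someone else inserted the same key first: fetch its id instead.
        row = conn.execute("SELECT id FROM cfg WHERE key = ?", (key,)).fetchone()
        return row[0]

print(insert_or_get(conn, "cmsRun:CMSSW_1:hash:label:tag"))
print(insert_or_get(conn, "cmsRun:CMSSW_1:hash:label:tag"))  # same id, no error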
def insertDataset(self, blockcontent, otptIdList, migration=False):
"""
        This method inserts a dataset from a block object into DBS.
"""
dataset = blockcontent['dataset']
conn = self.dbi.connection()
# First, check and see if the dataset exists.
try:
datasetID = self.datasetid.execute(conn, dataset['dataset'])
dataset['dataset_id'] = datasetID
except KeyError as ex:
if conn:conn.close()
dbsExceptionHandler("dbsException-invalid-input2", "DBSBlockInsert/InsertDataset: Dataset is required.\
Exception: %s. troubled dataset are: %s" %(ex.args[0], dataset),
self.logger.exception, "DBSBlockInsert/InsertDataset: Dataset is required.\
Exception: %s. troubled dataset are: %s" %(ex.args[0], dataset ))
except Exception as ex1:
if conn:conn.close()
raise ex1
if datasetID > 0:
# Then we already have a valid dataset. We only need to fill the map (dataset & output module config)
# Skip to the END
try:
self.insertDatasetWOannex(dataset = dataset,
blockcontent = blockcontent,
otptIdList = otptIdList, conn = conn,
insertDataset = False, migration=migration)
finally:
if conn:conn.close()
return datasetID
# Else, we need to do the work
#Start a new transaction
tran = conn.begin()
primary_ds_name = ''
try:
#1. Deal with primary dataset. Most primary datasets are
#pre-installed in db
primds = blockcontent["primds"]
primary_ds_name = primds["primary_ds_name"]
primds["primary_ds_id"] = self.primdsid.execute(conn,
primds["primary_ds_name"], transaction=tran)
if primds["primary_ds_id"] <= 0:
#primary dataset is not in db yet.
try:
primds["primary_ds_id"] = self.sm.increment(conn, "SEQ_PDS")
primds["creation_date"] = primds.get("creation_date", dbsUtils().getTime())
if not migration:
primds["create_by"] = dbsUtils().getCreateBy()
self.primdsin.execute(conn, primds, tran)
except exceptions.IntegrityError as ex:
if (str(ex).find("ORA-00001") != -1 and str(ex).find("TUC_PDS_PRIMARY_DS_NAME") != -1)\
or str(ex).lower().find("duplicate") !=-1:
primds["primary_ds_id"] = self.primdsid.execute(conn,
primds["primary_ds_name"],
transaction=tran)
if primds["primary_ds_id"] <= 0:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler('dbsException-conflict-data',
'Primary dataset not yet inserted by concurrent insert. ',
self.logger.exception,
'Primary dataset not yet inserted by concurrent insert. '+ str(ex))
elif str(ex).find("ORA-01400") > -1:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler('dbsException-missing-data',
'Missing data when insert primary_datasets. ',
self.logger.exception,
'Missing data when insert primary_datasets. '+ str(ex))
else:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler('dbsException-invalid-input2',
'Invalid data when insert primary_datasets. ',
self.logger.exception,
'Invalid data when insert primary_datasets. '+ str(ex))
except Exception as ex:
if tran:tran.rollback()
if conn:conn.close()
raise
dataset['primary_ds_id'] = primds["primary_ds_id"]
#2 Deal with processed ds
#processed ds is handled inside of dataset insertion, However we need to make sure it is formatted correctly.
#processed_ds_name is not required pre-exist in the db. will insert with the dataset if not in yet
#
# processed_ds_name=acquisition_era_name[-filter_name][-processing_str]-vprocessing_version
# Note [-filterName] is new as 4/30/2012. See ticket #3655. YG
#
            #Although acquisition era and processing version are not required for a dataset
            #in the schema (the schema is built this way because
            #we need to accommodate the DBS2 data), we impose the requirement on the API.
#So both acquisition and processing eras are required.
#We do the format checking after we deal with acquisition era and processing era.
#
#YG 12/07/2011 TK-362
#3 Deal with Acquisition era
aq = blockcontent.get('acquisition_era', {})
has_acquisition_era_name = 'acquisition_era_name' in aq
has_start_date = 'start_date' in aq
def insert_acquisition_era():
try:
#insert acquisition era into db
aq['acquisition_era_id'] = self.sm.increment(conn, "SEQ_AQE")
self.acqin.execute(conn, aq, tran)
dataset['acquisition_era_id'] = aq['acquisition_era_id']
except exceptions.IntegrityError as ei:
#ORA-01400: cannot insert NULL into required columns, usually it is the NULL on start_date
if "ORA-01400" in str(ei) :
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler("dbsException-invalid-input2",
"BlockInsert: acquisition_era_name and start_date are required. \
NULL was received from user input. Please correct your data.")
#ok, already in db?
if (str(ei).find("ORA-00001") != -1 and str(ei).find("TUC_AQE_ACQUISITION_ERA_NAME") != -1)\
or str(ei).lower().find("duplicate") !=-1:
dataset['acquisition_era_id'] = self.acqid.execute(conn, aq['acquisition_era_name'])
if dataset['acquisition_era_id'] <= 0:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler("dbsException-invalid-input2", "BlockInsert: \
Check the spelling of acquisition Era name. The db may already have the same \
acquisition era, but with different cases.", self.logger.exception, "BlockInsert: \
Check the spelling of acquisition Era name. The db may already have the same \
acquisition era, but with different cases.")
else:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler('dbsException-invalid-input2',
'Invalid data when insert acquisition_eras . ',
self.logger.exception,
'Invalid data when insert acquisition_eras. '+ str(ei))
except Exception:
if tran:tran.rollback()
if conn:conn.close()
raise
if has_acquisition_era_name and has_start_date:
insert_acquisition_era()
elif migration and not has_acquisition_era_name:
                #if no acquisition era is available, for example for old DBS 2 data, skip the insertion
aq['acquisition_era_id'] = None
dataset['acquisition_era_id'] = None
elif migration and not aq['start_date']:
aq['start_date'] = 0
insert_acquisition_era()
else:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler("dbsException-invalid-input2", "BlockInsert: Acquisition Era is required",
self.logger.exception, "BlockInsert: Acquisition Era is required")
#4 Deal with Processing era
pera = blockcontent.get('processing_era', {})
if 'processing_version' in pera:
try:
#insert processing era into db
pera['processing_era_id'] = self.sm.increment(conn, "SEQ_PE")
#pera['processing_version'] = pera['processing_version'].upper()
self.procsingin.execute(conn, pera, tran)
dataset['processing_era_id'] = pera['processing_era_id']
except exceptions.IntegrityError as ex:
if (str(ex).find("ORA-00001: unique constraint") != -1 and \
str(ex).find("TUC_PE_PROCESSING_VERSION") != -1) or \
str(ex).lower().find("duplicate") !=-1:
#ok, already in db
dataset['processing_era_id'] = self.procsingid.execute(conn, pera['processing_version'])
elif str(ex).find("ORA-01400") > -1:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler('dbsException-missing-data',
'Missing data when insert processing_eras. ',
self.logger.exception,
'Missing data when insert Processing_eras. '+ str(ex))
else:
if tran: tran.rollback()
if conn: conn.close()
dbsExceptionHandler('dbsException-invalid-input2',
                                        'Invalid data when insert Processing_eras. ',
self.logger.exception,
'Invalid data when insert Processing_eras. '+ str(ex))
except Exception as ex:
if tran: tran.rollback()
if conn: conn.close()
raise
elif migration:
#if no processing era is available, for example for old DBS 2 data, skip insertion
pera['processing_era_id'] = None
dataset['processing_era_id'] = None
else:
if tran: tran.rollback()
if conn: conn.close()
dbsExceptionHandler('dbsException-invalid-input2', 'BlockInsert:processing version is required')
#Make sure processed_ds_name is right format.
#processed_ds_name=acquisition_era_name[-filter_name][-processing_str]-vprocessing_version
#In order to accommodate DBS2 data for migration, we turn off this check in migration.
            #This will not cause any problem for non-DBS2 data because, during migration, the non-DBS2 data was
            #already checked when it was inserted into the source dbs. YG 7/12/2012
if not migration and aq["acquisition_era_name"] != "CRAB" and aq["acquisition_era_name"] != "LHE":
erals=dataset["processed_ds_name"].rsplit('-')
if erals[0] != aq["acquisition_era_name"] or erals[len(erals)-1] != "%s%s"%("v", pera["processing_version"]):
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler('dbsException-invalid-input2', "BlockInsert:\
processed_ds_name=acquisition_era_name[-filter_name][-processing_str]-vprocessing_version must be satisified.",
self.logger.exception,
"BlockInsert: processed_ds_name=acquisition_era_name[-filter_name][-processing_str]-vprocessing_version must be satisified." )
#So far so good, let's commit first 4 db activities before going on.
tran.commit()
except KeyError as ex:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler("dbsException-invalid-input2", "DBSBlockInsert/insertOutputModuleConfig: \
KeyError exception: %s. " %ex.args[0], self.logger.exception,
"DBSBlockInsert/insertOutputModuleConfig: KeyError exception: %s." %ex.args[0] )
except:
if tran:tran.rollback()
if conn:conn.close()
raise
#Continue for the rest.
tran = conn.begin()
try:
            #5 Deal with physics group
phg = dataset['physics_group_name']
if phg:
                #Yes, the dataset has a physics group.
phgId = self.phygrpid.execute(conn, phg, transaction=tran)
if phgId <= 0 :
#not in db yet, insert it
phgId = self.sm.increment(conn, "SEQ_PG")
phygrp = {'physics_group_id':phgId, 'physics_group_name':phg}
try:
self.phygrpin.execute(conn, phygrp, tran)
except exceptions.IntegrityError as ex:
if str(ex).find("ORA-00001") != -1 and str(ex).find("PK_PG") != -1:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler(message='InsertPhysicsGroup Error', logger=self.logger.exception, serverError="InsertPhysicsGroup: "+ str(ex))
if (str(ex).find("ORA-00001") != -1 and str(ex).find("TUC_PG_PHYSICS_GROUP_NAME") != -1) or\
str(ex).lower().find("duplicate") != -1:
phgId = self.phygrpid.execute(conn, phg, transaction=tran)
if phgId <= 0:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler(message='InsertPhysicsGroup Error ', logger=self.logger.exception, serverError="InsertPhysicsGroup: "+str(ex))
elif str(ex).find("ORA-01400") > -1:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler('dbsException-missing-data',
'Missing data when insert Physics_groups. ',
self.logger.exception,
'Missing data when insert Physics_groups. '+ str(ex))
else:
if tran: tran.rollback()
if conn: conn.close()
dbsExceptionHandler('dbsException-invalid-input2',
'Invalid data when insert Physics_groups. ',
self.logger.exception,
'Invalid data when insert Physics_groups. '+ str(ex))
except Exception as ex:
if tran:tran.rollback()
if conn:conn.close()
raise
dataset['physics_group_id'] = phgId
#self.logger.debug("***PHYSICS_GROUP_ID=%s***" %phgId)
else:
                #no physics group for the dataset.
dataset['physics_group_id'] = None
del dataset['physics_group_name']
            #6 Deal with Data tier. A dataset must have a data tier
dataset['data_tier_name'] = dataset['data_tier_name'].upper()
            #We no longer handle the tier inside the dataset insert. If a data tier is not in DBS before the dataset
            # is inserted, we report an error back to the user as missing data. See github issue #466.
            # This is to prevent users from inserting random data tiers into the phys* DB. YG May-15-2015
dtId = 0
dtId = self.tierid.execute(conn, dataset['data_tier_name'])
            #When no data tier is found, it returns tier id -1
if dtId <= 0:
                dbsExceptionHandler('dbsException-missing-data', 'Required data tier %s not found in DBS when inserting the dataset. Ask your admin to add the tier before inserting/migrating the block/dataset.' %dataset['data_tier_name'],
                                    self.logger.exception, 'Required data tier not found in DBS when inserting the dataset. ')
#7 Deal with dataset access type. A dataset must have a data type
dataset['dataset_access_type'] = dataset['dataset_access_type'].upper()
            #handle dataset access type inside the dataset insertion with Insert2.
tran.commit()
except Exception as ex:
if tran:tran.rollback()
if conn:conn.close()
raise
#Before we insert the dataset, we need to make sure dataset=/primary_dataset_name/processed_dataset_name/data_tier
d2 = dataset['dataset'].rsplit('/')
if (d2[1] != primary_ds_name or d2[2] != dataset["processed_ds_name"] or d2[3] != dataset['data_tier_name']):
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler('dbsException-invalid-input2',
'dataset=/primary_dataset_name/processed_dataset_name/data_tier is not matched.',
self.logger.exception, 'dataset=/primary_dataset_name/processed_dataset_name/data_tier is not matched.')
try:
#self.logger.debug("*** Trying to insert the dataset***")
dataset['dataset_id'] = self.insertDatasetWOannex(dataset = dataset,
blockcontent = blockcontent,
otptIdList = otptIdList,
conn = conn, insertDataset = True, migration=migration)
finally:
if tran:tran.rollback()
if conn:conn.close()
return dataset['dataset_id']
|
        This method inserts a dataset from a block object into DBS.
|
entailment
|
def insertDatasetWOannex(self, dataset, blockcontent, otptIdList, conn,
insertDataset = True, migration = False):
"""
_insertDatasetOnly_
Insert the dataset and only the dataset
Meant to be called after everything else is put into place.
The insertDataset flag is set to false if the dataset already exists
"""
tran = conn.begin()
try:
#8 Finally, we have everything to insert a dataset
if insertDataset:
# Then we have to get a new dataset ID
dataset['dataset_id'] = self.datasetid.execute(conn,
dataset['dataset'])
if dataset['dataset_id'] <= 0:
dataset['dataset_id'] = self.sm.increment(conn, "SEQ_DS")
if not migration:
dataset['last_modified_by'] = dbsUtils().getCreateBy()
dataset['create_by'] = dbsUtils().getCreateBy()
dataset['creation_date'] = dataset.get('creation_date', dbsUtils().getTime())
dataset['last_modification_date'] = dataset.get('last_modification_date', dbsUtils().getTime())
dataset['xtcrosssection'] = dataset.get('xtcrosssection', None)
dataset['prep_id'] = dataset.get('prep_id', None)
try:
self.datasetin.execute(conn, dataset, tran)
except exceptions.IntegrityError as ei:
if str(ei).find("ORA-00001") != -1 or str(ei).lower().find("duplicate") !=-1:
if conn.closed:
conn = self.dbi.connection()
dataset['dataset_id'] = self.datasetid.execute(conn, dataset['dataset'])
if dataset['dataset_id'] <= 0:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler('dbsException-conflict-data',
'Dataset/[processed DS]/[dataset access type] not yet inserted by concurrent insert. ',
self.logger.exception,
'Dataset/[processed DS]/[dataset access type] not yet inserted by concurrent insert. '+ str(ei))
elif str(ei).find("ORA-01400") > -1:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler('dbsException-missing-data',
'Missing data when insert Datasets. ',
self.logger.exception,
'Missing data when insert Datasets. '+ str(ei))
else:
if tran: tran.rollback()
if conn: conn.close()
dbsExceptionHandler('dbsException-invalid-input2',
'Invalid data when insert Datasets. ',
self.logger.exception,
'Invalid data when insert Datasets. '+ str(ei))
except Exception:
#should catch all above exception to rollback. YG Jan 17, 2013
if tran:tran.rollback()
if conn:conn.close()
raise
#9 Fill Dataset Parentage
#All parentage are deduced from file parentage.
#10 Before we commit, make dataset and output module configuration
#mapping. We have to try to fill the map even if dataset is
#already in dest db
for c in otptIdList:
try:
dcObj = {
'dataset_id' : dataset['dataset_id'],
'output_mod_config_id' : c }
self.dcin.execute(conn, dcObj, tran)
except exceptions.IntegrityError as ei:
#FIXME YG 01/17/2013
if (str(ei).find("ORA-00001") != -1 and str(ei).find("TUC_DC_1") != -1) or \
str(ei).lower().find("duplicate")!=-1:
#ok, already in db
                        #FIXME: What happens when they are only partially in the db?
#YG 11/17/2010
pass
else:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler('dbsException-invalid-input2',
'Invalid data when insert dataset_configs. ',
self.logger.exception,
'Invalid data when insert dataset_configs. '+ str(ei))
except Exception as ex:
if tran:tran.rollback()
if conn:conn.close()
raise
#Now commit everything.
tran.commit()
except exceptions.IntegrityError as ei:
# Then is it already in the database?
# Not really. We have to check it again. YG Jan 17, 2013
            # we don't check the unique key here, since more than one unique key might
            # be violated: such as data_tier, processed_dataset, dataset_access_types.
if str(ei).find("ORA-00001") != -1 or str(ei).lower().find("duplicate")!=-1:
                # For now, we assume that in most cases the same dataset was inserted by a different thread. If not,
# one has to call the insert dataset again. But we think this is a rare case and let the second
# DBSBlockInsert call fix it if it happens.
if conn.closed:
conn = self.dbi.connection()
dataset_id = self.datasetid.execute(conn, dataset['dataset'])
if dataset_id <= 0:
dbsExceptionHandler('dbsException-conflict-data',
'Dataset not yet inserted by concurrent insert',
self.logger.exception,
'Dataset not yet inserted by concurrent insert')
else:
dataset['dataset_id'] = dataset_id
else:
if tran:tran.rollback()
if conn:conn.close()
dbsExceptionHandler('dbsException-invalid-input2',
'Invalid data when insert Datasets. ',
self.logger.exception,
'Invalid data when insert Datasets. '+ str(ei))
except Exception as ex:
if tran:tran.rollback()
if conn:conn.close()
raise
finally:
if tran:tran.rollback()
if conn:conn.close()
return dataset['dataset_id']
|
_insertDatasetOnly_
Insert the dataset and only the dataset
Meant to be called after everything else is put into place.
The insertDataset flag is set to false if the dataset already exists
|
entailment
|
def listSites(self, block_name="", site_name=""):
"""
Returns sites.
"""
try:
conn = self.dbi.connection()
if block_name:
result = self.blksitelist.execute(conn, block_name)
else:
result = self.sitelist.execute(conn, site_name)
return result
finally:
if conn:
conn.close()
|
Returns sites.
|
entailment
|
def insertSite(self, businput):
"""
Input dictionary has to have the following keys:
site_name
it builds the correct dictionary for dao input and executes the dao
"""
conn = self.dbi.connection()
tran = conn.begin()
try:
siteobj = { # FIXME: unused?
"site_name" : businput["site_name"]
}
businput["site_id"] = self.sm.increment(conn, "SEQ_SI", tran)
self.sitein.execute(conn, businput, tran)
tran.commit()
except Exception as ex:
if (str(ex).lower().find("unique constraint") != -1 or
str(ex).lower().find("duplicate") != -1):
# already exists, lets fetch the ID
self.logger.warning("Ignoring unique constraint violation")
self.logger.warning(ex)
else:
if tran:
tran.rollback()
self.logger.exception(ex)
raise
finally:
if tran:
tran.close()
if conn:
conn.close()
|
Input dictionary has to have the following keys:
site_name
it builds the correct dictionary for dao input and executes the dao
|
entailment
|
def _create_das_mapping(self):
"""
das_map = {'lookup' : [{params : {'param1' : 'required', 'param2' : 'optional', 'param3' : 'default_value' ...},
url : 'https://cmsweb.cern.ch:8443/dbs/prod/global/DBSReader/acquisitioneras/',
das_map : {'das_param1' : dbs_param1, ...}
}]
}
"""
with open(self._mapfile, 'r') as f:
for entry in yaml.load_all(f):
das2dbs_param_map = {}
if 'lookup' not in entry:
continue
for param_map in entry['das_map']:
if 'api_arg' in param_map:
das2dbs_param_map[param_map['das_key']] = param_map['api_arg']
self._das_map.setdefault(entry['lookup'], []).append({'params' : entry['params'],
'url' : entry['url'],
'das2dbs_param_map' : das2dbs_param_map})
|
das_map = {'lookup' : [{params : {'param1' : 'required', 'param2' : 'optional', 'param3' : 'default_value' ...},
url : 'https://cmsweb.cern.ch:8443/dbs/prod/global/DBSReader/acquisitioneras/',
das_map : {'das_param1' : dbs_param1, ...}
}]
}
|
entailment
|
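A small illustration of the mapfile structure the parser above expects; the lookup name, parameters and DAS keys below are made up, and only the top-level keys (lookup, params, url, das_map) and the das_key/api_arg fields mirror what the code actually reads.
import yaml

sample_mapfile_entry = """
lookup: acquisition_era
params: {acquisition_era_name: optional}
url: https://cmsweb.cern.ch:8443/dbs/prod/global/DBSReader/acquisitioneras/
das_map:
  - {das_key: era, api_arg: acquisition_era_name}
"""

entry = yaml.load(sample_mapfile_entry)
# the parser keeps only entries carrying a 'lookup' key and maps das_key -> api_arg
print("%s -> %s" % (entry['das_map'][0]['das_key'], entry['das_map'][0]['api_arg']))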
def slicedIterator(sourceList, sliceSize):
"""
:param: sourceList: list which need to be sliced
:type: list
:param: sliceSize: size of the slice
:type: int
:return: iterator of the sliced list
"""
start = 0
end = 0
while len(sourceList) > end:
end = start + sliceSize
yield sourceList[start: end]
start = end
|
:param: sourceList: list which need to be sliced
:type: list
:param: sliceSize: size of the slice
:type: int
:return: iterator of the sliced list
|
entailment
|
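A quick usage sketch (not part of the original module) assuming slicedIterator above is in scope; the file names are placeholders.
lfns = ['/store/data/placeholder/file_%03d.root' % i for i in range(7)]

# yields chunks of at most 3 elements: indices 0-2, 3-5, then the remaining 6
for chunk in slicedIterator(lfns, 3):
    print(len(chunk))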
def checkInputParameter(method, parameters, validParameters, requiredParameters=None):
"""
Helper function to check input by using before sending to the server
:param method: Name of the API
:type method: str
:param validParameters: Allow parameters for the API call
:type validParameters: list
:param requiredParameters: Required parameters for the API call (Default: None)
:type requiredParameters: list
"""
for parameter in parameters:
if parameter not in validParameters:
raise dbsClientException("Invalid input",
"API %s does not support parameter %s. Supported parameters are %s" \
% (method, parameter, validParameters))
if requiredParameters is not None:
if 'multiple' in requiredParameters:
match = False
for requiredParameter in requiredParameters['multiple']:
if requiredParameter!='detail' and requiredParameter in parameters:
match = True
break
if not match:
raise dbsClientException("Invalid input",
"API %s does require one of the parameters %s" \
% (method, requiredParameters['multiple']))
if 'forced' in requiredParameters:
for requiredParameter in requiredParameters['forced']:
if requiredParameter not in parameters:
raise dbsClientException("Invalid input",
"API %s does require the parameter %s. Forced required parameters are %s" \
% (method, requiredParameter, requiredParameters['forced']))
if 'standalone' in requiredParameters:
overlap = []
for requiredParameter in requiredParameters['standalone']:
if requiredParameter in parameters:
overlap.append(requiredParameter)
if len(overlap) != 1:
raise dbsClientException("Invalid input",
"API %s does requires only *one* of the parameters %s." \
% (method, requiredParameters['standalone']))
|
Helper function to check input by using before sending to the server
:param method: Name of the API
:type method: str
:param validParameters: Allow parameters for the API call
:type validParameters: list
:param requiredParameters: Required parameters for the API call (Default: None)
:type requiredParameters: list
|
entailment
|
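A hedged example of how the validator above behaves; the API name and parameter sets are illustrative and assume checkInputParameter and dbsClientException are in scope.
valid = ['dataset', 'block_name', 'detail']
required = {'multiple': ['dataset', 'block_name']}

# passes: 'dataset' satisfies the 'multiple' requirement and every key is valid
checkInputParameter(method="listBlocks", parameters=['dataset', 'detail'],
                    validParameters=valid, requiredParameters=required)

# raises dbsClientException: 'site' is not among the valid parameters
try:
    checkInputParameter(method="listBlocks", parameters=['site'],
                        validParameters=valid, requiredParameters=required)
except dbsClientException as ex:
    print(ex)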
def list_parameter_splitting(data, key, size_limit=8000, method='GET'):
"""
    Helper function to split a list used as an input parameter for requests,
    since Apache limits the length of a URI to 8190 bytes.
    We extended it to also split lfn and dataset list lengths for POST calls to avoid
    DB abuse, even if there is no limit on how long the list can be. YG 2015-5-13
:param data: url parameters
:type data: dict
    :param key: key of the parameter dictionary to split by length
    :type key: str
:param size_limit: Split list in chunks of maximal size_limit bytes
:type size_limit: int
"""
values = list(data[key])
data[key] = []
for element in values:
data[key].append(element)
if method =='GET':
size = len(urllib.urlencode(data))
else:
size = len(data)
if size > size_limit:
last_element = data[key].pop()
yield data
data[key] = [last_element]
yield data
|
    Helper function to split a list used as an input parameter for requests,
    since Apache limits the length of a URI to 8190 bytes.
    We extended it to also split lfn and dataset list lengths for POST calls to avoid
    DB abuse, even if there is no limit on how long the list can be. YG 2015-5-13
:param data: url parameters
:type data: dict
    :param key: key of the parameter dictionary to split by length
    :type key: str
:param size_limit: Split list in chunks of maximal size_limit bytes
:type size_limit: int
|
entailment
|
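An illustrative run of the splitting generator above; the parameter values are placeholders and the helper is assumed to be in scope. Note that the generator re-yields the same mutated dictionary, so each chunk must be consumed before the next iteration.
params = {'dataset': '/Prim/Proc-v1/AOD',
          'logical_file_name': ['/store/data/placeholder/file_%04d.root' % i for i in range(400)]}

chunks = 0
for chunk in list_parameter_splitting(data=dict(params), key='logical_file_name',
                                      size_limit=8000, method='GET'):
    # every yielded chunk keeps its urlencoded length below size_limit
    chunks += 1
print("the 400 lfns would be sent in %d requests" % chunks)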
def split_calls(func):
"""
    Decorator to split up server calls for methods using url parameters, due to the length
    limitation of the URI in Apache (8190 bytes by default).
"""
def wrapper(*args, **kwargs):
#The size limit is 8190 bytes minus url and api to call
#For example (https://cmsweb-testbed.cern.ch:8443/dbs/prod/global/filechildren), so 192 bytes should be safe.
size_limit = 8000
encoded_url = urllib.urlencode(kwargs)
if len(encoded_url) > size_limit:
for key, value in kwargs.iteritems():
                ###only one (the first) list at a time is split,
###currently only file lists are supported
if key in ('logical_file_name', 'block_name', 'lumi_list', 'run_num') and isinstance(value, list):
ret_val = []
for splitted_param in list_parameter_splitting(data=dict(kwargs), #make a copy, since it is manipulated
key=key,
size_limit=size_limit):
try:
ret_val.extend(func(*args, **splitted_param))
except (TypeError, AttributeError):#update function call do not return lists
ret_val= []
return ret_val
raise dbsClientException("Invalid input",
"The lenght of the urlencoded parameters to API %s \
is exceeding %s bytes and cannot be splitted." % (func.__name__, size_limit))
else:
return func(*args, **kwargs)
return wrapper
|
    Decorator to split up server calls for methods using url parameters, due to the length
    limitation of the URI in Apache (8190 bytes by default).
|
entailment
|
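A toy sketch of the decorator above wrapping a fake, keyword-only API method; the function name and lfn values are invented purely to show that long lists are transparently split and the partial results concatenated.
@split_calls
def fake_filechildren(**kwargs):
    # stand-in for a real server call: echo one dict per requested lfn
    return [{'logical_file_name': lfn} for lfn in kwargs.get('logical_file_name', [])]

lfns = ['/store/mc/placeholder/file_%05d.root' % i for i in range(500)]
children = fake_filechildren(logical_file_name=lfns)
print("got %d results back" % len(children))  # still 500, even though several chunks were issued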
def __callServer(self, method="", params={}, data={}, callmethod='GET', content='application/json'):
"""
A private method to make HTTP call to the DBS Server
:param method: REST API to call, e.g. 'datasets, blocks, files, ...'.
:type method: str
:param params: Parameters to the API call, e.g. {'dataset':'/PrimaryDS/ProcessedDS/TIER'}.
:type params: dict
:param callmethod: The HTTP method used, by default it is HTTP-GET, possible values are GET, POST and PUT.
:type callmethod: str
:param content: The type of content the server is expected to return. DBS3 only supports application/json
:type content: str
"""
UserID = os.environ['USER']+'@'+socket.gethostname()
try:
UserAgent = "DBSClient/"+os.environ['DBS3_CLIENT_VERSION']+"/"+ self.userAgent
except:
UserAgent = "DBSClient/Unknown"+"/"+ self.userAgent
request_headers = {"Content-Type": content, "Accept": content, "UserID": UserID, "User-Agent":UserAgent }
method_func = getattr(self.rest_api, callmethod.lower())
data = cjson.encode(data)
try:
self.http_response = method_func(self.url, method, params, data, request_headers)
except HTTPError as http_error:
self.__parseForException(http_error)
if content != "application/json":
return self.http_response.body
try:
json_ret=cjson.decode(self.http_response.body)
except cjson.DecodeError:
print("The server output is not a valid json, most probably you have a typo in the url.\n%s.\n" % self.url, file=sys.stderr)
raise dbsClientException("Invalid url", "Possible urls are %s" %self.http_response.body)
return json_ret
|
A private method to make HTTP call to the DBS Server
:param method: REST API to call, e.g. 'datasets, blocks, files, ...'.
:type method: str
:param params: Parameters to the API call, e.g. {'dataset':'/PrimaryDS/ProcessedDS/TIER'}.
:type params: dict
:param callmethod: The HTTP method used, by default it is HTTP-GET, possible values are GET, POST and PUT.
:type callmethod: str
:param content: The type of content the server is expected to return. DBS3 only supports application/json
:type content: str
|
entailment
|
def __parseForException(self, http_error):
"""
An internal method, should not be used by clients
:param httperror: Thrown httperror by the server
"""
data = http_error.body
try:
if isinstance(data, str):
data = cjson.decode(data)
except:
raise http_error
if isinstance(data, dict) and 'exception' in data:# re-raise with more details
raise HTTPError(http_error.url, data['exception'], data['message'], http_error.header, http_error.body)
raise http_error
|
An internal method, should not be used by clients
:param httperror: Thrown httperror by the server
|
entailment
|
def requestTimingInfo(self):
"""
Returns the time needed to process the request by the frontend server in microseconds
        and the epoch timestamp of the request in microseconds.
:rtype: tuple containing processing time and timestamp
"""
try:
return tuple(item.split('=')[1] for item in self.http_response.header.get('CMS-Server-Time').split())
except AttributeError:
return None, None
|
Returns the time needed to process the request by the frontend server in microseconds
        and the epoch timestamp of the request in microseconds.
:rtype: tuple containing processing time and timestamp
|
entailment
|
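A sketch of reading the timing information after a call, assuming the methods above belong to the DbsApi client class importable from dbs.apis.dbsClient; the url is the public reader instance mentioned elsewhere in this file and the query value is a placeholder. The call follows the plain-method signature shown above; some client versions expose this as a read-only property, in which case the parentheses are dropped.
from dbs.apis.dbsClient import DbsApi

api = DbsApi(url='https://cmsweb.cern.ch:8443/dbs/prod/global/DBSReader')
api.listPrimaryDSTypes(primary_ds_type='mc')
request_time, request_timestamp = api.requestTimingInfo()
print("server processing time: %s microseconds" % request_time)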
def insertBulkBlock(self, blockDump):
"""
API to insert a bulk block
:param blockDump: Output of the block dump command, example can be found in https://svnweb.cern.ch/trac/CMSDMWM/browser/DBS/trunk/Client/tests/dbsclient_t/unittests/blockdump.dict
:type blockDump: dict
"""
#We first check if the first lumi section has event_count or not
frst = True
if (blockDump['files'][0]['file_lumi_list'][0]).get('event_count') == None: frst = False
        # When frst == True, we look for event_count == None in the data; if we do not find None (redFlag = False),
        # everything is good. Otherwise, we have to remove all event_count entries in the lumis and raise an exception.
        # When frst == False, we look for event_count != None in the data; if we do not find a non-None value (redFlag = False),
        # everything is good. Otherwise, we have to remove all event_count entries in the lumis and raise an exception.
redFlag = False
if frst == True:
eventCT = (fl.get('event_count') == None for f in blockDump['files'] for fl in f['file_lumi_list'])
else:
eventCT = (fl.get('event_count') != None for f in blockDump['files'] for fl in f['file_lumi_list'])
redFlag = any(eventCT)
if redFlag:
for f in blockDump['files']:
for fl in f['file_lumi_list']:
if 'event_count' in fl: del fl['event_count']
result = self.__callServer("bulkblocks", data=blockDump, callmethod='POST' )
if redFlag:
raise dbsClientException("Mixed event_count per lumi in the block: %s" %blockDump['block']['block_name'],
"The block was inserted into DBS, but you need to check if the data is valid.")
else:
return result
|
API to insert a bulk block
:param blockDump: Output of the block dump command, example can be found in https://svnweb.cern.ch/trac/CMSDMWM/browser/DBS/trunk/Client/tests/dbsclient_t/unittests/blockdump.dict
:type blockDump: dict
|
entailment
|
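A small pre-flight helper mirroring the mixed event_count rule described above: it reports whether every file lumi in a block dump either has or lacks event_count consistently, so a caller can clean the dump before submitting it. The blockDump layout is the one the API documents; the helper itself is only illustrative.
def lumis_have_consistent_event_count(blockDump):
    # one flag per lumi: True if event_count is present and not None
    flags = set(fl.get('event_count') is not None
                for f in blockDump['files']
                for fl in f['file_lumi_list'])
    # consistent means all lumis agree (or the block has no lumis at all)
    return len(flags) <= 1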
def insertFiles(self, filesList, qInserts=False):
"""
        API to insert a list of files into DBS. Up to 10 files can be inserted in one request.
:param qInserts: True means that inserts will be queued instead of done immediately. INSERT QUEUE Manager will perform the inserts, within few minutes.
:type qInserts: bool
:param filesList: List of dictionaries containing following information
:type filesList: list of dicts
:key logical_file_name: File to be inserted (str) (Required)
:key is_file_valid: (optional, default = 1): (bool)
:key block: required: /a/b/c#d (str)
:key dataset: required: /a/b/c (str)
:key file_type: (optional, default = EDM) one of the predefined types, (str)
:key check_sum: (optional, default = '-1') (str)
:key event_count: (optional, default = -1) (int)
:key file_size: (optional, default = -1.) (float)
:key adler32: (optional, default = '') (str)
:key md5: (optional, default = '') (str)
:key auto_cross_section: (optional, default = -1.) (float)
:key file_lumi_list: (optional, default = []) [{'run_num': 123, 'lumi_section_num': 12},{}....]
:key file_parent_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....]
:key file_assoc_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....]
:key file_output_config_list: (optional, default = []) [{'app_name':..., 'release_version':..., 'pset_hash':...., output_module_label':...},{}.....]
"""
if not qInserts: #turn off qInserts
return self.__callServer("files", params={'qInserts': qInserts}, data=filesList, callmethod='POST' )
return self.__callServer("files", data=filesList, callmethod='POST' )
|
        API to insert a list of files into DBS. Up to 10 files can be inserted in one request.
:param qInserts: True means that inserts will be queued instead of done immediately. INSERT QUEUE Manager will perform the inserts, within few minutes.
:type qInserts: bool
:param filesList: List of dictionaries containing following information
:type filesList: list of dicts
:key logical_file_name: File to be inserted (str) (Required)
:key is_file_valid: (optional, default = 1): (bool)
:key block: required: /a/b/c#d (str)
:key dataset: required: /a/b/c (str)
:key file_type: (optional, default = EDM) one of the predefined types, (str)
:key check_sum: (optional, default = '-1') (str)
:key event_count: (optional, default = -1) (int)
:key file_size: (optional, default = -1.) (float)
:key adler32: (optional, default = '') (str)
:key md5: (optional, default = '') (str)
:key auto_cross_section: (optional, default = -1.) (float)
:key file_lumi_list: (optional, default = []) [{'run_num': 123, 'lumi_section_num': 12},{}....]
:key file_parent_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....]
:key file_assoc_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....]
:key file_output_config_list: (optional, default = []) [{'app_name':..., 'release_version':..., 'pset_hash':...., output_module_label':...},{}.....]
|
entailment
|
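A hedged sketch of a minimal file dictionary built from the keys documented above and pushed through the DbsApi client (assumed importable from dbs.apis.dbsClient); the writer url, dataset, block and lfn values are placeholders.
from dbs.apis.dbsClient import DbsApi

writer = DbsApi(url='https://cmsweb-testbed.cern.ch:8443/dbs/int/global/DBSWriter')  # placeholder instance
a_file = {
    'logical_file_name': '/store/data/placeholder/file_0001.root',
    'dataset': '/Prim/Proc-v1/TIER',
    'block': '/Prim/Proc-v1/TIER#0000-placeholder',
    'file_type': 'EDM',
    'file_size': 123456789,
    'event_count': 1000,
    'check_sum': '1504266448',
    'adler32': 'deadbeef',
    'file_lumi_list': [{'run_num': 1, 'lumi_section_num': 27}],
}
writer.insertFiles(filesList=[a_file], qInserts=False)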
def listFileParentsByLumi(self, **kwargs):
"""
API to list file parents using lumi section info.
        :param block_name: name of the block whose files' parents need to be found (Required)
        :type block_name: str
        :param logical_file_name: if not all file parentages under the block are needed, this lfn list gives the files whose parents should be found (optional).
        :type logical_file_name: list of string
:returns: List of dictionaries containing following keys [cid,pid]
:rtype: list of dicts
"""
validParameters = ['block_name', 'logical_file_name']
requiredParameters = {'forced': ['block_name']}
checkInputParameter(method="listFileParentsByLumi", parameters=kwargs.keys(), validParameters=validParameters,
requiredParameters=requiredParameters)
return self.__callServer("fileparentsbylumi", data=kwargs, callmethod='POST')
|
API to list file parents using lumi section info.
        :param block_name: name of the block whose files' parents need to be found (Required)
        :type block_name: str
        :param logical_file_name: if not all file parentages under the block are needed, this lfn list gives the files whose parents should be found (optional).
        :type logical_file_name: list of string
:returns: List of dictionaries containing following keys [cid,pid]
:rtype: list of dicts
|
entailment
|
def listBlockParents(self, **kwargs):
"""
API to list block parents.
        :param block_name: name of the block whose parents need to be found (Required)
:type block_name: str
:returns: List of dictionaries containing following keys (block_name)
:rtype: list of dicts
"""
validParameters = ['block_name']
requiredParameters = {'forced': validParameters}
checkInputParameter(method="listBlockParents", parameters=kwargs.keys(), validParameters=validParameters,
requiredParameters=requiredParameters)
if isinstance(kwargs["block_name"], list):
return self.__callServer("blockparents", data=kwargs, callmethod='POST')
else:
return self.__callServer("blockparents", params=kwargs)
|
API to list block parents.
        :param block_name: name of the block whose parents need to be found (Required)
:type block_name: str
:returns: List of dictionaries containing following keys (block_name)
:rtype: list of dicts
|
entailment
|
def listBlocks(self, **kwargs):
"""
        API to list blocks in DBS. At least one of the parameters block_name, dataset, data_tier_name or
        logical_file_name is required. If data_tier_name is provided, min_cdate and max_cdate have to be specified and
        the difference in time has to be less than 31 days.
:param block_name: name of the block
:type block_name: str
:param dataset: dataset
:type dataset: str
:param data_tier_name: data tier
:type data_tier_name: str
:param logical_file_name: Logical File Name
:type logical_file_name: str
:param origin_site_name: Origin Site Name (Optional)
:type origin_site_name: str
:param run_num: run numbers (Optional). Possible format: run_num, "run_min-run_max", or ["run_min-run_max", run1, run2, ...]
:type run_num: int, list of runs or list of run ranges
:param min_cdate: Lower limit for the creation date (unixtime) (Optional)
:type min_cdate: int, str
:param max_cdate: Upper limit for the creation date (unixtime) (Optional)
:type max_cdate: int, str
:param min_ldate: Lower limit for the last modification date (unixtime) (Optional)
:type min_ldate: int, str
:param max_ldate: Upper limit for the last modification date (unixtime) (Optional)
:type max_ldate: int, str
:param cdate: creation date (unixtime) (Optional)
:type cdate: int, str
:param ldate: last modification date (unixtime) (Optional)
:type ldate: int, str
:param detail: Get detailed information of a block (Optional)
:type detail: bool
:returns: List of dictionaries containing following keys (block_name). If option detail is used the dictionaries contain the following keys (block_id, create_by, creation_date, open_for_writing, last_modified_by, dataset, block_name, file_count, origin_site_name, last_modification_date, dataset_id and block_size)
:rtype: list of dicts
"""
validParameters = ['dataset', 'block_name', 'data_tier_name', 'origin_site_name',
'logical_file_name', 'run_num', 'open_for_writing', 'min_cdate',
'max_cdate', 'min_ldate', 'max_ldate',
'cdate', 'ldate', 'detail']
#requiredParameters = {'multiple': validParameters}
requiredParameters = {'multiple': ['dataset', 'block_name', 'data_tier_name', 'logical_file_name']}
#set defaults
if 'detail' not in kwargs.keys():
kwargs['detail'] = False
checkInputParameter(method="listBlocks", parameters=kwargs.keys(), validParameters=validParameters,
requiredParameters=requiredParameters)
return self.__callServer("blocks", params=kwargs)
|
        API to list blocks in DBS. At least one of the parameters block_name, dataset, data_tier_name or
        logical_file_name is required. If data_tier_name is provided, min_cdate and max_cdate have to be specified and
        the difference in time has to be less than 31 days.
:param block_name: name of the block
:type block_name: str
:param dataset: dataset
:type dataset: str
:param data_tier_name: data tier
:type data_tier_name: str
:param logical_file_name: Logical File Name
:type logical_file_name: str
:param origin_site_name: Origin Site Name (Optional)
:type origin_site_name: str
:param run_num: run numbers (Optional). Possible format: run_num, "run_min-run_max", or ["run_min-run_max", run1, run2, ...]
:type run_num: int, list of runs or list of run ranges
:param min_cdate: Lower limit for the creation date (unixtime) (Optional)
:type min_cdate: int, str
:param max_cdate: Upper limit for the creation date (unixtime) (Optional)
:type max_cdate: int, str
:param min_ldate: Lower limit for the last modification date (unixtime) (Optional)
:type min_ldate: int, str
:param max_ldate: Upper limit for the last modification date (unixtime) (Optional)
:type max_ldate: int, str
:param cdate: creation date (unixtime) (Optional)
:type cdate: int, str
:param ldate: last modification date (unixtime) (Optional)
:type ldate: int, str
:param detail: Get detailed information of a block (Optional)
:type detail: bool
:returns: List of dictionaries containing following keys (block_name). If option detail is used the dictionaries contain the following keys (block_id, create_by, creation_date, open_for_writing, last_modified_by, dataset, block_name, file_count, origin_site_name, last_modification_date, dataset_id and block_size)
:rtype: list of dicts
|
entailment
|
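An illustrative read-only query against the API above, assuming the DbsApi client from dbs.apis.dbsClient; the url is the public production reader and the dataset path is a placeholder.
from dbs.apis.dbsClient import DbsApi

reader = DbsApi(url='https://cmsweb.cern.ch:8443/dbs/prod/global/DBSReader')
blocks = reader.listBlocks(dataset='/SomePrimary/SomeProcessed-v1/AOD', detail=True)
for blk in blocks:
    print("%s: %s files, %s bytes" % (blk['block_name'], blk['file_count'], blk['block_size']))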
def listDatasets(self, **kwargs):
"""
API to list dataset(s) in DBS
* You can use ANY combination of these parameters in this API
* In absence of parameters, all valid datasets known to the DBS instance will be returned
:param dataset: Full dataset (path) of the dataset
:type dataset: str
:param parent_dataset: Full dataset (path) of the dataset
:type parent_dataset: str
:param release_version: cmssw version
:type release_version: str
:param pset_hash: pset hash
:type pset_hash: str
:param app_name: Application name (generally it is cmsRun)
:type app_name: str
:param output_module_label: output_module_label
:type output_module_label: str
:param processing_version: Processing Version
:type processing_version: str
:param acquisition_era_name: Acquisition Era
:type acquisition_era_name: str
:param run_num: Specify a specific run number or range: Possible format: run_num, "run_min-run_max", or ["run_min-run_max", run1, run2, ...]
:type run_num: int,list,str
:param physics_group_name: List only dataset having physics_group_name attribute
:type physics_group_name: str
:param logical_file_name: List dataset containing the logical_file_name
:type logical_file_name: str
:param primary_ds_name: Primary Dataset Name
:type primary_ds_name: str
:param primary_ds_type: Primary Dataset Type (Type of data, MC/DATA)
:type primary_ds_type: str
:param processed_ds_name: List datasets having this processed dataset name
:type processed_ds_name: str
:param data_tier_name: Data Tier
:type data_tier_name: str
:param dataset_access_type: Dataset Access Type ( PRODUCTION, DEPRECATED etc.)
:type dataset_access_type: str
:param prep_id: prep_id
:type prep_id: str
:param create_by: Creator of the dataset
:type create_by: str
:param last_modified_by: Last modifier of the dataset
:type last_modified_by: str
:param min_cdate: Lower limit for the creation date (unixtime) (Optional)
:type min_cdate: int, str
:param max_cdate: Upper limit for the creation date (unixtime) (Optional)
:type max_cdate: int, str
:param min_ldate: Lower limit for the last modification date (unixtime) (Optional)
:type min_ldate: int, str
:param max_ldate: Upper limit for the last modification date (unixtime) (Optional)
:type max_ldate: int, str
:param cdate: creation date (unixtime) (Optional)
:type cdate: int, str
:param ldate: last modification date (unixtime) (Optional)
:type ldate: int, str
:param detail: List all details of a dataset
:type detail: bool
:param dataset_id: DB primary key of datasets table.
:type dataset_id: int, str
        :returns: List of dictionaries containing the following keys (dataset). If the detail option is used, the dictionaries contain the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type)
:rtype: list of dicts
"""
validParameters = ['dataset', 'parent_dataset', 'is_dataset_valid',
'release_version', 'pset_hash', 'app_name',
'output_module_label', 'processing_version', 'acquisition_era_name',
'run_num', 'physics_group_name', 'logical_file_name',
'primary_ds_name', 'primary_ds_type', 'processed_ds_name', 'data_tier_name',
'dataset_access_type', 'prep_id', 'create_by', 'last_modified_by',
'min_cdate', 'max_cdate', 'min_ldate', 'max_ldate', 'cdate', 'ldate',
'detail', 'dataset_id']
#set defaults
if 'detail' not in kwargs.keys():
kwargs['detail'] = False
checkInputParameter(method="listDatasets", parameters=kwargs.keys(), validParameters=validParameters)
return self.__callServer("datasets", params=kwargs)
|
API to list dataset(s) in DBS
* You can use ANY combination of these parameters in this API
* In absence of parameters, all valid datasets known to the DBS instance will be returned
:param dataset: Full dataset (path) of the dataset
:type dataset: str
:param parent_dataset: Full dataset (path) of the dataset
:type parent_dataset: str
:param release_version: cmssw version
:type release_version: str
:param pset_hash: pset hash
:type pset_hash: str
:param app_name: Application name (generally it is cmsRun)
:type app_name: str
:param output_module_label: output_module_label
:type output_module_label: str
:param processing_version: Processing Version
:type processing_version: str
:param acquisition_era_name: Acquisition Era
:type acquisition_era_name: str
:param run_num: Specify a specific run number or range: Possible format: run_num, "run_min-run_max", or ["run_min-run_max", run1, run2, ...]
:type run_num: int,list,str
:param physics_group_name: List only dataset having physics_group_name attribute
:type physics_group_name: str
:param logical_file_name: List dataset containing the logical_file_name
:type logical_file_name: str
:param primary_ds_name: Primary Dataset Name
:type primary_ds_name: str
:param primary_ds_type: Primary Dataset Type (Type of data, MC/DATA)
:type primary_ds_type: str
:param processed_ds_name: List datasets having this processed dataset name
:type processed_ds_name: str
:param data_tier_name: Data Tier
:type data_tier_name: str
:param dataset_access_type: Dataset Access Type ( PRODUCTION, DEPRECATED etc.)
:type dataset_access_type: str
:param prep_id: prep_id
:type prep_id: str
:param create_by: Creator of the dataset
:type create_by: str
:param last_modified_by: Last modifier of the dataset
:type last_modified_by: str
:param min_cdate: Lower limit for the creation date (unixtime) (Optional)
:type min_cdate: int, str
:param max_cdate: Upper limit for the creation date (unixtime) (Optional)
:type max_cdate: int, str
:param min_ldate: Lower limit for the last modification date (unixtime) (Optional)
:type min_ldate: int, str
:param max_ldate: Upper limit for the last modification date (unixtime) (Optional)
:type max_ldate: int, str
:param cdate: creation date (unixtime) (Optional)
:type cdate: int, str
:param ldate: last modification date (unixtime) (Optional)
:type ldate: int, str
:param detail: List all details of a dataset
:type detail: bool
:param dataset_id: DB primary key of datasets table.
:type dataset_id: int, str
        :returns: List of dictionaries containing the following keys (dataset). If the detail option is used, the dictionaries contain the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type)
:rtype: list of dicts
|
entailment
|
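A sketch combining a few of the filters documented above; the client class, reader url and filter values are assumptions or placeholders.
from dbs.apis.dbsClient import DbsApi

reader = DbsApi(url='https://cmsweb.cern.ch:8443/dbs/prod/global/DBSReader')
datasets = reader.listDatasets(data_tier_name='AOD',
                               dataset_access_type='VALID',
                               min_cdate=1420070400, max_cdate=1422748800,
                               detail=True)
print("found %d datasets" % len(datasets))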
def listDatasetArray(self, **kwargs):
"""
API to list datasets in DBS.
        :param dataset: list of datasets [dataset1,dataset2,..,dataset n] (Required if dataset_id is not present), Max length 1000.
        :type dataset: list
        :param dataset_id: list of dataset_ids that are the primary keys of the datasets table: [dataset_id1,dataset_id2,..,dataset_idn] (Required if dataset is not present), Max length 1000.
        :type dataset_id: list
:param dataset_access_type: List only datasets with that dataset access type (Optional)
:type dataset_access_type: str
:param detail: brief list or detailed list 1/0
:type detail: bool
:returns: List of dictionaries containing the following keys (dataset). If the detail option is used. The dictionary contains the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type)
:rtype: list of dicts
"""
validParameters = ['dataset', 'dataset_access_type', 'detail', 'dataset_id']
requiredParameters = {'multiple': ['dataset', 'dataset_id']}
checkInputParameter(method="listDatasetArray", parameters=kwargs.keys(), validParameters=validParameters,
requiredParameters=requiredParameters)
#set defaults
if 'detail' not in kwargs.keys():
kwargs['detail'] = False
return self.__callServer("datasetlist", data=kwargs, callmethod='POST')
|
API to list datasets in DBS.
        :param dataset: list of datasets [dataset1,dataset2,..,dataset n] (Required if dataset_id is not present), Max length 1000.
        :type dataset: list
        :param dataset_id: list of dataset_ids that are the primary keys of the datasets table: [dataset_id1,dataset_id2,..,dataset_idn] (Required if dataset is not present), Max length 1000.
        :type dataset_id: list
:param dataset_access_type: List only datasets with that dataset access type (Optional)
:type dataset_access_type: str
:param detail: brief list or detailed list 1/0
:type detail: bool
:returns: List of dictionaries containing the following keys (dataset). If the detail option is used. The dictionary contains the following keys (primary_ds_name, physics_group_name, acquisition_era_name, create_by, dataset_access_type, data_tier_name, last_modified_by, creation_date, processing_version, processed_ds_name, xtcrosssection, last_modification_date, dataset_id, dataset, prep_id, primary_ds_type)
:rtype: list of dicts
|
entailment
|
def listFileArray(self, **kwargs):
"""
API to list files in DBS. Non-wildcarded logical_file_name, non-wildcarded dataset, non-wildcarded block_name or non-wildcarded lfn list is required.
        The combination of a non-wildcarded dataset or block_name with a wildcarded logical_file_name is supported.
* For lumi_list the following two json formats are supported:
- [a1, a2, a3,]
- [[a,b], [c, d],]
* lumi_list can be either a list of lumi section numbers as [a1, a2, a3,] or a list of lumi section range as [[a,b], [c, d],]. They cannot be mixed.
        * If lumi_list is provided, only a single run_num is allowed.
* When run_num=1, one has to provide logical_file_name.
* When lfn list is present, no run or lumi list is allowed.
:param logical_file_name: logical_file_name of the file, Max length 1000.
:type logical_file_name: str, list
:param dataset: dataset
:type dataset: str
:param block_name: block name
:type block_name: str
:param release_version: release version
:type release_version: str
:param pset_hash: parameter set hash
:type pset_hash: str
:param app_name: Name of the application
:type app_name: str
:param output_module_label: name of the used output module
:type output_module_label: str
:param run_num: run , run ranges, and run list, Max list length 1000.
:type run_num: int, list, string
:param origin_site_name: site where the file was created
:type origin_site_name: str
:param lumi_list: List containing luminosity sections, Max length 1000.
:type lumi_list: list
:param detail: Get detailed information about a file
:type detail: bool
:param validFileOnly: 0 or 1. default=0. Return only valid files if set to 1.
:type validFileOnly: int
        :param sumOverLumi: 0 or 1. default=0. When sumOverLumi=1 and run_num is given, it will count the events by lumi; no list inputs are allowed when sumOverLumi=1.
:type sumOverLumi: int
:returns: List of dictionaries containing the following keys (logical_file_name). If detail parameter is true, the dictionaries contain the following keys (check_sum, branch_hash_id, adler32, block_id, event_count, file_type, create_by, logical_file_name, creation_date, last_modified_by, dataset, block_name, file_id, file_size, last_modification_date, dataset_id, file_type_id, auto_cross_section, md5, is_file_valid)
:rtype: list of dicts
"""
validParameters = ['dataset', 'block_name', 'logical_file_name',
'release_version', 'pset_hash', 'app_name',
'output_module_label', 'run_num',
'origin_site_name', 'lumi_list', 'detail', 'validFileOnly', 'sumOverLumi']
requiredParameters = {'multiple': ['dataset', 'block_name', 'logical_file_name']}
#set defaults
if 'detail' not in kwargs.keys():
kwargs['detail'] = False
checkInputParameter(method="listFileArray", parameters=kwargs.keys(), validParameters=validParameters,
requiredParameters=requiredParameters)
        # In order to protect the DB and make sure the query can be returned in 300 seconds, we limit the length of
        # logical file name, lumi and run num lists to 1000. These numbers may be adjusted later if
        # needed. YG May-20-2015.
        # CMS has all MC data with run_num=1. It is almost a full table scan if run_num=1 without lfn. So we require lfn
        # to be present when run_num=1. YG Jan 14, 2016
if 'logical_file_name' in kwargs.keys() and isinstance(kwargs['logical_file_name'], list)\
and len(kwargs['logical_file_name']) > 1:
if 'run_num' in kwargs.keys() and isinstance(kwargs['run_num'],list) and len(kwargs['run_num']) > 1 :
                raise dbsClientException('Invalid input', 'files API does not support two lists: run_num and lfn. ')
elif 'lumi_list' in kwargs.keys() and kwargs['lumi_list'] and len(kwargs['lumi_list']) > 1 :
                raise dbsClientException('Invalid input', 'files API does not support two lists: lumi_list and lfn. ')
elif 'lumi_list' in kwargs.keys() and kwargs['lumi_list']:
if 'run_num' not in kwargs.keys() or not kwargs['run_num'] or kwargs['run_num'] ==-1 :
raise dbsClientException('Invalid input', 'When Lumi section is present, a single run is required. ')
else:
if 'run_num' in kwargs.keys():
if isinstance(kwargs['run_num'], list):
if 1 in kwargs['run_num'] or '1' in kwargs['run_num']:
                        raise dbsClientException('Invalid input', 'files API does not support run_num=1 when no lumi.')
else:
if kwargs['run_num']==1 or kwargs['run_num']=='1':
                        raise dbsClientException('Invalid input', 'files API does not support run_num=1 when no lumi.')
#check if no lfn is given, but run_num=1 is used for searching
if ('logical_file_name' not in kwargs.keys() or not kwargs['logical_file_name']) and 'run_num' in kwargs.keys():
if isinstance(kwargs['run_num'], list):
if 1 in kwargs['run_num'] or '1' in kwargs['run_num']:
                    raise dbsClientException('Invalid input', 'files API does not support run_num=1 without logical_file_name.')
else:
if kwargs['run_num'] == 1 or kwargs['run_num'] == '1':
                    raise dbsClientException('Invalid input', 'files API does not support run_num=1 without logical_file_name.')
results = []
mykey = None
total_lumi_len = 0
split_lumi_list = []
max_list_len = 1000 #this number is defined in DBS server
for key, value in kwargs.iteritems():
if key == 'lumi_list' and isinstance(kwargs['lumi_list'], list)\
and kwargs['lumi_list'] and isinstance(kwargs['lumi_list'][0], list):
lapp = 0
l = 0
sm = []
for i in kwargs['lumi_list']:
while i[0]+max_list_len < i[1]:
split_lumi_list.append([[i[0], i[0]+max_list_len-1]])
i[0] = i[0] + max_list_len
else:
l += (i[1]-i[0]+1)
if l <= max_list_len:
sm.append([i[0], i[1]])
lapp = l #number lumis in sm
else:
split_lumi_list.append(sm)
sm=[]
sm.append([i[0], i[1]])
lapp = i[1]-i[0]+1
if sm:
split_lumi_list.append(sm)
elif key in ('logical_file_name', 'run_num', 'lumi_list') and isinstance(value, list) and len(value)>max_list_len:
mykey =key
#
if mykey:
sourcelist = []
#create a new list to slice
sourcelist = kwargs[mykey][:]
for slice in slicedIterator(sourcelist, max_list_len):
kwargs[mykey] = slice
results.extend(self.__callServer("fileArray", data=kwargs, callmethod="POST"))
elif split_lumi_list:
for item in split_lumi_list:
kwargs['lumi_list'] = item
results.extend(self.__callServer("fileArray", data=kwargs, callmethod="POST"))
else:
return self.__callServer("fileArray", data=kwargs, callmethod="POST")
#make sure only one dictionary per lfn.
#Make sure this changes when we move to 2.7 or 3.0
#http://stackoverflow.com/questions/11092511/python-list-of-unique-dictionaries
# YG May-26-2015
return dict((v['logical_file_name'], v) for v in results).values()
|
API to list files in DBS. Non-wildcarded logical_file_name, non-wildcarded dataset, non-wildcarded block_name or non-wildcarded lfn list is required.
        The combination of a non-wildcarded dataset or block_name with a wildcarded logical_file_name is supported.
* For lumi_list the following two json formats are supported:
- [a1, a2, a3,]
- [[a,b], [c, d],]
* lumi_list can be either a list of lumi section numbers as [a1, a2, a3,] or a list of lumi section range as [[a,b], [c, d],]. They cannot be mixed.
        * If lumi_list is provided, only a single run_num is allowed.
* When run_num=1, one has to provide logical_file_name.
* When lfn list is present, no run or lumi list is allowed.
:param logical_file_name: logical_file_name of the file, Max length 1000.
:type logical_file_name: str, list
:param dataset: dataset
:type dataset: str
:param block_name: block name
:type block_name: str
:param release_version: release version
:type release_version: str
:param pset_hash: parameter set hash
:type pset_hash: str
:param app_name: Name of the application
:type app_name: str
:param output_module_label: name of the used output module
:type output_module_label: str
:param run_num: run , run ranges, and run list, Max list length 1000.
:type run_num: int, list, string
:param origin_site_name: site where the file was created
:type origin_site_name: str
:param lumi_list: List containing luminosity sections, Max length 1000.
:type lumi_list: list
:param detail: Get detailed information about a file
:type detail: bool
:param validFileOnly: 0 or 1. default=0. Return only valid files if set to 1.
:type validFileOnly: int
        :param sumOverLumi: 0 or 1. default=0. When sumOverLumi=1 and run_num is given, it will count the events by lumi; no list inputs are allowed when sumOverLumi=1.
:type sumOverLumi: int
:returns: List of dictionaries containing the following keys (logical_file_name). If detail parameter is true, the dictionaries contain the following keys (check_sum, branch_hash_id, adler32, block_id, event_count, file_type, create_by, logical_file_name, creation_date, last_modified_by, dataset, block_name, file_id, file_size, last_modification_date, dataset_id, file_type_id, auto_cross_section, md5, is_file_valid)
:rtype: list of dicts
|
entailment
|
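A sketch that exercises the run/lumi constraints spelled out above: a single run with a lumi-range list over one block. The client class, url, block name and run number are placeholders or assumptions.
from dbs.apis.dbsClient import DbsApi

reader = DbsApi(url='https://cmsweb.cern.ch:8443/dbs/prod/global/DBSReader')
files = reader.listFileArray(block_name='/Prim/Proc-v1/AOD#0000-placeholder',
                             run_num=207454,
                             lumi_list=[[1, 50], [100, 120]],
                             validFileOnly=1,
                             detail=True)
for f in files:
    print("%s has %s events" % (f['logical_file_name'], f['event_count']))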
def listPrimaryDSTypes(self, **kwargs):
"""
API to list primary dataset types
:param primary_ds_type: List that primary dataset type (Optional)
:type primary_ds_type: str
:param dataset: List the primary dataset type for that dataset (Optional)
:type dataset: str
:returns: List of dictionaries containing the following keys (primary_ds_type_id, data_type)
:rtype: list of dicts
"""
validParameters = ['primary_ds_type', 'dataset']
checkInputParameter(method="listPrimaryDSTypes", parameters=kwargs.keys(), validParameters=validParameters)
return self.__callServer("primarydstypes", params=kwargs)
|
API to list primary dataset types
:param primary_ds_type: List that primary dataset type (Optional)
:type primary_ds_type: str
:param dataset: List the primary dataset type for that dataset (Optional)
:type dataset: str
:returns: List of dictionaries containing the following keys (primary_ds_type_id, data_type)
:rtype: list of dicts
|
entailment
|
def listRuns(self, **kwargs):
"""
        API to list run dictionaries, for example: [{'run_num': [160578, 160498, 160447, 160379]}].
At least one parameter is mandatory.
:param logical_file_name: List all runs in the file
:type logical_file_name: str
:param block_name: List all runs in the block
:type block_name: str
:param dataset: List all runs in that dataset
:type dataset: str
:param run_num: List all runs
:type run_num: int, string or list
"""
validParameters = ['run_num', 'logical_file_name', 'block_name', 'dataset']
requiredParameters = {'multiple': validParameters}
checkInputParameter(method="listRuns", parameters=kwargs.keys(), validParameters=validParameters,
requiredParameters=requiredParameters)
return self.__callServer("runs", params=kwargs)
|
        API to list run dictionaries, for example: [{'run_num': [160578, 160498, 160447, 160379]}].
At least one parameter is mandatory.
:param logical_file_name: List all runs in the file
:type logical_file_name: str
:param block_name: List all runs in the block
:type block_name: str
:param dataset: List all runs in that dataset
:type dataset: str
:param run_num: List all runs
:type run_num: int, string or list
|
entailment
|
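A minimal sketch for the API above (client class and url as in the previous sketches, dataset path is a placeholder); per the docstring, each returned entry carries a list of run numbers.
from dbs.apis.dbsClient import DbsApi

reader = DbsApi(url='https://cmsweb.cern.ch:8443/dbs/prod/global/DBSReader')
for entry in reader.listRuns(dataset='/SomePrimary/SomeProcessed-v1/RAW'):
    print(entry['run_num'])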
def updateAcqEraEndDate(self, **kwargs):
"""
API to update the end_date of an acquisition era
:param acquisition_era_name: acquisition_era_name to update (Required)
:type acquisition_era_name: str
:param end_date: end_date not zero (Required)
:type end_date: int
"""
validParameters = ['end_date', 'acquisition_era_name']
requiredParameters = {'forced': validParameters}
checkInputParameter(method="updateAcqEraEndDate", parameters=kwargs.keys(), validParameters=validParameters,
requiredParameters=requiredParameters)
return self.__callServer("acquisitioneras", params=kwargs, callmethod='PUT')
|
API to update the end_date of an acquisition era
:param acquisition_era_name: acquisition_era_name to update (Required)
:type acquisition_era_name: str
:param end_date: end_date not zero (Required)
:type end_date: int
|
entailment
|
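A write-API sketch for the update call above; it needs a DBSWriter instance and write permissions, and every value below (url, era name, end date) is a placeholder.
from dbs.apis.dbsClient import DbsApi

writer = DbsApi(url='https://cmsweb-testbed.cern.ch:8443/dbs/int/global/DBSWriter')
writer.updateAcqEraEndDate(acquisition_era_name='Run2015A', end_date=1435708800)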
def execute(self, conn, logical_file_name='', block_name='', run_num=-1, validFileOnly=0, migration=False):
"""
Lists lumi section numbers within a file, a list of files or a block.
"""
sql = ""
wheresql = ""
lfn_generator = ""
run_generator = ""
if logical_file_name and not isinstance(logical_file_name, list):
binds = {'logical_file_name': logical_file_name}
if int(validFileOnly) == 0:
if migration: # migration always calls with a single file and includes all files, valid or not.
sql = self.sql + """ FROM {owner}FILE_LUMIS FL
JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID
WHERE F.LOGICAL_FILE_NAME = :logical_file_name
""".format(owner=self.owner)
else:
sql = self.sql + """ , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME FROM {owner}FILE_LUMIS FL
JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID
WHERE F.LOGICAL_FILE_NAME = :logical_file_name
""".format(owner=self.owner)
else:
sql = self.sql + """ , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME FROM {owner}FILE_LUMIS FL
JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID
JOIN {owner}DATASETS D ON D.DATASET_ID = F.DATASET_ID
JOIN {owner}DATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID
WHERE F.IS_FILE_VALID = 1 AND F.LOGICAL_FILE_NAME = :logical_file_name
AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION')
""".format(owner=self.owner)
elif logical_file_name and isinstance(logical_file_name, list):
sql = self.sql + """ , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME FROM {owner}FILE_LUMIS FL JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID """.format(owner=self.owner)
lfn_generator, binds = create_token_generator(logical_file_name)
if int(validFileOnly) == 0:
wheresql = "WHERE F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)"
else:
sql = sql + """ JOIN {owner}DATASETS D ON D.DATASET_ID = F.DATASET_ID
JOIN {owner}DATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID
""".format(owner=self.owner)
wheresql = """ WHERE F.IS_FILE_VALID = 1 AND F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)
AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION')
"""
sql = "{lfn_generator} {sql} {wheresql}".format(lfn_generator=lfn_generator, sql=sql, wheresql=wheresql)
elif block_name:
binds = {'block_name': block_name}
if int(validFileOnly) == 0:
sql = self.sql + """ , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME FROM {owner}FILE_LUMIS FL JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID
JOIN {owner}BLOCKS B ON B.BLOCK_ID = F.BLOCK_ID
WHERE B.BLOCK_NAME = :block_name""".format(owner=self.owner)
else:
sql = self.sql + """ , F.LOGICAL_FILE_NAME as LOGICAL_FILE_NAME FROM {owner}FILE_LUMIS FL JOIN {owner}FILES F ON F.FILE_ID = FL.FILE_ID
JOIN {owner}DATASETS D ON D.DATASET_ID = F.DATASET_ID
JOIN {owner}DATASET_ACCESS_TYPES DT ON DT.DATASET_ACCESS_TYPE_ID = D.DATASET_ACCESS_TYPE_ID
JOIN {owner}BLOCKS B ON B.BLOCK_ID = F.BLOCK_ID
WHERE F.IS_FILE_VALID = 1 AND B.BLOCK_NAME = :block_name
AND DT.DATASET_ACCESS_TYPE in ('VALID', 'PRODUCTION')
""".format(owner=self.owner)
else:
dbsExceptionHandler('dbsException-invalid-input2', "FileLumi/List: Either logocal_file_name or block_name must be provided.", self.logger.exception, "FileLumi/List: Either logocal_file_name or block_name must be provided.")
#
if run_num != -1:
run_list = []
wheresql_run_list=''
wheresql_run_range=''
for r in parseRunRange(run_num):
if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long) or isinstance(r, str):
run_list.append(str(r))
if isinstance(r, run_tuple):
if r[0] == r[1]:
dbsExceptionHandler('dbsException-invalid-input2', "DBS run range must be apart at least by 1.",
self.logger.exception, "DBS run range must be apart at least by 1.")
wheresql_run_range = " FL.RUN_NUM between :minrun and :maxrun "
binds.update({"minrun":r[0]})
binds.update({"maxrun":r[1]})
#
if run_list:
if len(run_list) == 1:
wheresql_run_list = " fl.RUN_NUM = :single_run "
binds.update({"single_run": long(run_list[0])})
else:
wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
run_generator, run_binds = create_token_generator(run_list)
sql = "{run_generator}".format(run_generator=run_generator) + sql
binds.update(run_binds)
if wheresql_run_range and wheresql_run_list:
sql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
elif wheresql_run_range and not wheresql_run_list:
sql += " and " + wheresql_run_range
elif not wheresql_run_range and wheresql_run_list:
sql += " and " + wheresql_run_list
self.logger.debug(sql)
self.logger.debug(binds)
if run_generator and lfn_generator:
dbsExceptionHandler('dbsException-invalid-input2', "listFileLumiArray support single list of lfn or run_num. ",
self.logger.exception, "listFileLumiArray support single list of lfn or run_num. ")
cursors = self.dbi.processData(sql, binds, conn, transaction=False, returnCursor=True)
result=[]
file_run_lumi={}
event_ct=False
for i in cursors:
result.extend(self.formatCursor(i, size=100))
#for migration, we need flat format to load the data into another DB.
#self.logger.error(result)
if migration:
#YG 09/2015.
for item in result:
yield item
else:
if result and result[0]['event_count']:
event_ct = True
for i in result:
r = i['run_num']
f = i['logical_file_name']
if event_ct:
file_run_lumi.setdefault((f, r), []).append([i['lumi_section_num'], i['event_count']])
else:
file_run_lumi.setdefault((f, r), []).append(i['lumi_section_num'])
for k, v in file_run_lumi.iteritems():
if event_ct:
lumi=[]
event=[]
for le in v:
lumi.append(le[0])
event.append(le[1])
yield {'logical_file_name':k[0], 'run_num':k[1], 'lumi_section_num':lumi, 'event_count':event}
else:
yield {'logical_file_name':k[0], 'run_num':k[1], 'lumi_section_num':v}
del file_run_lumi
del result
|
Lists lumi section numbers within a file, a list of files or a block.
|
entailment
|
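The non-migration branch above collapses flat (file, run, lumi, event) rows into one dictionary per (file, run) pair. A standalone sketch of that grouping, using made-up rows instead of a database cursor:

# Made-up rows standing in for the cursor output of the query above.
rows = [
    {'logical_file_name': '/store/f1.root', 'run_num': 1, 'lumi_section_num': 10, 'event_count': 5},
    {'logical_file_name': '/store/f1.root', 'run_num': 1, 'lumi_section_num': 11, 'event_count': 7},
]
file_run_lumi = {}
for row in rows:
    key = (row['logical_file_name'], row['run_num'])
    file_run_lumi.setdefault(key, []).append([row['lumi_section_num'], row['event_count']])
for (lfn, run), pairs in file_run_lumi.items():
    print({'logical_file_name': lfn, 'run_num': run,
           'lumi_section_num': [p[0] for p in pairs], 'event_count': [p[1] for p in pairs]})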
def execute(self, conn, child_block_name='', child_lfn_list=[], transaction=False):
sql = ''
binds = {}
child_ds_name = ''
child_where = ''
if child_block_name:
child_ds_name = child_block_name.split('#')[0]
parent_where = " where d.dataset = :child_ds_name ))"
binds ={"child_ds_name": child_ds_name}
else:
dbsExceptionHandler('dbsException-invalid-input', "Missing child block_name for listFileParentsByLumi. ")
#
if not child_lfn_list:
# most use cases
child_where = " where b.block_name = :child_block_name )"
binds.update({"child_block_name": child_block_name})
sql = """
with
parents as
(
""" +\
self.parent_sql +\
parent_where +\
"""),
"""+\
"""
children as
(
""" +\
self.child_sql +\
child_where +\
""")
select distinct cid, pid from children c
inner join parents p on c.R = p.R and c.L = p.L
"""
else:
# not common
child_where = """ where b.block_name = :child_block_name
and f.logical_file_name in (SELECT TOKEN FROM TOKEN_GENERATOR) ))
"""
lfn_generator, bind = create_token_generator(child_lfn_list)
binds.update(bind)
sql = lfn_generator +\
"""
with
parents as
(
""" +\
self.parent_sql +\
parent_where +\
"""),
"""+\
"""
children as
(
""" +\
self.child_sql +\
child_where +\
""")
select distinct cid, pid from children c
inner join parents p on c.R = p.R and c.L = p.L
"""
print(sql)
r = self.dbi.processData(sql, binds, conn, transaction=transaction)
#print(self.format(r))
return self.format(r)
"""
cursors = self.dbi.processData(sql, binds, conn, transaction=transaction, returnCursor=True)
for i in cursors:
d = self.formatCursor(i, size=100)
if isinstance(d, list) or isinstance(d, GeneratorType):
for elem in d:
yield elem
elif d:
yield d
"""
|
cursors = self.dbi.processData(sql, binds, conn, transaction=transaction, returnCursor=True)
for i in cursors:
d = self.formatCursor(i, size=100)
if isinstance(d, list) or isinstance(d, GeneratorType):
for elem in d:
yield elem
elif d:
yield d
|
entailment
|
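The SQL above pairs child and parent file ids that share a (run, lumi) combination. A pure-Python analogue of that inner join, on made-up (file_id, run, lumi) tuples:

# Made-up tuples standing in for the 'children' and 'parents' CTEs above.
children = [(101, 1, 10), (101, 1, 11), (102, 2, 20)]
parents = [(900, 1, 10), (901, 2, 20)]
parent_index = {}
for pid, run, lumi in parents:
    parent_index.setdefault((run, lumi), set()).add(pid)
pairs = set()
for cid, run, lumi in children:
    for pid in parent_index.get((run, lumi), ()):
        pairs.add((cid, pid))
print(sorted(pairs))  # [(101, 900), (102, 901)]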
def execute(self, conn, run_num=-1, logical_file_name="", block_name="", dataset="", trans=False):
"""
Lists run numbers; all runs are listed if no constraint is provided.
"""
sql = self.sql
binds = {}
if logical_file_name and "%" not in logical_file_name:
sql += """ inner join %sFILES FILES on FILES.FILE_ID = FL.FILE_ID
WHERE FILES.LOGICAL_FILE_NAME = :logical_file_name"""%(self.owner)
binds["logical_file_name"] = logical_file_name
elif block_name and "%" not in block_name:
sql += """ inner join %sFILES FILES on FILES.FILE_ID = FL.FILE_ID
inner join %sBLOCKS BLOCKS on BLOCKS.BLOCK_ID = FILES.BLOCK_ID
WHERE BLOCKS.BLOCK_NAME = :block_name """%(self.owner, self.owner)
binds["block_name"] = block_name
elif dataset and "%" not in dataset:
sql += """ inner join %sFILES FILES on FILES.FILE_ID = FL.FILE_ID
inner join %sDATASETS DATASETS on DATASETS.DATASET_ID = FILES.DATASET_ID
WHERE DATASETS.DATASET = :dataset """%(self.owner, self.owner)
binds["dataset"] = dataset
else:
pass
if run_num != -1:
andorwhere = ("WHERE", "AND")["WHERE" in sql]
run_list = []
wheresql_run_list = ''
wheresql_run_range = ''
#
for r in parseRunRange(run_num):
if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
run_list.append(str(r))
if isinstance(r, run_tuple):
if r[0] == r[1]:
dbsExceptionHandler('dbsException-invalid-input', "DBS run_num range must be apart at least by 1.", self.logger.exception)
wheresql_run_range = " FL.RUN_NUM between :minrun and :maxrun "
binds.update({"minrun":r[0]})
binds.update({"maxrun":r[1]})
#
if run_list:
wheresql_run_list = " fl.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
run_generator, run_binds = create_token_generator(run_list)
sql = "{run_generator}".format(run_generator=run_generator) + sql
binds.update(run_binds)
if wheresql_run_range and wheresql_run_list:
sql += " %s (" %andorwhere + wheresql_run_range + " or " + wheresql_run_list + " )"
elif wheresql_run_range and not wheresql_run_list:
sql += " %s " %andorwhere + wheresql_run_range
elif not wheresql_run_range and wheresql_run_list:
sql += " %s " %andorwhere + wheresql_run_list
#self.logger.debug(sql)
cursors = self.dbi.processData(sql, binds, conn, transaction=trans, returnCursor=True)
result=[]
for c in cursors:
result.extend(self.formatCursor(c, size=100))
return result
|
Lists run numbers; all runs are listed if no constraint is provided.
|
entailment
|
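The ("WHERE", "AND")["WHERE" in sql] line above is a compact way to pick the right keyword depending on whether a WHERE clause already exists: True indexes element 1, False indexes element 0. A standalone illustration with a made-up query:

# Boolean indexing into a 2-tuple: False -> "WHERE", True -> "AND".
sql = "SELECT FL.RUN_NUM FROM FILE_LUMIS FL"
print(("WHERE", "AND")["WHERE" in sql])   # WHERE: no where-clause yet
sql += " WHERE FL.RUN_NUM > 100"
print(("WHERE", "AND")["WHERE" in sql])   # AND: a where-clause is already present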
def find_devices():
"""Return a list of dictionaries. Each dictionary represents one device.
The dictionary contains the following keys: port, serial_number and in_use.
`port` can be used with :func:`open`. `serial_number` is the serial number
of the device (and can also be used with :func:`open`) and `in_use`
indicates whether the device was opened before and can currently not be
opened.
.. note::
There is no guarantee that the returned information is still valid
when you open the device. Especially if you open a device by its port,
the serial number may differ because the device list has changed, e.g. a
device may be disconnected from the machine after you call :func:`find_devices`
but before you call :func:`open`.
To open a device by its serial number, you should use the :func:`open`
with the `serial_number` parameter.
"""
# first fetch the number of attached devices, so we can create a buffer
# with the exact amount of entries. api expects array of u16
num_devices = api.py_aa_find_devices(0, array.array('H'))
_raise_error_if_negative(num_devices)
# return an empty list if no device is connected
if num_devices == 0:
return list()
ports = array.array('H', (0,) * num_devices)
unique_ids = array.array('I', (0,) * num_devices)
num_devices = api.py_aa_find_devices_ext(len(ports), len(unique_ids),
ports, unique_ids)
_raise_error_if_negative(num_devices)
if num_devices == 0:
return list()
del ports[num_devices:]
del unique_ids[num_devices:]
devices = list()
for port, uid in zip(ports, unique_ids):
in_use = bool(port & PORT_NOT_FREE)
dev = dict(
port=port & ~PORT_NOT_FREE,
serial_number=_unique_id_str(uid),
in_use=in_use)
devices.append(dev)
return devices
|
Return a list of dictionaries. Each dictionary represents one device.
The dictionary contains the following keys: port, serial_number and in_use.
`port` can be used with :func:`open`. `serial_number` is the serial number
of the device (and can also be used with :func:`open`) and `in_use`
indicates whether the device was opened before and can currently not be
opened.
.. note::
There is no guarantee that the returned information is still valid
when you open the device. Especially if you open a device by its port,
the serial number may differ because the device list has changed, e.g. a
device may be disconnected from the machine after you call :func:`find_devices`
but before you call :func:`open`.
To open a device by its serial number, you should use the :func:`open`
with the `serial_number` parameter.
|
entailment
|
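A short usage sketch for the function above. The import name pyaardvark is an assumption; the dictionary keys come from the record itself:

import pyaardvark  # import name is an assumption

for dev in pyaardvark.find_devices():
    state = 'in use' if dev['in_use'] else 'free'
    print('port %d, serial %s (%s)' % (dev['port'], dev['serial_number'], state))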
def open(port=None, serial_number=None):
"""Open an aardvark device and return an :class:`Aardvark` object. If the
device cannot be opened an :class:`IOError` is raised.
The `port` can be retrieved by :func:`find_devices`. Usually, the first
device is 0, the second 1, etc.
If you are using only one device, you can therefore omit the parameter
in which case 0 is used.
Another method to open a device is to use the serial number. You can either
find the number on the device itself or in the corresponding USB
property. The serial number is a string which looks like `NNNN-MMMMMMM`.
Raises an :class:`IOError` if the port (or serial number) does not exist,
is already connected or an incompatible device is found.
.. note::
There is a small chance that this function raises an :class:`IOError`
although the correct device is available and not opened. The
open-by-serial-number method works by scanning the devices. But as
explained in :func:`find_devices`, the returned information may be
outdated. Therefore, :func:`open` checks the serial number once the
device is opened and if it is not the expected one, raises
:class:`IOError`. No retry mechanism is implemented.
As long as nobody comes along with a better idea, this failure case is
up to the user.
"""
if port is None and serial_number is None:
dev = Aardvark()
elif serial_number is not None:
for d in find_devices():
if d['serial_number'] == serial_number:
break
else:
_raise_error_if_negative(ERR_UNABLE_TO_OPEN)
dev = Aardvark(d['port'])
# make sure we opened the correct device
if dev.unique_id_str() != serial_number:
dev.close()
_raise_error_if_negative(ERR_UNABLE_TO_OPEN)
else:
dev = Aardvark(port)
return dev
|
Open an aardvark device and return an :class:`Aardvark` object. If the
device cannot be opened an :class:`IOError` is raised.
The `port` can be retrieved by :func:`find_devices`. Usually, the first
device is 0, the second 1, etc.
If you are using only one device, you can therefore omit the parameter
in which case 0 is used.
Another method to open a device is to use the serial number. You can either
find the number on the device itself or in the corresponding USB
property. The serial number is a string which looks like `NNNN-MMMMMMM`.
Raises an :class:`IOError` if the port (or serial number) does not exist,
is already connected or an incompatible device is found.
.. note::
There is a small chance that this function raises an :class:`IOError`
although the correct device is available and not opened. The
open-by-serial-number method works by scanning the devices. But as
explained in :func:`find_devices`, the returned information may be
outdated. Therefore, :func:`open` checks the serial number once the
device is opened and if it is not the expected one, raises
:class:`IOError`. No retry mechanism is implemented.
As long as nobody comes along with a better idea, this failure case is
up to the user.
|
entailment
|
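A hedged usage sketch for open(); the import name and the serial number below are assumptions:

import pyaardvark  # import name is an assumption

a = pyaardvark.open()          # first free device (port 0)
a.close()
try:
    a = pyaardvark.open(serial_number='1234-5678901')  # made-up serial number
except IOError as exc:
    print('could not open device: %s' % exc)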
def enable_i2c(self):
"""Set this to `True` to enable the hardware I2C interface. If set to
`False` the hardware interface will be disabled and its pins (SDA and
SCL) can be used as GPIOs.
"""
config = self._interface_configuration(CONFIG_QUERY)
return config == CONFIG_GPIO_I2C or config == CONFIG_SPI_I2C
|
Set this to `True` to enable the hardware I2C interface. If set to
`False` the hardware interface will be disabled and its pins (SDA and
SCL) can be used as GPIOs.
|
entailment
|
def enable_spi(self):
"""Set this to `True` to enable the hardware SPI interface. If set to
`False` the hardware interface will be disabled and its pins (MISO,
MOSI, SCK and SS) can be used as GPIOs.
"""
config = self._interface_configuration(CONFIG_QUERY)
return config == CONFIG_SPI_GPIO or config == CONFIG_SPI_I2C
|
Set this to `True` to enable the hardware SPI interface. If set to
`False` the hardware interface will be disabled and its pins (MISO,
MOSI, SCK and SS) can be used as GPIOs.
|
entailment
|
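The enable_i2c and enable_spi getters above read the current interface configuration; per their docstrings the properties can also be assigned. A configuration sketch, assuming a is an already opened Aardvark handle:

# 'a' is assumed to be an opened pyaardvark Aardvark handle.
a.enable_i2c = True    # keep the hardware I2C block on SDA/SCL
a.enable_spi = False   # free MISO/MOSI/SCK/SS for use as GPIOs
print(a.enable_i2c, a.enable_spi)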
def i2c_bitrate(self):
"""I2C bitrate in kHz. Not every bitrate is supported by the host
adapter. Therefore, the actual bitrate may be less than the value which
is set.
The power-on default value is 100 kHz.
"""
ret = api.py_aa_i2c_bitrate(self.handle, 0)
_raise_error_if_negative(ret)
return ret
|
I2C bitrate in kHz. Not every bitrate is supported by the host
adapter. Therefore, the actual bitrate may be less than the value which
is set.
The power-on default value is 100 kHz.
|
entailment
|
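A sketch of setting and reading back the bitrate, assuming a is an opened Aardvark handle; the requested value may be rounded by the adapter:

# 'a' is assumed to be an opened pyaardvark Aardvark handle.
a.i2c_bitrate = 400      # request 400 kHz
print(a.i2c_bitrate)     # read back the bitrate the adapter actually selected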
def i2c_pullups(self):
"""Setting this to `True` will enable the I2C pullup resistors. If set
to `False` the pullup resistors will be disabled.
Raises an :exc:`IOError` if the hardware adapter does not support
pullup resistors.
"""
ret = api.py_aa_i2c_pullup(self.handle, I2C_PULLUP_QUERY)
_raise_error_if_negative(ret)
return ret
|
Setting this to `True` will enable the I2C pullup resistors. If set
to `False` the pullup resistors will be disabled.
Raises an :exc:`IOError` if the hardware adapter does not support
pullup resistors.
|
entailment
|
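A sketch of enabling the pullups, assuming a is an opened Aardvark handle; the IOError branch covers adapters without switchable pullups:

# 'a' is assumed to be an opened pyaardvark Aardvark handle.
try:
    a.i2c_pullups = True             # enable the on-board pullup resistors
except IOError:
    print('adapter has no switchable pullups')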
def target_power(self):
"""Setting this to `True` will activate the power pins (4 and 6). If
set to `False` the power will be deactivated.
Raises an :exc:`IOError` if the hardware adapter does not support
the switchable power pins.
"""
ret = api.py_aa_target_power(self.handle, TARGET_POWER_QUERY)
_raise_error_if_negative(ret)
return ret
|
Setting this to `True` will activate the power pins (4 and 6). If
set to `False` the power will be deactivated.
Raises an :exc:`IOError` if the hardware adapter does not support
the switchable power pins.
|
entailment
|
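A similar sketch for the target power pins, again assuming a is an opened Aardvark handle:

# 'a' is assumed to be an opened pyaardvark Aardvark handle.
try:
    a.target_power = True            # power the target through pins 4 and 6
except IOError:
    print('adapter has no switchable target power')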
def i2c_bus_timeout(self):
"""I2C bus lock timeout in ms.
Minimum value is 10 ms and the maximum value is 450 ms. Not every value
can be set; unsupported values are rounded to the next possible one. You can
read back the property to get the actual value.
The power-on default value is 200 ms.
"""
ret = api.py_aa_i2c_bus_timeout(self.handle, 0)
_raise_error_if_negative(ret)
return ret
|
I2C bus lock timeout in ms.
Minimum value is 10 ms and the maximum value is 450 ms. Not every value
can be set; unsupported values are rounded to the next possible one. You can
read back the property to get the actual value.
The power-on default value is 200 ms.
|
entailment
|
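A sketch of adjusting the bus lock timeout, assuming a is an opened Aardvark handle; the adapter rounds the requested value:

# 'a' is assumed to be an opened pyaardvark Aardvark handle.
a.i2c_bus_timeout = 300      # request 300 ms
print(a.i2c_bus_timeout)     # read back the effective timeout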
def i2c_master_write(self, i2c_address, data, flags=I2C_NO_FLAGS):
"""Make an I2C write access.
The given I2C device is addressed and data given as a string is
written. The transaction is finished with an I2C stop condition unless
I2C_NO_STOP is set in the flags.
10 bit addresses are supported if the I2C_10_BIT_ADDR flag is set.
"""
data = array.array('B', data)
status, _ = api.py_aa_i2c_write_ext(self.handle, i2c_address, flags,
len(data), data)
_raise_i2c_status_code_error_if_failure(status)
|
Make an I2C write access.
The given I2C device is addressed and data given as a string is
written. The transaction is finished with an I2C stop condition unless
I2C_NO_STOP is set in the flags.
10 bit addresses are supported if the I2C_10_BIT_ADDR flag is set.
|
entailment
|
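A write sketch, assuming a is an opened Aardvark handle; the slave address and payload are made up, and the I2C_NO_STOP flag name is taken from the docstring above (whether it is exported at module level is an assumption):

# 'a' is assumed to be an opened pyaardvark Aardvark handle; address and data are made up.
a.i2c_master_write(0x50, b'\x00\x10')                            # write two bytes, then STOP
a.i2c_master_write(0x50, b'\x00', flags=pyaardvark.I2C_NO_STOP)  # hold the bus for a repeated start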
def i2c_master_read(self, addr, length, flags=I2C_NO_FLAGS):
"""Make an I2C read access.
The given I2C device is addressed and clock cycles for `length` bytes
are generated. A short read will occur if the device generates an early
NAK.
The transaction is finished with an I2C stop condition unless the
I2C_NO_STOP flag is set.
"""
data = array.array('B', (0,) * length)
status, rx_len = api.py_aa_i2c_read_ext(self.handle, addr, flags,
length, data)
_raise_i2c_status_code_error_if_failure(status)
del data[rx_len:]
return bytes(data)
|
Make an I2C read access.
The given I2C device is addressed and clock cycles for `length` bytes
are generated. A short read will occur if the device generates an early
NAK.
The transaction is finished with an I2C stop condition unless the
I2C_NO_STOP flag is set.
|
entailment
|
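A read sketch combining the two calls above into a common register-read pattern, assuming a is an opened Aardvark handle; the address, register and length are made up, and the I2C_NO_STOP flag export is an assumption:

# 'a' is assumed to be an opened pyaardvark Aardvark handle; address/register/length are made up.
a.i2c_master_write(0x50, b'\x00', flags=pyaardvark.I2C_NO_STOP)  # select register 0x00
data = a.i2c_master_read(0x50, 4)                                # read up to 4 bytes, then STOP
print(len(data), repr(data))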