sentence1 | sentence2 | label
def stop(self, unique_id, configs=None):
"""Stop the service. If the deployer has not started a service with`unique_id` the deployer will raise an Exception
There are two configs that will be considered:
'terminate_only': if this config is passed in then this method is the same as terminate(unique_id) (this is also the
behavior if stop_command is None and not overridden)
'stop_command': overrides the default stop_command
:param unique_id:
:param configs:
:return:
"""
# the following is necessary to set the configs for this function as the combination of the
# default configurations and the parameter, with the parameter superseding the defaults but
# not modifying the defaults
if configs is None:
configs = {}
tmp = self.default_configs.copy()
tmp.update(configs)
configs = tmp
logger.debug("stopping " + unique_id)
if unique_id in self.processes:
hostname = self.processes[unique_id].hostname
else:
logger.error("Can't stop {0}: process not known".format(unique_id))
raise DeploymentError("Can't stop {0}: process not known".format(unique_id))
if configs.get('terminate_only', False):
self.terminate(unique_id, configs)
else:
stop_command = configs.get('stop_command') or self.default_configs.get('stop_command')
env = configs.get("env", {})
if stop_command is not None:
install_path = self.processes[unique_id].install_path
with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh:
log_output(exec_with_env(ssh, "cd {0}; {1}".format(install_path, stop_command),
msg="Failed to stop {0}".format(unique_id), env=env))
else:
self.terminate(unique_id, configs)
if 'delay' in configs:
time.sleep(configs['delay'])
|
Stop the service. If the deployer has not started a service with `unique_id`, the deployer will raise an Exception.
There are two configs that will be considered:
'terminate_only': if this config is passed in then this method is the same as terminate(unique_id) (this is also the
behavior if stop_command is None and not overridden)
'stop_command': overrides the default stop_command
:param unique_id:
:param configs:
:return:
|
entailment
|
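The merge of default and per-call configs that stop() performs is a pattern shared by the other deployer methods: copy the defaults, overlay the caller's configs, and leave the defaults untouched. A minimal standalone sketch of that pattern (the names below are illustrative, not part of the deployer API):

def merge_configs(default_configs, configs=None):
    """Overlay per-call configs on a copy of the defaults without mutating the defaults."""
    merged = dict(default_configs)  # shallow copy keeps the defaults unchanged
    merged.update(configs or {})
    return merged

defaults = {'stop_command': 'bin/stop.sh', 'delay': 2}
merged = merge_configs(defaults, {'terminate_only': True})
assert defaults == {'stop_command': 'bin/stop.sh', 'delay': 2}   # defaults untouched
assert merged['terminate_only'] is True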
def uninstall(self, unique_id, configs=None):
"""uninstall the service. If the deployer has not started a service with
`unique_id` this will raise a DeploymentError. This considers one config:
'additional_directories': a list of directories to remove in addition to those provided in the constructor plus
the install path. This extends the directories to remove but does not override them
:param unique_id:
:param configs:
:return:
"""
# the following is necessary to set the configs for this function as the combination of the
# default configurations and the parameter, with the parameter superseding the defaults but
# not modifying the defaults
if configs is None:
configs = {}
tmp = self.default_configs.copy()
tmp.update(configs)
configs = tmp
if unique_id in self.processes:
hostname = self.processes[unique_id].hostname
else:
logger.error("Can't uninstall {0}: process not known".format(unique_id))
raise DeploymentError("Can't uninstall {0}: process not known".format(unique_id))
install_path = self.processes[unique_id].install_path
directories_to_remove = list(self.default_configs.get('directories_to_clean', []))
directories_to_remove.extend(configs.get('additional_directories', []))
if install_path not in directories_to_remove:
directories_to_remove.append(install_path)
with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh:
for directory_to_remove in directories_to_remove:
log_output(better_exec_command(ssh, "rm -rf {0}".format(directory_to_remove),
"Failed to remove {0}".format(directory_to_remove)))
|
Uninstall the service. If the deployer has not started a service with
`unique_id` this will raise a DeploymentError. This considers one config:
'additional_directories': a list of directories to remove in addition to those provided in the constructor plus
the install path. This extends the directories to remove but does not override them
:param unique_id:
:param configs:
:return:
|
entailment
|
def get_pid(self, unique_id, configs=None):
"""Gets the pid of the process with `unique_id`. If the deployer does not know of a process
with `unique_id` then it should return a value of constants.PROCESS_NOT_RUNNING_PID
"""
RECV_BLOCK_SIZE = 16
# the following is necessary to set the configs for this function as the combination of the
# default configurations and the parameter, with the parameter superseding the defaults but
# not modifying the defaults
if configs is None:
configs = {}
tmp = self.default_configs.copy()
tmp.update(configs)
configs = tmp
if unique_id in self.processes:
hostname = self.processes[unique_id].hostname
else:
return constants.PROCESS_NOT_RUNNING_PID
if self.processes[unique_id].start_command is None:
return constants.PROCESS_NOT_RUNNING_PID
if self.processes[unique_id].pid_file is not None:
with open_remote_file(hostname, self.processes[unique_id].pid_file,
username=runtime.get_username(), password=runtime.get_password()) as pid_file:
full_output = pid_file.read()
elif 'pid_file' in configs.keys():
with open_remote_file(hostname, configs['pid_file'],
username=runtime.get_username(), password=runtime.get_password()) as pid_file:
full_output = pid_file.read()
else:
pid_keyword = self.processes[unique_id].start_command
if self.processes[unique_id].args is not None:
pid_keyword = "{0} {1}".format(pid_keyword, ' '.join(self.processes[unique_id].args))
pid_keyword = configs.get('pid_keyword', pid_keyword)
# TODO(jehrlich): come up with a simpler approach to this
pid_command = "ps aux | grep '{0}' | grep -v grep | tr -s ' ' | cut -d ' ' -f 2 | grep -Eo '[0-9]+'".format(pid_keyword)
pid_command = configs.get('pid_command', pid_command)
non_failing_command = "{0}; if [ $? -le 1 ]; then true; else false; fi;".format(pid_command)
env = configs.get("env", {})
with get_ssh_client(hostname, username=runtime.get_username(), password=runtime.get_password()) as ssh:
chan = exec_with_env(ssh, non_failing_command, msg="Failed to get PID", env=env)
output = chan.recv(RECV_BLOCK_SIZE)
full_output = output
while len(output) > 0:
output = chan.recv(RECV_BLOCK_SIZE)
full_output += output
if len(full_output) > 0:
pids = [int(pid_str) for pid_str in full_output.split('\n') if pid_str.isdigit()]
if len(pids) > 0:
return pids
return constants.PROCESS_NOT_RUNNING_PID
|
Gets the pid of the process with `unique_id`. If the deployer does not know of a process
with `unique_id` then it should return a value of constants.PROCESS_NOT_RUNNING_PID
|
entailment
|
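When neither a pid_file nor a pid_command is configured, get_pid greps the ps output for the start command and keeps only the numeric PID fields. A rough standalone sketch of that keyword lookup, run through a local shell rather than the SSH channel used above (an assumption for illustration only):

import subprocess

def pids_for_keyword(pid_keyword):
    """Return PIDs whose ps entry contains pid_keyword; 'grep -v grep' drops the grep itself."""
    command = ("ps aux | grep '{0}' | grep -v grep | "
               "tr -s ' ' | cut -d ' ' -f 2 | grep -Eo '[0-9]+'").format(pid_keyword)
    output = subprocess.run(command, shell=True, capture_output=True, text=True).stdout
    return [int(line) for line in output.split('\n') if line.isdigit()]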
def get_host(self, unique_id):
"""Gets the host of the process with `unique_id`. If the deployer does not know of a process
with `unique_id` then it should return a value of SOME_SENTINAL_VALUE
:Parameter unique_id: the name of the process
:raises NameError if the name is not a valid process
"""
if unique_id in self.processes:
return self.processes[unique_id].hostname
logger.error("{0} not a known process".format(unique_id))
raise NameError("{0} not a known process".format(unique_id))
|
Gets the host of the process with `unique_id`. If the deployer does not know of a process
with `unique_id` then it should return a value of SOME_SENTINAL_VALUE
:Parameter unique_id: the name of the process
:raises NameError if the name is not a valid process
|
entailment
|
def kill_all_process(self):
""" Terminates all the running processes. By default it is set to false.
Users can set to true in config once the method to get_pid is done deterministically
either using pid_file or an accurate keyword
"""
if (runtime.get_active_config("cleanup_pending_process",False)):
for process in self.get_processes():
self.terminate(process.unique_id)
|
Terminates all the running processes. By default this is disabled (cleanup_pending_process is false).
Users can set it to true in the config once get_pid works deterministically,
either using a pid_file or an accurate keyword
|
entailment
|
def string_to_level(log_level):
"""
Converts a string to the corresponding log level
"""
if (log_level.strip().upper() == "DEBUG"):
return logging.DEBUG
if (log_level.strip().upper() == "INFO"):
return logging.INFO
if (log_level.strip().upper() == "WARNING"):
return logging.WARNING
if (log_level.strip().upper() == "ERROR"):
return logging.ERROR
|
Converts a string to the corresponding log level
|
entailment
|
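string_to_level falls through and implicitly returns None for any unrecognized string. A table-driven sketch of the same mapping (not the module's code) makes the lookup and a fallback explicit:

import logging

_LEVELS = {"DEBUG": logging.DEBUG, "INFO": logging.INFO,
           "WARNING": logging.WARNING, "ERROR": logging.ERROR}

def string_to_level(log_level, default=logging.INFO):
    """Map a level name to a logging constant, falling back to default when unknown."""
    return _LEVELS.get(log_level.strip().upper(), default)

assert string_to_level(" info ") == logging.INFO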
def main():
"""
Parse command line arguments and then run the test suite
"""
parser = argparse.ArgumentParser(description='A distributed test framework')
parser.add_argument('testfile',
help='The file that is used to determine the test suite run')
parser.add_argument('--test-only',
nargs='*',
dest='test_list',
help='run only the named tests to help debug broken tests')
parser.add_argument('--machine-list',
nargs='*',
dest='machine_list',
help='''mapping of logical host names to physical names allowing the same
test suite to run on different hardware, each argument is a pair
of logical name and physical name separated by a =''')
parser.add_argument('--config-overrides',
nargs='*',
dest='config_overrides',
help='''config overrides at execution time, each argument is a config with
its value separated by a =. This has the highest priority of all
configs''')
parser.add_argument('-d', '--output-dir',
dest='output_dir',
help='''Directory to write output files and logs. Defaults to the current
directory.''')
parser.add_argument("--log-level", dest="log_level", help="Log level (default INFO)", default="INFO")
parser.add_argument("--console-log-level", dest="console_level", help="Console Log level (default ERROR)",
default="ERROR")
parser.add_argument("--nopassword", action='store_true', dest="nopassword", help="Disable password prompt")
parser.add_argument("--user", dest="user", help="user to run the test as (defaults to current user)")
args = parser.parse_args()
try:
call_main(args)
except ValueError:
#We only sys.exit here, as call_main is used as part of a unit test
#and should not exit the system
sys.exit(1)
|
Parse command line arguments and then run the test suite
|
entailment
|
def reset_all():
"""
Clear relevant globals to start fresh
:return:
"""
global _username
global _password
global _active_config
global _active_tests
global _machine_names
global _deployers
reset_deployers()
reset_collector()
_username = None
_password = None
_active_config = None
_active_tests = {}
_machine_names = defaultdict()
|
Clear relevant globals to start fresh
:return:
|
entailment
|
def get_active_config(config_option, default=None):
"""
gets the config value associated with the config_option
:param config_option:
:param default: if not None, will be used
:return: value of config. If key is not in config, then default will be used if default is not set to None.
Otherwise, KeyError is thrown.
"""
return _active_config.mapping[config_option] if default is None else _active_config.mapping.get(config_option, default)
|
gets the config value associated with the config_option
:param config_option:
:param default: if not None, will be used
:return: value of config. If key is not in config, then default will be used if default is not set to None.
Otherwise, KeyError is thrown.
|
entailment
|
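The default handling in get_active_config is easy to misread: with default left as None a missing key raises KeyError, while any other default switches the lookup to .get(). A plain-dict sketch of the same semantics:

def get_value(mapping, key, default=None):
    """Raise KeyError when default is None and key is absent; otherwise fall back to default."""
    return mapping[key] if default is None else mapping.get(key, default)

cfg = {"max_failures": 3}
assert get_value(cfg, "max_failures") == 3
assert get_value(cfg, "missing", default=0) == 0
# get_value(cfg, "missing") would raise KeyError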
def generate(self):
"""
Generates the report
"""
self._setup()
header_html = self._generate_header()
footer_html = self._generate_footer()
results_topbar_html = self._generate_topbar("results")
summary_topbar_html = self._generate_topbar("summary")
logs_topbar_html = self._generate_topbar("logs")
diff_topbar_html = self._generate_topbar("diff")
summary_body_html = self._generate_summary_body()
diff_body_html = self._generate_diff_body()
summary_html = header_html + summary_topbar_html + summary_body_html + footer_html
diff_html = header_html + diff_topbar_html + diff_body_html+ footer_html
Reporter._make_file(summary_html, self.report_info.home_page)
Reporter._make_file(diff_html,self.report_info.diff_page)
log_body_html = self._generate_log_body()
log_html = header_html + logs_topbar_html + log_body_html+footer_html
Reporter._make_file(log_html, self.report_info.log_page)
for config_name in self.report_info.config_to_test_names_map.keys():
config_dir = os.path.join(self.report_info.resource_dir, config_name)
utils.makedirs(config_dir)
config_body_html = self._generate_config_body(config_name)
config_html = header_html + results_topbar_html + config_body_html + footer_html
config_file = os.path.join(config_dir, config_name + self.report_info.report_file_sfx)
Reporter._make_file(config_html, config_file)
for test_name in self.data_source.get_test_names(config_name):
test_body_html = self._generate_test_body(config_name, test_name)
test_html = header_html + results_topbar_html + test_body_html + footer_html
test_file = os.path.join(config_dir, test_name + self.report_info.report_file_sfx)
Reporter._make_file(test_html, test_file)
|
Generates the report
|
entailment
|
def execute ( self, conn, dataset, dataset_access_type, transaction=False ):
"""
Updates the dataset access type for a given dataset
"""
if not conn:
dbsExceptionHandler("dbsException-failed-connect2host", "Oracle/Dataset/UpdateType. Expects db connection from upper layer.", self.logger.exception)
binds = { "dataset" : dataset , "dataset_access_type" : dataset_access_type ,"myuser": dbsUtils().getCreateBy(), "mydate": dbsUtils().getTime() }
result = self.dbi.processData(self.sql, binds, conn, transaction)
|
Updates the dataset access type for a given dataset
|
entailment
|
def inputChecks(**_params_):
"""
This is a function to check all the input for GET APIs.
"""
def checkTypes(_func_, _params_ = _params_):
log = clog.error_log
@wraps(_func_)
def wrapped(*args, **kw):
arg_names = _func_.__code__.co_varnames[:_func_.__code__.co_argcount]
ka = {}
ka.update(list(zip(arg_names, args)))
ka.update(kw)
#print ka
for name, value in ka.iteritems():
#In fact the framework removes all the input variables that is not in the args list of _addMethod.
#So DBS list API will never see these variables. For example, if one has
#http://hostname/cms_dbs/DBS/datatiers?name=abc, the API will get a request to list all the datatiers because
#"name=abc" is removed by the framework since name is not a key work for the api.
if name !='self':
types = _params_[name]
#if name =='lumi_list': value = cjson.decode(value)
if not isinstance(value, types):
serverlog = "Expected '%s' to be %s; was %s." % (name, types, type(value))
#raise TypeError, "Expected '%s' to be %s; was %s." % (name, types, type(value))
dbsExceptionHandler("dbsException-invalid-input2", message="Invalid Input DataType %s for %s..." %(type(value), name[:10]),\
logger=log.error, serverError=serverlog)
else:
try:
if isinstance(value, basestring):
try:
value = str(value)
except:
dbsExceptionHandler("dbsException-invalid-input", "invalid value for %s" %name)
if name == 'dataset':
if '*' in value:
searchdataset(value)
else:
reading_dataset_check(value)
elif name =='lumi_list': value = cjson.decode(value)
elif name =='validFileOnly':
try:
int(value)
except Exception as e:
dbsExceptionHandler("dbsException-invalid-input2", message="invalid value for %s" %name, serverError="invalid value %s for %s" %(value, name), logger=log.error)
elif name =='sumOverLumi':
try:
int(value)
except Exception as e:
dbsExceptionHandler("dbsException-invalid-input2", message="invalid value for %s" %name, serverError="invalid value %s for %s" %(value, name), logger=log.error)
elif name =='block_name':
if '*' in value:
searchblock(value)
else:
reading_block_check(value)
elif name =='primary_ds_name':
if '*' in value: searchstr(value)
else: primdataset(value)
elif name =='processed_ds_name':
if '*' in value:
searchstr(value)
else:
reading_procds_check(value)
elif name=='logical_file_name':
if '*' in value:
searchstr(value)
else:
reading_lfn_check(value)
elif name=='processing_version':
procversion(value)
elif name=='global_tag':
if '*' in value: searchstr(value)
else: globalTag(value)
elif name == 'create_by':
DBSUser(value)
elif name == 'last_modified_by':
DBSUser(value)
else:
searchstr(value)
elif type(value) == list:
if name == 'logical_file_name':
for f in value:
if '*' in f:
searchstr(f)
else:
reading_lfn_check(f)
elif name == 'block_names':
for block_name in value:
reading_block_check(block_name)
elif name == 'run_num':
for run_num in value:
try:
int(run_num)
except Exception:
try:
min_run, max_run = run_num.split('-', 1)
int(min_run)
int(max_run)
except Exception as e:
serverLog = str(e) + "\n run_num=%s is an invalid run number." %run_num
dbsExceptionHandler("dbsException-invalid-input2", message="Invalid input data %s...: invalid run number." %run_num[:10],\
serverError=serverLog, logger=log.error)
elif name == 'dataset_id':
for id in value:
try:
int(id)
except Exception :
try:
min_id, max_id = id.split('-', 1)
int(min_id)
int(max_id)
except Exception as e :
serverLog = str(e) + "\n dataset_id=%s is an invalid oracle id." %id
dbsExceptionHandler("dbsException-invalid-input2", message="Invalid input data %s...: invalid dataset_id." %id[:10], \
serverError=serverLog, logger=log.error)
except AssertionError as ae:
serverLog = str(ae) + " key-value pair (%s, %s) cannot pass input checking" %(name, value)
#print ae
dbsExceptionHandler("dbsException-invalid-input2", message="Invalid Input Data %s...: Not Match Required Format" %value[:10],\
serverError=serverLog, logger=log.error)
except Exception as e1:
raise
return _func_(*args, **kw)
return wrapped
return checkTypes
|
This is a function to check all the input for GET APIs.
|
entailment
|
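The core of inputChecks is a decorator factory: the keyword arguments name the expected types, and the wrapper zips the positional arguments with the function's co_varnames before running isinstance checks. A minimal sketch of that mechanism, without the DBS-specific value validation:

from functools import wraps

def input_checks(**expected_types):
    """Decorator factory: validate named arguments against the declared types."""
    def decorate(func):
        arg_names = func.__code__.co_varnames[:func.__code__.co_argcount]
        @wraps(func)
        def wrapped(*args, **kw):
            bound = dict(zip(arg_names, args))
            bound.update(kw)
            for name, value in bound.items():
                if name in expected_types and not isinstance(value, expected_types[name]):
                    raise TypeError("Expected %r to be %s; was %s"
                                    % (name, expected_types[name], type(value)))
            return func(*args, **kw)
        return wrapped
    return decorate

@input_checks(dataset=str, run_num=(int, list))
def list_files(dataset, run_num=-1):
    return dataset, run_num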
def validateStringInput(input_key,input_data, read=False):
"""
To check if a string has the required format. This is only used for POST APIs.
"""
log = clog.error_log
func = None
if '*' in input_data or '%' in input_data:
func = validationFunctionWildcard.get(input_key)
if func is None:
func = searchstr
elif input_key == 'migration_input' :
if input_data.find('#') != -1 : func = block
else : func = dataset
else:
if not read:
func = validationFunction.get(input_key)
if func is None:
func = namestr
else:
if input_key == 'dataset':
func = reading_dataset_check
elif input_key == 'block_name':
func = reading_block_check
elif input_key == 'logical_file_name':
func = reading_lfn_check
else:
func = namestr
try:
func(input_data)
except AssertionError as ae:
serverLog = str(ae) + " key-value pair (%s, %s) cannot pass input checking" %(input_key, input_data)
#print serverLog
dbsExceptionHandler("dbsException-invalid-input2", message="Invalid Input Data %s...: Not Match Required Format" %input_data[:10], \
logger=log.error, serverError=serverLog)
return input_data
|
To check if a string has the required format. This is only used for POST APIs.
|
entailment
|
def execute(self, conn, transaction=False):
"""
Lists all primary datasets if pattern is not provided.
"""
sql = self.sql
binds = {}
cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
result = []
for c in cursors:
result.extend(self.formatCursor(c, size=100))
return result
|
Lists all primary datasets if pattern is not provided.
|
entailment
|
def execute(self, conn, daoinput, transaction = False):
"""
daoinput keys:
migration_request_id
"""
if not conn:
dbsExceptionHandler("dbsException-failed-connect2host", "Oracle/MigrationRequests/Remove. Expects db connection from upper layer.",
self.logger.exception)
daoinput['create_by'] = dbsUtils().getCreateBy()
try:
msg = "DBSMigration: Invalid request. Sucessfully processed or processing requests cannot be removed,\
or the requested migration did not exist, or the requestor for removing and creating has to be the same user. "
checkit = self.dbi.processData(self.select, daoinput, conn, transaction)
if self.formatDict(checkit)[0]["count"] >= 1:
reqID = {'migration_rqst_id':daoinput['migration_rqst_id']}
result = self.dbi.processData(self.sql, reqID, conn, transaction)
else:
dbsExceptionHandler('dbsException-invalid-input', msg, self.logger.exception)
except:
raise
|
daoinput keys:
migration_request_id
|
entailment
|
def jsonstreamer(func):
"""JSON streamer decorator"""
def wrapper (self, *args, **kwds):
gen = func (self, *args, **kwds)
yield "["
firstItem = True
for item in gen:
if not firstItem:
yield ","
else:
firstItem = False
yield cjson.encode(item)
yield "]"
return wrapper
|
JSON streamer decorator
|
entailment
|
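jsonstreamer turns a generator of objects into a streamed JSON array, emitting the commas between items itself so the full result never has to be materialized. A standalone sketch using the standard json module in place of cjson:

import json
from functools import wraps

def jsonstreamer(func):
    """Wrap a generator of objects into a generator of JSON array fragments."""
    @wraps(func)
    def wrapper(*args, **kwds):
        yield "["
        first = True
        for item in func(*args, **kwds):
            if not first:
                yield ","
            first = False
            yield json.dumps(item)
        yield "]"
    return wrapper

@jsonstreamer
def rows():
    yield {"a": 1}
    yield {"a": 2}

assert "".join(rows()) == '[{"a": 1},{"a": 2}]'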
def decodeLumiIntervals(self, lumi_list):
"""lumi_list must be of one of the two following formats:
'[[a,b], [c,d],' or
[a1, a2, a3] """
errmessage = "lumi intervals must be of one of the two following formats: '[[a,b], [c,d], ...],' or [a1, a2, a3 ...] "
if isinstance(lumi_list, basestring):
try:
lumi_list = cjson.decode(lumi_list)
except:
dbsExceptionHandler("dbsException-invalid-input2", "invalid lumi format", None, "Could not decode the input lumi_list: %s" % lumi_list)
if not isinstance(lumi_list, list):
dbsExceptionHandler("dbsException-invalid-input2", "invalid lumi input", None, errmessage)
#check only the first element... in case [1, '2', '3'] is passed, exception will not be raised here.
if len(lumi_list)==0 or isinstance(lumi_list[0], int):
return lumi_list
elif isinstance(lumi_list[0], list):
result = []
resultext = result.extend
for lumiinterval in lumi_list:
if not isinstance(lumiinterval, list) or len(lumiinterval) != 2:
dbsExceptionHandler("dbsException-invalid-input2", "invalid lumi input", None, errmessage)
resultext(range(lumiinterval[0], lumiinterval[1]+1))
result = list(set(result)) #removes the duplicates, no need to sort
return result
else:
dbsExceptionHandler("dbsException-invalid-input2", 'invalid lumi format', None, \
'Unsupported lumi format: %s. %s' % (lumi_list, errmessage))
|
lumi_list must be of one of the two following formats:
'[[a,b], [c,d],' or
[a1, a2, a3]
|
entailment
|
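The interval branch of decodeLumiIntervals expands each [a, b] pair into the inclusive run of lumi section numbers and then deduplicates. A small sketch of just that expansion:

def expand_lumi_intervals(intervals):
    """Expand [[a, b], ...] into a deduplicated flat list of lumi section numbers."""
    result = []
    for lo, hi in intervals:
        result.extend(range(lo, hi + 1))  # inclusive on both ends
    return list(set(result))

assert sorted(expand_lumi_intervals([[1, 3], [3, 5]])) == [1, 2, 3, 4, 5]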
def listDatasetAccessTypes(self, dataset_access_type=""):
"""
List dataset access types
"""
if isinstance(dataset_access_type, basestring):
try:
dataset_access_type = str(dataset_access_type)
except:
dbsExceptionHandler('dbsException-invalid-input', 'dataset_access_type given is not valid : %s' %dataset_access_type)
else:
dbsExceptionHandler('dbsException-invalid-input', 'dataset_access_type given is not valid : %s' %dataset_access_type)
conn = self.dbi.connection()
try:
plist = self.datasetAccessType.execute(conn, dataset_access_type.upper())
result = [{}]
if plist:
t = []
for i in plist:
for k, v in i.iteritems():
t.append(v)
result[0]['dataset_access_type'] = t
return result
finally:
if conn:
conn.close()
|
List dataset access types
|
entailment
|
def block_before(self):
"""
Check the current request and block it if the IP address it's
coming from is blacklisted.
"""
# To avoid unnecessary database queries, ignore the IP check for
# requests for static files
if request.path.startswith(url_for('static', filename='')):
return
# Some static files might be served from the root path (e.g.
# favicon.ico, robots.txt, etc.). Ignore the IP check for most
# common extensions of those files.
ignored_extensions = ('ico', 'png', 'txt', 'xml')
if request.path.rsplit('.', 1)[-1] in ignored_extensions:
return
ips = request.headers.getlist('X-Forwarded-For')
if not ips:
return
# If the X-Forwarded-For header contains multiple comma-separated
# IP addresses, we're only interested in the last one.
ip = ips[0].strip()
if ip[-1] == ',':
ip = ip[:-1]
ip = ip.rsplit(',', 1)[-1].strip()
if self.matches_ip(ip):
if self.logger is not None:
self.logger.info("IPBlock: matched {}, {}".format(ip, self.block_msg))
if self.blocking_enabled:
return 'IP Blocked', 200
|
Check the current request and block it if the IP address it's
coming from is blacklisted.
|
entailment
|
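block_before trusts only the right-most address in X-Forwarded-For, since earlier entries can be supplied by the client. A standalone sketch of that extraction, independent of Flask's request object:

def last_forwarded_ip(header_value):
    """Return the right-most IP in an X-Forwarded-For value, tolerating a trailing comma."""
    value = header_value.strip()
    if value.endswith(','):
        value = value[:-1]
    return value.rsplit(',', 1)[-1].strip()

assert last_forwarded_ip("203.0.113.7, 10.0.0.1") == "10.0.0.1"
assert last_forwarded_ip("203.0.113.7,") == "203.0.113.7"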
def matches_ip(self, ip):
"""Return True if the given IP is blacklisted, False otherwise."""
# Check the cache if caching is enabled
if self.cache is not None:
matches_ip = self.cache.get(ip)
if matches_ip is not None:
return matches_ip
# Query MongoDB to see if the IP is blacklisted
matches_ip = IPNetwork.matches_ip(
ip, read_preference=self.read_preference)
# Cache the result if caching is enabled
if self.cache is not None:
self.cache[ip] = matches_ip
return matches_ip
|
Return True if the given IP is blacklisted, False otherwise.
|
entailment
|
def execute(self, conn, logical_file_name, block_name, block_id, transaction=False):
"""
Lists file children for the given logical_file_name(s), block_name, or block_id.
"""
binds = {}
sql = ''
if logical_file_name:
if isinstance(logical_file_name, basestring):
wheresql = "WHERE F.LOGICAL_FILE_NAME = :logical_file_name"
binds = {"logical_file_name": logical_file_name}
sql = "{sql} {wheresql}".format(sql=self.sql, wheresql=wheresql)
elif isinstance(logical_file_name, list):
wheresql = "WHERE F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)"
lfn_generator, binds = create_token_generator(logical_file_name)
sql = "{lfn_generator} {sql} {wheresql}".format(lfn_generator=lfn_generator, sql=self.sql,
wheresql=wheresql)
elif block_name:
joins = "JOIN {owner}BLOCKS B on B.BLOCK_ID = F.BLOCK_ID".format(owner=self.owner)
wheresql = "WHERE B.BLOCK_NAME = :block_name"
binds = {"block_name": block_name}
sql = "{sql} {joins} {wheresql}".format(sql=self.sql, joins=joins, wheresql=wheresql)
elif block_id:
wheresql = "WHERE F.BLOCK_ID = :block_id"
binds = {"block_id": block_id}
sql = "{sql} {wheresql}".format(sql=self.sql, wheresql=wheresql)
else:
dbsExceptionHandler('dbsException-invalid-input', "Logical_file_names is required for listChild dao.", self.logger.exception)
cursors = self.dbi.processData(sql, binds, conn, transaction=transaction, returnCursor=True)
result = []
for c in cursors:
result.extend(self.formatCursor(c, size=100))
return result
|
Lists file children for the given logical_file_name(s), block_name, or block_id.
|
entailment
|
def listAcquisitionEras(self, acq=''):
"""
Returns all acquisition eras in DBS
"""
try:
acq = str(acq)
except:
dbsExceptionHandler('dbsException-invalid-input', 'acquisition_era_name given is not valid : %s' %acq)
conn = self.dbi.connection()
try:
result = self.acqlst.execute(conn, acq)
return result
finally:
if conn:conn.close()
|
Returns all acquisition eras in DBS
|
entailment
|
def listAcquisitionEras_CI(self, acq=''):
"""
Returns all acquisition eras in DBS
"""
try:
acq = str(acq)
except:
dbsExceptionHandler('dbsException-invalid-input', 'acquisition_era_name given is not valid : %s'%acq)
conn = self.dbi.connection()
try:
result = self.acqlst_ci.execute(conn, acq)
return result
finally:
if conn:conn.close()
|
Returns all acquisition eras in DBS
|
entailment
|
def insertAcquisitionEra(self, businput):
"""
Input dictionary has to have the following keys:
acquisition_era_name, creation_date, create_by, start_date, end_date.
It builds the correct dictionary for DAO input and executes the DAO.
"""
conn = self.dbi.connection()
tran = conn.begin()
try:
businput["acquisition_era_id"] = self.sm.increment(conn, "SEQ_AQE", tran)
businput["acquisition_era_name"] = businput["acquisition_era_name"]
#self.logger.warning(businput)
self.acqin.execute(conn, businput, tran)
tran.commit()
tran = None
except KeyError as ke:
dbsExceptionHandler('dbsException-invalid-input', "Invalid input:"+ke.args[0])
except Exception as ex:
if str(ex).lower().find("unique constraint") != -1 or str(ex).lower().find("duplicate") != -1:
dbsExceptionHandler('dbsException-invalid-input2', "Invalid input: acquisition_era_name already exists in DB", serverError="%s" %ex)
else:
raise
finally:
if tran:
tran.rollback()
if conn:
conn.close()
|
Input dictionary has to have the following keys:
acquisition_era_name, creation_date, create_by, start_date, end_date.
It builds the correct dictionary for DAO input and executes the DAO.
|
entailment
|
def UpdateAcqEraEndDate(self, acquisition_era_name ="", end_date=0):
"""
Input dictionary has to have the following keys:
acquisition_era_name, end_date.
"""
if acquisition_era_name =="" or end_date==0:
dbsExceptionHandler('dbsException-invalid-input', "acquisition_era_name and end_date are required")
conn = self.dbi.connection()
tran = conn.begin()
try:
self.acqud.execute(conn, acquisition_era_name, end_date, tran)
if tran:tran.commit()
tran = None
finally:
if tran:tran.rollback()
if conn:conn.close()
|
Input dictionary has to have the following keys:
acquisition_era_name, end_date.
|
entailment
|
def execute(self, conn, app="", release_version="", pset_hash="", output_label="",
global_tag='', transaction = False):
"""
returns id for a given application
"""
sql = self.sql
binds = {}
setAnd=False
if not app == "":
sql += " A.APP_NAME=:app_name"
binds["app_name"]=app
setAnd=True
if not release_version == "":
if setAnd : sql += " AND "
sql += " R.RELEASE_VERSION=:release_version"
binds["release_version"]=release_version
setAnd=True
if not pset_hash == "":
if setAnd : sql += " AND "
sql += " P.PSET_HASH=:pset_hash"
binds["pset_hash"]=pset_hash
setAnd=True
if not output_label == "":
if setAnd : sql += " AND "
sql += " O.OUTPUT_MODULE_LABEL=:output_module_label"
binds["output_module_label"]=output_label
setAnd=True
if not global_tag == "":
if setAnd : sql += " AND "
sql += " O.GLOBAL_TAG=:global_tag"
binds["global_tag"]=global_tag
if app == release_version == pset_hash == global_tag == "":
dbsExceptionHandler('dbsException-invalid-input', "%s Either app_name, release_version, pset_hash or global_tag must be provided", self.logger.exception)
result = self.dbi.processData(sql, binds, conn, transaction)
plist = self.formatDict(result)
if len(plist) < 1: return -1
return plist[0]["output_mod_config_id"]
|
returns id for a given application
|
entailment
|
def prepareDatasetMigrationList(self, conn, request):
"""
Prepare the ordered lists of blocks based on input DATASET (note Block is different)
1. Get list of blocks from source
2. Check and see if these blocks are already at DST
3. Check if dataset has parents
4. Check if parent blocks are already at DST
"""
ordered_dict = {}
order_counter = 0
srcdataset = request["migration_input"]
url = request["migration_url"]
try:
tmp_ordered_dict = self.processDatasetBlocks(url, conn,
srcdataset, order_counter)
if tmp_ordered_dict != {}:
ordered_dict.update(tmp_ordered_dict)
self.logger.debug("ordered_dict length at level %s" %order_counter)
self.logger.debug(len(ordered_dict))
else:
#return {}
m = 'Requested dataset %s is already in destination' %srcdataset
dbsExceptionHandler('dbsException-invalid-input2', message=m, serverError=m)
# Now process the parent datasets
parent_ordered_dict = self.getParentDatasetsOrderedList(url, conn,
srcdataset, order_counter+1)
if parent_ordered_dict != {}:
ordered_dict.update(parent_ordered_dict)
self.logger.debug("***** parent ordered_dict length at level %s ******" %(order_counter+1))
self.logger.debug(len(ordered_dict))
return remove_duplicated_items(ordered_dict)
except dbsException:
raise
except Exception as ex:
if 'urlopen error' in str(ex):
message='Connection to source DBS server refused. Check your source url.'
elif 'Bad Request' in str(ex):
message='cannot get data from the source DBS server. Check your migration input.'
else:
message='Failed to make a dataset migration list.'
dbsExceptionHandler('dbsException-invalid-input2', \
serverError="""DBSMigrate/prepareDatasetMigrationList failed
to prepare ordered block list: %s""" %str(ex), message=message)
|
Prepare the ordered lists of blocks based on input DATASET (note Block is different)
1. Get list of blocks from source
2. Check and see if these blocks are already at DST
3. Check if dataset has parents
4. Check if parent blocks are already at DST
|
entailment
|
def processDatasetBlocks(self, url, conn, inputdataset, order_counter):
"""
Utility function that compares blocks of a dataset at source and dst
and returns an ordered list of blocks not already at dst for migration
"""
ordered_dict = {}
srcblks = self.getSrcBlocks(url, dataset=inputdataset)
if len(srcblks) < 1:
e = "DBSMigration: No blocks in the required dataset %s found at source %s."%(inputdataset, url)
dbsExceptionHandler('dbsException-invalid-input2', e, self.logger.exception, e)
dstblks = self.blocklist.execute(conn, dataset=inputdataset)
self.logger.debug("******* dstblks for dataset %s ***********" %inputdataset)
self.logger.debug(dstblks)
blocksInSrcNames = [ y['block_name'] for y in srcblks]
blocksInDstNames = []
for item in dstblks:
blocksInDstNames.append(item['block_name'])
ordered_dict[order_counter] = []
for ablk in blocksInSrcNames:
if not ablk in blocksInDstNames:
ordered_dict[order_counter].append(ablk)
if ordered_dict[order_counter] != []:
self.logger.debug("**** ordered_dict dict length ****")
self.logger.debug(len(ordered_dict))
return ordered_dict
else:
return {}
|
Utility function that compares blocks of a dataset at source and dst
and returns an ordered list of blocks not already at dst for migration
|
entailment
|
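The comparison inside processDatasetBlocks boils down to a set difference between block names at the source and at the destination, keyed by the current order level. A compact sketch of that core step (the names below are illustrative):

def blocks_to_migrate(src_blocks, dst_blocks, order_counter):
    """Return {order_counter: [source blocks missing at destination]} or {} when none are missing."""
    src_names = [b['block_name'] for b in src_blocks]
    dst_names = {b['block_name'] for b in dst_blocks}
    missing = [name for name in src_names if name not in dst_names]
    return {order_counter: missing} if missing else {}

src = [{'block_name': '/a/b/c#1'}, {'block_name': '/a/b/c#2'}]
dst = [{'block_name': '/a/b/c#1'}]
assert blocks_to_migrate(src, dst, 0) == {0: ['/a/b/c#2']}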
def getParentDatasetsOrderedList(self, url, conn, dataset, order_counter):
"""
check if input dataset has parents,
check if any of the blocks are already at dst,
prepare the ordered list and return it.
url : source DBS url
dataset : to be migrated dataset
order_counter: the order in which migration happens.
"""
ordered_dict = {}
parentSrcDatasets = self.getSrcDatasetParents(url, dataset)
if len(parentSrcDatasets) > 0:
parentSrcDatasetNames = [y['parent_dataset']
for y in parentSrcDatasets]
for aparentDataset in parentSrcDatasetNames:
parent_ordered_dict = self.processDatasetBlocks(url, conn,
aparentDataset, order_counter)
self.logger.debug("************ dict length of parent blocks for the parent dataset %s at level %s" %(aparentDataset, order_counter))
self.logger.debug(len(parent_ordered_dict))
if parent_ordered_dict != {}:
ordered_dict.update(parent_ordered_dict)
self.logger.debug("**** ordered_dict length ****")
self.logger.debug(len(ordered_dict))
# parents of parent
pparent_ordered_dict = self.getParentDatasetsOrderedList(url,
conn, aparentDataset, order_counter+1)
self.logger.debug("************dict length parent parent blocks for the parent dataset %s at level %s" %(aparentDataset, order_counter+1))
self.logger.debug(len(pparent_ordered_dict))
if pparent_ordered_dict != {}:
ordered_dict.update(pparent_ordered_dict)
self.logger.debug("**** ordered_dict length ****")
self.logger.debug(len(ordered_dict))
return ordered_dict
|
check if input dataset has parents,
check if any of the blocks are already at dst,
prepare the ordered list and return it.
url : source DBS url
dataset : to be migrated dataset
order_counter: the order in which migration happens.
|
entailment
|
def prepareBlockMigrationList(self, conn, request):
"""
Prepare the ordered lists of blocks based on input BLOCK
1. see if block already exists at dst (no need to migrate),
raise "ALREADY EXISTS"
2. see if block exists at src & make sure the block's open_for_writing=0
3. see if block has parents
4. see if parent blocks are already at dst
5. add 'order' to parent and then this block (ascending)
6. return the ordered list
"""
ordered_dict = {}
block_name = request["migration_input"]
url = request["migration_url"]
order_counter = 0
try:
#1.
dstblock = self.blocklist.execute(conn, block_name=block_name)
for item in dstblock:
if item:
dbsExceptionHandler('dbsException-invalid-input', 'ALREADY EXISTS: \
Required block (%s) migration is already at destination' %block_name, self.logger.exception)
#2.
srcblock = self.getSrcBlocks(url, block=block_name)
if len(srcblock) < 1:
e = 'DBSMigration: Invalid input. Required Block %s not found at source %s.' %(block_name, url)
dbsExceptionHandler('dbsException-invalid-input2', e, self.logger.exception, e)
##This block has to be migrated
ordered_dict[order_counter] = []
ordered_dict[order_counter].append(block_name)
parent_ordered_dict = self.getParentBlocksOrderedList(url, conn,
block_name, order_counter+1)
if parent_ordered_dict != {}:
ordered_dict.update(parent_ordered_dict)
#6.
#check for duplicates
return remove_duplicated_items(ordered_dict)
except Exception as ex:
if '500 Internal Server Error' in str(ex):
#"Server Error" is the default in dbsExceptionHandler
dbsExceptionHandler('Server Error', str(ex), self.logger.exception, "DBSMigrate/prepareBlockMigrationList: "+str(ex))
if isinstance(ex, pycurl.error):
if ex.args[0] == 7:
message = ex.args[1]
dbsExceptionHandler('dbsException-failed-connect2host', message, self.logger.exception, message)
if 'urlopen error' in str(ex):
message='Connection to source DBS server refused. Check your source url.'
elif 'Bad Request' in str(ex):
message='cannot get data from the source DBS server. Check your migration input.'
else:
message='Failed to make a block migration list.'
dbsExceptionHandler('dbsException-invalid-input2', \
"""DBSMigrate/prepareBlockMigrationList failed
to prepare ordered block list: %s""" %str(ex), self.logger.exception, message)
|
Prepare the ordered lists of blocks based on input BLOCK
1. see if block already exists at dst (no need to migrate),
raise "ALREADY EXISTS"
2. see if block exists at src & make sure the block's open_for_writing=0
3. see if block has parents
4. see if parent blocks are already at dst
5. add 'order' to parent and then this block (ascending)
6. return the ordered list
|
entailment
|
def removeMigrationRequest(self, migration_rqst):
"""
Method to remove pending or failed migration request from the queue.
"""
conn = self.dbi.connection()
try:
tran = conn.begin()
self.mgrremove.execute(conn, migration_rqst)
tran.commit()
except dbsException as he:
if conn: conn.close()
raise
except Exception as ex:
if conn: conn.close()
raise
if conn: conn.close()
|
Method to remove pending or failed migration request from the queue.
|
entailment
|
def insertMigrationRequest(self, request):
"""
Method to insert user requests to MIGRATION_REQUESTS table.
request keys: migration_url, migration_input
"""
conn = self.dbi.connection()
# check if already queued.
#If the migration_input is the same, but the src url is different,
#We will consider it as a submitted request. YG 05-18-2012
try:
alreadyqueued = self.mgrlist.execute(conn,
migration_input=request["migration_input"])
is_already_queued = len(alreadyqueued) > 0
# close connection before returning json object
if is_already_queued and conn:
conn.close()
#if the queued request has not failed, then we don't need to queue it again.
#add a new migration_status=9 (terminal failure)
if is_already_queued and alreadyqueued[0]['migration_status'] == 2:
return {"migration_report" : "REQUEST ALREADY QUEUED. Migration is finished",
"migration_details" : alreadyqueued[0] }
elif is_already_queued and alreadyqueued[0]['migration_status'] != 9:
return {"migration_report" : "REQUEST ALREADY QUEUED. Migration in progress",
"migration_details" : alreadyqueued[0] }
elif is_already_queued and alreadyqueued[0]['migration_status'] == 9:
return {"migration_report" : "REQUEST ALREADY QUEUED. Migration terminally failed. ",
"migration_details" : alreadyqueued[0] }
else:
# not already queued
#Determine if its a dataset or block migration
#The prepare list calls will check if the requested blocks/dataset are already in the destination.
if request["migration_input"].find("#") != -1:
ordered_list = self.prepareBlockMigrationList(conn, request)
else:
ordered_list = self.prepareDatasetMigrationList(conn, request)
# now we have the blocks that need to be queued (ordered)
except Exception as ex:
if conn: conn.close()
raise
tran = conn.begin()
try:
# insert the request
#request.update(migration_status=0)
request['migration_request_id'] = self.sm.increment(conn, "SEQ_MR", tran)
self.mgrin.execute(conn, request, tran)
# INSERT the ordered_list
totalQueued = 0
k = ordered_list.keys()
k.sort()
k.reverse()
self.logger.debug("****************** ordered_list keys **********")
self.logger.debug(k)
#for iter in reversed(range(len(ordered_list))):
for iter in k:
self.logger.debug("length for Key: %s" %iter)
self.logger.debug(len(ordered_list[iter]))
if len(ordered_list[iter]) > 0:
daoinput = [{
"migration_block_id" :
self.sm.increment(conn, "SEQ_MB", tran),
"migration_request_id" :
request["migration_request_id"],
"migration_block_name" : blk,
"migration_order" : iter,
"migration_status" : 0,
"creation_date" : request['creation_date'],
"last_modification_date" : request['last_modification_date'],
"create_by" : request['create_by'],
"last_modified_by" : request['last_modified_by']
}
for blk in ordered_list[iter]]
self.mgrblkin.execute(conn, daoinput, tran)
totalQueued += len(ordered_list[iter])
# all good ?, commit the transaction
tran.commit()
if conn: conn.close()
# return things like (X blocks queued for migration)
return {
"migration_report" : "REQUEST QUEUED with total %d blocks to be migrated" %totalQueued,
"migration_details" : request }
except SQLAlchemyIntegrityError as ex:
e = "DBSMigration: ENQUEUEING_FAILED1 from SQLAichemy Integrity Error. Reason may be (%s)" %(ex.statement + "; " + str(ex.params) + "; " + str(ex.orig))
self.logger.debug(e)
import traceback
tk = traceback.format_exc()
self.logger.debug(tk)
tran.rollback()
if conn: conn.close()
if (str(ex).find("unique constraint") != -1 or
str(ex).lower().find("duplicate") != -1):
#FIXME: Need to check which unique key. YG 2/11/13
#The unique constraints are: MIGRATION_REQUESTS(MIGRATION_INPUT)
#MIGRATION_BLOCKS(MIGRATION_BLOCK_NAME, MIGRATION_REQUEST_ID)
return {
"migration_report" : "REQUEST ALREADY QUEUED",
"migration_details" : request }
else:
if conn: conn.close()
self.logger.error(tk)
m = "DBSMigration: ENQUEUEING_FAILED1."
dbsExceptionHandler('dbsException-invalid-input2', m, self.logger.exception, e)
except HTTPError as he:
raise he
except Exception as ex:
import traceback
self.logger.error(traceback.format_exc())
if tran: tran.rollback()
if conn: conn.close()
m = "DBSMigration: ENQUEUEING_FAILED."
e = "DBSMigration: ENQUEUEING_FAILED. General exception caught: Reason may be (%s)" %str(ex)
dbsExceptionHandler('dbsException-invalid-input2', m, self.logger.exception, e)
finally:
if conn: conn.close()
|
Method to insert user requests to MIGRATION_REQUESTS table.
request keys: migration_url, migration_input
|
entailment
|
def listMigrationRequests(self, migration_request_id="", block_name="",
dataset="", user="", oldest=False):
"""
get the status of the migration
migratee : can be dataset or block_name
"""
conn = self.dbi.connection()
migratee = ""
try:
if block_name:
migratee = block_name
elif dataset:
migratee = dataset
result = self.mgrlist.execute(conn, migration_url="",
migration_input=migratee, create_by=user,
migration_request_id=migration_request_id, oldest=oldest)
return result
finally:
if conn: conn.close()
|
get the status of the migration
migratee : can be dataset or block_name
|
entailment
|
def listMigrationBlocks(self, migration_request_id=""):
"""
get everything of the block that has status = 0 and migration_request_id as specified.
"""
conn = self.dbi.connection()
try:
return self.mgrblklist.execute(conn, migration_request_id=migration_request_id)
finally:
if conn: conn.close()
|
get everything of the block that has status = 0 and migration_request_id as specified.
|
entailment
|
def updateMigrationRequestStatus(self, migration_status, migration_request_id):
"""
migration_status:
0=PENDING
1=IN PROGRESS
2=COMPLETED
3=FAILED (will be retried)
9=Terminally FAILED
status change:
0 -> 1
1 -> 2
1 -> 3
1 -> 9
are only allowed changes for working through migration.
3 -> 1 is allowed for retrying and retry count +1.
"""
conn = self.dbi.connection()
tran = conn.begin()
try:
upst = dict(migration_status=migration_status,
migration_request_id=migration_request_id,
last_modification_date=dbsUtils().getTime())
self.mgrRqUp.execute(conn, upst)
except:
if tran:tran.rollback()
raise
else:
if tran:tran.commit()
finally:
#open transaction is committed when conn closed.
if conn:conn.close()
|
migration_status:
0=PENDING
1=IN PROGRESS
2=COMPLETED
3=FAILED (will be retried)
9=Terminally FAILED
status change:
0 -> 1
1 -> 2
1 -> 3
1 -> 9
are only allowed changes for working through migration.
3 -> 1 is allowed for retrying and retry count +1.
|
entailment
|
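The status changes listed in the docstring form a small state machine. The DAO above does not enforce it, but a sketch of how such a guard could be written (an illustration, not part of the shown code):

# 0=PENDING, 1=IN PROGRESS, 2=COMPLETED, 3=FAILED (retriable), 9=terminally FAILED
ALLOWED_TRANSITIONS = {0: {1}, 1: {2, 3, 9}, 3: {1}}  # 3 -> 1 is the retry path

def is_valid_transition(current, new):
    return new in ALLOWED_TRANSITIONS.get(current, set())

assert is_valid_transition(0, 1)
assert is_valid_transition(3, 1)      # retry; the retry count would be incremented elsewhere
assert not is_valid_transition(2, 1)  # completed requests are never reopened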
def updateMigrationBlockStatus(self, migration_status=0, migration_block=None, migration_request=None):
"""
migration_status:
0=PENDING
1=IN PROGRESS
2=COMPLETED
3=FAILED (will be retried)
9=Terminally FAILED
status change:
0 -> 1
1 -> 2
1 -> 3
1 -> 9
are only allowed changes for working through migration.
3 -> 1 allowed for retrying.
"""
conn = self.dbi.connection()
tran = conn.begin()
try:
if migration_block:
upst = dict(migration_status=migration_status,
migration_block_id=migration_block, last_modification_date=dbsUtils().getTime())
elif migration_request:
upst = dict(migration_status=migration_status, migration_request_id=migration_request,
last_modification_date=dbsUtils().getTime())
self.mgrup.execute(conn, upst)
except:
if tran:tran.rollback()
raise
else:
if tran:tran.commit()
finally:
if conn:conn.close()
|
migration_status:
0=PENDING
1=IN PROGRESS
2=COMPLETED
3=FAILED (will be retried)
9=Terminally FAILED
status change:
0 -> 1
1 -> 2
1 -> 3
1 -> 9
are only allowed changes for working through migration.
3 -> 1 allowed for retrying.
|
entailment
|
def getSrcDatasetParents(self, url, dataset):
"""
List dataset parents at the src DBS
"""
#resturl = "%s/datasetparents?dataset=%s" % (url, dataset)
params={'dataset':dataset}
return cjson.decode(self.callDBSService(url, 'datasetparents', params, {}))
|
List dataset parents at the src DBS
|
entailment
|
def getSrcBlockParents(self, url, block):
"""
List block parents at the src DBS
"""
#blockname = block.replace("#", urllib.quote_plus('#'))
#resturl = "%s/blockparents?block_name=%s" % (url, blockname)
params={'block_name':block}
return cjson.decode(self.callDBSService(url, 'blockparents', params, {}))
|
List block parents at the src DBS
|
entailment
|
def getSrcBlocks(self, url, dataset="", block=""):
"""
Need to list all blocks of the dataset and its parents starting from the top
For now just list the blocks from this dataset.
Client type call...
"""
if block:
params={'block_name':block, 'open_for_writing':0}
elif dataset:
params={'dataset':dataset, 'open_for_writing':0}
else:
m = 'DBSMigration: Invalid input. Either block or dataset name has to be provided'
e = 'DBSMigrate/getSrcBlocks: Invalid input. Either block or dataset name has to be provided'
dbsExceptionHandler('dbsException-invalid-input2', m, self.logger.exception, e )
return cjson.decode(self.callDBSService(url, 'blocks', params, {}))
|
Need to list all blocks of the dataset and its parents starting from the top
For now just list the blocks from this dataset.
Client type call...
|
entailment
|
def executeSingle( self, conn, daoinput, tablename, transaction = False):
"""build dynamic sql based on daoinput"""
sql1 = " insert into %s%s( " %(self.owner, tablename)
sql2 =" values("
"Now loop over all the input keys. We need to check if all the keys are valid !!!"
for key in daoinput:
sql1 += "%s," %key.upper()
sql2 += ":%s," %key.lower()
sql = sql1.strip(',') + ') ' + sql2.strip(',') + ' )'
self.dbi.processData(sql, daoinput, conn, transaction)
|
build dynamic sql based on daoinput
|
entailment
|
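executeSingle assembles the INSERT statement column by column from the keys of daoinput, pairing each upper-cased column with a lower-cased bind variable. A sketch of the string construction in isolation, with an illustrative owner and table name:

def build_insert_sql(owner, tablename, daoinput):
    """Build 'insert into <owner><table>( COLS ) values( :binds )' from the dict keys."""
    cols = ",".join(key.upper() for key in daoinput)
    binds = ",".join(":" + key.lower() for key in daoinput)
    return "insert into %s%s( %s ) values( %s )" % (owner, tablename, cols, binds)

sql = build_insert_sql("OWNER.", "DATA_TIERS", {"data_tier_id": 1, "data_tier_name": "RAW"})
# -> insert into OWNER.DATA_TIERS( DATA_TIER_ID,DATA_TIER_NAME ) values( :data_tier_id,:data_tier_name )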
def _cast_boolean(self, value):
"""
Helper to convert config values to boolean as ConfigParser does.
"""
if value.lower() not in self._BOOLEANS:
raise ValueError('Not a boolean: %s' % value)
return self._BOOLEANS[value.lower()]
|
Helper to convert config values to boolean as ConfigParser does.
|
entailment
|
def get(self, option, default=undefined, cast=undefined):
"""
Return the value for option or default if defined.
"""
if option in self.repository:
value = self.repository.get(option)
else:
value = default
if isinstance(value, Undefined):
raise UndefinedValueError('%s option not found and default value was not defined.' % option)
if isinstance(cast, Undefined):
cast = lambda v: v # nop
elif cast is bool:
cast = self._cast_boolean
return cast(value)
|
Return the value for option or default if defined.
|
entailment
|
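get() resolves the value first and applies the cast second, with cast=bool redirected to _cast_boolean rather than Python truthiness (so the string "false" really becomes False). A simplified stand-in with a plain dict repository, not the library's class, to show the lookup chain:

class MiniConfig:
    _BOOLEANS = {'1': True, 'yes': True, 'true': True, 'on': True,
                 '0': False, 'no': False, 'false': False, 'off': False}

    def __init__(self, repository):
        self.repository = repository

    def _cast_boolean(self, value):
        if value.lower() not in self._BOOLEANS:
            raise ValueError('Not a boolean: %s' % value)
        return self._BOOLEANS[value.lower()]

    def get(self, option, default=None, cast=None):
        value = self.repository.get(option, default)
        if cast is bool:
            return self._cast_boolean(value)
        return value if cast is None else cast(value)

cfg = MiniConfig({'DEBUG': 'false', 'WORKERS': '4'})
assert cfg.get('DEBUG', cast=bool) is False
assert cfg.get('WORKERS', cast=int) == 4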
def parse_requirements(requirements_file):
"""
Create a list for the 'install_requires' component of the setup function
by parsing a requirements file
"""
if os.path.exists(requirements_file):
# return a list that contains each line of the requirements file
return open(requirements_file, 'r').read().splitlines()
else:
print("ERROR: requirements file " + requirements_file + " not found.")
sys.exit(1)
|
Create a list for the 'install_requires' component of the setup function
by parsing a requirements file
|
entailment
|
def execute(self, conn, dataset="", block_name="", data_tier_name="", origin_site_name="", logical_file_name="",
run_num=-1, min_cdate=0, max_cdate=0, min_ldate=0, max_ldate=0, cdate=0,
ldate=0, open_for_writing=-1, transaction = False):
"""
dataset: /a/b/c
block: /a/b/c#d
"""
binds = {}
basesql = self.sql
joinsql = ""
wheresql = ""
generatedsql = ""
if logical_file_name and logical_file_name != "%":
joinsql += " JOIN %sFILES FL ON FL.BLOCK_ID = B.BLOCK_ID " %(self.owner)
op = ("=", "like")["%" in logical_file_name]
wheresql += " WHERE LOGICAL_FILE_NAME %s :logical_file_name " % op
binds.update( logical_file_name = logical_file_name )
if block_name and block_name !="%":
andorwhere = ("WHERE", "AND")[bool(wheresql)]
op = ("=", "like")["%" in block_name]
wheresql += " %s B.BLOCK_NAME %s :block_name " % ((andorwhere, op))
binds.update( block_name = block_name )
if data_tier_name or (dataset and dataset!="%"):
joinsql += "JOIN %sDATASETS DS ON DS.DATASET_ID = B.DATASET_ID " % (self.owner)
andorwhere = ("WHERE", "AND")[bool(wheresql)]
if dataset:
op = ("=", "like")["%" in dataset]
wheresql += " %s DS.DATASET %s :dataset " % ((andorwhere, op))
binds.update(dataset=dataset)
if data_tier_name:
joinsql += "JOIN {owner}DATA_TIERS DT ON DS.DATA_TIER_ID=DT.DATA_TIER_ID ".format(owner=self.owner)
wheresql += " %s DT.DATA_TIER_NAME=:data_tier_name " % (andorwhere)
binds.update(data_tier_name=data_tier_name)
if origin_site_name and origin_site_name != "%":
op = ("=", "like")["%" in origin_site_name]
wheresql += " AND B.ORIGIN_SITE_NAME %s :origin_site_name " % op
binds.update(origin_site_name = origin_site_name)
if open_for_writing == 0 or open_for_writing == 1:
wheresql += " AND B.OPEN_FOR_WRITTING = :open_for_writing "
binds.update(open_for_writing = open_for_writing)
if cdate != 0:
wheresql += "AND B.CREATION_DATE = :cdate "
binds.update(cdate = cdate)
elif min_cdate != 0 and max_cdate != 0:
wheresql += "AND B.CREATION_DATE BETWEEN :min_cdate and :max_cdate "
binds.update(min_cdate = min_cdate)
binds.update(max_cdate = max_cdate)
elif min_cdate != 0 and max_cdate == 0:
wheresql += "AND B.CREATION_DATE > :min_cdate "
binds.update(min_cdate = min_cdate)
elif min_cdate ==0 and max_cdate != 0:
wheresql += "AND B.CREATION_DATE < :max_cdate "
binds.update(max_cdate = max_cdate)
else:
pass
if ldate != 0:
wheresql += "AND B.LAST_MODIFICATION_DATE = :ldate "
binds.update(ldate = ldate)
elif min_ldate != 0 and max_ldate != 0:
wheresql += "AND B.LAST_MODIFICATION_DATE BETWEEN :min_ldate and :max_ldate "
binds.update(min_ldate = min_ldate)
binds.update(max_ldate = max_ldate)
elif min_ldate != 0 and max_ldate == 0:
wheresql += "AND B.LAST_MODIFICATION_DATE > :min_ldate "
binds.update(min_ldate = min_ldate)
elif min_ldate ==0 and max_ldate != 0:
wheresql += "AND B.LAST_MODIFICATION_DATE < :max_ldate "
binds.update(max_ldate = max_ldate)
else:
pass
#one may provide a list of runs , so it has to be the last one in building the bind.
if run_num !=-1 :
basesql = basesql.replace("SELECT", "SELECT DISTINCT") + " , FLM.RUN_NUM "
if not logical_file_name:
joinsql += " JOIN %sFILES FL ON FL.BLOCK_ID = B.BLOCK_ID " %(self.owner)
joinsql += " JOIN %sFILE_LUMIS FLM on FLM.FILE_ID = FL.FILE_ID " %(self.owner)
run_list=[]
wheresql_run_list=''
wheresql_run_range=''
#
for r in parseRunRange(run_num):
if isinstance(r, basestring) or isinstance(r, int) or isinstance(r, long):
run_list.append(str(r))
if isinstance(r, run_tuple):
if r[0] == r[1]:
dbsExceptionHandler('dbsException-invalid-input', "DBS run_num range must be apart at least by 1.", self.logger.exception)
wheresql_run_range = " FLM.RUN_NUM between :minrun and :maxrun "
binds.update({"minrun":r[0]})
binds.update({"maxrun":r[1]})
#
if run_list:
wheresql_run_list = " FLM.RUN_NUM in (SELECT TOKEN FROM TOKEN_GENERATOR) "
generatedsql, run_binds = create_token_generator(run_list)
binds.update(run_binds)
#
if wheresql_run_range and wheresql_run_list:
wheresql += " and (" + wheresql_run_range + " or " + wheresql_run_list + " )"
elif wheresql_run_range and not wheresql_run_list:
wheresql += " and " + wheresql_run_range
elif not wheresql_run_range and wheresql_run_list:
wheresql += " and " + wheresql_run_list
#
sql = " ".join((generatedsql, basesql, self.fromsql, joinsql, wheresql))
cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
for i in cursors:
d = self.formatCursor(i, size=100)
if isinstance(d, list) or isinstance(d, GeneratorType):
for elem in d:
yield elem
elif d:
yield d
|
dataset: /a/b/c
block: /a/b/c#d
|
entailment
|
def execute(self, conn, dsType = "", dataset="", transaction = False):
"""
Lists all primary dataset types if no user input is provided.
"""
sql = self.sql
binds={}
if not dsType and not dataset:
pass
elif dsType and dataset in ("", None, '%'):
op = ("=", "like")["%" in dsType]
sql += "WHERE PDT.PRIMARY_DS_TYPE %s :primdstype"%op
binds = {"primdstype":dsType}
elif dataset and dsType in ("", None, '%'):
op = ("=", "like")["%" in dataset]
sql += "JOIN %sPRIMARY_DATASETS PDS on PDS.PRIMARY_DS_TYPE_ID = PDT.PRIMARY_DS_TYPE_ID \
JOIN %sDATASETS DS ON DS.PRIMARY_DS_ID = PDS.PRIMARY_DS_ID \
WHERE DS.DATASET %s :dataset" %(self.owner, self.owner, op)
binds={"dataset":dataset}
elif dataset and dsType:
op = ("=", "like")["%" in dsType]
op1 = ("=", "like")["%" in dataset]
sql += "JOIN %sPRIMARY_DATASETS PDS on PDS.PRIMARY_DS_TYPE_ID = PDT.PRIMARY_DS_TYPE_ID \
JOIN %sDATASETS DS ON DS.PRIMARY_DS_ID = PDS.PRIMARY_DS_ID \
WHERE DS.DATASET %s :dataset and PDT.PRIMARY_DS_TYPE %s :primdstype" \
%(self.owner, self.owner, op1, op)
binds = {"primdstype":dsType, "dataset":dataset}
else:
dbsExceptionHandler('dbsException-invalid-input', "DAO Primary_DS_TYPE List accepts no input, or\
dataset,primary_ds_type as input.", self.logger.exception)
cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
result = []
for c in cursors:
result.extend(self.formatCursor(c, size=100))
return result
|
Lists all primary dataset types if no user input is provided.
|
entailment
|
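The ("=", "like")["%" in value] expression used throughout these DAOs is a tuple-indexing idiom: the boolean indexes the two-element tuple, selecting LIKE whenever the value carries a SQL wildcard. A sketch of the idiom on its own:

def build_where(column, bind_name, value):
    """Pick '=' or 'like' depending on whether the value contains a '%' wildcard."""
    op = ("=", "like")["%" in value]   # False -> index 0 -> '=', True -> index 1 -> 'like'
    return "WHERE %s %s :%s" % (column, op, bind_name)

assert build_where("DS.DATASET", "dataset", "/A/B/RAW") == "WHERE DS.DATASET = :dataset"
assert build_where("DS.DATASET", "dataset", "/A/%/RAW") == "WHERE DS.DATASET like :dataset"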
def execute(self, conn, dataset="", logical_file_name="", app="", release_version="", pset_hash="",
output_label ="", block_id=0, global_tag='', transaction = False):
"""
returns id for a given application
"""
#sql=self.sql
binds = {}
setAnd=False
#add search only block id only for migration dump block.
if block_id==0:
sql = self.sql1 + self.sql2
if dataset:
sql += " JOIN %sDATASET_OUTPUT_MOD_CONFIGS DC ON DC.OUTPUT_MOD_CONFIG_ID=O.OUTPUT_MOD_CONFIG_ID" % self.owner
sql += " JOIN %sDATASETS DS ON DS.DATASET_ID=DC.DATASET_ID" % self.owner
if logical_file_name:
sql += " JOIN %sFILE_OUTPUT_MOD_CONFIGS FC ON FC.OUTPUT_MOD_CONFIG_ID=O.OUTPUT_MOD_CONFIG_ID" % self.owner
sql += " JOIN %sFILES FS ON FS.FILE_ID=FC.FILE_ID" % self.owner
if not app == "":
op = ("=", "like")["%" in app]
sql += " WHERE A.APP_NAME %s :app_name" % op
binds["app_name"]=app
setAnd=True
if not release_version == "":
op = ("=", "like")["%" in release_version]
if setAnd : sql += " AND "
else : sql += " WHERE "
sql += " R.RELEASE_VERSION %s :release_version" % op
binds["release_version"]=release_version
setAnd=True
if not pset_hash == "":
op = ("=", "like")["%" in pset_hash]
if setAnd : sql += " AND "
else : sql += " WHERE "
sql += " P.PSET_HASH %s :pset_hash" % op
binds["pset_hash"]=pset_hash
setAnd=True
if not output_label == "":
op = ("=", "like")["%" in output_label]
if setAnd : sql += " AND "
else : sql += " WHERE "
sql += " O.OUTPUT_MODULE_LABEL %s :output_module_label" % op
binds["output_module_label"]=output_label
setAnd=True
if not global_tag == "":
op = ("=", "like")["%" in global_tag]
if setAnd : sql += " AND "
else : sql += " WHERE "
sql += " O.GLOBAL_TAG %s :global_tag" % op
binds["global_tag"]=global_tag
setAnd=True
if dataset:
if setAnd : sql += " AND "
else : sql += " WHERE "
sql += "DS.DATASET=:dataset"
binds["dataset"]=dataset
setAnd=True
if logical_file_name:
if setAnd : sql += " AND "
else : sql += " WHERE "
sql += "FS.LOGICAL_FILE_NAME=:logical_file_name"
binds["logical_file_name"]=logical_file_name
setAnd=True
else:
#select by block id and return config along with LFN
sql= self.sql1 + " , FS.LOGICAL_FILE_NAME LFN " + self.sql2 \
+ " JOIN %sFILE_OUTPUT_MOD_CONFIGS FC ON FC.OUTPUT_MOD_CONFIG_ID=O.OUTPUT_MOD_CONFIG_ID" % self.owner \
+ " JOIN %sFILES FS ON FS.FILE_ID=FC.FILE_ID" % self.owner \
+ " WHERE FS.BLOCK_ID = :block_id "
binds["block_id"]=block_id
cursors = self.dbi.processData(sql, binds, conn, transaction=False, returnCursor=True)
#assert len(cursors) == 1, "output module config does not exist"
result = []
for c in cursors:
result.extend(self.formatCursor(c, size=100))
return result
|
returns id for a given application
|
entailment
|
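A minimal standalone sketch (not the DBS DAO itself, with made-up column and bind names) of the two idioms the execute() above relies on: choosing "=" versus "like" via ("=", "like")["%" in value], and switching from WHERE to AND with a setAnd-style flag while collecting bind variables.
def build_filter(filters):
    """filters: {bind_name: value}; a '%' in a value selects LIKE instead of =."""
    sql, binds, set_and = "", {}, False
    for name, value in sorted(filters.items()):
        if value in ("", None):
            continue
        op = ("=", "like")["%" in value]          # same index trick as above
        sql += " AND " if set_and else " WHERE "  # WHERE once, then AND
        sql += "%s %s :%s" % (name.upper(), op, name)
        binds[name] = value
        set_and = True
    return sql, binds
# build_filter({"app_name": "cmsRun", "release_version": "CMSSW_10%"}) gives
# (" WHERE APP_NAME = :app_name AND RELEASE_VERSION like :release_version",
#  {"app_name": "cmsRun", "release_version": "CMSSW_10%"})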
def execute(self, conn, block_name="", transaction = False):
"""
block: /a/b/c#d
"""
if not conn:
msg='Oracle/BlockParent/List. No DB connection found'
dbsExceptionHandler('dbsException-failed-connect2host', msg, self.logger.exception)
sql = self.sql
binds = {}
if block_name:
binds.update(block_name = block_name)
else:
dbsExceptionHandler("dbsException-invalid-input", "Oracle/BlockParent/ListChild. block_name must be provided.", self.logger.exception)
cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
result = []
for c in cursors:
result.extend(self.formatCursor(c, size=100))
return result
|
block: /a/b/c#d
|
entailment
|
def listFileLumis(self, logical_file_name="", block_name="", run_num=-1, validFileOnly=0, input_body=-1):
"""
optional parameter: logical_file_name, block_name, validFileOnly
returns: logical_file_name, file_lumi_id, run_num, lumi_section_num
"""
if((logical_file_name=='' or '*'in logical_file_name or '%' in logical_file_name) \
and (block_name=='' or '*' in block_name or '%' in block_name) and input_body==-1 ):
dbsExceptionHandler('dbsException-invalid-input', \
"Fully specified logical_file_name or block_name is required if GET is called. No wildcards are allowed.",
self.logger.exception, "Fully specified logical_file_name or block_name is required if GET is called. No wildcards are allowed.")
elif input_body != -1 :
try:
logical_file_name = input_body["logical_file_name"]
run_num = input_body.get("run_num", -1)
validFileOnly = input_body.get("validFileOnly", 0)
block_name = ""
except cjson.DecodeError as de:
msg = "business/listFileLumis requires at least a list of logical_file_name. %s" % de
dbsExceptionHandler('dbsException-invalid-input2', "Invalid input", self.logger.exception, msg)
elif input_body != -1 and (logical_file_name is not None or block_name is not None):
dbsExceptionHandler('dbsException-invalid-input', "listFileLumis may have input in the command or in the payload, not mixed.", self.logger.exception, "listFileLumis may have input in the command or in the payload, not mixed.")
with self.dbi.connection() as conn:
for item in self.filelumilist.execute(conn, logical_file_name, block_name, run_num, validFileOnly=validFileOnly):
yield item
|
optional parameter: logical_file_name, block_name, validFileOnly
returns: logical_file_name, file_lumi_id, run_num, lumi_section_num
|
entailment
|
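An illustrative POST payload for listFileLumis (values are made up): per the checks above, logical_file_name is required in the body, run_num and validFileOnly are optional, and block_name is ignored for payload calls.
input_body = {
    "logical_file_name": ["/store/data/run123/file_1.root",
                          "/store/data/run123/file_2.root"],
    "run_num": 123,        # optional
    "validFileOnly": 1,    # optional
}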
def listFileSummary(self, block_name="", dataset="", run_num=-1, validFileOnly=0, sumOverLumi=0):
"""
required parameter: full block_name or dataset name. No wildcards allowed. run_num is optional.
"""
if not block_name and not dataset:
msg = "Block_name or dataset is required for listFileSummary API"
dbsExceptionHandler('dbsException-invalid-input', msg, self.logger.exception)
if '%' in block_name or '*' in block_name or '%' in dataset or '*' in dataset:
msg = "No wildcard is allowed in block_name or dataset for filesummaries API"
dbsExceptionHandler('dbsException-invalid-input', msg, self.logger.exception)
#
with self.dbi.connection() as conn:
for item in self.filesummarylist.execute(conn, block_name, dataset, run_num,
validFileOnly=validFileOnly, sumOverLumi=sumOverLumi):
if item['num_file']==0 and item['num_block']==0 \
and item['num_event']==0 and item['file_size']==0:
pass
else:
yield item
|
required parameter: full block_name or dataset name. No wildcards allowed. run_num is optional.
|
entailment
|
def listFileParents(self, logical_file_name="", block_id=0, block_name=""):
"""
required parameter: logical_file_name or block_name
returns: this_logical_file_name, parent_logical_file_name, parent_file_id
"""
#self.logger.debug("lfn %s, block_name %s, block_id :%s" % (logical_file_name, block_name, block_id))
if not logical_file_name and not block_name and not block_id:
dbsExceptionHandler('dbsException-invalid-input', \
"Logical_file_name, block_id or block_name is required for fileparents api", self.logger.exception )
with self.dbi.connection() as conn:
sqlresult = self.fileparentlist.execute(conn, logical_file_name, block_id, block_name)
d = {}
#self.logger.debug(sqlresult)
for i in sqlresult:
k = i['this_logical_file_name']
v = i['parent_logical_file_name']
d.setdefault(k, []).append(v)
for k, v in d.iteritems():
yield {'logical_file_name':k, 'parent_logical_file_name': v}
del d
|
required parameter: logical_file_name or block_name
returns: this_logical_file_name, parent_logical_file_name, parent_file_id
|
entailment
|
def listFileParentsByLumi(self, block_name='', logical_file_name=[]):
"""
required parameter: block_name
returns: [{child_parent_id_list: [(cid1, pid1), (cid2, pid2), ... (cidn, pidn)]}]
"""
#self.logger.debug("lfn %s, block_name %s" % (logical_file_name, block_name))
if not block_name:
dbsExceptionHandler('dbsException-invalid-input', \
"Child block_name is required for fileparents/listFileParentsByLumi api", self.logger.exception )
with self.dbi.connection() as conn:
sqlresult = self.fileparentbylumi.execute(conn, block_name, logical_file_name)
return [{"child_parent_id_list":sqlresult}]
|
required parameter: block_name
returns: [{child_parent_id_list: [(cid1, pid1), (cid2, pid2), ... (cidn, pidn)]}]
|
entailment
|
def listFileChildren(self, logical_file_name='', block_name='', block_id=0):
"""
required parameter: logical_file_name or block_name or block_id
returns: logical_file_name, child_logical_file_name, parent_file_id
"""
conn = self.dbi.connection()
try:
if not logical_file_name and not block_name and not block_id:
dbsExceptionHandler('dbsException-invalid-input',\
"Logical_file_name, block_id or block_name is required for listFileChildren api")
sqlresult = self.filechildlist.execute(conn, logical_file_name, block_name, block_id)
d = {}
result = []
for i in range(len(sqlresult)):
k = sqlresult[i]['logical_file_name']
v = sqlresult[i]['child_logical_file_name']
if k in d:
d[k].append(v)
else:
d[k] = [v]
for k, v in d.iteritems():
r = {'logical_file_name':k, 'child_logical_file_name': v}
result.append(r)
return result
finally:
if conn:
conn.close()
|
required parameter: logical_file_name or block_name or block_id
returns: logical_file_name, child_logical_file_name, parent_file_id
|
entailment
|
def updateStatus(self, logical_file_name, is_file_valid, lost, dataset):
"""
Used to toggle the status of a file from is_file_valid=1 (valid) to is_file_valid=0 (invalid)
"""
conn = self.dbi.connection()
trans = conn.begin()
try :
self.updatestatus.execute(conn, logical_file_name, is_file_valid, lost, dataset, trans)
trans.commit()
trans = None
except Exception as ex:
if trans:
trans.rollback()
trans = None
raise ex
finally:
if trans:
trans.rollback()
if conn:
conn.close()
|
Used to toggle the status of a file from is_file_valid=1 (valid) to is_file_valid=0 (invalid)
|
entailment
|
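A generic sketch of the begin/commit/rollback pattern that updateStatus and the other write methods in this section follow; dbi and work are placeholders, not the actual DBS interfaces.
def run_in_transaction(dbi, work):
    """Run work(conn, trans) in a transaction: commit on success,
    roll back on failure, and always close the connection."""
    conn = dbi.connection()
    trans = conn.begin()
    try:
        work(conn, trans)       # e.g. a DAO execute() call
        trans.commit()
        trans = None
    except Exception:
        if trans:
            trans.rollback()
            trans = None
        raise
    finally:
        if trans:
            trans.rollback()
        if conn:
            conn.close()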
def listFiles(self, dataset="", block_name="", logical_file_name="",
release_version="", pset_hash="", app_name="",
output_module_label="", run_num=-1,
origin_site_name="", lumi_list=[], detail=False, validFileOnly=0, sumOverLumi=0, input_body=-1):
"""
One of below parameter groups must be present:
non-patterned dataset, non-patterned block, non-patterned dataset with lfn, non-patterned block with lfn,
non-patterned lfn
non-patterned lfn list
"""
if input_body != -1 :
try:
logical_file_name = input_body.get("logical_file_name", "")
run_num = input_body.get("run_num", -1)
validFileOnly = input_body.get("validFileOnly", 0)
sumOverLumi = input_body.get("sumOverLumi", 0)
detail = input_body.get("detail", False)
block_name = input_body.get("block_name", "")
dataset = input_body.get("dataset", "")
release_version = input_body.get("release_version", "")
pset_hash = input_body.get("pset_hash", "")
app_name = input_body.get("app_name", "")
output_module_label = input_body.get("output_module_label", "")
origin_site_name = input_body.get("origin_site_name", "")
lumi_list = input_body.get("lumi_list", [])
except cjson.DecodeError as de:
msg = "business/listFilss POST call requires at least dataset, block_name, or a list of logical_file_name %s" % de
dbsExceptionHandler('dbsException-invalid-input', "Invalid input", self.logger.exception, msg)
if ('%' in block_name):
dbsExceptionHandler('dbsException-invalid-input', "You must specify exact block name not a pattern", self.logger.exception)
elif ('%' in dataset):
print("***** in dataset name")
dbsExceptionHandler('dbsException-invalid-input', " You must specify exact dataset name not a pattern", self.logger.exception)
elif (not dataset and not block_name and (not logical_file_name or '%'in logical_file_name) ):
dbsExceptionHandler('dbsException-invalid-input', """You must specify one of the parameter groups: \
non-pattern dataset, \
non-pattern block , non-pattern dataset with lfn ,\
non-pattern block with lfn or no-pattern lfn, \
non-patterned lfn list .""", self.logger.exception)
elif (lumi_list and len(lumi_list) != 0):
if run_num==-1:
dbsExceptionHandler('dbsException-invalid-input', "Lumi list must accompany A single run number, \
use run_num=123", self.logger.exception)
elif isinstance(run_num, basestring):
try:
run_num = int(run_num)
except:
dbsExceptionHandler('dbsException-invalid-input', "Lumi list must accompany A single run number,\
use run_num=123", self.logger.exception)
elif isinstance(run_num, list):
if len(run_num) == 1:
try:
run_num = int(run_num[0])
except:
dbsExceptionHandler('dbsException-invalid-input', "Lumi list must accompany A single run number,\
use run_num=123", self.logger.exception)
else:
dbsExceptionHandler('dbsException-invalid-input', "Lumi list must accompany A single run number,\
use run_num=123", self.logger.exception)
else:
pass
with self.dbi.connection() as conn:
dao = (self.filebrieflist, self.filelist)[detail]
for item in dao.execute(conn, dataset, block_name, logical_file_name, release_version, pset_hash, app_name,
output_module_label, run_num, origin_site_name, lumi_list, validFileOnly, sumOverLumi):
yield item # we need to yield while connection is open
|
One of below parameter groups must be present:
non-patterned dataset, non-patterned block, non-patterned dataset with lfn, non-patterned block with lfn,
non-patterned lfn
non-patterned lfn list
|
entailment
|
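Illustrative inputs for listFiles (names and values are made up). As the checks above require, '%' wildcards are rejected in dataset and block_name, and a lumi_list must be accompanied by a single run_num.
by_dataset = {"dataset": "/Prim/Era2018-v1/AOD", "detail": True}
by_lfn_with_lumis = {"logical_file_name": "/store/data/run123/file_1.root",
                     "run_num": 123,
                     "lumi_list": [1, 2, 3]}
post_body = {"logical_file_name": ["/store/data/run123/file_1.root",
                                   "/store/data/run123/file_2.root"],
             "validFileOnly": 1}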
def insertFile(self, businput, qInserts=False):
"""
This method supports bulk insert of files
performing other operations such as setting Block and Dataset parentages,
setting mapping between OutputConfigModules and File(s) etc.
        :param qInserts: True means that inserts will be queued instead of done immediately. INSERT QUEUE Manager will perform the inserts within a few minutes.
:type qInserts: bool
:param logical_file_name (required) : string
:param is_file_valid: (optional, default = 1): 1/0
:param block, required: /a/b/c#d
:param dataset, required: /a/b/c
:param file_type (optional, default = EDM): one of the predefined types,
:param check_sum (optional): string
:param event_count (optional, default = -1): int
:param file_size (optional, default = -1.): float
:param adler32 (optional): string
:param md5 (optional): string
:param auto_cross_section (optional, default = -1.): float
:param file_lumi_list (optional, default = []): [{'run_num': 123, 'lumi_section_num': 12},{}....]
:param file_parent_list(optional, default = []) :[{'file_parent_lfn': 'mylfn'},{}....]
:param file_assoc_list(optional, default = []) :[{'file_parent_lfn': 'mylfn'},{}....]
:param file_output_config_list(optional, default = []) :
[{'app_name':..., 'release_version':..., 'pset_hash':...., output_module_label':...},{}.....]
"""
        # We do not want to go beyond 10 files at a time.
        # If a user inserts more than 10 files in one shot, we risk locking the database
        # tables for a long time, and in case of error it is hard to see where the error occurred.
if len(businput) > 10:
dbsExceptionHandler('dbsException-input-too-large', "DBS cannot insert \
more than 10 files in one bulk call")
return
conn = self.dbi.connection()
tran = conn.begin()
try:
#Now we are dealing with independent files that have different dataset/block and so on.
#See Trac #358.
#The expected input data format is a list of dictionary to insert independent files into DBS,
#inputdata={'files':[{}, {}, {}]}
#YG 09/15/2011
# AA- 01/06/2010 -- we have to do this file-by-file, there is no real good way to do this complex operation otherwise
#files2insert = []
#fidl = []
fileInserted = False
dataset = ""
block_name = ""
dataset_id = -1
block_id = -1
dsconfigs = []
for f in businput:
if not ("logical_file_name" in f and "block_name" in f and "dataset" in f ):
dbsExceptionHandler('dbsException-invalid-input', "DBSFile/insertFile must have logical_file_name, block_name and dataset as input")
if f["block_name"].split('#')[0] != f["dataset"]:
dbsExceptionHandler('dbsException-invalid-input', "DBSFile/insertFile: dataset and block_name NOT match")
# first check if the dataset exists
# and block exists that files are suppose to be going to and is OPEN for writing
if dataset != f["dataset"]:
dataset_id = self.datasetid.execute(conn, dataset=f["dataset"])
dataset = f["dataset"]
if dataset_id == -1 :
dbsExceptionHandler('dbsException-missing-data', "Required Dataset Not Found.", None,
"Required Dataset %s does not exist"%f["dataset"] )
# get the list of configs in for this dataset
dsconfigs = [x['output_mod_config_id'] for x in self.dsconfigids.execute(conn, dataset=f["dataset"])]
fileconfigs = [] # this will hold file configs that we will list in the insert file logic below
if block_name != f["block_name"]:
block_info = self.blocklist.execute(conn, block_name=f["block_name"])
for b in block_info:
if not b :
dbsExceptionHandler( "dbsException-missing-data", "Required block not found", None,
"Cannot found required block %s in DB" %f["block_name"])
else:
if b["open_for_writing"] != 1 :
dbsExceptionHandler("dbsException-conflict-data", "Block closed", None,
"Block %s is not open for writting" %f["block_name"])
if "block_id" in b:
block_id = b["block_id"]
else:
dbsExceptionHandler("dbsException-missing-data", "Block not found", None,
"Cannot found required block %s in DB" %f["block_name"])
else: dbsExceptionHandler('dbsException-missing-data', "Required block name Not Found in input.",
None, "Required block Not Found in input.")
#make the default file_type=EDM
file_type_id = self.ftypeid.execute( conn, f.get("file_type", "EDM"))
if file_type_id == -1:
dbsExceptionHandler('dbsException-missing-data', "File type not found.", None,
"Required file type %s not found in DBS"%f.get("file_type", "EDM") )
iFile = 0
fileIncrement = 40
fID = self.sm.increment(conn, "SEQ_FL", incCount=fileIncrement)
                #looping over the files, every time create a new object 'filein' as you never know
                #what's in the original object and we do not want to know
#for f in businput:
file_clob = {}
fparents2insert = []
flumis2insert = []
fconfigs2insert = []
# create the file object from the original
# taking care of defaults, and required
filein = {
"logical_file_name" : f["logical_file_name"],
"is_file_valid" : f.get("is_file_valid", 1),
"check_sum" : f.get("check_sum", None),
"event_count" : f.get("event_count", -1),
"file_size" : f.get("file_size", -1),
"adler32" : f.get("adler32", None),
"md5" : f.get("md5", None),
"auto_cross_section" : f.get("auto_cross_section", -1),
#"creation_date" : f.get("creation_date", None), See Ticket #965 YG.
#"create_by": f.get("create_by", None),
"last_modification_date": f.get("last_modification_date", None),
#"last_modified_by" : f.get("last_modified_by", None)
"last_modified_by" : dbsUtils().getCreateBy()
}
if filein["md5"] is None and filein["check_sum"] is None and filein["adler32"] is None:
dbsExceptionHandler('dbsException-invalid-input', "Missing check_sum or adler32, or md5")
if iFile == fileIncrement:
fID = self.sm.increment(conn, "SEQ_FL", incCount=fileIncrement)
iFile = 0
filein["file_id"] = fID + iFile
iFile += 1
filein["dataset_id"] = dataset_id
filein["block_id"] = block_id
filein["file_type_id"] = file_type_id
#FIXME: Add this later if f.get("branch_hash", "") not in ("", None):
#filein["branch_hash"]=self.fbranchid.execute( f.get("branch_hash"), conn, transaction=tran)
# insert file -- as decided, one file at a time
# filein will be what goes into database
try:
if not qInserts:
self.filein.execute(conn, filein, transaction=tran)
fileInserted = True
else:
file_clob['file'] = filein
except SQLAlchemyIntegrityError as ex:
if str(ex).find("unique constraint") != -1 or str(ex).lower().find("duplicate") != -1:
# Lets move on to NEXT file, we do not want to continue processing this file
#Nothing about this file is updated when it is already in DB. No file parentage, block parentage, dataset parentage and so on.
#Is this right? YG Oct. 24
self.logger.warning("DBSFile/insertFile. File already exists in DBS, not changing it: %s"
%filein["logical_file_name"] )
continue
else:
raise
#process file parents, file lumi, file outputmodconfigs, ...
#file lumi sections
if "file_lumi_list" in f:
fllist = f["file_lumi_list"]
if len(fllist) > 0:
for fl in fllist:
fldao = {
"run_num" : fl["run_num"],
"lumi_section_num" : fl["lumi_section_num"]
}
if "event_count" in fl:
fldao["event_count"] = fl["event_count"]
fldao["file_id"] = filein["file_id"]
flumis2insert.append(fldao)
if "file_parent_list" in f:
#file parents
fplist = f["file_parent_list"]
for fp in fplist:
fpdao = {}
fpdao["this_file_id"] = filein["file_id"]
fpdao["parent_logical_file_name"] = fp["file_parent_lfn"]
fparents2insert.append(fpdao)
if "file_output_config_list" in f:
#file output config modules
foutconfigs = f["file_output_config_list"]
if(len(foutconfigs) > 0):
for fc in foutconfigs:
fcdao = {}
fcdao["file_id"] = filein["file_id"]
fcdao["output_mod_config_id"] = self.outconfigid.execute(conn, fc["app_name"],
fc["release_version"], fc["pset_hash"], fc["output_module_label"],
fc["global_tag"])
if fcdao["output_mod_config_id"] == -1 :
dbsExceptionHandler('dbsException-missing-data', 'Config Not found.', None, "DBSFile/insertFile.\
Output module config (%s, %s, %s, %s) \
not found" % (fc["app_name"],
fc["release_version"], fc["pset_hash"], fc["output_module_label"]) )
fileconfigs.append(fcdao["output_mod_config_id"])
fconfigs2insert.append(fcdao)
#FIXME: file associations?-- in a later release
#
# insert file - lumi
if flumis2insert:
file_clob['file_lumi_list'] = flumis2insert
if not qInserts:
self.flumiin.execute(conn, flumis2insert, transaction=tran)
# insert file parent mapping
if fparents2insert:
file_clob['file_parent_list'] = fparents2insert
if not qInserts:
self.fparentin.execute(conn, fparents2insert, transaction=tran)
# First check to see if these output configs are mapped to THIS dataset as well, if not raise an exception
if not set(fileconfigs).issubset(set(dsconfigs)) :
                    dbsExceptionHandler('dbsException-conflict-data', 'Mismatched configs.', None, "DBSFile/insertFile. Output configs mismatch: \
                        output configs known to dataset %s are different from those being mapped to file %s" \
                        %(f["dataset"], filein["logical_file_name"]) )
# insert output module config mapping
if fconfigs2insert:
file_clob['file_output_config_list'] = fconfigs2insert
if not qInserts:
self.fconfigin.execute(conn, fconfigs2insert, transaction=tran)
if qInserts:
try:
self.logger.warning(file_clob)
self.filebufin.execute(conn, filein['logical_file_name'], block_id, file_clob, transaction=tran)
except SQLAlchemyIntegrityError as ex:
if str(ex).find("unique constraint") != -1 or str(ex).lower().find("duplicate") != -1:
pass
else:
raise
#insert block parentages and dataset parentages based on file parentages
                # Do this one by one, as there are sure to be duplicates in the destination table
if fileInserted and fparents2insert:
for fp in fparents2insert:
try:
bkParentage2insert={'this_block_id' : filein["block_id"], 'parent_logical_file_name': fp['parent_logical_file_name']}
self.blkparentin.execute(conn, bkParentage2insert, transaction=tran)
dsParentage2insert={'this_dataset_id': filein["dataset_id"], 'parent_logical_file_name' : fp['parent_logical_file_name']}
self.dsparentin.execute(conn, dsParentage2insert, transaction=tran)
except SQLAlchemyIntegrityError as ex:
#ORA-00001
if (str(ex).find("ORA-00001") != -1 and str(ex).find("PK_DP") != -1) or str(ex).find("PK_BP") != -1 or str(ex).lower().find("duplicate") != -1:
pass
elif str(ex).find("ORA-01400") != -1:
raise
else:
raise
# Update block parameters, file_count, block_size
if not qInserts:
blkParams = self.blkstats.execute(conn, block_id,
transaction=tran)
blkParams['block_size'] = long(blkParams['block_size'])
self.blkstatsin.execute(conn, blkParams, transaction=tran)
# All good ?
tran.commit()
tran = None
except Exception as ex:
if tran:
tran.rollback()
tran = None
raise
finally:
if tran:
tran.rollback()
if conn:
conn.close()
|
This method supports bulk insert of files
performing other operations such as setting Block and Dataset parentages,
setting mapping between OutputConfigModules and File(s) etc.
:param qInserts: True means that inserts will be queued instead of done immediately. INSERT QUEUE Manager will perform the inserts within a few minutes.
:type qInserts: bool
:param logical_file_name (required) : string
:param is_file_valid: (optional, default = 1): 1/0
:param block, required: /a/b/c#d
:param dataset, required: /a/b/c
:param file_type (optional, default = EDM): one of the predefined types,
:param check_sum (optional): string
:param event_count (optional, default = -1): int
:param file_size (optional, default = -1.): float
:param adler32 (optional): string
:param md5 (optional): string
:param auto_cross_section (optional, default = -1.): float
:param file_lumi_list (optional, default = []): [{'run_num': 123, 'lumi_section_num': 12},{}....]
:param file_parent_list(optional, default = []) :[{'file_parent_lfn': 'mylfn'},{}....]
:param file_assoc_list(optional, default = []) :[{'file_parent_lfn': 'mylfn'},{}....]
:param file_output_config_list(optional, default = []) :
[{'app_name':..., 'release_version':..., 'pset_hash':...., output_module_label':...},{}.....]
|
entailment
|
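A minimal, illustrative file record for the bulk insert above (field names come from the docstring, values are made up). The block_name must start with the dataset name, at least one of check_sum/adler32/md5 must be present, and businput may hold at most 10 such dicts.
one_file = {
    "logical_file_name": "/store/mc/Era2018/sample/file_1.root",
    "block_name": "/Prim/Era2018-v1/AOD#1234-abcd",
    "dataset": "/Prim/Era2018-v1/AOD",
    "check_sum": "1234567",
    "file_size": 1048576,
    "event_count": 500,
    "file_lumi_list": [{"run_num": 123, "lumi_section_num": 12}],
    "file_parent_list": [{"file_parent_lfn": "/store/data/run123/parent_1.root"}],
    "file_output_config_list": [{"app_name": "cmsRun",
                                 "release_version": "CMSSW_X_Y_Z",
                                 "pset_hash": "deadbeefdeadbeef",
                                 "output_module_label": "out",
                                 "global_tag": "GT::All"}],
}
businput = [one_file]   # at most 10 entries per insertFile call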
def insertFileParents(self, businput):
"""
This is a special function for WMAgent only.
input block_name: is a child block name.
        input child_parent_id_list: is a list of child/parent file id pairs: [[cid1, pid1],[cid2,pid2],[cid3,pid3],...]
        The requirement for this API is
1. All the child files belong to the block.
2. All the child-parent pairs are not already in DBS.
3. The dataset parentage is already in DBS.
We will fill the block parentage here using the file parentage info.
Y. Guo
July 18, 2018
"""
if "block_name" not in businput.keys() or "child_parent_id_list" not in businput.keys() or not businput["child_parent_id_list"] or not businput["block_name"]:
dbsExceptionHandler("dbsException-invalid-input2", "DBSFile/insertFileParents: require child block_name and list of child/parent file id pairs" , self.logger.exception, "DBSFile/insertFileParents: require child block_name and list of child/parent file id pairs")
tran = None
conn = None
try:
#We should get clean insert for both file/block parentage.
            #Block parent duplication is handled at the DAO level. File parents should not have duplication.
conn = self.dbi.connection()
tran = conn.begin()
self.logger.info("Insert File parentage mapping")
self.fparentin2.execute(conn, businput, tran)
self.logger.info("Insert block parentage mapping")
self.blkparentin3.execute(conn, businput, tran)
if tran:tran.commit()
if conn:conn.close()
except SQLAlchemyIntegrityError as ex:
if tran:tran.rollback()
if conn:conn.close()
if str(ex).find("ORA-01400") > -1:
dbsExceptionHandler('dbsException-missing-data',
                                    'Missing data when inserting file/block parent. ', self.logger.exception,
                                    'Missing data when inserting file/block parent. '+ str(ex))
else:
dbsExceptionHandler('dbsException-invalid-input2',
                                    'Invalid data when inserting file/block parent. ', self.logger.exception,
                                    'Invalid data when inserting file/block parent. '+ str(ex))
finally:
if tran:tran.rollback()
if conn:conn.close()
|
This is a special function for WMAgent only.
input block_name: is a child block name.
input child_parent_id_list: is a list of child/parent file id pairs: [[cid1, pid1],[cid2,pid2],[cid3,pid3],...]
The requirement for this API is
1. All the child files belong to the block.
2. All the child-parent pairs are not already in DBS.
3. The dataset parentage is already in DBS.
We will fill the block parentage here using the file parentage info.
Y. Guo
July 18, 2018
|
entailment
|
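An illustrative payload shape for insertFileParents (the ids are made up): every child id must belong to a file in block_name and none of the pairs may already exist in DBS.
businput = {
    "block_name": "/Prim/Era2018-v1/AOD#child-block-1234",
    "child_parent_id_list": [[101, 11], [102, 12], [103, 13]],
}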
def increment(self, conn, seqName, transaction = False, incCount=1):
"""
        increments the sequence `seqName` by its default increment
        and returns its value
        incCount: is an UNUSED variable in the Oracle implementation
"""
#FIXME: Do we need to lock the tables here?
sql = "select %s%s.nextval as val from dual" % (self.owner, seqName)
result = self.dbi.processData(sql, conn=conn, transaction=transaction)
resultlist = self.formatDict(result)
return resultlist[0]['val']
|
increments the sequence `seqName` by its default increment
and returns its value
incCount: is an UNUSED variable in the Oracle implementation
|
entailment
|
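A rough standalone equivalent using a plain DB-API cursor (owner and sequence name are placeholders); the DAO above issues the same NEXTVAL select through self.dbi.processData.
def next_id(cursor, owner, seq_name):
    # e.g. owner = "MY_SCHEMA.", seq_name = "SEQ_FL"
    cursor.execute("select %s%s.nextval as val from dual" % (owner, seq_name))
    return cursor.fetchone()[0]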
def listReleaseVersions(self, release_version="", dataset='', logical_file_name=''):
"""
List release versions
"""
if dataset and ('%' in dataset or '*' in dataset):
dbsExceptionHandler('dbsException-invalid-input',
" DBSReleaseVersion/listReleaseVersions. No wildcards are" +
" allowed in dataset.\n.")
if logical_file_name and ('%' in logical_file_name or '*' in logical_file_name):
dbsExceptionHandler('dbsException-invalid-input',
" DBSReleaseVersion/listReleaseVersions. No wildcards are" +
" allowed in logical_file_name.\n.")
conn = self.dbi.connection()
try:
plist = self.releaseVersion.execute(conn, release_version.upper(), dataset, logical_file_name)
result = [{}]
if plist:
t = []
for i in plist:
for k, v in i.iteritems():
t.append(v)
result[0]['release_version'] = t
return result
finally:
if conn:
conn.close()
|
List release versions
|
entailment
|
def __search_ca_path(self):
"""
Get CA Path to check the validity of the server host certificate on the client side
"""
if "X509_CERT_DIR" in os.environ:
self._ca_path = os.environ['X509_CERT_DIR']
elif os.path.exists('/etc/grid-security/certificates'):
self._ca_path = '/etc/grid-security/certificates'
else:
raise ClientAuthException("Could not find a valid CA path")
|
Get CA Path to check the validity of the server host certificate on the client side
|
entailment
|
def __search_cert_key(self):
"""
Get the user credentials if they exist, otherwise throw an exception.
This code was modified from DBSAPI/dbsHttpService.py and WMCore/Services/Requests.py
"""
# Now we're trying to guess what the right cert/key combo is...
        # First preference to HOST Certificate; this is how it is set in Tier0
if 'X509_HOST_CERT' in os.environ:
self._ssl_cert = os.environ['X509_HOST_CERT']
self._ssl_key = os.environ['X509_HOST_KEY']
# Second preference to User Proxy, very common
elif 'X509_USER_PROXY' in os.environ and os.path.exists(os.environ['X509_USER_PROXY']):
self._ssl_cert = os.environ['X509_USER_PROXY']
self._ssl_key = self._ssl_cert
        # Third preference to User Cert/Proxy combination
elif 'X509_USER_CERT' in os.environ and 'X509_USER_KEY' in os.environ:
self._ssl_cert = os.environ['X509_USER_CERT']
self._ssl_key = os.environ['X509_USER_KEY']
# TODO: only in linux, unix case, add other os case
# look for proxy at default location /tmp/x509up_u$uid
elif os.path.exists('/tmp/x509up_u%s' % str(os.getuid())):
self._ssl_cert = '/tmp/x509up_u%s' % str(os.getuid())
self._ssl_key = self._ssl_cert
elif sys.stdin.isatty():
home_dir = os.environ['HOME']
user_cert = os.path.join(home_dir, '.globus/usercert.pem')
user_key = os.path.join(home_dir, '.globus/userkey.pem')
if os.path.exists(user_cert):
self._ssl_cert = user_cert
if os.path.exists(user_key):
self._ssl_key = user_key
#store password for convenience
self._ssl_key_pass = getpass("Password for %s: " % self._ssl_key)
else:
self._ssl_key = self._ssl_cert
else:
raise ClientAuthException("No valid X509 cert-key-pair found.")
else:
raise ClientAuthException("No valid X509 cert-key-pair found.")
|
Get the user credentials if they exist, otherwise throw an exception.
This code was modified from DBSAPI/dbsHttpService.py and WMCore/Services/Requests.py
|
entailment
|
def authInsert(user, role, group, site):
"""
Authorization function for general insert
"""
if not role: return True
for k, v in user['roles'].iteritems():
for g in v['group']:
if k in role.get(g, '').split(':'):
return True
return False
|
Authorization function for general insert
|
entailment
|
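An illustrative check of how authInsert matches roles; the shapes of user and role below are inferred from the loop above and are not an official schema.
user = {"roles": {"dataoperator": {"group": ["dataops"], "site": []}}}
role = {"dataops": "dataoperator:admin"}   # group -> colon-separated role names
# 'dataoperator' appears in role['dataops'].split(':'), so
# authInsert(user, role, group=None, site=None) would return True.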
def submit(self):
"""
Interface for submitting a migration request.
Required input keys:
MIGRATION_URL: The source DBS url for migration.
MIGRATION_INPUT: The block or dataset names to be migrated.
"""
body = request.body.read()
indata = cjson.decode(body)
try:
indata = validateJSONInputNoCopy("migration_rqst", indata)
indata.update({"creation_date": dbsUtils().getTime(),
"last_modification_date" : dbsUtils().getTime(),
"create_by" : dbsUtils().getCreateBy() ,
"last_modified_by" : dbsUtils().getCreateBy(),
"migration_status": 0})
return self.dbsMigrate.insertMigrationRequest(indata)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSMigrateModle/submit. %s\n Exception trace: \n %s." \
% (ex, traceback.format_exc() )
if hasattr(ex, 'status') and ex.status == 400:
dbsExceptionHandler('dbsException-invalid-input2', str(ex), self.logger.exception, sError)
else:
dbsExceptionHandler('dbsException-server-error', str(ex), self.logger.exception, sError)
|
Interface for submitting a migration request.
Required input keys:
MIGRATION_URL: The source DBS url for migration.
MIGRATION_INPUT: The block or dataset names to be migrated.
|
entailment
|
def status(self, migration_rqst_id="", block_name="", dataset="", user=""):
"""
Interface to query status of a migration request
        Input parameters are considered in this preference order:
        migration_rqst_id, block, dataset, user
        (if multiple parameters are provided, only the highest-precedence one is used)
"""
try:
return self.dbsMigrate.listMigrationRequests(migration_rqst_id,
block_name, dataset, user)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.serverError)
except Exception as ex:
sError = "DBSMigrateModle/status. %s\n Exception trace: \n %s." \
% (ex, traceback.format_exc() )
if hasattr(ex, 'status') and ex.status == 400:
dbsExceptionHandler('dbsException-invalid-input2', str(ex), self.logger.exception, sError)
else:
dbsExceptionHandler('dbsException-server-error', str(ex), self.logger.exception, sError)
|
Interface to query status of a migration request
Input parameters are considered in this preference order:
migration_rqst_id, block, dataset, user
(if multiple parameters are provided, only the highest-precedence one is used)
|
entailment
|
def remove(self):
"""
Interface to remove a migration request from the queue.
Only Permanent FAILED/9 and PENDING/0 requests can be removed
        (running and successful requests cannot be removed)
"""
body = request.body.read()
indata = cjson.decode(body)
try:
indata = validateJSONInputNoCopy("migration_rqst", indata)
return self.dbsMigrate.removeMigrationRequest(indata)
except dbsException as he:
dbsExceptionHandler(he.eCode, he.message, self.logger.exception, he.message)
except Exception as e:
if e.code == 400:
dbsExceptionHandler('dbsException-invalid-input2', str(e), self.logger.exception, str(e))
else:
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, str(e))
|
Interface to remove a migration request from the queue.
Only Permanent FAILED/9 and PENDING/0 requests can be removed
(running and successful requests cannot be removed)
|
entailment
|
def execute(self, conn, logical_file_name='', block_id=0, block_name='', transaction=False):
"""
        yields nothing if no condition (lfn, block_id or block_name) is provided.
"""
sql = ''
binds = {}
if logical_file_name:
if isinstance(logical_file_name, basestring):
wheresql = "WHERE F.LOGICAL_FILE_NAME = :logical_file_name"
binds = {"logical_file_name": logical_file_name}
sql = "{sql} {wheresql}".format(sql=self.sql, wheresql=wheresql)
elif isinstance(logical_file_name, list):
wheresql = "WHERE F.LOGICAL_FILE_NAME in (SELECT TOKEN FROM TOKEN_GENERATOR)"
lfn_generator, binds = create_token_generator(logical_file_name)
sql = "{lfn_generator} {sql} {wheresql}".format(lfn_generator=lfn_generator, sql=self.sql,
wheresql=wheresql)
elif block_id != 0:
wheresql = "WHERE F.BLOCK_ID = :block_id"
binds ={'block_id': block_id}
sql = "{sql} {wheresql}".format(sql=self.sql, wheresql=wheresql)
elif block_name:
joins = "JOIN {owner}BLOCKS B on B.BLOCK_ID = F.BLOCK_ID".format(owner=self.owner)
wheresql = "WHERE B.BLOCK_NAME= :block_name"
binds ={'block_name': block_name}
sql = "{sql} {joins} {wheresql}".format(sql=self.sql, joins=joins, wheresql=wheresql)
else:
return
cursors = self.dbi.processData(sql, binds, conn, transaction=transaction, returnCursor=True)
for i in cursors:
d = self.formatCursor(i, size=100)
if isinstance(d, list) or isinstance(d, GeneratorType):
for elem in d:
yield elem
elif d:
yield d
|
yields nothing if no condition (lfn, block_id or block_name) is provided.
|
entailment
|
def listDatasetParents(self, dataset=""):
"""
takes required dataset parameter
returns only parent dataset name
"""
if( dataset == "" ):
dbsExceptionHandler("dbsException-invalid-input", "DBSDataset/listDatasetParents. Child Dataset name is required.")
conn = self.dbi.connection()
try:
result = self.datasetparentlist.execute(conn, dataset)
return result
finally:
if conn:
conn.close()
|
takes required dataset parameter
returns only parent dataset name
|
entailment
|
def listDatasetChildren(self, dataset):
"""
takes required dataset parameter
returns only children dataset name
"""
if( dataset == "" ):
dbsExceptionHandler("dbsException-invalid-input", "DBSDataset/listDatasetChildren. Parent Dataset name is required.")
conn = self.dbi.connection()
try:
result = self.datasetchildlist.execute(conn, dataset)
return result
finally:
if conn:
conn.close()
|
takes required dataset parameter
returns only children dataset name
|
entailment
|
def updateStatus(self, dataset, is_dataset_valid):
"""
Used to toggle the status of a dataset is_dataset_valid=0/1 (invalid/valid)
"""
if( dataset == "" ):
dbsExceptionHandler("dbsException-invalid-input", "DBSDataset/updateStatus. dataset is required.")
conn = self.dbi.connection()
trans = conn.begin()
try:
self.updatestatus.execute(conn, dataset, is_dataset_valid, trans)
trans.commit()
trans = None
except Exception as ex:
if trans:
trans.rollback()
raise ex
finally:
if trans:
trans.rollback()
if conn:
conn.close()
|
Used to toggle the status of a dataset is_dataset_valid=0/1 (invalid/valid)
|
entailment
|
def updateType(self, dataset, dataset_access_type):
"""
Used to change the status of a dataset type (production/etc.)
"""
if( dataset == "" ):
dbsExceptionHandler("dbsException-invalid-input", "DBSDataset/updateType. dataset is required.")
conn = self.dbi.connection()
trans = conn.begin()
try :
self.updatetype.execute(conn, dataset, dataset_access_type.upper(), trans)
trans.commit()
trans = None
except SQLAlchemyDatabaseError as ex:
if str(ex).find("ORA-01407") != -1:
dbsExceptionHandler("dbsException-invalid-input2", "Invalid Input", None, "DBSDataset/updateType. A Valid dataset_access_type is required.")
finally:
if trans:
trans.rollback()
if conn:
conn.close()
|
Used to change the status of a dataset type (production/etc.)
|
entailment
|
def listDatasets(self, dataset="", parent_dataset="", is_dataset_valid=1,
release_version="", pset_hash="", app_name="",
output_module_label="", global_tag="", processing_version=0,
acquisition_era="", run_num=-1, physics_group_name="",
logical_file_name="", primary_ds_name="",
primary_ds_type="", processed_ds_name="", data_tier_name="",
dataset_access_type="VALID", prep_id="", create_by='', last_modified_by='', min_cdate=0, max_cdate=0,
min_ldate=0, max_ldate=0, cdate=0, ldate=0, detail=False, dataset_id=-1):
"""
lists all datasets if dataset parameter is not given.
The parameter can include % character.
all other parameters are not wild card ones.
"""
if(logical_file_name and logical_file_name.find("%")!=-1):
dbsExceptionHandler('dbsException-invalid-input', 'DBSDataset/listDatasets API requires \
            fully qualified logical_file_name. No wildcard is allowed in logical_file_name.')
if(dataset and dataset.find("/%/%/%")!=-1):
dataset=''
with self.dbi.connection() as conn:
dao = (self.datasetbrieflist, self.datasetlist)[detail]
if dataset_access_type: dataset_access_type = dataset_access_type.upper()
if data_tier_name: data_tier_name = data_tier_name.upper()
#if processing_version: processing_version = processing_version.upper()
#if acquisition_era: acquisition_era = acquisition_era.upper()
for item in dao.execute(conn,
dataset, is_dataset_valid,
parent_dataset,
release_version,
pset_hash,
app_name,
output_module_label,
global_tag,
processing_version,
acquisition_era,
run_num, physics_group_name,
logical_file_name,
primary_ds_name, primary_ds_type,
processed_ds_name, data_tier_name,
dataset_access_type, prep_id, create_by, last_modified_by,
min_cdate, max_cdate, min_ldate, max_ldate,
cdate, ldate, dataset_id):
yield item
|
lists all datasets if dataset parameter is not given.
The parameter can include % character.
all other parameters are not wild card ones.
|
entailment
|
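Illustrative query parameters for listDatasets (names are made up). Only the dataset parameter may contain '%' wildcards, '/%/%/%' is treated as no dataset constraint at all, and dataset_access_type / data_tier_name are upper-cased internally.
query = {"dataset": "/Prim/%/AOD",
         "dataset_access_type": "valid",   # becomes 'VALID'
         "detail": True}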
def insertDataset(self, businput):
"""
input dictionary must have the following keys:
dataset, primary_ds_name(name), processed_ds(name), data_tier(name),
acquisition_era(name), processing_version
It may have following keys:
physics_group(name), xtcrosssection, creation_date, create_by,
last_modification_date, last_modified_by
"""
if not ("primary_ds_name" in businput and "dataset" in businput
and "dataset_access_type" in businput and "processed_ds_name" in businput ):
dbsExceptionHandler('dbsException-invalid-input', "business/DBSDataset/insertDataset must have dataset,\
dataset_access_type, primary_ds_name, processed_ds_name as input")
if "data_tier_name" not in businput:
dbsExceptionHandler('dbsException-invalid-input', "insertDataset must have data_tier_name as input.")
conn = self.dbi.connection()
tran = conn.begin()
try:
dsdaoinput = {}
dsdaoinput["primary_ds_name"] = businput["primary_ds_name"]
dsdaoinput["data_tier_name"] = businput["data_tier_name"].upper()
dsdaoinput["dataset_access_type"] = businput["dataset_access_type"].upper()
            #Not required to pre-exist in the db; will be inserted with the dataset if not in yet.
            #processed_ds_name=acquisition_era_name[-filter_name][-processing_str]-vprocessing_version  Changed as of 4/30/2012 YG.
            #Although acquisition era and processing version are not required for a dataset in the schema (the schema is built this way because
            #we need to accommodate the DBS2 data), we impose the requirement on the API. So both acquisition and processing eras are required.
#YG 12/07/2011 TK-362
if "acquisition_era_name" in businput and "processing_version" in businput:
erals=businput["processed_ds_name"].rsplit('-')
if erals[0]==businput["acquisition_era_name"] and erals[len(erals)-1]=="%s%s"%("v", businput["processing_version"]):
dsdaoinput["processed_ds_name"] = businput["processed_ds_name"]
else:
dbsExceptionHandler('dbsException-invalid-input', "insertDataset:\
processed_ds_name=acquisition_era_name[-filter_name][-processing_str]-vprocessing_version must be satisified.")
else:
dbsExceptionHandler("dbsException-missing-data", "insertDataset: Required acquisition_era_name or processing_version is not found in the input")
if "physics_group_name" in businput:
dsdaoinput["physics_group_id"] = self.phygrpid.execute(conn, businput["physics_group_name"])
if dsdaoinput["physics_group_id"] == -1:
dbsExceptionHandler("dbsException-missing-data", "insertDataset. physics_group_name not found in DB")
else:
dsdaoinput["physics_group_id"] = None
dsdaoinput["dataset_id"] = self.sm.increment(conn, "SEQ_DS")
# we are better off separating out what we need for the dataset DAO
dsdaoinput.update({
"dataset" : "/%s/%s/%s" %
(businput["primary_ds_name"],
businput["processed_ds_name"],
businput["data_tier_name"].upper()),
"prep_id" : businput.get("prep_id", None),
"xtcrosssection" : businput.get("xtcrosssection", None),
"creation_date" : businput.get("creation_date", dbsUtils().getTime() ),
"create_by" : businput.get("create_by", dbsUtils().getCreateBy()) ,
"last_modification_date" : businput.get("last_modification_date", dbsUtils().getTime()),
#"last_modified_by" : businput.get("last_modified_by", dbsUtils().getModifiedBy())
"last_modified_by" : dbsUtils().getModifiedBy()
})
"""
repeated again, why? comment out by YG 3/14/2012
#physics group
if "physics_group_name" in businput:
dsdaoinput["physics_group_id"] = self.phygrpid.execute(conn, businput["physics_group_name"])
if dsdaoinput["physics_group_id"] == -1:
dbsExceptionHandler("dbsException-missing-data", "insertDataset. Physics Group : %s Not found"
% businput["physics_group_name"])
else: dsdaoinput["physics_group_id"] = None
"""
# See if Processing Era exists
if "processing_version" in businput and businput["processing_version"] != 0:
dsdaoinput["processing_era_id"] = self.proceraid.execute(conn, businput["processing_version"])
if dsdaoinput["processing_era_id"] == -1 :
dbsExceptionHandler("dbsException-missing-data", "DBSDataset/insertDataset: processing_version not found in DB")
else:
dbsExceptionHandler("dbsException-invalid-input", "DBSDataset/insertDataset: processing_version is required")
# See if Acquisition Era exists
if "acquisition_era_name" in businput:
dsdaoinput["acquisition_era_id"] = self.acqeraid.execute(conn, businput["acquisition_era_name"])
if dsdaoinput["acquisition_era_id"] == -1:
dbsExceptionHandler("dbsException-missing-data", "DBSDataset/insertDataset: acquisition_era_name not found in DB")
else:
dbsExceptionHandler("dbsException-invalid-input", "DBSDataset/insertDataset: acquisition_era_name is required")
try:
# insert the dataset
self.datasetin.execute(conn, dsdaoinput, tran)
except SQLAlchemyIntegrityError as ex:
if (str(ex).lower().find("unique constraint") != -1 or
str(ex).lower().find("duplicate") != -1):
# dataset already exists, lets fetch the ID
self.logger.warning(
"Unique constraint violation being ignored...")
self.logger.warning("%s" % ex)
ds = "/%s/%s/%s" % (businput["primary_ds_name"], businput["processed_ds_name"], businput["data_tier_name"].upper())
dsdaoinput["dataset_id"] = self.datasetid.execute(conn, ds )
if dsdaoinput["dataset_id"] == -1 :
dbsExceptionHandler("dbsException-missing-data", "DBSDataset/insertDataset. Strange error, the dataset %s does not exist ?"
% ds )
if (str(ex).find("ORA-01400") ) != -1 :
dbsExceptionHandler("dbsException-missing-data", "insertDataset must have: dataset,\
primary_ds_name, processed_ds_name, data_tier_name ")
except Exception as e:
raise
#FIXME : What about the READ-only status of the dataset
            #There is no READ-only status for a dataset.
# Create dataset_output_mod_mod_configs mapping
if "output_configs" in businput:
for anOutConfig in businput["output_configs"]:
dsoutconfdaoin = {}
dsoutconfdaoin["dataset_id"] = dsdaoinput["dataset_id"]
dsoutconfdaoin["output_mod_config_id"] = self.outconfigid.execute(conn, anOutConfig["app_name"],
anOutConfig["release_version"],
anOutConfig["pset_hash"],
anOutConfig["output_module_label"],
anOutConfig["global_tag"])
if dsoutconfdaoin["output_mod_config_id"] == -1 :
dbsExceptionHandler("dbsException-missing-data", "DBSDataset/insertDataset: Output config (%s, %s, %s, %s, %s) not found"
% (anOutConfig["app_name"],
anOutConfig["release_version"],
anOutConfig["pset_hash"],
anOutConfig["output_module_label"],
anOutConfig["global_tag"]))
try:
self.datasetoutmodconfigin.execute(conn, dsoutconfdaoin, tran)
except Exception as ex:
if str(ex).lower().find("unique constraint") != -1 or str(ex).lower().find("duplicate") != -1:
pass
else:
raise
# Dataset parentage will NOT be added by this API it will be set by insertFiles()--deduced by insertFiles
# Dataset runs will NOT be added by this API they will be set by insertFiles()--deduced by insertFiles OR insertRun API call
tran.commit()
tran = None
except Exception:
if tran:
tran.rollback()
tran = None
raise
finally:
if tran:
tran.rollback()
if conn:
conn.close()
|
input dictionary must have the following keys:
dataset, primary_ds_name(name), processed_ds(name), data_tier(name),
acquisition_era(name), processing_version
It may have following keys:
physics_group(name), xtcrosssection, creation_date, create_by,
last_modification_date, last_modified_by
|
entailment
|
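A standalone sketch of the processed_ds_name naming rule enforced by insertDataset above, i.e. processed_ds_name = acquisition_era_name[-filter_name][-processing_str]-v<processing_version>; the helper name is made up.
def name_is_consistent(processed_ds_name, acquisition_era_name, processing_version):
    parts = processed_ds_name.rsplit('-')
    return (parts[0] == acquisition_era_name
            and parts[-1] == "v%s" % processing_version)
# name_is_consistent("Era2018-filt-v1", "Era2018", 1)  -> True
# name_is_consistent("Era2018-filt-v2", "Era2018", 1)  -> False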
def execute(self, conn, block_name, origin_site_name, transaction=False):
"""
Update origin_site_name for a given block_name
"""
if not conn:
dbsExceptionHandler("dbsException-failed-connect2host", "Oracle/Block/UpdateStatus. \
Expects db connection from upper layer.", self.logger.exception)
binds = {"block_name": block_name, "origin_site_name": origin_site_name, "mtime": dbsUtils().getTime(),
"myuser": dbsUtils().getCreateBy()}
self.dbi.processData(self.sql, binds, conn, transaction)
|
Update origin_site_name for a given block_name
|
entailment
|
def increment(self, conn, seqName, transaction = False, incCount=1):
"""
        increments the sequence `seqName` by `incCount` (default one)
        and returns its value
"""
try:
seqTable = "%sS" %seqName
tlock = "lock tables %s write" %seqTable
self.dbi.processData(tlock, [], conn, transaction)
sql = "select ID from %s" % seqTable
result = self.dbi.processData(sql, [], conn, transaction)
resultlist = self.formatDict(result)
newSeq = resultlist[0]['id']+incCount
sql = "UPDATE %s SET ID=:seq_count" % seqTable
seqparms={"seq_count" : newSeq}
self.dbi.processData(sql, seqparms, conn, transaction)
tunlock = "unlock tables"
self.dbi.processData(tunlock, [], conn, transaction)
return newSeq
except:
#FIXME
tunlock = "unlock tables"
self.dbi.processData(tunlock, [], conn, transaction)
raise
|
increments the sequence `seqName` by `incCount` (default one)
and returns its value
|
entailment
|
def listRuns(self, run_num=-1, logical_file_name="",
block_name="", dataset=""):
"""
        List runs known to DBS.
"""
if( '%' in logical_file_name or '%' in block_name or '%' in dataset ):
dbsExceptionHandler('dbsException-invalid-input',
" DBSDatasetRun/listRuns. No wildcards are allowed in logical_file_name, block_name or dataset.\n.")
conn = self.dbi.connection()
tran = False
try:
ret = self.runlist.execute(conn, run_num, logical_file_name, block_name, dataset, tran)
result = []
rnum = []
for i in ret:
rnum.append(i['run_num'])
result.append({'run_num' : rnum})
return result
finally:
if conn:
conn.close()
|
List runs known to DBS.
|
entailment
|
def insertPrimaryDataset(self):
"""
API to insert A primary dataset in DBS
:param primaryDSObj: primary dataset object
:type primaryDSObj: dict
        :key primary_ds_type: TYPE (one of the valid types in DBS: MC, DATA) (Required)
:key primary_ds_name: Name of the primary dataset (Required)
"""
try :
body = request.body.read()
indata = cjson.decode(body)
indata = validateJSONInputNoCopy("primds", indata)
indata.update({"creation_date": dbsUtils().getTime(), "create_by": dbsUtils().getCreateBy() })
self.dbsPrimaryDataset.insertPrimaryDataset(indata)
except cjson.DecodeError as dc:
dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert PrimaryDataset input", self.logger.exception, str(dc))
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
except HTTPError as he:
raise he
except Exception as ex:
sError = "DBSWriterModel/insertPrimaryDataset. %s\n Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to insert A primary dataset in DBS
:param primaryDSObj: primary dataset object
:type primaryDSObj: dict
:key primary_ds_type: TYPE (one of the valid types in DBS: MC, DATA) (Required)
:key primary_ds_name: Name of the primary dataset (Required)
|
entailment
|
def updateAcqEraEndDate(self, acquisition_era_name ="", end_date=0):
"""
API to update the end_date of an acquisition era
:param acquisition_era_name: acquisition_era_name to update (Required)
:type acquisition_era_name: str
:param end_date: end_date not zero (Required)
:type end_date: int
"""
try:
self.dbsAcqEra.UpdateAcqEraEndDate( acquisition_era_name, end_date)
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
except HTTPError as he:
raise he
except Exception as ex:
sError = "DBSWriterModel/update.AcqEraEndDate %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to update the end_date of an acquisition era
:param acquisition_era_name: acquisition_era_name to update (Required)
:type acquisition_era_name: str
:param end_date: end_date not zero (Required)
:type end_date: int
|
entailment
|
def insertBulkBlock(self):
"""
API to insert a bulk block
:param blockDump: Output of the block dump command
:type blockDump: dict
"""
try:
body = request.body.read()
indata = cjson.decode(body)
if (indata.get("file_parent_list", []) and indata.get("dataset_parent_list", [])):
dbsExceptionHandler("dbsException-invalid-input2", "insertBulkBlock: dataset and file parentages cannot be in the input at the same time",
self.logger.exception, "insertBulkBlock: datset and file parentages cannot be in the input at the same time.")
indata = validateJSONInputNoCopy("blockBulk", indata)
self.dbsBlockInsert.putBlock(indata)
except cjson.DecodeError as dc:
dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert BulkBlock input", self.logger.exception, str(dc))
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
except HTTPError as he:
raise he
except Exception as ex:
#illegal variable name/number
if str(ex).find("ORA-01036") != -1:
dbsExceptionHandler("dbsException-invalid-input2", "illegal variable name/number from input", self.logger.exception, str(ex))
else:
sError = "DBSWriterModel/insertBulkBlock. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to insert a bulk block
:param blockDump: Output of the block dump command
:type blockDump: dict
|
entailment
|
def insertBlock(self):
"""
API to insert a block into DBS
:param blockObj: Block object
:type blockObj: dict
:key open_for_writing: Open For Writing (1/0) (Optional, default 1)
:key block_size: Block Size (Optional, default 0)
:key file_count: File Count (Optional, default 0)
:key block_name: Block Name (Required)
:key origin_site_name: Origin Site Name (Required)
"""
try:
body = request.body.read()
indata = cjson.decode(body)
indata = validateJSONInputNoCopy("block", indata)
self.dbsBlock.insertBlock(indata)
except cjson.DecodeError as dc:
dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert Block input", self.logger.exception, str(dc))
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
except Exception as ex:
sError = "DBSWriterModel/insertBlock. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to insert a block into DBS
:param blockObj: Block object
:type blockObj: dict
:key open_for_writing: Open For Writing (1/0) (Optional, default 1)
:key block_size: Block Size (Optional, default 0)
:key file_count: File Count (Optional, default 0)
:key block_name: Block Name (Required)
:key origin_site_name: Origin Site Name (Required)
|
entailment
|
def insertFile(self, qInserts=False):
"""
        API to insert a list of files into DBS. Up to 10 files can be inserted in one request.
        :param qInserts: True means that inserts will be queued instead of done immediately. INSERT QUEUE Manager will perform the inserts within a few minutes.
:type qInserts: bool
:param filesList: List of dictionaries containing following information
:type filesList: list of dicts
:key logical_file_name: File to be inserted (str) (Required)
:key is_file_valid: (optional, default = 1): (bool)
:key block: required: /a/b/c#d (str)
:key dataset: required: /a/b/c (str)
:key file_type: (optional, default = EDM) one of the predefined types, (str)
:key check_sum: (optional, default = '-1') (str)
:key event_count: (optional, default = -1) (int)
:key file_size: (optional, default = -1.) (float)
:key adler32: (optional, default = '') (str)
:key md5: (optional, default = '') (str)
:key auto_cross_section: (optional, default = -1.) (float)
:key file_lumi_list: (optional, default = []) [{'run_num': 123, 'lumi_section_num': 12},{}....]
:key file_parent_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....]
:key file_assoc_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....]
:key file_output_config_list: (optional, default = []) [{'app_name':..., 'release_version':..., 'pset_hash':...., output_module_label':...},{}.....]
"""
if qInserts in (False, 'False'): qInserts=False
try:
body = request.body.read()
indata = cjson.decode(body)["files"]
if not isinstance(indata, (list, dict)):
dbsExceptionHandler("dbsException-invalid-input", "Invalid Input DataType", self.logger.exception, \
"insertFile expects input as list or dirc")
businput = []
if isinstance(indata, dict):
indata = [indata]
indata = validateJSONInputNoCopy("files", indata)
for f in indata:
f.update({
#"dataset":f["dataset"],
"creation_date": f.get("creation_date", dbsUtils().getTime()),
"create_by" : dbsUtils().getCreateBy(),
"last_modification_date": f.get("last_modification_date", dbsUtils().getTime()),
"last_modified_by": f.get("last_modified_by", dbsUtils().getCreateBy()),
"file_lumi_list":f.get("file_lumi_list", []),
"file_parent_list":f.get("file_parent_list", []),
"file_assoc_list":f.get("assoc_list", []),
"file_output_config_list":f.get("file_output_config_list", [])})
businput.append(f)
self.dbsFile.insertFile(businput, qInserts)
except cjson.DecodeError as dc:
dbsExceptionHandler("dbsException-invalid-input2", "Wrong format/data from insert File input", self.logger.exception, str(dc))
except dbsException as de:
dbsExceptionHandler(de.eCode, de.message, self.logger.exception, de.message)
except HTTPError as he:
raise he
except Exception as ex:
sError = "DBSWriterModel/insertFile. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to insert a list of files into DBS. Up to 10 files can be inserted in one request.
:param qInserts: True means that inserts will be queued instead of done immediately. INSERT QUEUE Manager will perform the inserts within a few minutes.
:type qInserts: bool
:param filesList: List of dictionaries containing following information
:type filesList: list of dicts
:key logical_file_name: File to be inserted (str) (Required)
:key is_file_valid: (optional, default = 1): (bool)
:key block: required: /a/b/c#d (str)
:key dataset: required: /a/b/c (str)
:key file_type: (optional, default = EDM) one of the predefined types, (str)
:key check_sum: (optional, default = '-1') (str)
:key event_count: (optional, default = -1) (int)
:key file_size: (optional, default = -1.) (float)
:key adler32: (optional, default = '') (str)
:key md5: (optional, default = '') (str)
:key auto_cross_section: (optional, default = -1.) (float)
:key file_lumi_list: (optional, default = []) [{'run_num': 123, 'lumi_section_num': 12},{}....]
:key file_parent_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....]
:key file_assoc_list: (optional, default = []) [{'file_parent_lfn': 'mylfn'},{}....]
:key file_output_config_list: (optional, default = []) [{'app_name':..., 'release_version':..., 'pset_hash':...., output_module_label':...},{}.....]
|
entailment
|
def updateFile(self, logical_file_name=[], is_file_valid=1, lost=0, dataset=''):
"""
API to update file status
        :param logical_file_name: logical_file_name to update (optional), but must have either an lfn or
a dataset
:type logical_file_name: str
:param is_file_valid: valid=1, invalid=0 (Required)
:type is_file_valid: bool
:param lost: default lost=0 (optional)
:type lost: bool
        :param dataset: default dataset='' (optional), but must have either an lfn or a dataset
:type dataset: basestring
"""
if lost in [1, True, 'True', 'true', '1', 'y', 'yes']:
lost = 1
if is_file_valid in [1, True, 'True', 'true', '1', 'y', 'yes']:
            dbsExceptionHandler("dbsException-invalid-input2", dbsExceptionCode["dbsException-invalid-input2"], self.logger.exception,\
                "A lost file must be set to invalid" )
else: lost = 0
for f in logical_file_name, dataset:
if '*' in f or '%' in f:
dbsExceptionHandler("dbsException-invalid-input2", dbsExceptionCode["dbsException-invalid-input2"], self.logger.exception, "No \
                wildcards are allowed in LFN or dataset for the updateFile API." )
try:
self.dbsFile.updateStatus(logical_file_name, is_file_valid, lost, dataset)
except HTTPError as he:
raise he
except Exception as ex:
sError = "DBSWriterModel/updateFile. %s\n. Exception trace: \n %s" \
% (ex, traceback.format_exc())
dbsExceptionHandler('dbsException-server-error', dbsExceptionCode['dbsException-server-error'], self.logger.exception, sError)
|
API to update file status
:param logical_file_name: logical_file_name to update (optional), but must have either an lfn or
a dataset
:type logical_file_name: str
:param is_file_valid: valid=1, invalid=0 (Required)
:type is_file_valid: bool
:param lost: default lost=0 (optional)
:type lost: bool
:param dataset: default dataset='' (optional), but must have either an lfn or a dataset
:type dataset: basestring
|
entailment
|
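The lost/is_file_valid handling above can be read as a small normalization step; this standalone sketch reproduces it with the same truthy spellings, using a plain ValueError in place of dbsExceptionHandler.

# Standalone sketch of updateFile's flag handling; ValueError stands in for dbsExceptionHandler.
_TRUTHY = (1, True, 'True', 'true', '1', 'y', 'yes')

def normalize_flags(is_file_valid, lost):
    lost = 1 if lost in _TRUTHY else 0
    if lost and is_file_valid in _TRUTHY:
        raise ValueError("A lost file must be set to invalid")
    return is_file_valid, lost

print(normalize_flags(0, 'yes'))   # (0, 1)
print(normalize_flags(0, 'no'))    # (0, 0)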
def create_from_string(cls, cidr, label=None, whitelist=False):
"""
Converts a CIDR like 192.168.0.0/24 into 2 parts:
start: 3232235520
stop: 3232235775
"""
network = netaddr.IPNetwork(cidr)
start = network.first
stop = start + network.size - 1
obj = cls.objects.create(label=label, start=start, stop=stop,
whitelist=whitelist)
return obj
|
Converts a CIDR like 192.168.0.0/24 into 2 parts:
start: 3232235520
stop: 3232235775
|
entailment
|
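For reference, the start/stop arithmetic can be checked directly with netaddr; the snippet below reproduces the 192.168.0.0/24 values quoted in the docstring.

import netaddr

network = netaddr.IPNetwork("192.168.0.0/24")
start = network.first                # 3232235520, the first address as an integer
stop = start + network.size - 1      # 3232235775, the last address in the /24
print(start, stop)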
def qs_for_ip(cls, ip_str):
"""
Returns a queryset with matching IPNetwork objects for the given IP.
"""
ip = int(netaddr.IPAddress(ip_str))
# ignore IPv6 addresses for now (4294967295 is 0xffffffff, aka the
# biggest 32-bit number)
if ip > 4294967295:
return cls.objects.none()
ip_range_query = {
'start__lte': ip,
'stop__gte': ip
}
return cls.objects.filter(**ip_range_query)
|
Returns a queryset with matching IPNetwork objects for the given IP.
|
entailment
|
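The integer conversion and the 32-bit bound used in qs_for_ip, shown in isolation; an IPv6 address converts to a value far above 0xffffffff and is therefore skipped.

import netaddr

ip_v4 = int(netaddr.IPAddress("192.168.0.10"))   # 3232235530, fits in 32 bits
ip_v6 = int(netaddr.IPAddress("2001:db8::1"))    # far larger than 0xffffffff
print(ip_v4 <= 0xffffffff, ip_v6 <= 0xffffffff)  # True False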
def matches_ip(cls, ip_str, read_preference=None):
"""
Return True if provided IP exists in the blacklist and doesn't exist
in the whitelist. Otherwise, return False.
"""
qs = cls.qs_for_ip(ip_str).only('whitelist')
if read_preference:
qs = qs.read_preference(read_preference)
# Return True if any docs match the IP and none of them represent
# a whitelist
return bool(qs) and not any(obj.whitelist for obj in qs)
|
Return True if provided IP exists in the blacklist and doesn't exist
in the whitelist. Otherwise, return False.
|
entailment
|
def dbsExceptionHandler(eCode='', message='', logger=None , serverError=''):
"""
    This utility function handles all DBS exceptions. It will log and raise an exception
    based on the input condition, recording the traceback in the server log. It sends HTTPError 400
    for invalid client input and HTTPError 412 when a required pre-existing condition is not found.
"""
if logger:
#HTTP Error
if eCode == "dbsException-invalid-input":
#logger(eCode + ": " + serverError)
raise HTTPError(400, message)
elif eCode == "dbsException-missing-data":
logger( time.asctime(time.gmtime()) + " " + eCode + ": " + serverError)
#print (eCode + ": " + serverError)
raise HTTPError(412, message)
elif eCode == "dbsException-input-too-large":
logger(time.asctime(time.gmtime()) + " " + eCode + ": " + serverError)
raise HTTPError(413, message)
elif eCode == "dbsException-invalid-input2":
logger( time.asctime(time.gmtime()) + " " + eCode + ": " + serverError)
raise HTTPError(400, message)
elif eCode == "dbsException-conflict-data":
logger( time.asctime(time.gmtime()) + " " + eCode + ": " + serverError)
raise HTTPError(409, message)
elif eCode == "dbsException-failed-connect2host":
logger( time.asctime(time.gmtime()) + " " + eCode + ": " + serverError)
raise HTTPError(443, message)
else:
#client gets httperror 500 for server internal error
#print eCode + ": " + serverError
logger( time.asctime(time.gmtime()) + " " + eCode + ": " + serverError)
raise HTTPError(500, message)
else:
#not HTTP Error
raise dbsException(eCode, message, serverError)
|
This utility function handles all DBS exceptions. It will log and raise an exception
based on the input condition, recording the traceback in the server log. It sends HTTPError 400
for invalid client input and HTTPError 412 when a required pre-existing condition is not found.
|
entailment
|
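The if/elif chain above amounts to a fixed mapping from DBS error codes to HTTP status codes; a table-driven sketch of the same dispatch (status values copied from the code, including the non-standard 443 used for failed-connect2host):

# Table-driven sketch of the eCode -> HTTP status dispatch above; 500 is the fallback.
HTTP_STATUS_BY_ECODE = {
    "dbsException-invalid-input": 400,
    "dbsException-invalid-input2": 400,
    "dbsException-missing-data": 412,
    "dbsException-input-too-large": 413,
    "dbsException-conflict-data": 409,
    "dbsException-failed-connect2host": 443,  # mirrors the code above, not a standard HTTP status
}

def http_status_for(eCode):
    return HTTP_STATUS_BY_ECODE.get(eCode, 500)

print(http_status_for("dbsException-missing-data"))  # 412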
def execute(self, conn, block_id="", transaction=False):
"""
simple execute
"""
if not conn:
dbsExceptionHandler("dbsException-db-conn-failed", "Oracle/FileBuffer/List. Expects db connection from upper layer.")
sql = self.sql
binds = { "block_id" : block_id}
cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
result = self.formatCursor(cursors[0])
return result
|
simple execute
|
entailment
|
def execute(self, conn, primary_ds_name="", primary_ds_type="", transaction=False):
"""
Lists all primary datasets if pattern is not provided.
"""
sql = self.sql
binds = {}
#import pdb
#pdb.set_trace()
if primary_ds_name and primary_ds_type in ('', None, '%'):
op = ("=", "like")["%" in primary_ds_name]
sql += "WHERE P.PRIMARY_DS_NAME %s :primary_ds_name" % op
binds.update(primary_ds_name=primary_ds_name)
elif primary_ds_type and primary_ds_name in ('', None, '%'):
op = ("=", "like")["%" in primary_ds_type]
sql += "WHERE PT.PRIMARY_DS_TYPE %s :primary_ds_type" % op
binds.update(primary_ds_type=primary_ds_type)
elif primary_ds_name and primary_ds_type:
op = ("=", "like")["%" in primary_ds_name]
op1 = ("=", "like")["%" in primary_ds_type]
sql += "WHERE P.PRIMARY_DS_NAME %s :primary_ds_name and PT.PRIMARY_DS_TYPE %s :primary_ds_type"\
%(op, op1)
binds.update(primary_ds_name=primary_ds_name)
binds.update(primary_ds_type=primary_ds_type)
else:
pass
cursors = self.dbi.processData(sql, binds, conn, transaction, returnCursor=True)
result = []
for c in cursors:
result.extend(self.formatCursor(c, size=100))
return result
|
Lists all primary datasets if pattern is not provided.
|
entailment
|
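The ("=", "like")["%" in value] expression used above is a tuple indexed by a boolean: it selects the LIKE operator whenever the value carries a % wildcard. A minimal illustration:

# The ("=", "like")[bool] idiom: a bool indexes a two-element tuple (False -> 0, True -> 1).
def comparison_op(value):
    return ("=", "like")["%" in value]

print(comparison_op("MinBias"))    # =
print(comparison_op("MinBias%"))   # like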
def configure_proxy(self, curl_object):
"""configure pycurl proxy settings"""
curl_object.setopt(curl_object.PROXY, self._proxy_hostname)
curl_object.setopt(curl_object.PROXYPORT, self._proxy_port)
curl_object.setopt(curl_object.PROXYTYPE, curl_object.PROXYTYPE_SOCKS5)
if self._proxy_user and self._proxy_passwd:
            curl_object.setopt(curl_object.PROXYUSERPWD, '%s:%s' % (self._proxy_user, self._proxy_passwd))
|
configure pycurl proxy settings
|
entailment
|
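A self-contained sketch of the same pycurl SOCKS5 proxy setup, with hypothetical host, port, and credentials; note that PROXYUSERPWD expects a user:password string.

import pycurl

# Hypothetical SOCKS5 proxy settings.
curl = pycurl.Curl()
curl.setopt(pycurl.URL, "https://cmsweb.example.com/dbs")   # hypothetical target URL
curl.setopt(pycurl.PROXY, "socks5.example.com")             # hypothetical proxy host
curl.setopt(pycurl.PROXYPORT, 1080)
curl.setopt(pycurl.PROXYTYPE, pycurl.PROXYTYPE_SOCKS5)
curl.setopt(pycurl.PROXYUSERPWD, "proxyuser:proxypass")     # user:password
# curl.perform()  # would issue the request through the proxy; left commented in this sketch
curl.close()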
def execute( self, conn, daoinput, transaction = False ):
"""
daoinput must be validated to have the following keys:
        child_parent_id_list: [[cid, pid], ...], block_name
"""
binds = {}
bindlist=[]
if isinstance(daoinput, dict) and "block_name" in daoinput.keys():
binds = {"block_name": daoinput["block_name"]}
r = self.dbi.processData(self.sql_sel, binds, conn, False)
bfile = self.format(r)
bfile_list = []
for f in bfile:
bfile_list.append(f[0])
if "child_parent_id_list" in daoinput.keys():
files = []
for i in daoinput["child_parent_id_list"]:
files.append(i[0])
if set(files)-set(bfile_list):
dbsExceptionHandler('dbsException-invalid-input2', "Files required in the same block for FileParent/insert2 dao.", self.logger.exception)
else:
dbsExceptionHandler('dbsException-invalid-input2', "child_parent_id_list required for FileParent/insert2 dao.", self.logger.exception)
else:
            dbsExceptionHandler('dbsException-invalid-input2', "block_name required for FileParent/insert2 dao.", self.logger.exception)
binds = {}
for pf in daoinput["child_parent_id_list"]:
binds = {"this_file_id":pf[0], "parent_file_id": pf[1]}
bindlist.append(binds)
self.dbi.processData(self.sql, bindlist, conn, transaction)
|
daoinput must be validated to have the following keys:
child_parent_id_list: [[cid, pid], ...], block_name
|
entailment
|
def execute(self, conn, acquisition_era_name, end_date, transaction = False):
        """
        Update the end_date for a given acquisition_era_name.
        """
if not conn:
dbsExceptionHandler("dbsException-failed-connect2host", "dbs/dao/Oracle/AcquisitionEra/updateEndDate expects db connection from upper layer.", self.logger.exception)
binds = { "acquisition_era_name" :acquisition_era_name , "end_date" : end_date }
result = self.dbi.processData(self.sql, binds, conn, transaction)
|
Update the end_date for a given acquisition_era_name.
|
entailment
|
def execute(self, conn, daoinput, transaction = False):
"""
daoinput keys:
migration_status, migration_block_id, migration_request_id
"""
#print daoinput['migration_block_id']
if not conn:
dbsExceptionHandler("dbsException-failed-connect2host", "Oracle/MigrationBlock/Update. Expects db connection from upper layer." ,self.logger.exception)
if daoinput['migration_status'] == 1:
sql = self.sql + " (MIGRATION_STATUS = 0 or MIGRATION_STATUS = 3)"
elif daoinput['migration_status'] == 2 or daoinput['migration_status'] == 3 or daoinput['migration_status'] == 9:
sql = self.sql + " MIGRATION_STATUS = 1 "
else:
            dbsExceptionHandler("dbsException-conflict-data", "Oracle/MigrationBlock/Update. Expected migration status to be 1, 2, 3, or 9", self.logger.exception )
#print sql
if 'migration_request_id' in daoinput:
sql3 = sql + "and MIGRATION_REQUEST_ID =:migration_request_id"
result = self.dbi.processData(sql3, daoinput, conn, transaction)
elif 'migration_block_id' in daoinput:
if type(daoinput['migration_block_id']) is not list:
sql2 = sql+ " and MIGRATION_BLOCK_ID =:migration_block_id"
result = self.dbi.processData(sql2, daoinput, conn, transaction)
else:
bk_id_generator, binds2 = create_token_generator(daoinput['migration_block_id'])
newdaoinput = {}
newdaoinput.update({"migration_status":daoinput["migration_status"],
"last_modification_date":daoinput["last_modification_date"]})
newdaoinput.update(binds2)
sql2 = sql+ """ and MIGRATION_BLOCK_ID in ({bk_id_generator} SELECT TOKEN FROM TOKEN_GENERATOR)
""".format(bk_id_generator=bk_id_generator)
result = self.dbi.processData(sql2, newdaoinput, conn, transaction)
else:
dbsExceptionHandler("dbsException-conflict-data", "Oracle/MigrationBlock/Update. Required IDs not in the input", self.logger.exception)
|
daoinput keys:
migration_status, migration_block_id, migration_request_id
|
entailment
|
def execute(self, conn, file_id_list, transaction=False):
"""
file_id_list : file_id_list
"""
sql=self.sql
binds={}
if file_id_list:
count=0
for an_id in file_id_list:
if count > 0: sql += ", "
sql += ":file_id_%s" %count
binds.update({"file_id_%s" %count : an_id})
count+=1
sql += ")"
else:
dbsExceptionHandler('dbsException-invalid-input', "Oracle/FileParentBlock/List. this_file_id not provided", self.logger.exception)
result = self.dbi.processData(sql, binds, conn, transaction)
plist = self.formatDict(result)
return plist
|
file_id_list : file_id_list
|
entailment
|
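The loop above builds one named bind per file id so the IN (...) clause stays fully parameterized; a standalone sketch of that pattern (the base SQL fragment is hypothetical):

# Build ":file_id_N" binds for a parameterized IN clause, as in the dao above.
def build_in_clause(file_id_list, base_sql="SELECT ... WHERE F.FILE_ID IN ("):  # hypothetical base SQL
    sql = base_sql
    binds = {}
    for count, an_id in enumerate(file_id_list):
        if count > 0:
            sql += ", "
        sql += ":file_id_%s" % count
        binds["file_id_%s" % count] = an_id
    sql += ")"
    return sql, binds

print(build_in_clause([101, 102, 103]))
# ('SELECT ... WHERE F.FILE_ID IN (:file_id_0, :file_id_1, :file_id_2)', {'file_id_0': 101, ...})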
def execute(self, conn, app, release_version, pset_hash, output_label, global_tag, transaction = False):
"""
        Returns the output module configuration id for a given application.
        All parameters (app, release_version, pset_hash, output_label, global_tag) must be set,
        because blockInsert requires them.
"""
binds = {}
binds["app_name"]=app
binds["release_version"]=release_version
binds["pset_hash"]=pset_hash
binds["output_module_label"]=output_label
binds["global_tag"]=global_tag
result = self.dbi.processData(self.sql, binds, conn, transaction)
plist = self.formatDict(result)
if len(plist) < 1: return -1
return plist[0]["output_mod_config_id"]
|
Returns the output module configuration id for a given application.
All parameters (app, release_version, pset_hash, output_label, global_tag) must be set,
because blockInsert requires them.
|
entailment
|
def dumpBlock(self, block_name):
        """ This method is used at the source server to get the
        information on a single block that is being migrated.
        It returns the data in a format ready for the insert calls."""
if '%' in block_name or '*' in block_name:
msg = "No wildcard is allowed in block_name for dumpBlock API"
dbsExceptionHandler('dbsException-invalid-input', msg, self.logger.exception)
conn = self.dbi.connection()
try :
#block name is unique
block1 = self.blocklist.execute(conn, block_name=block_name)
block = []
for b1 in block1:
if not b1:
return {}
else:
block = b1
#a block only has one dataset and one primary dataset
#in order to reduce the number of dao objects, we will not write
#a special migration one. However, we will have to remove the
#extras
            #block1 is a generator: when it is empty, the for loop above is skipped entirely,
            #so we cannot test b1 to decide whether the generator was empty.
            #Instead we check the accumulated block below:
if not block: return {}
dataset1 = self.datasetlist.execute(conn,
dataset=block["dataset"], dataset_access_type="")
dataset = []
for d in dataset1:
if d:
dataset = d
dconfig_list = self.outputCoflist.execute(conn, dataset=dataset['dataset'])
else: return {}
#get block parentage
bparent = self.blockparentlist.execute(conn, block['block_name'])
#get dataset parentage
dsparent = self.dsparentlist.execute(conn, dataset['dataset'])
for p in dsparent:
del p['parent_dataset_id']
if 'dataset'in p:
del p['dataset']
elif 'this_dataset' in p:
del p['this_dataset']
else:
pass
fparent_list = self.fplist.execute(conn,
block_id=block['block_id'])
fparent_list2 = []
for fp in fparent_list:
fparent_list2.append(fp)
#print "---YG file Parent List--"
#print fparent_list2
fconfig_list = self.outputCoflist.execute(conn,
block_id=block['block_id'])
acqEra = {}
prsEra = {}
if dataset["acquisition_era_name"] not in ( "", None):
acqEra = self.aelist.execute(conn,
acquisitionEra=dataset["acquisition_era_name"])[0]
if dataset["processing_version"] not in ("", None):
prsEra = self.pelist.execute(conn,
processingV=dataset["processing_version"])[0]
primds = self.primdslist.execute(conn,
primary_ds_name=dataset["primary_ds_name"])[0]
del dataset["primary_ds_name"], dataset['primary_ds_type']
files = self.filelist.execute(conn, block_name=block_name)
for f in files:
                #There is a trade-off between json sorting and db queries:
                #we keep the lumi sections in each file, but keep the file parentage
                #separate from the file
file_lumi_list = []
for item in self.fllist.execute(conn, logical_file_name=f['logical_file_name'], migration=True):
file_lumi_list.append(item)
#print "---YG file lumi list---"
f.update(file_lumi_list = file_lumi_list)
del file_lumi_list #YG 09/2015
del f['branch_hash_id']
del dataset["acquisition_era_name"], dataset["processing_version"]
del block["dataset"]
result = dict(block=block, dataset=dataset, primds=primds,
files=files, block_parent_list=bparent,
ds_parent_list=dsparent, file_conf_list=fconfig_list,
file_parent_list=fparent_list2, dataset_conf_list=dconfig_list)
if acqEra:
result["acquisition_era"] = acqEra
if prsEra:
result["processing_era"] = prsEra
return result
finally:
if conn:
conn.close()
|
This method is used at the source server to get the
information on a single block that is being migrated.
It returns the data in a format ready for the insert calls.
|
entailment
|
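Based on the return statement above, the dictionary handed back by dumpBlock has the following shape; acquisition_era and processing_era appear only when the dataset defines them, and the values here are placeholders.

# Shape of the dumpBlock result, mirroring the dict built in the return statement above.
dump = {
    "block": {...},               # single block record (dataset key stripped)
    "dataset": {...},             # single dataset record (primary_ds/era keys stripped)
    "primds": {...},              # primary dataset record
    "files": [...],               # file records, each carrying its file_lumi_list
    "block_parent_list": [...],
    "ds_parent_list": [...],
    "file_conf_list": [...],      # output configs attached to the block's files
    "file_parent_list": [...],
    "dataset_conf_list": [...],   # output configs attached to the dataset
    # "acquisition_era": {...},   # present only if the dataset has an acquisition era
    # "processing_era": {...},    # present only if the dataset has a processing version
}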
def updateStatus(self, block_name="", open_for_writing=0):
"""
Used to toggle the status of a block open_for_writing=1, open for writing, open_for_writing=0, closed
"""
if open_for_writing not in [1, 0, '1', '0']:
msg = "DBSBlock/updateStatus. open_for_writing can only be 0 or 1 : passed %s."\
% open_for_writing
dbsExceptionHandler('dbsException-invalid-input', msg)
conn = self.dbi.connection()
trans = conn.begin()
try :
open_for_writing = int(open_for_writing)
self.updatestatus.execute(conn, block_name, open_for_writing, dbsUtils().getTime(), trans)
trans.commit()
trans = None
except Exception as ex:
if trans:
trans.rollback()
if conn:conn.close()
raise ex
finally:
if conn:conn.close()
|
Used to toggle the status of a block open_for_writing=1, open for writing, open_for_writing=0, closed
|
entailment
|
def updateSiteName(self, block_name, origin_site_name):
"""
Update the origin_site_name for a given block name
"""
if not origin_site_name:
dbsExceptionHandler('dbsException-invalid-input',
"DBSBlock/updateSiteName. origin_site_name is mandatory.")
conn = self.dbi.connection()
trans = conn.begin()
try:
self.updatesitename.execute(conn, block_name, origin_site_name)
except:
if trans:
trans.rollback()
raise
else:
if trans:
trans.commit()
finally:
if conn:
conn.close()
|
Update the origin_site_name for a given block name
|
entailment
|
def listBlockParents(self, block_name=""):
"""
list parents of a block
"""
if not block_name:
msg = " DBSBlock/listBlockParents. Block_name must be provided as a string or a list. \
No wildcards allowed in block_name/s."
dbsExceptionHandler('dbsException-invalid-input', msg)
elif isinstance(block_name, basestring):
try:
block_name = str(block_name)
if '%' in block_name or '*' in block_name:
dbsExceptionHandler("dbsException-invalid-input", "DBSReaderModel/listBlocksParents: \
NO WILDCARDS allowed in block_name.")
except:
dbsExceptionHandler("dbsException-invalid-input", "DBSBlock/listBlockParents. Block_name must be \
provided as a string or a list. No wildcards allowed in block_name/s .")
elif type(block_name) is list:
for b in block_name:
if '%' in b or '*' in b:
dbsExceptionHandler("dbsException-invalid-input", "DBSReaderModel/listBlocksParents: \
NO WILDCARDS allowed in block_name.")
else:
msg = "DBSBlock/listBlockParents. Block_name must be provided as a string or a list. \
No wildcards allowed in block_name/s ."
dbsExceptionHandler("dbsException-invalid-input", msg)
conn = self.dbi.connection()
try:
results = self.blockparentlist.execute(conn, block_name)
return results
finally:
if conn:
conn.close()
|
list parents of a block
|
entailment
|
def listBlockChildren(self, block_name=""):
"""
        list children of a block
"""
if (not block_name) or re.search("['%','*']", block_name):
dbsExceptionHandler("dbsException-invalid-input", "DBSBlock/listBlockChildren. Block_name must be provided." )
conn = self.dbi.connection()
try:
results = self.blockchildlist.execute(conn, block_name)
return results
finally:
if conn:
conn.close()
|
list children of a block
|
entailment
|
def listBlocks(self, dataset="", block_name="", data_tier_name="", origin_site_name="",
logical_file_name="", run_num=-1, min_cdate=0, max_cdate=0,
min_ldate=0, max_ldate=0, cdate=0, ldate=0, open_for_writing=-1, detail=False):
"""
dataset, block_name, data_tier_name or logical_file_name must be passed.
"""
if (not dataset) or re.search("['%','*']", dataset):
if (not block_name) or re.search("['%','*']", block_name):
if (not logical_file_name) or re.search("['%','*']", logical_file_name):
if not data_tier_name or re.search("['%','*']", data_tier_name):
                        msg = "DBSBlock/listBlock. You must specify at least one parameter (dataset, block_name,\
                               data_tier_name, logical_file_name) with the listBlocks API"
dbsExceptionHandler('dbsException-invalid-input2', msg, self.logger.exception, msg)
if data_tier_name:
if not (min_cdate and max_cdate) or (max_cdate-min_cdate)>32*24*3600:
msg = "min_cdate and max_cdate are mandatory parameters. If data_tier_name parameter is used \
the maximal time range allowed is 31 days"
dbsExceptionHandler('dbsException-invalid-input2', msg, self.logger.exception, msg)
if detail:
                msg = "DBSBlock/listBlock. Detail parameter not allowed together with data_tier_name"
dbsExceptionHandler('dbsException-invalid-input2', msg, self.logger.exception, msg)
with self.dbi.connection() as conn:
dao = (self.blockbrieflist, self.blocklist)[detail]
for item in dao.execute(conn, dataset, block_name, data_tier_name, origin_site_name, logical_file_name, run_num,
min_cdate, max_cdate, min_ldate, max_ldate, cdate, ldate):
yield item
|
dataset, block_name, data_tier_name or logical_file_name must be passed.
|
entailment
|
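When only data_tier_name is given, min_cdate and max_cdate are unix timestamps whose span must stay within the 32*24*3600-second limit checked above; a quick way to build a valid window (the 30-day range is just an example):

import time

# min_cdate/max_cdate are unix timestamps; the API rejects spans above 32*24*3600 seconds.
max_cdate = int(time.time())
min_cdate = max_cdate - 30 * 24 * 3600            # a 30-day window, inside the allowed range
print((max_cdate - min_cdate) <= 32 * 24 * 3600)  # True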
def listBlocksOrigin(self, origin_site_name="", dataset="", block_name=""):
"""
This is the API to list all the blocks/datasets first generated in the site called origin_site_name,
        if origin_site_name is provided; no wildcards are allowed. If a fully spelled dataset is provided, then it will
only list the blocks first generated from origin_site_name under the given dataset.
"""
if not (dataset or block_name):
dbsExceptionHandler("dbsException-invalid-input",
"DBSBlock/listBlocksOrigin: dataset or block_name must be provided.")
if re.search("['%', '*']", dataset) or re.search("['%', '*']", block_name):
dbsExceptionHandler("dbsException-invalid-input",
"DBSBlock/listBlocksOrigin: dataset or block_name with wildcard is not supported.")
try:
conn = self.dbi.connection()
result = self.bkOriginlist.execute(conn, origin_site_name, dataset, block_name)
return result
finally:
if conn:
conn.close()
|
This is the API to list all the blocks/datasets first generated in the site called origin_site_name,
if origin_site_name is provided; no wildcards are allowed. If a fully spelled dataset is provided, then it will
only list the blocks first generated from origin_site_name under the given dataset.
|
entailment
|
def insertBlock(self, businput):
"""
Input dictionary has to have the following keys:
        block_name, origin_site_name
        It may have:
        open_for_writing, block_size,
        file_count, creation_date, create_by, last_modification_date, last_modified_by
        It builds the correct dictionary for the dao input and executes the dao.
NEED to validate there are no extra keys in the businput
"""
if not ("block_name" in businput and "origin_site_name" in businput ):
dbsExceptionHandler('dbsException-invalid-input', "business/DBSBlock/insertBlock must have block_name and origin_site_name as input")
conn = self.dbi.connection()
tran = conn.begin()
try:
blkinput = {
"last_modification_date":businput.get("last_modification_date", dbsUtils().getTime()),
#"last_modified_by":businput.get("last_modified_by", dbsUtils().getCreateBy()),
"last_modified_by":dbsUtils().getCreateBy(),
#"create_by":businput.get("create_by", dbsUtils().getCreateBy()),
"create_by":dbsUtils().getCreateBy(),
"creation_date":businput.get("creation_date", dbsUtils().getTime()),
"open_for_writing":businput.get("open_for_writing", 1),
"block_size":businput.get("block_size", 0),
"file_count":businput.get("file_count", 0),
"block_name":businput.get("block_name"),
"origin_site_name":businput.get("origin_site_name")
}
ds_name = businput["block_name"].split('#')[0]
blkinput["dataset_id"] = self.datasetid.execute(conn, ds_name, tran)
if blkinput["dataset_id"] == -1 :
                msg = "DBSBlock/insertBlock. Dataset %s does not exist" % ds_name
dbsExceptionHandler('dbsException-missing-data', msg)
blkinput["block_id"] = self.sm.increment(conn, "SEQ_BK", tran)
self.blockin.execute(conn, blkinput, tran)
tran.commit()
tran = None
except Exception as e:
if str(e).lower().find("unique constraint") != -1 or str(e).lower().find("duplicate") != -1:
pass
else:
if tran:
tran.rollback()
if conn: conn.close()
raise
finally:
if tran:
tran.rollback()
if conn:
conn.close()
|
Input dictionary has to have the following keys:
block_name, origin_site_name
It may have:
open_for_writing, block_size,
file_count, creation_date, create_by, last_modification_date, last_modified_by
It builds the correct dictionary for the dao input and executes the dao.
NEED to validate there are no extra keys in the businput
|
entailment
|
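A minimal, hypothetical businput for insertBlock: block_name and origin_site_name are the only required keys, and the block name must have the form <dataset>#<suffix> because the dataset id is looked up from the part before '#'.

# Minimal hypothetical input for insertBlock; everything else defaults
# (open_for_writing=1, block_size=0, file_count=0, timestamps and user filled in server-side).
businput = {
    "block_name": "/Example/Dataset-v1/RAW#0f3a9c2e",   # hypothetical block: <dataset>#<suffix>
    "origin_site_name": "T2_XX_Example",                # hypothetical site name
}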