Dataset schema:

  _id: string (2-7 chars)
  title: string (1-88 chars)
  partition: string (3 distinct values)
  text: string (75-19.8k chars)
  language: string (1 distinct value)
  meta_information: dict
q18400
shrink_text_file
train
def shrink_text_file(filename, max_size, removal_marker=None):
    """Shrink a text file to approximately max_size bytes
    by removing lines from the middle of the file.
    """
    file_size = os.path.getsize(filename)
    assert file_size > max_size

    # We partition the file into 3 parts:
    # A) start: max_size/2 bytes we want to keep
    # B) middle: part we want to remove
    # C) end: max_size/2 bytes we want to keep

    # Trick taken from StackOverflow:
    # https://stackoverflow.com/questions/2329417/fastest-way-to-delete-a-line-from-large-file-in-python
    # We open the file twice at the same time, once for reading (input_file)
    # and once for writing (output_file).
    # We position output_file at the beginning of part B
    # and input_file at the beginning of part C.
    # Then we copy the content of C into B, overwriting what is there.
    # Afterwards we truncate the file after A+C.

    with open(filename, 'r+b') as output_file:
        with open(filename, 'rb') as input_file:
            # Position output_file between A and B
            output_file.seek(max_size // 2)
            output_file.readline()  # jump to end of current line so that we truncate at line boundaries
            if output_file.tell() == file_size:
                # readline jumped to end of file because of a long line
                return

            if removal_marker:
                output_file.write(removal_marker.encode())

            # Position input_file between B and C
            input_file.seek(-max_size // 2, os.SEEK_END)  # jump to beginning of second part we want to keep from end of file
            input_file.readline()  # jump to end of current line so that we truncate at line boundaries

            # Copy C over B
            copy_all_lines_from_to(input_file, output_file)

            output_file.truncate()
python
{ "resource": "" }
q18401
read_file
train
def read_file(*path):
    """
    Read the full content of a file.
    """
    with open(os.path.join(*path)) as f:
        return f.read().strip()
python
{ "resource": "" }
q18402
add_files_to_git_repository
train
def add_files_to_git_repository(base_dir, files, description):
    """
    Add and commit all files given in a list into a git repository in the
    base_dir directory. Nothing is done if the git repository has local changes.

    @param files: the files to commit
    @param description: the commit message
    """
    if not os.path.isdir(base_dir):
        printOut('Output path is not a directory, cannot add files to git repository.')
        return

    # find out root directory of repository
    gitRoot = subprocess.Popen(['git', 'rev-parse', '--show-toplevel'],
                               cwd=base_dir,
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    stdout = gitRoot.communicate()[0]
    if gitRoot.returncode != 0:
        printOut('Cannot commit results to repository: git rev-parse failed, perhaps output path is not a git directory?')
        return
    gitRootDir = decode_to_string(stdout).splitlines()[0]

    # check whether repository is clean
    gitStatus = subprocess.Popen(['git', 'status', '--porcelain', '--untracked-files=no'],
                                 cwd=gitRootDir,
                                 stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    (stdout, stderr) = gitStatus.communicate()
    if gitStatus.returncode != 0:
        printOut('Git status failed! Output was:\n' + decode_to_string(stderr))
        return

    if stdout:
        printOut('Git repository has local changes, not committing results.')
        return

    # add files to staging area
    files = [os.path.realpath(file) for file in files]
    # Use --force to add all files in result-files directory even if .gitignore excludes them
    gitAdd = subprocess.Popen(['git', 'add', '--force', '--'] + files, cwd=gitRootDir)
    if gitAdd.wait() != 0:
        printOut('Git add failed, will not commit results!')
        return

    # commit files
    printOut('Committing results files to git repository in ' + gitRootDir)
    gitCommit = subprocess.Popen(['git', 'commit', '--file=-', '--quiet'],
                                 cwd=gitRootDir,
                                 stdin=subprocess.PIPE)
    gitCommit.communicate(description.encode('UTF-8'))
    if gitCommit.returncode != 0:
        printOut('Git commit failed!')
        return
python
{ "resource": "" }
q18403
setup_logging
train
def setup_logging(format="%(asctime)s - %(levelname)s - %(message)s", level='INFO'):
    """Setup the logging framework with a basic configuration"""
    try:
        import coloredlogs
        coloredlogs.install(fmt=format, level=level)
    except ImportError:
        logging.basicConfig(format=format, level=level)
python
{ "resource": "" }
q18404
_Worker.execute
train
def execute(self, run):
    """
    This function executes the tool with a sourcefile with options.
    It also calls functions for output before and after the run.
    """
    self.output_handler.output_before_run(run)
    benchmark = self.benchmark

    memlimit = benchmark.rlimits.get(MEMLIMIT)

    args = run.cmdline()
    logging.debug('Command line of run is %s', args)
    run_result = \
        self.run_executor.execute_run(
            args,
            output_filename=run.log_file,
            output_dir=run.result_files_folder,
            result_files_patterns=benchmark.result_files_patterns,
            hardtimelimit=benchmark.rlimits.get(TIMELIMIT),
            softtimelimit=benchmark.rlimits.get(SOFTTIMELIMIT),
            walltimelimit=benchmark.rlimits.get(WALLTIMELIMIT),
            cores=self.my_cpus,
            memory_nodes=self.my_memory_nodes,
            memlimit=memlimit,
            environments=benchmark.environment(),
            workingDir=benchmark.working_directory(),
            maxLogfileSize=benchmark.config.maxLogfileSize,
            files_count_limit=benchmark.config.filesCountLimit,
            files_size_limit=benchmark.config.filesSizeLimit)

    if self.run_executor.PROCESS_KILLED:
        # If the run was interrupted, we ignore the result and cleanup.
        try:
            if benchmark.config.debug:
                os.rename(run.log_file, run.log_file + ".killed")
            else:
                os.remove(run.log_file)
        except OSError:
            pass
        return 1

    if self.my_cpus:
        run_result['cpuCores'] = self.my_cpus
    if self.my_memory_nodes:
        run_result['memoryNodes'] = self.my_memory_nodes

    run.set_result(run_result)
    self.output_handler.output_after_run(run)
python
{ "resource": "" }
q18405
CPUThrottleCheck.has_throttled
train
def has_throttled(self):
    """
    Check whether any of the CPU cores monitored by this instance has
    throttled since this instance was created.
    @return a boolean value
    """
    for file, value in self.cpu_throttle_count.items():
        try:
            new_value = int(util.read_file(file))
            if new_value > value:
                return True
        except Exception as e:
            logging.warning('Cannot read throttling count of CPU from kernel: %s', e)
    return False
python
{ "resource": "" }
q18406
SwapCheck.has_swapped
train
def has_swapped(self):
    """
    Check whether any swapping occurred on this system since this instance was created.
    @return a boolean value
    """
    new_values = self._read_swap_count()
    for key, new_value in new_values.items():
        old_value = self.swap_count.get(key, 0)
        if new_value > old_value:
            return True
    return False
python
{ "resource": "" }
q18407
add_basic_executor_options
train
def add_basic_executor_options(argument_parser):
    """Add some basic options for an executor to an argparse argument_parser."""
    argument_parser.add_argument("args", nargs="+", metavar="ARG",
                                 help='command line to run (prefix with "--" to ensure all arguments are treated correctly)')
    argument_parser.add_argument("--version", action="version", version="%(prog)s " + __version__)

    verbosity = argument_parser.add_mutually_exclusive_group()
    verbosity.add_argument("--debug", action="store_true",
                           help="show debug output")
    verbosity.add_argument("--quiet", action="store_true",
                           help="show only warnings")
python
{ "resource": "" }
q18408
BaseExecutor._kill_process
train
def _kill_process(self, pid, sig=signal.SIGKILL):
    """Try to send signal to given process."""
    try:
        os.kill(pid, sig)
    except OSError as e:
        if e.errno == errno.ESRCH:
            # process itself returned and exited before killing
            logging.debug("Failure %s while killing process %s with signal %s: %s",
                          e.errno, pid, sig, e.strerror)
        else:
            logging.warning("Failure %s while killing process %s with signal %s: %s",
                            e.errno, pid, sig, e.strerror)
python
{ "resource": "" }
q18409
BaseExecutor._start_execution
train
def _start_execution(self, args, stdin, stdout, stderr, env, cwd, temp_dir,
                     cgroups,
                     parent_setup_fn, child_setup_fn, parent_cleanup_fn):
    """Actually start the tool and the measurements.
    @param parent_setup_fn a function without parameters that is called in the parent process
        immediately before the tool is started
    @param child_setup_fn a function without parameters that is called in the child process
        before the tool is started
    @param parent_cleanup_fn a function that is called in the parent process
        immediately after the tool terminated, with three parameters:
        the result of parent_setup_fn, the result of the executed process as ProcessExitCode,
        and the base path for looking up files as parameter values
    @return: a tuple of PID of process and a blocking function, which waits for the process
        and a triple of the exit code and the resource usage of the process
        and the result of parent_cleanup_fn (do not use os.wait)
    """
    def pre_subprocess():
        # Do some other setup the caller wants.
        child_setup_fn()

        # put us into the cgroup(s)
        pid = os.getpid()
        cgroups.add_task(pid)

    # Set HOME and TMPDIR to fresh directories.
    tmp_dir = os.path.join(temp_dir, "tmp")
    home_dir = os.path.join(temp_dir, "home")
    self._create_dirs_in_temp_dir(tmp_dir, home_dir)
    env["HOME"] = home_dir
    env["TMPDIR"] = tmp_dir
    env["TMP"] = tmp_dir
    env["TEMPDIR"] = tmp_dir
    env["TEMP"] = tmp_dir
    logging.debug("Executing run with $HOME and $TMPDIR below %s.", temp_dir)

    args = self._build_cmdline(args, env=env)

    parent_setup = parent_setup_fn()

    p = subprocess.Popen(args,
                         stdin=stdin,
                         stdout=stdout, stderr=stderr,
                         env=env, cwd=cwd,
                         close_fds=True,
                         preexec_fn=pre_subprocess)

    def wait_and_get_result():
        exitcode, ru_child = self._wait_for_process(p.pid, args[0])

        parent_cleanup = parent_cleanup_fn(
            parent_setup, util.ProcessExitCode.from_raw(exitcode), "")
        return exitcode, ru_child, parent_cleanup

    return p.pid, wait_and_get_result
python
{ "resource": "" }
q18410
BaseExecutor._wait_for_process
train
def _wait_for_process(self, pid, name):
    """Wait for the given process to terminate.
    @return tuple of exit code and resource usage
    """
    try:
        logging.debug("Waiting for process %s with pid %s", name, pid)
        unused_pid, exitcode, ru_child = os.wait4(pid, 0)
        return exitcode, ru_child
    except OSError as e:
        if self.PROCESS_KILLED and e.errno == errno.EINTR:
            # Interrupted system call seems always to happen
            # if we killed the process ourselves after Ctrl+C was pressed.
            # We can try again to get exitcode and resource usage.
            logging.debug("OSError %s while waiting for termination of %s (%s): %s.",
                          e.errno, name, pid, e.strerror)
            try:
                unused_pid, exitcode, ru_child = os.wait4(pid, 0)
                return exitcode, ru_child
            except OSError:
                pass  # original error will be handled and this ignored

        logging.critical("OSError %s while waiting for termination of %s (%s): %s.",
                         e.errno, name, pid, e.strerror)
        return (0, None)
python
{ "resource": "" }
q18411
xml_to_string
train
def xml_to_string(elem, qualified_name=None, public_id=None, system_id=None):
    """
    Return a pretty-printed XML string for the Element.
    Also allows setting a document type.
    """
    from xml.dom import minidom

    rough_string = ET.tostring(elem, 'utf-8')
    reparsed = minidom.parseString(rough_string)
    if qualified_name:
        doctype = minidom.DOMImplementation().createDocumentType(
            qualified_name, public_id, system_id)
        reparsed.insertBefore(doctype, reparsed.documentElement)
    return reparsed.toprettyxml(indent="  ")
python
{ "resource": "" }
q18412
Tool.program_files
train
def program_files(self, executable):
    """
    Determine the file paths to be adopted
    """
    if self._get_version() == 6:
        paths = self.REQUIRED_PATHS_6
    elif self._get_version() > 6:
        paths = self.REQUIRED_PATHS_7_1
    else:
        # fall back for unexpected versions to avoid an unbound variable
        paths = []

    return paths
python
{ "resource": "" }
q18413
substitute_vars
train
def substitute_vars(oldList, runSet=None, sourcefile=None):
    """
    This method replaces special substrings in a list of strings
    and returns a new list.
    """
    keyValueList = []
    if runSet:
        benchmark = runSet.benchmark

        # list with tuples (key, value): 'key' is replaced by 'value'
        keyValueList = [
            ('benchmark_name', benchmark.name),
            ('benchmark_date', benchmark.instance),
            ('benchmark_path', benchmark.base_dir or '.'),
            ('benchmark_path_abs', os.path.abspath(benchmark.base_dir)),
            ('benchmark_file', os.path.basename(benchmark.benchmark_file)),
            ('benchmark_file_abs', os.path.abspath(os.path.basename(benchmark.benchmark_file))),
            ('logfile_path', os.path.dirname(runSet.log_folder) or '.'),
            ('logfile_path_abs', os.path.abspath(runSet.log_folder)),
            ('rundefinition_name', runSet.real_name if runSet.real_name else ''),
            ('test_name', runSet.real_name if runSet.real_name else ''),
        ]

    if sourcefile:
        keyValueList.append(('inputfile_name', os.path.basename(sourcefile)))
        keyValueList.append(('inputfile_path', os.path.dirname(sourcefile) or '.'))
        keyValueList.append(('inputfile_path_abs', os.path.dirname(os.path.abspath(sourcefile))))
        # The following are deprecated: do not use anymore.
        keyValueList.append(('sourcefile_name', os.path.basename(sourcefile)))
        keyValueList.append(('sourcefile_path', os.path.dirname(sourcefile) or '.'))
        keyValueList.append(('sourcefile_path_abs', os.path.dirname(os.path.abspath(sourcefile))))

    if sourcefile and sourcefile.endswith(".yml"):
        keyValueList.append(('taskdef_name', os.path.basename(sourcefile)))
        keyValueList.append(('taskdef_path', os.path.dirname(sourcefile) or '.'))
        keyValueList.append(('taskdef_path_abs', os.path.dirname(os.path.abspath(sourcefile))))

    # do not use keys twice
    assert len(set((key for (key, value) in keyValueList))) == len(keyValueList)

    return [util.substitute_vars(s, keyValueList) for s in oldList]
python
{ "resource": "" }
q18414
load_task_definition_file
train
def load_task_definition_file(task_def_file):
    """Open and parse a task-definition file in YAML format."""
    try:
        with open(task_def_file) as f:
            task_def = yaml.safe_load(f)
    except OSError as e:
        raise BenchExecException("Cannot open task-definition file: " + str(e))
    except yaml.YAMLError as e:
        raise BenchExecException("Invalid task definition: " + str(e))

    if str(task_def.get("format_version")) not in ["0.1", "1.0"]:
        raise BenchExecException(
            "Task-definition file {} specifies invalid format_version '{}'."
            .format(task_def_file, task_def.get("format_version")))

    return task_def
python
{ "resource": "" }
q18415
load_tool_info
train
def load_tool_info(tool_name):
    """
    Load the tool-info class.
    @param tool_name: The name of the tool-info module.
        Either a full Python package name or a name within the benchexec.tools package.
    @return: A tuple of the full name of the used tool-info module
        and an instance of the tool-info class.
    """
    tool_module = tool_name if '.' in tool_name else ("benchexec.tools." + tool_name)
    try:
        tool = __import__(tool_module, fromlist=['Tool']).Tool()
    except ImportError as ie:
        sys.exit('Unsupported tool "{0}" specified. ImportError: {1}'.format(tool_name, ie))
    except AttributeError:
        sys.exit('The module "{0}" does not define the necessary class "Tool", '
                 'it cannot be used as tool info for BenchExec.'.format(tool_module))
    return (tool_module, tool)
python
{ "resource": "" }
q18416
RunSet.create_run_from_task_definition
train
def create_run_from_task_definition(
        self, task_def_file, options, propertyfile, required_files_pattern):
    """Create a Run from a task definition in yaml format"""
    task_def = load_task_definition_file(task_def_file)

    def expand_patterns_from_tag(tag):
        result = []
        patterns = task_def.get(tag, [])
        if isinstance(patterns, str) or not isinstance(patterns, collections.abc.Iterable):
            # accept single string in addition to list of strings
            patterns = [patterns]
        for pattern in patterns:
            expanded = util.expand_filename_pattern(
                str(pattern), os.path.dirname(task_def_file))
            if not expanded:
                raise BenchExecException(
                    "Pattern '{}' in task-definition file {} did not match any paths."
                    .format(pattern, task_def_file))
            expanded.sort()
            result.extend(expanded)
        return result

    input_files = expand_patterns_from_tag("input_files")
    if not input_files:
        raise BenchExecException(
            "Task-definition file {} does not define any input files.".format(task_def_file))
    required_files = expand_patterns_from_tag("required_files")

    run = Run(
        task_def_file,
        input_files,
        options,
        self,
        propertyfile,
        required_files_pattern,
        required_files)

    # run.propertyfile of Run is fully determined only after Run is created,
    # thus we handle it and the expected results here.
    if not run.propertyfile:
        return run

    # TODO: support "property_name" attribute in yaml
    prop = result.Property.create(run.propertyfile, allow_unknown=True)
    run.properties = [prop]

    for prop_dict in task_def.get("properties", []):
        if not isinstance(prop_dict, dict) or "property_file" not in prop_dict:
            raise BenchExecException(
                "Missing property file for property in task-definition file {}."
                .format(task_def_file))

        expanded = util.expand_filename_pattern(
            prop_dict["property_file"], os.path.dirname(task_def_file))
        if len(expanded) != 1:
            raise BenchExecException(
                "Property pattern '{}' in task-definition file {} does not refer to exactly one file."
                .format(prop_dict["property_file"], task_def_file))

        # TODO We could reduce I/O by checking absolute paths and using os.path.samestat
        # with cached stat calls.
        if prop.filename == expanded[0] or os.path.samefile(prop.filename, expanded[0]):
            expected_result = prop_dict.get("expected_verdict")
            if expected_result is not None and not isinstance(expected_result, bool):
                raise BenchExecException(
                    "Invalid expected result '{}' for property {} in task-definition file {}."
                    .format(expected_result, prop_dict["property_file"], task_def_file))
            run.expected_results[prop.filename] = \
                result.ExpectedResult(expected_result, prop_dict.get("subproperty"))

    if not run.expected_results:
        logging.debug(
            "Ignoring run '%s' because it does not have the property from %s.",
            run.identifier, run.propertyfile)
        return None
    elif len(run.expected_results) > 1:
        raise BenchExecException(
            "Property '{}' specified multiple times in task-definition file {}."
            .format(prop.filename, task_def_file))
    else:
        return run
python
{ "resource": "" }
q18417
RunSet.expand_filename_pattern
train
def expand_filename_pattern(self, pattern, base_dir, sourcefile=None):
    """
    The function expand_filename_pattern expands a filename pattern to a sorted list
    of filenames. The pattern can contain variables and wildcards.
    If base_dir is given and pattern is not absolute, base_dir and pattern are joined.
    """
    # replace vars like ${benchmark_path};
    # with converting to list and back, we can use the function 'substitute_vars()'
    expandedPattern = substitute_vars([pattern], self, sourcefile)
    assert len(expandedPattern) == 1
    expandedPattern = expandedPattern[0]

    if expandedPattern != pattern:
        logging.debug("Expanded variables in expression %r to %r.",
                      pattern, expandedPattern)

    fileList = util.expand_filename_pattern(expandedPattern, base_dir)

    # sort alphabetical
    fileList.sort()

    if not fileList:
        logging.warning("No files found matching %r.", pattern)

    return fileList
python
{ "resource": "" }
q18418
Run._analyze_result
train
def _analyze_result(self, exitcode, output, isTimeout, termination_reason):
    """Return status according to result and output of tool."""

    # Ask tool info.
    tool_status = None
    if exitcode is not None:
        logging.debug("My subprocess returned %s.", exitcode)
        tool_status = self.runSet.benchmark.tool.determine_result(
            exitcode.value or 0, exitcode.signal or 0, output, isTimeout)

        if tool_status in result.RESULT_LIST_OTHER:
            # for unspecific results provide some more information if possible
            if exitcode.signal == 6:
                tool_status = 'ABORTED'
            elif exitcode.signal == 11:
                tool_status = 'SEGMENTATION FAULT'
            elif exitcode.signal == 15:
                tool_status = 'KILLED'
            elif exitcode.signal:
                tool_status = 'KILLED BY SIGNAL ' + str(exitcode.signal)
            elif exitcode.value:
                tool_status = '{} ({})'.format(result.RESULT_ERROR, exitcode.value)

    # Tools sometimes produce a result even after violating a resource limit.
    # This should not be counted, so we overwrite the result with TIMEOUT/OOM
    # here, if this is the case.
    # However, we don't want to forget more specific results like SEGFAULT,
    # so we do this only if the result is a "normal" one like TRUE/FALSE
    # or an unspecific one like UNKNOWN/ERROR.
    status = None
    if isTimeout:
        status = "TIMEOUT"
    elif termination_reason:
        status = _ERROR_RESULTS_FOR_TERMINATION_REASON.get(termination_reason, termination_reason)

    if not status:
        # regular termination
        status = tool_status
    elif tool_status and tool_status not in (
            result.RESULT_LIST_OTHER + [status, 'KILLED', 'KILLED BY SIGNAL 9']):
        # timeout/OOM but tool still returned some result
        status = '{} ({})'.format(status, tool_status)

    return status
python
{ "resource": "" }
q18419
Run._is_timeout
train
def _is_timeout(self):
    ''' try to find out whether the tool terminated because of a timeout '''
    if self.cputime is None:
        is_cpulimit = False
    else:
        rlimits = self.runSet.benchmark.rlimits
        if SOFTTIMELIMIT in rlimits:
            limit = rlimits[SOFTTIMELIMIT]
        elif TIMELIMIT in rlimits:
            limit = rlimits[TIMELIMIT]
        else:
            limit = float('inf')
        is_cpulimit = self.cputime > limit

    if self.walltime is None:
        is_walllimit = False
    else:
        rlimits = self.runSet.benchmark.rlimits
        if WALLTIMELIMIT in rlimits:
            limit = rlimits[WALLTIMELIMIT]
        else:
            limit = float('inf')
        is_walllimit = self.walltime > limit

    return is_cpulimit or is_walllimit
python
{ "resource": "" }
q18420
expected_results_of_file
train
def expected_results_of_file(filename):
    """Create a dict of property->ExpectedResult from information encoded in a filename."""
    results = {}
    for (filename_part, (expected_result, for_properties)) in _FILE_RESULTS.items():
        if filename_part in filename:
            expected_result_class = get_result_classification(expected_result)
            assert expected_result_class in {RESULT_CLASS_TRUE, RESULT_CLASS_FALSE}
            expected_result = (expected_result_class == RESULT_CLASS_TRUE)
            subproperty = None
            if len(for_properties) > 1:
                assert for_properties == _MEMSAFETY_SUBPROPERTIES and expected_result
                property = _PROP_MEMSAFETY
            else:
                property = next(iter(for_properties))
                if property in _MEMSAFETY_SUBPROPERTIES and not expected_result:
                    subproperty = property
                    property = _PROP_MEMSAFETY
            if property in results:
                raise BenchExecException(
                    "Duplicate property {} in filename {}".format(property, filename))
            results[property] = ExpectedResult(expected_result, subproperty)
    return results
python
{ "resource": "" }
q18421
_svcomp_score
train
def _svcomp_score(category, result):
    """
    Return the achieved score of a task according to the SV-COMP scoring scheme.
    @param category: result category as determined by get_result_category
    @param result: the result given by the tool
    """
    assert result is not None
    result_class = get_result_classification(result)

    if category == CATEGORY_CORRECT_UNCONFIRMED:
        if result_class == RESULT_CLASS_TRUE:
            return _SCORE_CORRECT_UNCONFIRMED_TRUE
        elif result_class == RESULT_CLASS_FALSE:
            return _SCORE_CORRECT_UNCONFIRMED_FALSE
        else:
            assert False

    elif category == CATEGORY_CORRECT:
        if result_class == RESULT_CLASS_TRUE:
            return _SCORE_CORRECT_TRUE
        elif result_class == RESULT_CLASS_FALSE:
            return _SCORE_CORRECT_FALSE
        else:
            assert False, result

    elif category == CATEGORY_WRONG:
        if result_class == RESULT_CLASS_TRUE:
            return _SCORE_WRONG_TRUE
        elif result_class == RESULT_CLASS_FALSE:
            return _SCORE_WRONG_FALSE
        else:
            assert False

    else:
        return _SCORE_UNKNOWN
python
{ "resource": "" }
q18422
score_for_task
train
def score_for_task(properties, category, result):
    """
    Return the possible score of task, depending on whether the result is correct or not.
    """
    assert result is not None
    if properties and Property.create_from_names(properties).is_svcomp:
        return _svcomp_score(category, result)
    return None
python
{ "resource": "" }
q18423
get_result_category
train
def get_result_category(expected_results, result, properties):
    '''
    This function determines the relation between actual result and expected result
    for the given file and properties.
    @param expected_results: The expected results of the task
        (a dict of property name to ExpectedResult).
    @param result: The result given by the tool
        (needs to be one of the RESULT_* strings to be recognized).
    @param properties: The list of property names to check.
    @return One of the CATEGORY_* strings.
    '''
    result_class = get_result_classification(result)
    if result_class == RESULT_CLASS_OTHER:
        if result == RESULT_UNKNOWN:
            return CATEGORY_UNKNOWN
        elif result == RESULT_DONE:
            return CATEGORY_MISSING
        else:
            return CATEGORY_ERROR

    if not properties:
        # Without property we cannot return correct or wrong results.
        return CATEGORY_MISSING

    # For now, we have at most one property
    assert len(properties) == 1, properties
    prop = properties[0]

    expected_result = expected_results.get(prop.filename)
    if not expected_result or expected_result.result is None:
        # expected result of task is unknown
        return CATEGORY_MISSING

    if prop.is_well_known:
        # for well-known properties, only support hard-coded results
        is_valid_result = result in _VALID_RESULTS_PER_PROPERTY[prop.name]
    elif expected_result.subproperty:
        is_valid_result = result in {
            RESULT_TRUE_PROP, RESULT_FALSE_PROP + "(" + expected_result.subproperty + ")"}
    else:
        is_valid_result = (result == RESULT_TRUE_PROP) or result.startswith(RESULT_FALSE_PROP)

    if not is_valid_result:
        return CATEGORY_UNKNOWN  # result does not match property

    if expected_result.result:
        return CATEGORY_CORRECT if result_class == RESULT_CLASS_TRUE else CATEGORY_WRONG
    else:
        if expected_result.subproperty:
            return (CATEGORY_CORRECT
                    if result == RESULT_FALSE_PROP + "(" + expected_result.subproperty + ")"
                    else CATEGORY_WRONG)
        else:
            return CATEGORY_CORRECT if result_class == RESULT_CLASS_FALSE else CATEGORY_WRONG
python
{ "resource": "" }
q18424
Property.create
train
def create(cls, propertyfile, allow_unknown):
    """
    Create a Property instance by attempting to parse the given property file.
    @param propertyfile: A file name of a property file
    @param allow_unknown: Whether to accept unknown properties
    """
    with open(propertyfile) as f:
        content = f.read().strip()

    # parse content for known properties
    is_svcomp = False
    known_properties = []
    only_known_svcomp_property = True
    if content == 'OBSERVER AUTOMATON' or content == 'SATISFIABLE':
        known_properties = [_PROPERTY_NAMES[content]]

    elif content.startswith('CHECK'):
        is_svcomp = True
        for line in filter(None, content.splitlines()):
            if line.startswith('CHECK'):
                # SV-COMP property, either a well-known one or a new one
                props_in_line = [
                    prop for (substring, prop) in _PROPERTY_NAMES.items()
                    if substring in line]
                if len(props_in_line) == 1:
                    known_properties.append(props_in_line[0])
                else:
                    only_known_svcomp_property = False
            else:
                # not actually an SV-COMP property file
                is_svcomp = False
                known_properties = []
                break

    # check if some known property content was found
    subproperties = None
    if only_known_svcomp_property and len(known_properties) == 1:
        is_well_known = True
        name = known_properties[0]

    elif only_known_svcomp_property and set(known_properties) == _MEMSAFETY_SUBPROPERTIES:
        is_well_known = True
        name = _PROP_MEMSAFETY
        subproperties = list(known_properties)

    else:
        if not allow_unknown:
            raise BenchExecException(
                'File "{0}" does not contain a known property.'.format(propertyfile))
        is_well_known = False
        name = os.path.splitext(os.path.basename(propertyfile))[0]

    return cls(propertyfile, is_well_known, is_svcomp, name, subproperties)
python
{ "resource": "" }
q18425
Property.create_from_names
train
def create_from_names(cls, property_names):
    """
    Create a Property instance from a list of well-known property names
    @param property_names: a non-empty list of property names
    """
    assert property_names
    if len(property_names) == 1:
        name = property_names[0]
        subproperties = None
    else:
        name = (_PROP_MEMSAFETY
                if set(property_names) == _MEMSAFETY_SUBPROPERTIES
                else "unknown property")
        subproperties = list(property_names)

    is_well_known = name in _VALID_RESULTS_PER_PROPERTY.keys()
    is_svcomp = is_well_known and (_PROP_SAT not in property_names)
    return cls(None, is_well_known, is_svcomp, name, subproperties)
python
{ "resource": "" }
q18426
get_file_list
train
def get_file_list(shortFile):
    """
    The function get_file_list expands a short filename to a sorted list
    of filenames. The short filename can contain variables and wildcards.
    """
    if "://" in shortFile:  # seems to be a URL
        return [shortFile]

    # expand tilde and variables
    expandedFile = os.path.expandvars(os.path.expanduser(shortFile))

    # expand wildcards
    fileList = glob.glob(expandedFile)

    # sort alphabetically;
    # if the list is empty, warn instead of sorting
    if len(fileList) != 0:
        fileList.sort()
    else:
        logging.warning("No file matches '%s'.", shortFile)

    return fileList
python
{ "resource": "" }
q18427
open_url_seekable
train
def open_url_seekable(path_url, mode='rt'):
    """Open a URL and ensure that the result is seekable,
    copying it into a buffer if necessary."""
    logging.debug("Making request to '%s'", path_url)
    response = urllib.request.urlopen(path_url)
    logging.debug("Got response %s", response.info())

    try:
        response.seek(0)
    except (IOError, AttributeError):
        # Copy into buffer to allow seeking.
        response = io.BytesIO(response.read())
    if "b" in mode:
        return response
    else:
        return io.TextIOWrapper(response)
python
{ "resource": "" }
q18428
format_options
train
def format_options(options):
    '''Helper function for formatting the content of the options line'''
    # split on one of the following tokens: ' -' or '[[' or ']]'
    lines = ['']
    for token in re.split(r'( -|\[\[|\]\])', options):
        if token in ['[[', ']]']:
            lines.append(token)
            lines.append('')
        elif token == ' -':
            lines.append(token)
        else:
            lines[-1] += token
    # join all non-empty lines and wrap them into 'span'-tags
    return '<span style="display:block">' + \
        '</span><span style="display:block">'.join(line for line in lines if line.strip()) + \
        '</span>'
python
{ "resource": "" }
q18429
prettylist
train
def prettylist(list_):
    """
    Filter out duplicate values while keeping order.
    """
    if not list_:
        return ''

    values = set()
    uniqueList = []

    for entry in list_:
        if entry not in values:
            values.add(entry)
            uniqueList.append(entry)

    return uniqueList[0] if len(uniqueList) == 1 \
        else '[' + '; '.join(uniqueList) + ']'
python
{ "resource": "" }
q18430
get_column_type
train
def get_column_type(column, column_values):
    """
    Returns the type of the given column based on its row values on the given RunSetResult.
    @param column: the column to return the correct ColumnType for
    @param column_values: the column values to consider
    @return: a tuple of a type object describing the column - the concrete ColumnType is
        stored in the attribute 'type' -, the display unit of the column, which may be None,
        the source unit of the column, which may be None, and the scale factor to convert
        from the source unit to the display unit.
        If no scaling is necessary for conversion, this value is 1.
    """
    try:
        return _get_column_type_heur(column, column_values)
    except util.TableDefinitionError as e:
        logging.error("Column type couldn't be determined: {}".format(e.message))
        return ColumnType.text, None, None, 1
python
{ "resource": "" }
q18431
_get_decimal_digits
train
def _get_decimal_digits(decimal_number_match, number_of_significant_digits):
    """
    Returns the amount of decimal digits of the given regex match, considering the number
    of significant digits for the provided column.

    @param decimal_number_match: a regex match of a decimal number, resulting from
        REGEX_MEASURE.match(x).
    @param number_of_significant_digits: the number of significant digits required
    @return: the number of decimal digits of the given decimal number match's representation,
        after expanding the number to the required amount of significant digits
    """
    assert 'e' not in decimal_number_match.group()  # check that only decimal notation is used

    try:
        num_of_digits = int(number_of_significant_digits)
    except TypeError:
        num_of_digits = DEFAULT_NUMBER_OF_SIGNIFICANT_DIGITS

    if not decimal_number_match.group(GROUP_DEC_PART):
        return 0

    # If 1 > value > 0, only look at the decimal digits.
    # In the second condition, we have to remove the first character from the decimal part
    # group because the first character always is '.'
    if int(decimal_number_match.group(GROUP_INT_PART)) == 0 \
            and int(decimal_number_match.group(GROUP_DEC_PART)[1:]) != 0:
        max_num_of_digits = len(decimal_number_match.group(GROUP_SIG_DEC_PART))
        num_of_digits = min(num_of_digits, max_num_of_digits)
        # number of needed decimal digits = number of zeroes after decimal point + significant digits
        curr_dec_digits = len(decimal_number_match.group(GROUP_ZEROES)) + int(num_of_digits)
    else:
        max_num_of_digits = \
            len(decimal_number_match.group(GROUP_INT_PART)) + \
            len(decimal_number_match.group(GROUP_DEC_PART))
        num_of_digits = min(num_of_digits, max_num_of_digits)
        # number of needed decimal digits = significant digits - number of digits in front of decimal point
        curr_dec_digits = int(num_of_digits) - len(decimal_number_match.group(GROUP_INT_PART))

    return curr_dec_digits
python
{ "resource": "" }
q18432
_find_cgroup_mounts
train
def _find_cgroup_mounts():
    """
    Return the information which subsystems are mounted where.
    @return a generator of tuples (subsystem, mountpoint)
    """
    try:
        with open('/proc/mounts', 'rt') as mountsFile:
            for mount in mountsFile:
                mount = mount.split(' ')
                if mount[2] == 'cgroup':
                    mountpoint = mount[1]
                    options = mount[3]
                    for option in options.split(','):
                        if option in ALL_KNOWN_SUBSYSTEMS:
                            yield (option, mountpoint)
    except IOError:
        logging.exception('Cannot read /proc/mounts')
python
{ "resource": "" }
q18433
_register_process_with_cgrulesengd
train
def _register_process_with_cgrulesengd(pid):
    """Tell cgrulesengd daemon to not move the given process into other cgroups,
    if libcgroup is available.
    """
    # Logging/printing from inside preexec_fn would end up in the output file,
    # not in the correct logger, thus it is disabled here.
    from ctypes import cdll
    try:
        libcgroup = cdll.LoadLibrary('libcgroup.so.1')
        failure = libcgroup.cgroup_init()
        if failure:
            pass
            #print('Could not initialize libcgroup, error {}'.format(failure))
        else:
            CGROUP_DAEMON_UNCHANGE_CHILDREN = 0x1
            failure = libcgroup.cgroup_register_unchanged_process(pid, CGROUP_DAEMON_UNCHANGE_CHILDREN)
            if failure:
                pass
                #print('Could not register process to cgrulesngd, error {}. '
                #      'Probably the daemon will mess up our cgroups.'.format(failure))
    except OSError:
        pass
python
{ "resource": "" }
q18434
Cgroup.create_fresh_child_cgroup
train
def create_fresh_child_cgroup(self, *subsystems):
    """
    Create child cgroups of the current cgroup for at least the given subsystems.
    @return: A Cgroup instance representing the new child cgroup(s).
    """
    assert set(subsystems).issubset(self.per_subsystem.keys())
    createdCgroupsPerSubsystem = {}
    createdCgroupsPerParent = {}
    for subsystem in subsystems:
        parentCgroup = self.per_subsystem[subsystem]
        if parentCgroup in createdCgroupsPerParent:
            # reuse already created cgroup
            createdCgroupsPerSubsystem[subsystem] = createdCgroupsPerParent[parentCgroup]
            continue

        cgroup = tempfile.mkdtemp(prefix=CGROUP_NAME_PREFIX, dir=parentCgroup)
        createdCgroupsPerSubsystem[subsystem] = cgroup
        createdCgroupsPerParent[parentCgroup] = cgroup

        # add allowed cpus and memory to cgroup if necessary
        # (otherwise we can't add any tasks)
        def copy_parent_to_child(name):
            shutil.copyfile(os.path.join(parentCgroup, name), os.path.join(cgroup, name))
        try:
            copy_parent_to_child('cpuset.cpus')
            copy_parent_to_child('cpuset.mems')
        except IOError:
            # expected to fail if cpuset subsystem is not enabled in this hierarchy
            pass

    return Cgroup(createdCgroupsPerSubsystem)
python
{ "resource": "" }
q18435
Cgroup.add_task
train
def add_task(self, pid):
    """
    Add a process to the cgroups represented by this instance.
    """
    _register_process_with_cgrulesengd(pid)
    for cgroup in self.paths:
        with open(os.path.join(cgroup, 'tasks'), 'w') as tasksFile:
            tasksFile.write(str(pid))
python
{ "resource": "" }
q18436
Cgroup.get_all_tasks
train
def get_all_tasks(self, subsystem):
    """
    Return a generator of all PIDs currently in this cgroup for the given subsystem.
    """
    with open(os.path.join(self.per_subsystem[subsystem], 'tasks'), 'r') as tasksFile:
        for line in tasksFile:
            yield int(line)
python
{ "resource": "" }
q18437
Cgroup.kill_all_tasks_recursively
train
def kill_all_tasks_recursively(self, kill_process_fn):
    """
    Kill all tasks in this cgroup and all its children cgroups forcefully.
    Additionally, the children cgroups will be deleted.
    """
    def kill_all_tasks_in_cgroup_recursively(cgroup):
        files = [os.path.join(cgroup, f) for f in os.listdir(cgroup)]
        subdirs = filter(os.path.isdir, files)

        for subCgroup in subdirs:
            kill_all_tasks_in_cgroup_recursively(subCgroup)
            remove_cgroup(subCgroup)

        kill_all_tasks_in_cgroup(cgroup, kill_process_fn)

    for cgroup in self.paths:
        kill_all_tasks_in_cgroup_recursively(cgroup)
python
{ "resource": "" }
q18438
Cgroup.has_value
train
def has_value(self, subsystem, option):
    """
    Check whether the given value exists in the given subsystem.
    Does not make a difference whether the value is readable, writable, or both.
    Do not include the subsystem name in the option name.
    Only call this method if the given subsystem is available.
    """
    assert subsystem in self
    return os.path.isfile(os.path.join(self.per_subsystem[subsystem], subsystem + '.' + option))
python
{ "resource": "" }
q18439
Cgroup.get_value
train
def get_value(self, subsystem, option):
    """
    Read the given value from the given subsystem.
    Do not include the subsystem name in the option name.
    Only call this method if the given subsystem is available.
    """
    assert subsystem in self, 'Subsystem {} is missing'.format(subsystem)
    return util.read_file(self.per_subsystem[subsystem], subsystem + '.' + option)
python
{ "resource": "" }
q18440
Cgroup.get_file_lines
train
def get_file_lines(self, subsystem, option):
    """
    Read the lines of the given file from the given subsystem.
    Do not include the subsystem name in the option name.
    Only call this method if the given subsystem is available.
    """
    assert subsystem in self
    with open(os.path.join(self.per_subsystem[subsystem], subsystem + '.' + option)) as f:
        for line in f:
            yield line
python
{ "resource": "" }
q18441
Cgroup.get_key_value_pairs
train
def get_key_value_pairs(self, subsystem, filename):
    """
    Read the lines of the given file from the given subsystem
    and split the lines into key-value pairs.
    Do not include the subsystem name in the file name.
    Only call this method if the given subsystem is available.
    """
    assert subsystem in self
    return util.read_key_value_pairs_from_file(self.per_subsystem[subsystem], subsystem + '.' + filename)
python
{ "resource": "" }
q18442
Cgroup.set_value
train
def set_value(self, subsystem, option, value):
    """
    Write the given value for the given subsystem.
    Do not include the subsystem name in the option name.
    Only call this method if the given subsystem is available.
    """
    assert subsystem in self
    util.write_file(str(value), self.per_subsystem[subsystem], subsystem + '.' + option)
python
{ "resource": "" }
q18443
Cgroup.remove
train
def remove(self):
    """
    Remove all cgroups this instance represents from the system.
    This instance is afterwards not usable anymore!
    """
    for cgroup in self.paths:
        remove_cgroup(cgroup)

    del self.paths
    del self.per_subsystem
python
{ "resource": "" }
q18444
parse_time_arg
train
def parse_time_arg(s):
    """
    Parse a time stamp in the "year-month-day hour-minute" format.
    """
    try:
        return time.strptime(s, "%Y-%m-%d %H:%M")
    except ValueError as e:
        raise argparse.ArgumentTypeError(e)
python
{ "resource": "" }
q18445
BenchExec.start
train
def start(self, argv):
    """
    Start BenchExec.
    @param argv: command-line options for BenchExec
    """
    parser = self.create_argument_parser()
    self.config = parser.parse_args(argv[1:])

    for arg in self.config.files:
        if not os.path.exists(arg) or not os.path.isfile(arg):
            parser.error("File {0} does not exist.".format(repr(arg)))

    if os.path.isdir(self.config.output_path):
        self.config.output_path = os.path.normpath(self.config.output_path) + os.sep

    self.setup_logging()

    self.executor = self.load_executor()

    returnCode = 0
    for arg in self.config.files:
        if self.stopped_by_interrupt:
            break
        logging.debug("Benchmark %r is started.", arg)
        rc = self.execute_benchmark(arg)
        returnCode = returnCode or rc
        logging.debug("Benchmark %r is done.", arg)

    logging.debug("I think my job is done. Have a nice day!")
    return returnCode
python
{ "resource": "" }
q18446
BenchExec.execute_benchmark
train
def execute_benchmark(self, benchmark_file):
    """
    Execute a single benchmark as defined in a file.
    If called directly, ensure that config and executor attributes are set up.
    @param benchmark_file: the name of a benchmark-definition XML file
    @return: a result value from the executor module
    """
    benchmark = Benchmark(benchmark_file, self.config,
                          self.config.start_time or time.localtime())
    self.check_existing_results(benchmark)

    self.executor.init(self.config, benchmark)
    output_handler = OutputHandler(benchmark, self.executor.get_system_info(),
                                   self.config.compress_results)

    logging.debug("I'm benchmarking %r consisting of %s run sets.",
                  benchmark_file, len(benchmark.run_sets))

    try:
        result = self.executor.execute_benchmark(benchmark, output_handler)
    finally:
        output_handler.close()
        # remove useless log folder if it is empty
        try:
            os.rmdir(benchmark.log_folder)
        except OSError:
            pass

    if self.config.commit and not self.stopped_by_interrupt:
        try:
            util.add_files_to_git_repository(
                self.config.output_path, output_handler.all_created_files,
                self.config.commit_message + '\n\n' +
                output_handler.description + '\n\n' +
                str(output_handler.statistics))
        except OSError as e:
            logging.warning('Could not add files to git repository: %s', e)
    return result
python
{ "resource": "" }
q18447
BenchExec.check_existing_results
train
def check_existing_results(self, benchmark):
    """
    Check and abort if the target directory for the benchmark results
    already exists in order to avoid overwriting results.
    """
    if os.path.exists(benchmark.log_folder):
        sys.exit('Output directory {0} already exists, will not overwrite existing results.'
                 .format(benchmark.log_folder))
    if os.path.exists(benchmark.log_zip):
        sys.exit('Output archive {0} already exists, will not overwrite existing results.'
                 .format(benchmark.log_zip))
python
{ "resource": "" }
q18448
BenchExec.stop
train
def stop(self):
    """
    Stop the execution of a benchmark.
    This instance cannot be used anymore afterwards.
    Timely termination is not guaranteed, and this method may return before
    everything is terminated.
    """
    self.stopped_by_interrupt = True

    if self.executor:
        self.executor.stop()
python
{ "resource": "" }
q18449
allocate_stack
train
@contextlib.contextmanager  # the yield and the 'with allocate_stack()' usage below imply a context manager
def allocate_stack(size=DEFAULT_STACK_SIZE):
    """Allocate some memory that can be used as a stack.
    @return: a ctypes void pointer to the *top* of the stack.
    """
    # Allocate memory with appropriate flags for a stack as in https://blog.fefe.de/?ts=a85c8ba7
    base = libc.mmap(None, size + GUARD_PAGE_SIZE,
                     libc.PROT_READ | libc.PROT_WRITE,
                     libc.MAP_PRIVATE | libc.MAP_ANONYMOUS | libc.MAP_GROWSDOWN | libc.MAP_STACK,
                     -1, 0)

    try:
        # create a guard page that crashes the application when it is written to
        # (on stack overflow)
        libc.mprotect(base, GUARD_PAGE_SIZE, libc.PROT_NONE)

        yield ctypes.c_void_p(base + size + GUARD_PAGE_SIZE)
    finally:
        libc.munmap(base, size + GUARD_PAGE_SIZE)
python
{ "resource": "" }
q18450
execute_in_namespace
train
def execute_in_namespace(func, use_network_ns=True):
    """Execute a function in a child process in separate namespaces.
    @param func: a parameter-less function returning an int
        (which will be the process' exit value)
    @return: the PID of the created child process
    """
    flags = (signal.SIGCHLD |
             libc.CLONE_NEWNS |
             libc.CLONE_NEWUTS |
             libc.CLONE_NEWIPC |
             libc.CLONE_NEWUSER |
             libc.CLONE_NEWPID)
    if use_network_ns:
        flags |= libc.CLONE_NEWNET

    # We use the syscall clone() here, which is similar to fork().
    # Calling it without letting Python know about it is dangerous (especially because
    # we want to execute Python code in the child, too), but so far it seems to work.
    # Basically we attempt to do (almost) the same that os.fork() does (cf. function os_fork_impl
    # in https://github.com/python/cpython/blob/master/Modules/posixmodule.c).
    # We currently do not take the import lock as os.fork() does because it is only available
    # via an internal API, and because the child should never import anything anyway
    # (inside the container, modules might not be visible).
    # It is very important, however, that we have the GIL during clone(),
    # otherwise the child will often deadlock when trying to execute Python code.
    # Luckily, the ctypes module allows us to hold the GIL while executing the
    # function by using ctypes.PyDLL as library access instead of ctypes.CDLL.

    def child_func():
        # This is necessary for correcting the Python interpreter state after a
        # fork-like operation. For example, it resets the GIL and fixes state of
        # several modules like threading and signal.
        ctypes.pythonapi.PyOS_AfterFork()
        return func()

    with allocate_stack() as stack:
        pid = libc.clone(ctypes.CFUNCTYPE(ctypes.c_int)(child_func), stack, flags, None)
    return pid
python
{ "resource": "" }
q18451
activate_network_interface
train
def activate_network_interface(iface):
    """Bring up the given network interface.
    @raise OSError: if interface does not exist or permissions are missing
    """
    iface = iface.encode()

    SIOCGIFFLAGS = 0x8913  # /usr/include/bits/ioctls.h
    SIOCSIFFLAGS = 0x8914  # /usr/include/bits/ioctls.h
    IFF_UP = 0x1  # /usr/include/net/if.h

    # We need to use instances of "struct ifreq" for communicating with the kernel.
    # This struct is complex with a big contained union, we define here only the few necessary
    # fields for the two cases we need.
    # The layout is given in the format used by the struct module:
    STRUCT_IFREQ_LAYOUT_IFADDR_SAFAMILY = b"16sH14s"  # ifr_name, ifr_addr.sa_family, padding
    STRUCT_IFREQ_LAYOUT_IFFLAGS = b"16sH14s"  # ifr_name, ifr_flags, padding

    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_IP)
    try:
        # Get current interface flags from kernel
        ifreq = struct.pack(STRUCT_IFREQ_LAYOUT_IFADDR_SAFAMILY, iface, socket.AF_INET, b'0' * 14)
        ifreq = fcntl.ioctl(sock, SIOCGIFFLAGS, ifreq)
        if_flags = struct.unpack(STRUCT_IFREQ_LAYOUT_IFFLAGS, ifreq)[1]

        # Set new flags
        ifreq = struct.pack(STRUCT_IFREQ_LAYOUT_IFFLAGS, iface, if_flags | IFF_UP, b'0' * 14)
        fcntl.ioctl(sock, SIOCSIFFLAGS, ifreq)
    finally:
        sock.close()
python
{ "resource": "" }
q18452
get_mount_points
train
def get_mount_points():
    """Get all current mount points of the system.
    Changes to the mount points during iteration may be reflected in the result.
    @return a generator of (source, target, fstype, options),
    where options is a list of bytes instances, and the others are bytes instances
    (this avoids encoding problems with mount points with problematic characters).
    """
    def decode_path(path):
        # Replace tab, space, newline, and backslash escapes with actual characters.
        # According to man 5 fstab, only tab and space escaped, but Linux escapes more:
        # https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/fs/proc_namespace.c?id=12a54b150fb5b6c2f3da932dc0e665355f8a5a48#n85
        return (path
                .replace(br"\011", b"\011")
                .replace(br"\040", b"\040")
                .replace(br"\012", b"\012")
                .replace(br"\134", b"\134"))

    with open("/proc/self/mounts", "rb") as mounts:
        # The format of this file is the same as of /etc/fstab (cf. man 5 fstab)
        for mount in mounts:
            source, target, fstype, options, unused1, unused2 = mount.split(b" ")
            options = set(options.split(b","))
            yield (decode_path(source), decode_path(target), fstype, options)
python
{ "resource": "" }
q18453
remount_with_additional_flags
train
def remount_with_additional_flags(mountpoint, existing_options, mountflags):
    """Remount an existing mount point with additional flags.
    @param mountpoint: the mount point as bytes
    @param existing_options: dict with current mount options as bytes
    @param mountflags: int with additional mount flags (cf. libc.MS_* constants)
    """
    mountflags |= libc.MS_REMOUNT | libc.MS_BIND
    for option, flag in libc.MOUNT_FLAGS.items():
        if option in existing_options:
            mountflags |= flag

    libc.mount(None, mountpoint, None, mountflags, None)
python
{ "resource": "" }
q18454
make_bind_mount
train
def make_bind_mount(source, target, recursive=False, private=False, read_only=False):
    """Make a bind mount.
    @param source: the source directory as bytes
    @param target: the target directory as bytes
    @param recursive: whether to also recursively bind mount all mounts below source
    @param private: whether to mark the bind as private, i.e., changes to the existing
        mounts won't propagate and vice-versa
        (changes to files/dirs will still be visible).
    @param read_only: whether to make the bind mount read-only
    """
    flags = libc.MS_BIND
    if recursive:
        flags |= libc.MS_REC
    if private:
        flags |= libc.MS_PRIVATE
    if read_only:
        flags |= libc.MS_RDONLY
    libc.mount(source, target, None, flags, None)
python
{ "resource": "" }
q18455
drop_capabilities
train
def drop_capabilities(keep=[]):
    """
    Drop all capabilities this process has.
    @param keep: list of capabilities to not drop
    """
    capdata = (libc.CapData * 2)()
    for cap in keep:
        capdata[0].effective |= (1 << cap)
        capdata[0].permitted |= (1 << cap)
    libc.capset(
        ctypes.byref(libc.CapHeader(version=libc.LINUX_CAPABILITY_VERSION_3, pid=0)),
        ctypes.byref(capdata))
python
{ "resource": "" }
q18456
forward_all_signals_async
train
def forward_all_signals_async(target_pid, process_name):
    """Install a signal handler that forwards all signals to the given process."""
    def forwarding_signal_handler(signum):
        _forward_signal(signum, process_name, forwarding_signal_handler.target_pid)

    # Somehow we get a Python SystemError sometimes
    # if we access target_pid directly from inside the function.
    forwarding_signal_handler.target_pid = target_pid

    for signum in _FORWARDABLE_SIGNALS:
        # Need to directly access libc function,
        # the state of the signal module is incorrect due to the clone()
        # (it may think we are in a different thread than the main thread).
        libc.signal(signum, forwarding_signal_handler)

    # Reactivate delivery of signals such that our handler gets called.
    reset_signal_handling()
python
{ "resource": "" }
q18457
wait_for_child_and_forward_all_signals
train
def wait_for_child_and_forward_all_signals(child_pid, process_name):
    """Wait for a child to terminate and in the meantime forward all signals
    the current process receives to this child.
    @return a tuple of exit code and resource usage of the child as given by os.waitpid
    """
    assert _HAS_SIGWAIT
    block_all_signals()

    while True:
        logging.debug("Waiting for signals")
        signum = signal.sigwait(_ALL_SIGNALS)
        if signum == signal.SIGCHLD:
            pid, exitcode, ru_child = os.wait4(-1, os.WNOHANG)
            while pid != 0:
                if pid == child_pid:
                    return exitcode, ru_child
                else:
                    logging.debug("Received unexpected SIGCHLD for PID %s", pid)
                pid, exitcode, ru_child = os.wait4(-1, os.WNOHANG)
        else:
            _forward_signal(signum, child_pid, process_name)
python
{ "resource": "" }
q18458
close_open_fds
train
def close_open_fds(keep_files=[]):
    """Close all open file descriptors except those in a given set.
    @param keep_files: an iterable of file descriptors or file-like objects.
    """
    keep_fds = set()
    for file in keep_files:
        if isinstance(file, int):
            keep_fds.add(file)
        else:
            try:
                keep_fds.add(file.fileno())
            except Exception:
                pass

    for fd in os.listdir("/proc/self/fd"):
        fd = int(fd)
        if fd not in keep_fds:
            try:
                os.close(fd)
            except OSError:
                # irrelevant and expected
                # (the fd that was used by os.listdir() of course always fails)
                pass
python
{ "resource": "" }
q18459
setup_container_system_config
train
def setup_container_system_config(basedir, mountdir=None):
    """Create a minimal system configuration for use in a container.
    @param basedir: The directory where the configuration files should be placed as bytes.
    @param mountdir: If present, bind mounts to the configuration files will be added
        below this path (given as bytes).
    """
    etc = os.path.join(basedir, b"etc")
    if not os.path.exists(etc):
        os.mkdir(etc)

    for file, content in CONTAINER_ETC_FILE_OVERRIDE.items():
        # Create "basedir/etc/file"
        util.write_file(content, etc, file)
        if mountdir:
            # Create bind mount to "mountdir/etc/file"
            make_bind_mount(
                os.path.join(etc, file),
                os.path.join(mountdir, b"etc", file),
                private=True)

    os.symlink(b"/proc/self/mounts", os.path.join(etc, b"mtab"))
python
{ "resource": "" }
q18460
Tool.cmdline
train
def cmdline(self, executable, options, tasks, propertyfile=None, rlimits={}):
    """
    Compose the command line to execute from the name of the executable,
    the user-specified options, and the inputfile to analyze.
    This method can get overridden, if, for example, some options should
    be enabled or if the order of arguments must be changed.

    All paths passed to this method (executable, tasks, and propertyfile)
    are either absolute or have been made relative to the designated working directory.

    @param executable: the path to the executable of the tool (typically the result of executable())
    @param options: a list of options, in the same order as given in the XML-file.
    @param tasks: a list of tasks, that should be analysed with the tool in one run.
        In most cases we have only _one_ inputfile.
    @param propertyfile: contains a specification for the verifier.
    @param rlimits: This dictionary contains resource-limits for a run,
        for example: time-limit, soft-time-limit, hard-time-limit, memory-limit,
        cpu-core-limit. All entries in rlimits are optional, so check for existence
        before usage!
    """
    directory = os.path.dirname(executable)

    # Ignore propertyfile since we run only reachability
    return [os.path.join('.', directory, self.BINS[1]), directory] + options + tasks
python
{ "resource": "" }
q18461
FileWriter.append
train
def append(self, newContent, keep=True):
    """
    Add content to the represented file.
    If keep is False, the new content will be forgotten during the next call
    to this method.
    """
    content = self.__content + newContent
    if keep:
        self.__content = content

    if self.__needsRewrite:
        # Replace the content of the file.
        # A temporary file is used to avoid loss of data through an interrupt.
        tmpFilename = self.filename + ".tmp"
        util.write_file(content, tmpFilename)
        os.rename(tmpFilename, self.filename)
    else:
        with open(self.filename, "a") as file:
            file.write(newContent)

    self.__needsRewrite = not keep
python
{ "resource": "" }
q18462
format_energy_results
train
def format_energy_results(energy):
    """Take the result of an energy measurement and return a flat dictionary
    that contains all values."""
    if not energy:
        return {}
    result = {}
    cpuenergy = Decimal(0)
    for pkg, domains in energy.items():
        for domain, value in domains.items():
            if domain == DOMAIN_PACKAGE:
                cpuenergy += value
                result['cpuenergy-pkg{}'.format(pkg)] = value
            else:
                result['cpuenergy-pkg{}-{}'.format(pkg, domain)] = value
    result['cpuenergy'] = cpuenergy
    result = collections.OrderedDict(sorted(result.items()))
    return result
python
{ "resource": "" }
q18463
EnergyMeasurement.start
train
def start(self):
    """Starts the external measurement program."""
    assert not self.is_running(), \
        'Attempted to start an energy measurement while one was already running.'
    self._measurement_process = subprocess.Popen(
        [self._executable, '-r'],
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
        bufsize=10000,
        preexec_fn=os.setpgrp,  # Prevent delivery of Ctrl+C to subprocess
    )
python
{ "resource": "" }
q18464
EnergyMeasurement.stop
train
def stop(self):
    """Stops the external measurement program and returns the measurement result,
    if the measurement was running."""
    consumed_energy = collections.defaultdict(dict)
    if not self.is_running():
        return None

    # cpu-energy-meter expects SIGINT to stop and report its result
    self._measurement_process.send_signal(signal.SIGINT)
    (out, err) = self._measurement_process.communicate()
    assert self._measurement_process.returncode is not None
    if self._measurement_process.returncode:
        logging.debug("Energy measurement terminated with return code %s",
                      self._measurement_process.returncode)
    self._measurement_process = None

    for line in err.splitlines():
        logging.debug("energy measurement stderr: %s", line)

    for line in out.splitlines():
        line = line.decode('ASCII')
        logging.debug("energy measurement output: %s", line)
        match = re.match(r'cpu(\d+)_([a-z]+)_joules=(\d+\.?\d*)', line)
        if not match:
            continue
        cpu, domain, energy = match.groups()
        cpu = int(cpu)
        energy = Decimal(energy)
        consumed_energy[cpu][domain] = energy

    return consumed_energy
python
{ "resource": "" }
q18465
AuthController.login
train
def login(self):
    """ Show login form.
    """
    if request.method != 'POST':
        cfg = NipapConfig()
        try:
            c.welcome_message = cfg.get('www', 'welcome_message')
        except NoOptionError:
            pass

        return render('login.html')

    # Verify username and password.
    try:
        auth_fact = AuthFactory()
        auth = auth_fact.get_auth(request.params.get('username'),
                                  request.params.get('password'), 'nipap')
        if not auth.authenticate():
            c.error = 'Invalid username or password'
            return render('login.html')
    except AuthError as exc:
        c.error = 'Authentication error'
        return render('login.html')

    # Mark user as logged in
    session['user'] = auth.username
    session['full_name'] = auth.full_name
    session['readonly'] = auth.readonly
    session['current_vrfs'] = {}
    session.save()

    # Send user back to the page he originally wanted to get to
    if session.get('path_before_login'):
        redirect(session['path_before_login'])
    else:
        # if previous target is unknown just send the user to a welcome page
        redirect(url(controller='prefix', action='list'))
python
{ "resource": "" }
q18466
XhrController.extract_prefix_attr
train
def extract_prefix_attr(cls, req):
    """ Extract prefix attributes from arbitrary dict.
    """
    # TODO: add more?
    attr = {}
    if 'id' in req:
        attr['id'] = int(req['id'])
    if 'prefix' in req:
        attr['prefix'] = req['prefix']
    if 'pool' in req:
        attr['pool_id'] = int(req['pool'])
    if 'node' in req:
        attr['node'] = req['node']
    if 'type' in req:
        attr['type'] = req['type']
    if 'country' in req:
        attr['country'] = req['country']
    if 'indent' in req:
        attr['indent'] = req['indent']

    return attr
python
{ "resource": "" }
q18467
XhrController.extract_pool_attr
train
def extract_pool_attr(cls, req):
    """ Extract pool attributes from arbitrary dict.
    """
    attr = {}
    if 'id' in req:
        attr['id'] = int(req['id'])
    if 'name' in req:
        attr['name'] = req['name']
    if 'description' in req:
        attr['description'] = req['description']
    if 'default_type' in req:
        attr['default_type'] = req['default_type']
    if 'ipv4_default_prefix_length' in req:
        attr['ipv4_default_prefix_length'] = int(req['ipv4_default_prefix_length'])
    if 'ipv6_default_prefix_length' in req:
        attr['ipv6_default_prefix_length'] = int(req['ipv6_default_prefix_length'])

    return attr
python
{ "resource": "" }
q18468
XhrController.list_vrf
train
def list_vrf(self):
    """ List VRFs and return JSON encoded result.
    """
    try:
        vrfs = VRF.list()
    except NipapError as e:
        return json.dumps({'error': 1, 'message': e.args,
            'type': type(e).__name__})

    return json.dumps(vrfs, cls=NipapJSONEncoder)
python
{ "resource": "" }
q18469
XhrController.add_vrf
train
def add_vrf(self):
    """ Add a new VRF to NIPAP and return its data.
    """
    v = VRF()
    if 'rt' in request.json:
        v.rt = validate_string(request.json, 'rt')
    if 'name' in request.json:
        v.name = validate_string(request.json, 'name')
    if 'description' in request.json:
        v.description = validate_string(request.json, 'description')
    if 'tags' in request.json:
        v.tags = request.json['tags']
    if 'avps' in request.json:
        v.avps = request.json['avps']

    try:
        v.save()
    except NipapError as e:
        return json.dumps({'error': 1, 'message': e.args,
            'type': type(e).__name__})

    return json.dumps(v, cls=NipapJSONEncoder)
python
{ "resource": "" }
q18470
XhrController.list_pool
train
def list_pool(self):
    """ List pools and return JSON encoded result.
    """
    # fetch attributes from request.json
    attr = XhrController.extract_pool_attr(request.json)

    try:
        pools = Pool.list(attr)
    except NipapError as e:
        return json.dumps({'error': 1, 'message': e.args,
            'type': type(e).__name__})

    return json.dumps(pools, cls=NipapJSONEncoder)
python
{ "resource": "" }
q18471
XhrController.list_prefix
train
def list_prefix(self):
    """ List prefixes and return JSON encoded result.
    """
    # fetch attributes from request.json
    attr = XhrController.extract_prefix_attr(request.json)

    try:
        prefixes = Prefix.list(attr)
    except NipapError as e:
        return json.dumps({'error': 1, 'message': e.args,
            'type': type(e).__name__})

    return json.dumps(prefixes, cls=NipapJSONEncoder)
python
{ "resource": "" }
q18472
XhrController.search_prefix
train
def search_prefix(self):
    """ Search prefixes. Does not yet incorporate all the functions of the
    search_prefix API function due to difficulties with transferring
    a complete 'dict-to-sql' encoded data structure.

    Instead, a list of prefix attributes can be given which will be
    matched with the 'equals' operator if nothing else is specified. If
    multiple attributes are given, they will be combined with the 'and'
    operator. Currently, it is not possible to specify different
    operators for different attributes.
    """
    # extract operator
    if 'operator' in request.json:
        operator = request.json['operator']
    else:
        operator = 'equals'

    # fetch attributes from request.json
    attr = XhrController.extract_prefix_attr(request.json)

    # build query dict
    n = 0
    q = {}
    for key, val in attr.items():
        if n == 0:
            q = {
                'operator': operator,
                'val1': key,
                'val2': val
            }
        else:
            q = {
                'operator': 'and',
                'val1': {
                    'operator': operator,
                    'val1': key,
                    'val2': val
                },
                'val2': q
            }
        n += 1

    # extract search options
    search_opts = {}
    if 'children_depth' in request.json:
        search_opts['children_depth'] = request.json['children_depth']
    if 'parents_depth' in request.json:
        search_opts['parents_depth'] = request.json['parents_depth']
    if 'include_neighbors' in request.json:
        search_opts['include_neighbors'] = request.json['include_neighbors']
    if 'max_result' in request.json:
        search_opts['max_result'] = request.json['max_result']
    if 'offset' in request.json:
        search_opts['offset'] = request.json['offset']

    try:
        result = Prefix.search(q, search_opts)
    except NipapError as e:
        return json.dumps({'error': 1, 'message': e.args,
            'type': type(e).__name__})

    return json.dumps(result, cls=NipapJSONEncoder)
python
{ "resource": "" }
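To make the query-building loop above concrete: attributes {'type': 'assignment', 'country': 'SE'} with the default 'equals' operator produce a nested dict along these lines (dict iteration order decides which attribute ends up innermost):

q = {
    'operator': 'and',
    'val1': {
        'operator': 'equals',
        'val1': 'country',
        'val2': 'SE'
    },
    'val2': {
        'operator': 'equals',
        'val1': 'type',
        'val2': 'assignment'
    }
}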
q18473
XhrController.smart_search_prefix
train
def smart_search_prefix(self):
    """ Perform a smart search.

    The smart search function tries to extract a query from a text
    string. This query is then passed to the search_prefix function,
    which performs the search.
    """
    search_options = {}
    extra_query = None
    vrf_filter = None

    if 'query_id' in request.json:
        search_options['query_id'] = request.json['query_id']

    if 'include_all_parents' in request.json:
        if request.json['include_all_parents'] == 'true':
            search_options['include_all_parents'] = True
        else:
            search_options['include_all_parents'] = False

    if 'include_all_children' in request.json:
        if request.json['include_all_children'] == 'true':
            search_options['include_all_children'] = True
        else:
            search_options['include_all_children'] = False

    if 'parents_depth' in request.json:
        search_options['parents_depth'] = request.json['parents_depth']
    if 'children_depth' in request.json:
        search_options['children_depth'] = request.json['children_depth']

    if 'include_neighbors' in request.json:
        if request.json['include_neighbors'] == 'true':
            search_options['include_neighbors'] = True
        else:
            search_options['include_neighbors'] = False

    if 'max_result' in request.json:
        if request.json['max_result'] == 'false':
            search_options['max_result'] = False
        else:
            search_options['max_result'] = request.json['max_result']

    if 'offset' in request.json:
        search_options['offset'] = request.json['offset']
    if 'parent_prefix' in request.json:
        search_options['parent_prefix'] = request.json['parent_prefix']

    if 'vrf_filter' in request.json:
        # Fetch VRF IDs from search query and build extra query dict for
        # smart_search_prefix.
        vrfs = request.json['vrf_filter']

        if len(vrfs) > 0:
            vrf = vrfs[0]
            vrf_filter = {
                'operator': 'equals',
                'val1': 'vrf_id',
                'val2': vrf if vrf != 'null' else None
            }

            for vrf in vrfs[1:]:
                vrf_filter = {
                    'operator': 'or',
                    'val1': vrf_filter,
                    'val2': {
                        'operator': 'equals',
                        'val1': 'vrf_id',
                        'val2': vrf if vrf != 'null' else None
                    }
                }

    if vrf_filter:
        extra_query = vrf_filter

    if 'indent' in request.json:
        if extra_query:
            extra_query = {
                'operator': 'and',
                'val1': extra_query,
                'val2': {
                    'operator': 'equals',
                    'val1': 'indent',
                    'val2': request.json['indent']
                }
            }
        else:
            extra_query = {
                'operator': 'equals',
                'val1': 'indent',
                'val2': request.json['indent']
            }

    try:
        result = Prefix.smart_search(request.json['query_string'],
            search_options, extra_query)
        # Remove error key in result from backend as it interferes with the
        # error handling of the web interface.
        # TODO: Reevaluate how to deal with different types of errors; soft
        # errors like query string parser errors and hard errors like lost
        # database.
        del result['error']
    except NipapError as e:
        return json.dumps({'error': 1, 'message': e.args,
            'type': type(e).__name__})

    return json.dumps(result, cls=NipapJSONEncoder)
python
{ "resource": "" }
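Similarly, a vrf_filter of ['123', 'null'] in the function above expands to the following extra_query, where the string 'null' is mapped to None so that prefixes in the default VRF match:

extra_query = {
    'operator': 'or',
    'val1': {
        'operator': 'equals',
        'val1': 'vrf_id',
        'val2': '123'
    },
    'val2': {
        'operator': 'equals',
        'val1': 'vrf_id',
        'val2': None
    }
}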
q18474
XhrController.add_current_vrf
train
def add_current_vrf(self):
    """ Add VRF to filter list session variable
    """
    vrf_id = request.json['vrf_id']

    if vrf_id is not None:
        if vrf_id == 'null':
            vrf = VRF()
        else:
            vrf_id = int(vrf_id)
            vrf = VRF.get(vrf_id)

        session['current_vrfs'][vrf_id] = { 'id': vrf.id, 'rt': vrf.rt,
                'name': vrf.name, 'description': vrf.description }
        session.save()

    return json.dumps(session.get('current_vrfs', {}))
python
{ "resource": "" }
q18475
XhrController.del_current_vrf
train
def del_current_vrf(self):
    """ Remove VRF from filter list session variable
    """
    vrf_id = int(request.json['vrf_id'])
    if vrf_id in session['current_vrfs']:
        del session['current_vrfs'][vrf_id]
        session.save()

    return json.dumps(session.get('current_vrfs', {}))
python
{ "resource": "" }
q18476
XhrController.get_current_vrfs
train
def get_current_vrfs(self):
    """ Return VRF filter list from session variable

    Before returning the list, make a search for all VRFs currently in
    the list to verify that they still exist.
    """
    # Verify that all currently selected VRFs still exist
    cur_vrfs = session.get('current_vrfs', {}).items()
    if len(cur_vrfs) > 0:
        q = {
            'operator': 'equals',
            'val1': 'id',
            'val2': cur_vrfs[0][0]
        }

        if len(cur_vrfs) > 1:
            for vrf_id, vrf in cur_vrfs[1:]:
                q = {
                    'operator': 'or',
                    'val1': q,
                    'val2': {
                        'operator': 'equals',
                        'val1': 'id',
                        'val2': vrf_id
                    }
                }

        res = VRF.search(q)

        session['current_vrfs'] = {}
        for vrf in res['result']:
            session['current_vrfs'][vrf.id] = { 'id': vrf.id, 'rt': vrf.rt,
                    'name': vrf.name, 'description': vrf.description }
        session.save()

    return json.dumps(session.get('current_vrfs', {}))
python
{ "resource": "" }
q18477
XhrController.list_tags
train
def list_tags(self):
    """ List Tags and return JSON encoded result.
    """
    try:
        tags = Tags.list()
    except NipapError as e:
        return json.dumps({'error': 1, 'message': e.args,
            'type': type(e).__name__})

    return json.dumps(tags, cls=NipapJSONEncoder)
python
{ "resource": "" }
q18478
NipapConfig.read_file
train
def read_file(self):
    """ Read the configuration file
    """
    # don't try to parse config file if we don't have one set
    if not self._cfg_path:
        return

    try:
        cfg_fp = open(self._cfg_path, 'r')
        self.readfp(cfg_fp)
    except IOError as exc:
        raise NipapConfigError(str(exc))
python
{ "resource": "" }
q18479
VersionController.index
train
def index(self):
    """ Display NIPAP version info
    """
    c.pynipap_version = pynipap.__version__
    try:
        c.nipapd_version = pynipap.nipapd_version()
    except Exception:
        c.nipapd_version = 'unknown'
    c.nipap_db_version = pynipap.nipap_db_version()
    return render('/version.html')
python
{ "resource": "" }
q18480
AuthFactory._init_backends
train
def _init_backends(self):
    """ Initialize auth backends.
    """
    # fetch auth backends from config file
    self._backends = {}
    for section in self._config.sections():
        # does the section define an auth backend?
        section_components = section.rsplit('.', 1)
        if section_components[0] == 'auth.backends':
            auth_backend = section_components[1]
            self._backends[auth_backend] = eval(self._config.get(section, 'type'))

    self._logger.debug("Registered auth backends %s" % str(self._backends))
python
{ "resource": "" }
q18481
SqliteAuth._create_database
train
def _create_database(self):
    """ Set up database

    Creates tables required for the authentication module.
    """
    self._logger.info('creating user database')
    sql = '''CREATE TABLE IF NOT EXISTS user (
        username NOT NULL PRIMARY KEY,
        pwd_salt NOT NULL,
        pwd_hash NOT NULL,
        full_name,
        trusted NOT NULL DEFAULT 0,
        readonly NOT NULL DEFAULT 0
    )'''
    self._db_curs.execute(sql)
    self._db_conn.commit()
python
{ "resource": "" }
q18482
SqliteAuth.get_user
train
def get_user(self, username):
    """ Fetch the user from the database

    The function will return None if the user is not found
    """
    sql = '''SELECT * FROM user WHERE username = ?'''
    self._db_curs.execute(sql, (username, ))
    user = self._db_curs.fetchone()
    return user
python
{ "resource": "" }
q18483
SqliteAuth.modify_user
train
def modify_user(self, username, data):
    """ Modify user in SQLite database.

    Since username is used as primary key and we only have a single
    argument for it we can't modify the username right now.
    """
    if 'password' in data:
        # generate salt
        char_set = string.ascii_letters + string.digits
        data['pwd_salt'] = ''.join(random.choice(char_set) for x in range(8))
        data['pwd_hash'] = self._gen_hash(data['password'], data['pwd_salt'])
        del(data['password'])

    sql = "UPDATE user SET "
    sql += ', '.join("%s = ?" % k for k in sorted(data))
    sql += " WHERE username = ?"

    vals = []
    for k in sorted(data):
        vals.append(data[k])
    vals.append(username)

    try:
        self._db_curs.execute(sql, vals)
        self._db_conn.commit()
    except (sqlite3.OperationalError, sqlite3.IntegrityError) as error:
        raise AuthError(error)
python
{ "resource": "" }
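As an illustration of the statement modify_user builds: data = {'full_name': 'Test User', 'readonly': 1} for user 'testuser' yields the following, with the column list sorted alphabetically so it lines up with vals:

sql = "UPDATE user SET full_name = ?, readonly = ? WHERE username = ?"
vals = ['Test User', 1, 'testuser']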
q18484
SqliteAuth._gen_hash
train
def _gen_hash(self, password, salt):
    """ Generate password hash.
    """
    # generate hash
    h = hashlib.sha1()
    h.update(salt)
    h.update(password)
    return h.hexdigest()
python
{ "resource": "" }
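The hash above is plain SHA-1 over the salt followed by the password. An equivalent standalone computation, shown with bytes literals since Python 3's hashlib requires bytes (the Python 2 code above passes str directly):

import hashlib

salt, password = b'abc12345', b'secret'
h = hashlib.sha1()
h.update(salt)
h.update(password)
assert h.hexdigest() == hashlib.sha1(salt + password).hexdigest()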
q18485
requires_rw
train
def requires_rw(f):
    """ Adds readwrite authorization

    This will check if the user is a readonly user and if so reject the
    query. Apply this decorator to readwrite functions.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        auth = args[1]
        if auth.readonly:
            logger = logging.getLogger()
            logger.info("read-only user '%s' is not authorized to run function '%s'" % (auth.username, f.__name__))
            raise authlib.AuthorizationFailed("read-only user '%s' is not authorized to run function '%s'" % (auth.username, f.__name__))
        return f(*args, **kwargs)

    return decorated
python
{ "resource": "" }
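The decorator reads args[1], so it fits methods whose second positional argument is the authenticated user object. A sketch of a typical application; the class and method body are placeholders, not the real backend:

class Backend(object):
    @requires_rw
    def add_vrf(self, auth, attr):
        # only reached when auth.readonly is False
        return {'added': attr}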
q18486
_parse_expires
train
def _parse_expires(expires):
    """ Parse the 'expires' attribute, guessing what format it is in and
    returning a datetime
    """
    # none is used to signify positive infinity
    if expires is None or expires in ('never', 'infinity'):
        return 'infinity'

    try:
        return dateutil.parser.parse(unicode(expires))
    except ValueError as exc:
        pass

    try:
        # use parsedatetime for "human readable" time specs
        exp = pdt.parse(expires)[0]
        # and convert to datetime
        return datetime.datetime.fromtimestamp(time.mktime(exp))
    except ValueError as exc:
        pass

    raise NipapValueError("Invalid date specification for expires")
python
{ "resource": "" }
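Illustrative inputs and outputs for _parse_expires, assuming pdt is a module-level parsedatetime.Calendar() instance as the pdt.parse() call suggests (the code is Python 2, hence the unicode() call above):

_parse_expires(None)            # 'infinity'
_parse_expires('never')         # 'infinity'
_parse_expires('2030-01-01')    # datetime.datetime(2030, 1, 1, 0, 0) via dateutil
_parse_expires('in two days')   # a datetime two days from now, via parsedatetime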
q18487
Nipap._register_inet
train
def _register_inet(self, oid=None, conn_or_curs=None):
    """ Create the INET type and an Inet adapter."""
    from psycopg2 import extensions as _ext
    if not oid:
        oid = 869
    _ext.INET = _ext.new_type((oid, ), "INET",
            lambda data, cursor: data and Inet(data) or None)
    _ext.register_type(_ext.INET, self._con_pg)
    return _ext.INET
python
{ "resource": "" }
q18488
Nipap._is_ipv4
train
def _is_ipv4(self, ip):
    """ Return true if given arg is a valid IPv4 address
    """
    try:
        p = IPy.IP(ip)
    except ValueError:
        return False

    if p.version() == 4:
        return True
    return False
python
{ "resource": "" }
q18489
Nipap._connect_db
train
def _connect_db(self):
    """ Open database connection
    """
    # Get database configuration
    db_args = {}
    db_args['host'] = self._cfg.get('nipapd', 'db_host')
    db_args['database'] = self._cfg.get('nipapd', 'db_name')
    db_args['user'] = self._cfg.get('nipapd', 'db_user')
    db_args['password'] = self._cfg.get('nipapd', 'db_pass')
    db_args['sslmode'] = self._cfg.get('nipapd', 'db_sslmode')
    db_args['port'] = self._cfg.get('nipapd', 'db_port')

    # delete keys that are None, for example if we want to connect over a
    # UNIX socket, the 'host' argument should not be passed into the DSN
    if db_args['host'] is not None and db_args['host'] == '':
        db_args['host'] = None
    for key in db_args.copy():
        if db_args[key] is None:
            del(db_args[key])

    # Create database connection
    while True:
        try:
            self._con_pg = psycopg2.connect(**db_args)
            self._con_pg.set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
            self._curs_pg = self._con_pg.cursor(cursor_factory=psycopg2.extras.DictCursor)
            self._register_inet()
            psycopg2.extras.register_hstore(self._con_pg, globally=True, unicode=True)
        except psycopg2.Error as exc:
            if re.search("database.*does not exist", unicode(exc)):
                raise NipapDatabaseNonExistentError("Database '%s' does not exist" % db_args['database'])
            # no hstore extension, assume empty db (it wouldn't work
            # otherwise) and do auto upgrade?
            if re.search("hstore type not found in the database", unicode(exc)):
                # automatically install if auto-install is enabled
                if self._auto_install_db:
                    self._db_install(db_args['database'])
                    continue
                raise NipapDatabaseMissingExtensionError("hstore extension not found in the database")

            self._logger.error("pgsql: %s" % exc)
            raise NipapError("Backend unable to connect to database")
        except psycopg2.Warning as warn:
            self._logger.warning('pgsql: %s' % warn)

        # check db version
        try:
            current_db_version = self._get_db_version()
        except NipapDatabaseNoVersionError as exc:
            # if there's no db schema version we assume the database is
            # empty...
            if self._auto_install_db:
                # automatically install schema?
                self._db_install(db_args['database'])
                continue
            raise exc
        except NipapError as exc:
            self._logger.error(unicode(exc))
            raise exc

        if current_db_version != nipap.__db_version__:
            if self._auto_upgrade_db:
                self._db_upgrade(db_args['database'])
                continue
            raise NipapDatabaseWrongVersionError("NIPAP PostgreSQL database is outdated. Schema version %s is required to run but you are using %s" % (nipap.__db_version__, current_db_version))

        # if we reach this we should be fine and done
        break
python
{ "resource": "" }
q18490
Nipap._get_updated_rows
train
def _get_updated_rows(self, auth, function):
    """ Get rows updated by last update query

    * `function` [function]
        Function to use for searching (one of the search_* functions).

    Helper function used to fetch all rows which were updated by the
    latest UPDATE ... RETURNING id query.
    """
    # Get dicts for all rows which were edited by building a query for
    # search_*. Each row returned from UPDATE ... RETURNING id gives us one
    # query part (qp) which then are combined to one big query for the
    # search_* API call.
    qps = []
    for row in self._curs_pg:
        qps.append(
            {
                'operator': 'equals',
                'val1': 'id',
                'val2': row['id']
            }
        )

    # if we didn't update anything return empty list
    if len(qps) == 0:
        return []

    # fetch list of objects based on IDs
    q = qps[0]
    for qp in qps[1:]:
        q = {
            'operator': 'or',
            'val1': q,
            'val2': qp
        }

    updated = function(auth, q, { 'max_result': 10000 })['result']

    return updated
python
{ "resource": "" }
q18491
Nipap._get_query_parts
train
def _get_query_parts(self, query_str, search_options=None):
    """ Split a query string into its parts
    """
    if search_options is None:
        search_options = {}

    if query_str is None:
        raise NipapValueError("'query_string' must not be None")

    # find query parts
    query_str_parts = []
    try:
        for part in shlex.split(query_str.encode('utf-8')):
            query_str_parts.append({ 'string': part.decode('utf-8') })
    except ValueError as exc:
        if unicode(exc) == 'No closing quotation':
            raise NipapValueError(unicode(exc))
        raise exc

    # Handle empty search.
    # We need something to iterate over, but shlex.split() returns
    # zero-element list for an empty string, so we have to append one
    # manually
    if len(query_str_parts) == 0:
        query_str_parts.append({ 'string': '' })

    return query_str_parts
python
{ "resource": "" }
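The splitting behaviour of _get_query_parts on a quoted query string, shown without the self argument for brevity:

_get_query_parts('foo "bar baz"')
# -> [{'string': u'foo'}, {'string': u'bar baz'}]

_get_query_parts('')
# -> [{'string': ''}]

_get_query_parts('"unterminated')
# -> raises NipapValueError('No closing quotation')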
q18492
Nipap._get_db_version
train
def _get_db_version(self):
    """ Get the schema version of the nipap psql db.
    """
    dbname = self._cfg.get('nipapd', 'db_name')
    self._execute("SELECT description FROM pg_shdescription JOIN pg_database ON objoid = pg_database.oid WHERE datname = '%s'" % dbname)
    comment = self._curs_pg.fetchone()
    if comment is None:
        raise NipapDatabaseNoVersionError("Could not find comment of psql database %s" % dbname)

    db_version = None
    m = re.match('NIPAP database - schema version: ([0-9]+)', comment[0])
    if m:
        db_version = int(m.group(1))
    else:
        raise NipapError("Could not match schema version database comment")

    return db_version
python
{ "resource": "" }
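The version-comment parsing above can be tried in isolation; the comment string shown is the format the regex expects on the database object:

import re

comment = 'NIPAP database - schema version: 6'
m = re.match('NIPAP database - schema version: ([0-9]+)', comment)
assert m and int(m.group(1)) == 6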
q18493
Nipap._db_install
train
def _db_install(self, db_name):
    """ Install nipap database schema
    """
    self._logger.info("Installing NIPAP database schemas into db")
    self._execute(db_schema.ip_net % (db_name))
    self._execute(db_schema.functions)
    self._execute(db_schema.triggers)
python
{ "resource": "" }
q18494
Nipap._db_upgrade
train
def _db_upgrade(self, db_name):
    """ Upgrade nipap database schema
    """
    current_db_version = self._get_db_version()
    self._execute(db_schema.functions)
    for i in range(current_db_version, nipap.__db_version__):
        self._logger.info("Upgrading DB schema: %s to %s", i, i + 1)
        upgrade_sql = db_schema.upgrade[i - 1]  # zero-based index into upgrade list
        self._execute(upgrade_sql % (db_name))
    self._execute(db_schema.triggers)
python
{ "resource": "" }
q18495
Nipap._expand_vrf_spec
train
def _expand_vrf_spec(self, spec):
    """ Expand VRF specification to SQL.

    id [integer]
        internal database id of VRF

    name [string]
        name of VRF

    A VRF is referenced either by its internal database id, its route
    target (rt) or by its name. All are used for exact matching and so
    no wildcard or regular expressions are allowed. Only one key may be
    used and an error will be thrown if more than one is specified.
    """
    if type(spec) is not dict:
        raise NipapInputError("vrf specification must be a dict")

    allowed_values = ['id', 'name', 'rt']
    for a in spec:
        if a not in allowed_values:
            raise NipapExtraneousInputError("extraneous specification key %s" % a)

    if 'id' in spec:
        if type(spec['id']) not in (int, long):
            raise NipapValueError("VRF specification key 'id' must be an integer.")
    elif 'rt' in spec:
        if type(spec['rt']) != type(''):
            raise NipapValueError("VRF specification key 'rt' must be a string.")
    elif 'name' in spec:
        if type(spec['name']) != type(''):
            raise NipapValueError("VRF specification key 'name' must be a string.")
    if len(spec) > 1:
        raise NipapExtraneousInputError("VRF specification contains too many keys, specify either id, rt or name.")

    where, params = self._sql_expand_where(spec, 'spec_')

    return where, params
python
{ "resource": "" }
q18496
Nipap.list_vrf
train
def list_vrf(self, auth, spec=None):
    """ Return a list of VRFs matching `spec`.

    * `auth` [BaseAuth]
        AAA options.
    * `spec` [vrf_spec]
        A VRF specification. If omitted, all VRFs are returned.

    Returns a list of dicts.

    This is the documentation of the internal backend function. It's
    exposed over XML-RPC, please also see the XML-RPC documentation for
    :py:func:`nipap.xmlrpc.NipapXMLRPC.list_vrf` for full
    understanding.
    """
    if spec is None:
        spec = {}

    self._logger.debug("list_vrf called; spec: %s" % unicode(spec))

    sql = "SELECT * FROM ip_net_vrf"
    params = list()

    # a non-empty spec narrows the result, no spec lists all VRFs
    if spec != {}:
        where, params = self._expand_vrf_spec(spec)
        if len(params) > 0:
            sql += " WHERE " + where

    sql += " ORDER BY vrf_rt_order(rt) NULLS FIRST"

    self._execute(sql, params)

    res = list()
    for row in self._curs_pg:
        res.append(dict(row))

    return res
python
{ "resource": "" }
q18497
Nipap._get_vrf
train
def _get_vrf(self, auth, spec, prefix='vrf_'):
    """ Get a VRF based on prefix spec

    Shorthand function to reduce code in the functions below, since
    more or less all of them need to perform the actions that are
    specified here.

    The major difference to :func:`list_vrf` is that we always return
    results - empty results if no VRF is specified in prefix spec.
    """
    # find VRF from attributes vrf, vrf_id or vrf_name
    vrf = []
    if prefix + 'id' in spec:
        # if None, mangle it to being 0, ie our default VRF
        if spec[prefix + 'id'] is None:
            spec[prefix + 'id'] = 0
        vrf = self.list_vrf(auth, { 'id': spec[prefix + 'id'] })
    elif prefix + 'rt' in spec:
        vrf = self.list_vrf(auth, { 'rt': spec[prefix + 'rt'] })
    elif prefix + 'name' in spec:
        vrf = self.list_vrf(auth, { 'name': spec[prefix + 'name'] })
    else:
        # no VRF specified - return VRF "default"
        vrf = self.list_vrf(auth, { 'id': 0 })

    if len(vrf) > 0:
        return vrf[0]

    raise NipapNonExistentError('No matching VRF found.')
python
{ "resource": "" }
q18498
Nipap.edit_vrf
train
def edit_vrf(self, auth, spec, attr):
    """ Update VRFs matching `spec` with attributes `attr`.

    * `auth` [BaseAuth]
        AAA options.
    * `spec` [vrf_spec]
        Attributes specifying what VRF to edit.
    * `attr` [vrf_attr]
        Dict specifying fields to be updated and their new values.

    This is the documentation of the internal backend function. It's
    exposed over XML-RPC, please also see the XML-RPC documentation for
    :py:func:`nipap.xmlrpc.NipapXMLRPC.edit_vrf` for full
    understanding.
    """
    self._logger.debug("edit_vrf called; spec: %s attr: %s" %
            (unicode(spec), unicode(attr)))

    # sanity check - do we have all attributes?
    self._check_attr(attr, [], _vrf_attrs)

    # get list of VRFs which will be changed before changing them
    vrfs = self.list_vrf(auth, spec)

    where, params1 = self._expand_vrf_spec(spec)
    update, params2 = self._sql_expand_update(attr)
    params = dict(params2.items() + params1.items())

    if len(attr) == 0:
        raise NipapInputError("'attr' must not be empty.")

    sql = "UPDATE ip_net_vrf SET " + update
    sql += " WHERE " + where
    sql += " RETURNING id"

    self._execute(sql, params)
    updated_vrfs = self._get_updated_rows(auth, self.search_vrf)

    # write to audit table
    for v in vrfs:
        audit_params = {
            'vrf_id': v['id'],
            'vrf_rt': v['rt'],
            'vrf_name': v['name'],
            'username': auth.username,
            'authenticated_as': auth.authenticated_as,
            'full_name': auth.full_name,
            'authoritative_source': auth.authoritative_source,
            'description': 'Edited VRF %s attr: %s' % (v['rt'], unicode(attr))
        }
        sql, params = self._sql_expand_insert(audit_params)
        self._execute('INSERT INTO ip_net_log %s' % sql, params)

    return updated_vrfs
python
{ "resource": "" }
q18499
Nipap.smart_search_vrf
train
def smart_search_vrf(self, auth, query_str, search_options=None, extra_query=None):
    """ Perform a smart search on VRF list.

    * `auth` [BaseAuth]
        AAA options.
    * `query_str` [string]
        Search string
    * `search_options` [options_dict]
        Search options. See :func:`search_vrf`.
    * `extra_query` [dict_to_sql]
        Extra search terms, will be AND:ed together with what is
        extracted from the query string.

    Return a dict with three elements:

    * :attr:`interpretation` - How the query string was interpreted.
    * :attr:`search_options` - Various search_options.
    * :attr:`result` - The search result.

    The :attr:`interpretation` is given as a list of dicts, each
    explaining how a part of the search key was interpreted (ie. what
    VRF attribute the search operation was performed on).

    The :attr:`result` is a list of dicts containing the search result.

    The smart search function tries to convert the query from a text
    string to a `query` dict which is passed to the :func:`search_vrf`
    function. If multiple search keys are detected, they are combined
    with a logical AND.

    It will basically just take each search term and try to match it
    against the name or description column with regex match or the VRF
    column with an exact match.

    See the :func:`search_vrf` function for an explanation of the
    `search_options` argument.

    This is the documentation of the internal backend function. It's
    exposed over XML-RPC, please also see the XML-RPC documentation for
    :py:func:`nipap.xmlrpc.NipapXMLRPC.smart_search_vrf` for full
    understanding.
    """
    if search_options is None:
        search_options = {}

    self._logger.debug("smart_search_vrf query string: %s" % query_str)

    success, query = self._parse_vrf_query(query_str)
    if not success:
        return {
            'interpretation': query,
            'search_options': search_options,
            'result': [],
            'error': True,
            'error_message': 'query interpretation failed'
        }

    if extra_query is not None:
        query = {
            'operator': 'and',
            'val1': query,
            'val2': extra_query
        }

    self._logger.debug("smart_search_vrf; query expanded to: %s" % unicode(query))

    search_result = self.search_vrf(auth, query, search_options)
    search_result['interpretation'] = query
    search_result['error'] = False

    return search_result
python
{ "resource": "" }
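A usage sketch for smart_search_vrf; the nipap backend instance, the auth object and the stored VRFs are assumptions for illustration:

res = nipap.smart_search_vrf(auth, 'core', {'max_result': 10})
if not res['error']:
    for vrf in res['result']:
        print(vrf['rt'], vrf['name'])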