sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def init_and_get_conf(argv: list=None) -> Config:
    """Initialize a YABT CLI environment and return a Config instance.

    Sets up terminal colors, locates the project root, parses CLI args
    (with user-settings extensions), and builds a fully-populated Config.

    :param argv: Manual override of command-line params to parse (for tests).
    """
    # enable ANSI colors on all platforms
    colorama.init()
    work_dir = os.path.abspath(os.curdir)
    # project root = nearest ancestor dir containing the project build file
    project_root = search_for_parent_dir(work_dir,
                                         with_files=set([BUILD_PROJ_FILE]))
    parser = make_parser(find_project_config_file(project_root))
    # let the user's settings module add its own CLI options
    settings_module = get_user_settings_module(project_root)
    call_user_func(settings_module, 'extend_cli', parser)
    argcomplete.autocomplete(parser)
    args = parser.parse(argv)
    get_build_flavor(settings_module, args)
    config = Config(args, project_root, work_dir, settings_module)
    # user hooks populate common / flavored config, then get a chance to
    # mutate the final config object
    config.common_conf = call_user_func(
        config.settings, 'get_common_config', config, args)
    config.flavor_conf = call_user_func(
        config.settings, 'get_flavored_config', config, args)
    call_user_func(config.settings, 'extend_config', config, args)
    if not args.no_policies:
        config.policies = listify(call_user_func(
            config.settings, 'get_policies', config))
    return config
Initialize a YABT CLI environment and return a Config instance. :param argv: Manual override of command-line params to parse (for tests).
entailment
def stable_reverse_topological_sort(graph):
    """Yield nodes of `graph` in stable reverse topological sort order.

    This topological sort is a **unique** permutation of the nodes such
    that an edge from u to v implies that u appears before v in the
    topological sort order.

    Parameters
    ----------
    graph : NetworkX digraph
        A directed graph

    Raises
    ------
    NetworkXError
        Topological sort is defined for directed graphs only.
        If the graph G is undirected, a NetworkXError is raised.
    NetworkXUnfeasible
        If G is not a directed acyclic graph (DAG) no topological sort
        exists and a NetworkXUnfeasible exception is raised.

    Notes
    -----
    - This algorithm is based on a description and proof in
      The Algorithm Design Manual [1]_ .
    - This implementation is modified from networkx 1.11 implementation
      [2]_ to achieve stability, support only reverse (allows yielding
      instead of returning a list), and remove the `nbunch` argument
      (had no use for it).
    - Stability comes from visiting nodes and successors in sorted()
      order, so equal graphs always yield the same permutation.

    References
    ----------
    .. [1] Skiena, S. S. The Algorithm Design Manual
       (Springer-Verlag, 1998).
    .. [2] networkx on GitHub
       https://github.com/networkx/networkx/blob/8358afac209c00b7feb3e81c901098852a9413b3/networkx/algorithms/dag.py#L88-L168
    """
    if not graph.is_directed():
        raise networkx.NetworkXError(
            'Topological sort not defined on undirected graphs.')
    # nonrecursive version
    seen = set()        # nodes on the current DFS path (or already emitted)
    explored = set()    # nodes whose entire subtree has been emitted
    for v in sorted(graph.nodes()):
        if v in explored:
            continue
        fringe = [v]  # nodes yet to look at (explicit DFS stack)
        while fringe:
            w = fringe[-1]  # depth first search
            if w in explored:  # already looked down this branch
                fringe.pop()
                continue
            seen.add(w)  # mark as seen
            # Check successors for cycles and for new nodes
            new_nodes = []
            for n in sorted(graph[w]):
                if n not in explored:
                    if n in seen:
                        # successor is on the current DFS path => cycle
                        raise networkx.NetworkXUnfeasible(
                            'Graph contains a cycle.')
                    new_nodes.append(n)
            if new_nodes:
                # Add new_nodes to fringe
                fringe.extend(new_nodes)
            else:
                # No new nodes so w is fully explored
                explored.add(w)
                yield w
                fringe.pop()
Return a list of nodes in topological sort order. This topological sort is a **unique** permutation of the nodes such that an edge from u to v implies that u appears before v in the topological sort order. Parameters ---------- graph : NetworkX digraph A directed graph Raises ------ NetworkXError Topological sort is defined for directed graphs only. If the graph G is undirected, a NetworkXError is raised. NetworkXUnfeasible If G is not a directed acyclic graph (DAG) no topological sort exists and a NetworkXUnfeasible exception is raised. Notes ----- - This algorithm is based on a description and proof in The Algorithm Design Manual [1]_ . - This implementation is modified from networkx 1.11 implementation [2]_ to achieve stability, support only reverse (allows yielding instead of returning a list), and remove the `nbunch` argument (had no use for it). See also -------- is_directed_acyclic_graph References ---------- .. [1] Skiena, S. S. The Algorithm Design Manual (Springer-Verlag, 1998). http://www.amazon.com/exec/obidos/ASIN/0387948600/ref=ase_thealgorithmrepo/ .. [2] networkx on GitHub https://github.com/networkx/networkx/blob/8358afac209c00b7feb3e81c901098852a9413b3/networkx/algorithms/dag.py#L88-L168
entailment
def raise_unresolved_targets(build_context, conf, unknown_seeds, seed_refs):
    """Raise a ValueError describing unresolved targets during graph parsing.

    :param build_context: Build context (currently unused; kept for
        call-site compatibility).
    :param conf: Config object, used to resolve build-file paths.
    :param unknown_seeds: Iterable of unresolved target names.
    :param seed_refs: Mapping from seed name to a seed-ref record that
        explains why the seed was requested (CLI, default, dep, buildenv).
    :raises ValueError: Always - with a sorted, per-seed report.
    """

    def format_target(target_name):
        """Render a referring target together with its build file path."""
        # TODO: suggest similar known target names
        build_module = split_build_module(target_name)
        return '{} (in {})'.format(target_name,
                                   conf.get_build_file_path(build_module))

    def format_unresolved(seed):
        """Render one unresolved seed with the reasons it was requested."""
        if seed not in seed_refs:
            return seed
        seed_ref = seed_refs[seed]
        reasons = []
        if seed_ref.on_cli:
            reasons.append('seen on command line')
        if seed_ref.from_default:
            # Bug fix: previously the bound method object itself was
            # formatted into the message (missing call parentheses).
            # NOTE(review): assumes `get_project_build_file` is a
            # zero-arg method on Config - confirm against Config class.
            reasons.append('specified as default target in {}'
                           .format(conf.get_project_build_file()))
        if seed_ref.dep_of:
            reasons.append(
                'dependency of ' + ', '.join(
                    format_target(target_name)
                    for target_name in sorted(seed_ref.dep_of)))
        if seed_ref.buildenv_of:
            reasons.append(
                'buildenv of ' + ', '.join(
                    format_target(target_name)
                    for target_name in sorted(seed_ref.buildenv_of)))
        return '{} - {}'.format(seed, ', '.join(reasons))

    unresolved_str = '\n'.join(format_unresolved(target_name)
                               for target_name in sorted(unknown_seeds))
    num_target_str = '{} target'.format(len(unknown_seeds))
    if len(unknown_seeds) > 1:
        num_target_str += 's'
    raise ValueError('Could not resolve {}:\n{}'
                     .format(num_target_str, unresolved_str))
Raise error about unresolved targets during graph parsing.
entailment
def register_scm_provider(scm_name: str): """Return a decorator for registering a SCM provider named `scm_name`.""" def register_decorator(scm_class: SourceControl): """Decorator for registering SCM provider.""" if scm_name in ScmManager.providers: raise KeyError('{} already registered!'.format(scm_name)) ScmManager.providers[scm_name] = scm_class SourceControl.register(scm_class) logger.debug('Registered {0} SCM from {1.__module__}.{1.__name__}', scm_name, scm_class) return scm_class return register_decorator
Return a decorator for registering a SCM provider named `scm_name`.
entailment
def get_provider(cls, scm_name: str, conf) -> SourceControl:
    """Load and return named SCM provider instance.

    Loads any entry-point plugins advertising the `yabt.scm` group for
    `scm_name` before looking up the provider registry.

    :param conf: A yabt.config.Config object used to initialize the SCM
                 provider instance.
    :raises KeyError: If no SCM provider with name `scm_name` registered.
    """
    # loading an entry point has the side effect of registering it
    for ep in pkg_resources.iter_entry_points('yabt.scm', scm_name):
        ep.load()
        logger.debug('Loaded SCM provider {0.name} from {0.module_name} '
                     '(dist {0.dist})', ep)
    logger.debug('Loaded {} SCM providers', len(cls.providers))
    try:
        provider_class = cls.providers[scm_name]
    except KeyError:
        raise KeyError('Unknown SCM identifier {}'.format(scm_name))
    return provider_class(conf)
Load and return named SCM provider instance. :param conf: A yabt.config.Config object used to initialize the SCM provider instance. :raises KeyError: If no SCM provider with name `scm_name` registered.
entailment
def write_dot(build_context, conf: Config, out_f):
    """Dump the build graph in Graphviz dot format to the file-like
    object `out_f`.

    Buildenv-only nodes/edges are hidden unless `conf.show_buildenv_deps`
    is set; prebuilt (cached) targets are rendered with a grey fill.
    """
    not_buildenv_targets = get_not_buildenv_targets(build_context)
    prebuilt_targets = get_prebuilt_targets(build_context)

    def visible(node_name):
        # a node is drawn if buildenv deps are shown, or it isn't one
        return conf.show_buildenv_deps or node_name in not_buildenv_targets

    out_f.write('strict digraph {\n')
    for node in build_context.target_graph.nodes:
        if not visible(node):
            continue
        fillcolor = ('fillcolor="grey",style=filled'
                     if node in prebuilt_targets else '')
        color = TARGETS_COLORS.get(
            build_context.targets[node].builder_name, 'black')
        out_f.write(' "{}" [color="{}",{}];\n'.format(node, color,
                                                      fillcolor))
    for u, v in build_context.target_graph.edges:
        if visible(u) and visible(v):
            out_f.write(' "{}" -> "{}";\n'.format(u, v))
    out_f.write('}\n\n')
Write build graph in dot format to `out_f` file-like object.
entailment
def get_workspace(self, *parts) -> str:
    """Return a path to a private workspace dir.

    Joins `parts` (each sanitized to a safe FS name) under the
    configured workspace path, creating the directory tree (including
    intermediate parents) if it doesn't already exist.
    """
    safe_parts = (get_safe_path(part) for part in parts)
    workspace_dir = os.path.join(self.conf.get_workspace_path(),
                                 *safe_parts)
    if not os.path.isdir(workspace_dir):
        # exist_ok guards against a concurrent creation of the same dir
        os.makedirs(workspace_dir, exist_ok=True)
    return workspace_dir
Return a path to a private workspace dir. Create sub-tree of dirs using strings from `parts` inside workspace, and return full path to innermost directory. Upon returning successfully, the directory will exist (potentially changed to a safe FS name), even if it didn't exist before, including any intermediate parent directories.
entailment
def get_bin_dir(self, build_module: str) -> str:
    """Return the binaries dir path for the build module `build_module`.

    The directory (and any missing parents) is created if needed, so
    the returned path always exists as a directory.
    """
    bin_dir = os.path.join(self.conf.get_bin_path(), build_module)
    if not os.path.isdir(bin_dir):
        # exist_ok guards against a concurrent creation of the same dir
        os.makedirs(bin_dir, exist_ok=True)
    return bin_dir
Return a path to the binaries dir for a build module dir. Create sub-tree of missing dirs as needed, and return full path to innermost directory.
entailment
def walk_target_deps_topological_order(self, target: Target):
    """Generate the dependency targets of `target`, following the
    topological sort order of the full target graph."""
    dep_names = get_descendants(self.target_graph, target.name)
    # walk the whole graph in topo order, keeping only this target's deps
    yield from (self.targets[name]
                for name in topological_sort(self.target_graph)
                if name in dep_names)
Generate all dependencies of `target` by topological sort order.
entailment
def generate_direct_deps(self, target: Target):
    """Generate only direct dependency targets of `target`, in sorted
    name order."""
    for dep_name in sorted(target.deps):
        yield self.targets[dep_name]
Generate only direct dependencies of `target`.
entailment
def generate_dep_names(self, target: Target):
    """Generate sorted names of all dependencies (graph descendants)
    of `target`."""
    dep_names = get_descendants(self.target_graph, target.name)
    yield from sorted(dep_names)
Generate names of all dependencies (descendants) of `target`.
entailment
def generate_all_deps(self, target: Target):
    """Generate every dependency of `target` as target node objects."""
    for dep_name in self.generate_dep_names(target):
        yield self.targets[dep_name]
Generate all dependencies of `target` (the target nodes).
entailment
def register_target(self, target: Target):
    """Register a `target` instance in this build context.

    The target is recorded in the `targets` map and in the
    `targets_by_module` map; it is NOT added to the target graph here -
    that happens only after target extraction completes (thread safety
    considerations).

    :raises NameError: If a target with the same name already exists.
    """
    existing = self.targets.get(target.name)
    if existing is not None:
        raise NameError(
            'Target with name "{0.name}" ({0.builder_name} from module '
            '"{1}") already exists - defined first as '
            '{2.builder_name} in module "{3}"'.format(
                target, split_build_module(target.name),
                existing, split_build_module(existing.name)))
    self.targets[target.name] = target
    build_module = split_build_module(target.name)
    self.targets_by_module[build_module].add(target.name)
Register a `target` instance in this build context. A registered target is saved in the `targets` map and in the `targets_by_module` map, but is not added to the target graph until target extraction is completed (thread safety considerations).
entailment
def remove_target(self, target_name: str):
    """Remove (unregister) the target named `target_name`, if present.

    Drops the target from both the `targets` map and the
    `targets_by_module` map. A no-op if the name is unknown. The target
    graph, if one exists, is left untouched.
    """
    if target_name not in self.targets:
        return
    del self.targets[target_name]
    build_module = split_build_module(target_name)
    module_targets = self.targets_by_module.get(build_module)
    if module_targets is not None:
        module_targets.remove(target_name)
Remove (unregister) a `target` from this build context. Removes the target instance with the given name, if it exists, from both the `targets` map and the `targets_by_module` map. Doesn't do anything if no target with that name is found. Doesn't touch the target graph, if it exists.
entailment
def get_target_extraction_context(self, build_file_path: str) -> dict:
    """Return a build file parser target extraction context.

    Maps every registered builder name to a target-extraction function
    bound to the given build file path.
    """
    return {name: extractor(name, builder, build_file_path, self)
            for name, builder in Plugin.builders.items()}
Return a build file parser target extraction context. The target extraction context is a build-file-specific mapping from builder-name to target extraction function, for every registered builder.
entailment
def get_buildenv_graph(self): """Return a graph induced by buildenv nodes""" # This implementation first obtains all subsets of nodes that all # buildenvs depend on, and then builds a subgraph induced by the union # of these subsets. This can be very non-optimal. # TODO(itamar): Reimplement efficient algo, or redesign buildenvs buildenvs = set(target.buildenv for target in self.targets.values() if target.buildenv) return nx.DiGraph(self.target_graph.subgraph(reduce( lambda x, y: x | set(y), (get_descendants(self.target_graph, buildenv) for buildenv in buildenvs), buildenvs)))
Return a graph induced by buildenv nodes
entailment
def ready_nodes_iter(self, graph_copy):
    """Generate ready targets from the graph `graph_copy`.

    The input graph is mutated by this method, so it has to be a mutable
    copy of the graph (e.g. not original copy, or read-only view).

    Caller **must** call `done()` after processing every generated
    target, so additional ready targets can be added to the queue.
    The invariant: a target may be yielded from this generator only
    after all its descendant targets were notified "done".
    """

    def is_ready(target_name):
        """Return True if the node `target_name` is "ready" in the
        graph `graph_copy`.

        "Ready" means that the graph doesn't contain any more nodes that
        `target_name` depends on (e.g. it has no successors).
        """
        try:
            next(graph_copy.successors(target_name))
        except StopIteration:
            return True
        return False

    # seed the queue with all initially-ready nodes, in stable order
    ready_nodes = deque(sorted(
        target_name for target_name in graph_copy.nodes
        if is_ready(target_name)))
    # signals that a callback made progress (new ready nodes possible)
    produced_event = threading.Event()
    # signals a fatal failure (when not continuing after fail)
    failed_event = threading.Event()

    def make_done_callback(target: Target):
        """Return a callable "done" notifier to report a target as
        processed."""
        def done_notifier():
            """Mark target as done, adding new ready nodes to queue"""
            if graph_copy.has_node(target.name):
                # predecessors may become ready once this node is removed
                affected_nodes = list(sorted(
                    graph_copy.predecessors(target.name)))
                graph_copy.remove_node(target.name)
                ready_nodes.extend(
                    target_name for target_name in affected_nodes
                    if is_ready(target_name))
            produced_event.set()
        return done_notifier

    def make_retry_callback(target: Target):
        """Return a callable "retry" notifier to report a target as in
        need of retry.

        Currently for tests we rebuild the target when it's not
        necessary.
        """
        def retry_notifier():
            """Mark target as retry, re-entering node to end of queue"""
            if graph_copy.has_node(target.name):
                ready_nodes.append(target.name)
            produced_event.set()
        return retry_notifier

    def make_fail_callback(target: Target):
        """Return a callable "fail" notifier to report a target as
        failed after all retries."""
        def fail_notifier(ex):
            """Mark target as failed, taking it and ancestors out of
            the queue"""
            # TODO(Dana) separate "failed to build target" errors from
            # "failed to run" errors.
            # see: https://github.com/resonai/ybt/issues/124
            if isinstance(ex, CalledProcessError):
                # surface the failing subprocess output to the console
                sys.stdout.write(ex.stdout.decode('utf-8'))
                sys.stderr.write(ex.stderr.decode('utf-8'))
            if graph_copy.has_node(target.name):
                self.failed_nodes[target.name] = ex
                # removing all ancestors (nodes that depend on this one)
                affected_nodes = get_ancestors(graph_copy, target.name)
                graph_copy.remove_node(target.name)
                for affected_node in affected_nodes:
                    if affected_node in self.skipped_nodes:
                        continue
                    if graph_copy.has_node(affected_node):
                        self.skipped_nodes.append(affected_node)
                        graph_copy.remove_node(affected_node)
            if self.conf.continue_after_fail:
                logger.info('Failed target: {} due to error: {}',
                            target.name, ex)
                produced_event.set()
            else:
                # abort the whole build
                failed_event.set()
                fatal('`{}\': {}', target.name, ex)
        return fail_notifier

    while True:
        # wait for the queue to become non-empty (or for termination)
        while len(ready_nodes) == 0:
            if graph_copy.order() == 0:
                return
            if failed_event.is_set():
                return
            # timed wait so termination conditions are re-checked
            produced_event.wait(0.5)
            produced_event.clear()
        next_node = ready_nodes.popleft()
        node = self.targets[next_node]
        node.done = make_done_callback(node)
        # TODO(bergden) retry assumes no need to update predecessors:
        # This means we don't support retries for targets that are
        # prerequisites of other targets (builds, installs)
        node.retry = make_retry_callback(node)
        node.fail = make_fail_callback(node)
        yield node
Generate ready targets from the graph `graph_copy`. The input graph is mutated by this method, so it has to be a mutable copy of the graph (e.g. not original copy, or read-only view). Caller **must** call `done()` after processing every generated target, so additional ready targets can be added to the queue. The invariant: a target may be yielded from this generator only after all its descendant targets were notified "done".
entailment
def run_in_buildenv(
        self, buildenv_target_name: str, cmd: list, cmd_env: dict=None,
        work_dir: str=None, auto_uid: bool=True, runtime: str=None,
        **kwargs):
    """Run a command in a named BuildEnv Docker image.

    :param buildenv_target_name: A named Docker image target in which
                                 the command should be run.
    :param cmd: The command to run, as you'd pass to subprocess.run()
    :param cmd_env: A dictionary of environment variables for the
                    command.
    :param work_dir: A different work dir to run in.
                     Either absolute path, or relative to project root.
    :param auto_uid: Whether to run as the active uid:gid, or as root.
    :param runtime: Optional Docker runtime name (passed as --runtime).
    :param kwargs: Extra keyword arguments that are passed to the
                   subprocess.run() call that runs the BuildEnv
                   container (for, e.g. timeout arg, stdout/err
                   redirection, etc.)
    :raises KeyError: If named BuildEnv is not a registered BuildEnv
                      image
    """
    buildenv_target = self.targets[buildenv_target_name]
    # TODO(itamar): Assert that buildenv_target is up to date
    # caller-redirected streams mean we must not allocate a TTY
    redirection = any(
        stream_key in kwargs
        for stream_key in ('stdin', 'stdout', 'stderr', 'input'))
    docker_run = ['docker', 'run']
    # if not self.conf.non_interactive:
    #     docker_run.append('-i')
    if not redirection:
        docker_run.append('-t')
    # bind-mount the project (or a configured volume) at /project
    project_vol = (self.conf.docker_volume if self.conf.docker_volume
                   else self.conf.project_root)
    container_work_dir = PurePath('/project')
    if work_dir:
        container_work_dir /= work_dir
    if runtime:
        docker_run.extend([
            '--runtime', runtime,
        ])
    docker_run.extend([
        '--rm',
        '-v', project_vol + ':/project',
        # TODO: windows containers?
        '-w', container_work_dir.as_posix(),
    ])
    if cmd_env:
        for key, value in cmd_env.items():
            # TODO(itamar): escaping
            docker_run.extend(['-e', '{}={}'.format(key, value)])
    if platform.system() == 'Linux' and auto_uid:
        # Fix permissions for bind-mounted project dir
        # The fix is not needed when using Docker For Mac / Windows,
        # because it is somehow taken care of by the sharing mechanics
        docker_run.extend([
            '-u', '{}:{}'.format(os.getuid(), os.getgid()),
            '-v', '/etc/shadow:/etc/shadow:ro',
            '-v', '/etc/group:/etc/group:ro',
            '-v', '/etc/passwd:/etc/passwd:ro',
            '-v', '/etc/sudoers:/etc/sudoers:ro',
        ])
    docker_run.append(format_qualified_image_name(buildenv_target))
    docker_run.extend(cmd)
    logger.info('Running command in build env "{}" using command {}',
                buildenv_target_name, docker_run)
    # default to capturing output unless the caller redirected it
    # TODO: Consider changing the PIPEs to temp files.
    if 'stderr' not in kwargs:
        kwargs['stderr'] = PIPE
    if 'stdout' not in kwargs:
        kwargs['stdout'] = PIPE
    result = run(docker_run, check=True, **kwargs)
    # TODO(Dana): Understand what is the right enconding and remove the
    # try except
    if kwargs['stdout'] is PIPE:
        try:
            sys.stdout.write(result.stdout.decode('utf-8'))
        except UnicodeEncodeError as e:
            # fall back to a hex dump when output can't be re-encoded
            sys.stderr.write('tried writing the stdout of {},\n but it '
                             'has a problematic character:\n {}\n'
                             'hex dump of stdout:\n{}\n'
                             .format(docker_run, str(e), codecs.encode(
                                 result.stdout, 'hex').decode('utf8')))
    if kwargs['stderr'] is PIPE:
        try:
            sys.stderr.write(result.stderr.decode('utf-8'))
        except UnicodeEncodeError as e:
            sys.stderr.write('tried writing the stderr of {},\n but it '
                             'has a problematic character:\n {}\n'
                             'hex dump of stderr:\n{}\n'
                             .format(docker_run, str(e), codecs.encode(
                                 result.stderr, 'hex').decode('utf8')))
    return result
Run a command in a named BuildEnv Docker image. :param buildenv_target_name: A named Docker image target in which the command should be run. :param cmd: The command to run, as you'd pass to subprocess.run() :param cmd_env: A dictionary of environment variables for the command. :param work_dir: A different work dir to run in. Either absolute path, or relative to project root. :param auto_uid: Whether to run as the active uid:gid, or as root. :param kwargs: Extra keyword arguments that are passed to the subprocess.run() call that runs the BuildEnv container (for, e.g. timeout arg, stdout/err redirection, etc.) :raises KeyError: If named BuildEnv is not a registered BuildEnv image
entailment
def build_target(self, target: Target):
    """Invoke the builder function registered for `target`, if any."""
    builder = Plugin.builders[target.builder_name]
    if not builder.func:
        # builder registered without a build function - nothing to do
        logger.debug('Skipping {} builder function for target {} (no '
                     'function registered)', target.builder_name, target)
        return
    logger.debug('About to invoke the {} builder function for {}',
                 target.builder_name, target.name)
    builder.func(self, target)
Invoke the builder function for a target.
entailment
def register_target_artifact_metadata(self, target: Target, metadata: dict):
    """Register the artifact metadata dictionary for a built target.

    Thread-safe: the shared `artifacts_metadata` map is updated under
    the context lock.

    :param target: The built target (keyed by its `name` attribute -
        the original `str` annotation was wrong, since the body reads
        `target.name`).
    :param metadata: Artifact metadata dictionary to record.
    """
    with self.context_lock:
        self.artifacts_metadata[target.name] = metadata
Register the artifact metadata dictionary for a built target.
entailment
def write_artifacts_metadata(self):
    """Write out a JSON file with all built targets artifact metadata,
    if such output file is specified in the config (no-op otherwise)."""
    if self.conf.artifacts_metadata_file:
        # consistency fix: the project's logger uses brace-style
        # placeholders (see other calls), not printf-style '%s'
        logger.info('Writing artifacts metadata to file "{}"',
                    self.conf.artifacts_metadata_file)
        with open(self.conf.artifacts_metadata_file, 'w') as fp:
            json.dump(self.artifacts_metadata, fp)
Write out a JSON file with all built targets artifact metadata, if such output file is specified.
entailment
def can_use_cache(self, target: Target) -> bool:
    """Return True if it's worth trying to load `target` from cache.

    Returns False when the target must be rebuilt regardless of its own
    cache status: caching disabled globally, target not cachable, or
    any of its dependencies / its buildenv is dirty.
    """
    # caching disabled for this execution => everything is dirty
    if self.conf.no_build_cache:
        return False
    # target opted out of caching
    if not target.props.cachable:
        return False
    # a dirty dependency makes the target dirty
    if any(self.targets[dep].is_dirty for dep in target.deps):
        return False
    # a dirty buildenv makes the target dirty too
    if target.buildenv and self.targets[target.buildenv].is_dirty:
        return False
    return True
Return True if should attempt to load `target` from cache. Return False if `target` has to be built, regardless of its cache status (because cache is disabled, or dependencies are dirty).
entailment
def get_build_file_path(self, build_module) -> str:
    """Return a full path to the build file of `build_module`.

    The returned path is always OS-native, regardless of the format of
    project_root (native) and build_module (with '/'). The project root
    module ('') maps to the project build file.
    """
    build_module = norm_proj_path(build_module, '')
    if build_module == '':
        file_name = BUILD_PROJ_FILE
    else:
        file_name = self.build_file_name
    return str(Path(self.project_root) / build_module / file_name)
Return a full path to the build file of `build_module`. The returned path will always be OS-native, regardless of the format of project_root (native) and build_module (with '/').
entailment
def guess_uri_type(uri: str, hint: str=None):
    """Guess the resource type of the URI string `uri`.

    If `hint` is given, it is trusted and returned as-is. Otherwise the
    URI is inspected with urlparse: a '.git' path means a remote Git
    repository; an http(s) URL is either a downloadable 'archive' (by
    extension) or a 'single' file; anything else is 'local' data.
    """
    # TODO(itamar): do this better
    if hint:
        return hint
    parsed_uri = urlparse(uri.lower())
    if parsed_uri.path.endswith('.git'):
        return 'git'
    if parsed_uri.scheme not in ('http', 'https'):
        return 'local'
    ext = splitext(parsed_uri.path)[-1]
    return 'archive' if ext in KNOWN_ARCHIVES else 'single'
Return a guess for the URI type based on the URI string `uri`. If `hint` is given, it is assumed to be the correct type. Otherwise, the URI is inspected using urlparse, and we try to guess whether it's a remote Git repository, a remote downloadable archive, or a local-only data.
entailment
def git_handler(unused_build_context, target, fetch, package_dir, tar):
    """Handle remote Git repository URI.

    Clone the repository under the private builder workspace (unless
    already cloned), and add it to the package tar (filtering out git
    internals).

    TODO(itamar): Support branches / tags / specific commit hashes
    TODO(itamar): Support updating a cloned repository
    TODO(itamar): Handle submodules?
    TODO(itamar): Handle force pulls?
    """
    # clone (or reuse a previous clone) under the builder workspace
    repo_dir = join(package_dir, fetch.name) if fetch.name else package_dir
    try:
        repo = git.Repo(repo_dir)
    except (InvalidGitRepositoryError, NoSuchPathError):
        repo = git.Repo.clone_from(fetch.uri, repo_dir)
    assert repo.working_tree_dir == repo_dir
    tar.add(package_dir, arcname=split_name(target.name),
            filter=gitfilter)
Handle remote Git repository URI. Clone the repository under the private builder workspace (unless already cloned), and add it to the package tar (filtering out git internals). TODO(itamar): Support branches / tags / specific commit hashes TODO(itamar): Support updating a cloned repository TODO(itamar): Handle submodules? TODO(itamar): Handle force pulls?
entailment
def fetch_url(url, dest, parent_to_remove_before_fetch):
    """Download the file at `url` to the local path `dest`.

    The directory `parent_to_remove_before_fetch` is wiped (if it
    exists) and recreated before downloading, so `dest` always holds a
    fresh fetch.

    :raises requests.HTTPError: If the server responds with an HTTP
        error status.
    """
    logger.debug('Downloading file {} from {}', dest, url)
    try:
        shutil.rmtree(parent_to_remove_before_fetch)
    except FileNotFoundError:
        pass
    os.makedirs(parent_to_remove_before_fetch)
    # TODO(itamar): Better downloading (multi-process-multi-threaded?)
    # Consider offloading this to a "standalone app" invoked with Docker
    resp = requests.get(url, stream=True)
    # Bug fix: fail loudly on HTTP errors instead of silently writing
    # the error-page body to `dest` as if it were the requested file.
    resp.raise_for_status()
    with open(dest, 'wb') as fetch_file:
        for chunk in resp.iter_content(chunk_size=32 * 1024):
            fetch_file.write(chunk)
Helper function to fetch a file from a URL.
entailment
def archive_handler(unused_build_context, target, fetch, package_dir, tar):
    """Handle remote downloadable archive URI.

    Download the archive and cache it under the private builder
    workspace (unless already downloaded), extract it, and add the
    content to the package tar.

    TODO(itamar): Support re-downloading if remote changed compared to
    local.
    TODO(itamar): Support more archive formats (currently only tarballs
    and zip).
    """
    archive_path = join(package_dir, basename(urlparse(fetch.uri).path))
    content_dir = join(package_dir, 'content')
    extract_dir = (join(content_dir, fetch.name) if fetch.name
                   else content_dir)
    fetch_url(fetch.uri, archive_path, package_dir)
    # TODO(itamar): Avoid repetition of splitting extension here and above
    # TODO(itamar): Don't use `extractall` on potentially untrusted archives
    ext = splitext(archive_path)[-1].lower()
    if ext in ('.gz', '.bz2', '.tgz'):
        with tarfile.open(archive_path, 'r:*') as src_tar:
            src_tar.extractall(extract_dir)
    elif ext == '.zip':
        with ZipFile(archive_path, 'r') as zipf:
            zipf.extractall(extract_dir)
    else:
        raise ValueError('Unsupported extension {}'.format(ext))
    tar.add(content_dir, arcname=split_name(target.name))
Handle remote downloadable archive URI. Download the archive and cache it under the private builder workspace (unless already downloaded), extract it, and add the content to the package tar. TODO(itamar): Support re-downloading if remote changed compared to local. TODO(itamar): Support more archive formats (currently only tarballs).
entailment
def fetch_file_handler(unused_build_context, target, fetch, package_dir, tar):
    """Handle remote downloadable file URI.

    Download the file and cache it under the private builder workspace
    (unless already downloaded), and add it to the package tar.

    TODO(itamar): Support re-downloading if remote changed compared to
    local.
    """
    dl_dir = join(package_dir, fetch.name) if fetch.name else package_dir
    file_name = basename(urlparse(fetch.uri).path)
    fetch_url(fetch.uri, join(dl_dir, file_name), dl_dir)
    tar.add(package_dir, arcname=split_name(target.name))
Handle remote downloadable file URI. Download the file and cache it under the private builder workspace (unless already downloaded), and add it to the package tar. TODO(itamar): Support re-downloading if remote changed compared to local.
entailment
def get_installer_desc(build_context, target) -> tuple:
    """Return a (target_name, script_name, package_tarball) tuple for
    the custom-installer `target`."""
    workspace_dir = build_context.get_workspace('CustomInstaller',
                                                target.name)
    target_name = split_name(target.name)
    package_tarball = join(workspace_dir, target_name) + '.tar.gz'
    return target_name, basename(target.props.script), package_tarball
Return a target_name, script_name, package_tarball tuple for `target`
entailment
def get_prebuilt_targets(build_context):
    """Return set of target names that are contained within cached base
    images.

    These targets may be considered "pre-built", and skipped during
    build. A dep is only pre-built if it is contained in some cached
    base image AND not required by any image that still needs building.
    """
    logger.info('Scanning for cached base images')
    # deps that are part of cached based images
    contained_deps = set()
    # deps that are needed by images that are going to be built,
    # but are not part of their base images
    required_deps = set()
    # mapping from target name to set of all its deps (descendants)
    cached_descendants = CachedDescendants(build_context.target_graph)
    for target_name, target in build_context.targets.items():
        # only image targets with caching behavior participate
        if 'image_caching_behavior' not in target.props:
            continue
        image_name = get_image_name(target)
        image_tag = target.props.image_tag
        icb = ImageCachingBehavior(image_name, image_tag,
                                   target.props.image_caching_behavior)
        # non-None image ID => the image was found in the build cache
        target.image_id = handle_build_cache(
            build_context.conf, image_name, image_tag, icb)
        if target.image_id:
            # mark deps of cached base image as "contained"
            image_deps = cached_descendants.get(target_name)
            contained_deps.update(image_deps)
            contained_deps.add(target.name)
        else:
            # mark deps of image that is going to be built
            # (and are not deps of its base image) as "required"
            image_deps = cached_descendants.get(target_name)
            base_image_deps = cached_descendants.get(
                target.props.base_image)
            required_deps.update(image_deps - base_image_deps)
    return contained_deps - required_deps
Return set of target names that are contained within cached base images These targets may be considered "pre-built", and skipped during build.
entailment
def write_summary(summary: dict, cache_dir: str): """Write the `summary` JSON to `cache_dir`. Updated the accessed timestamp to now before writing. """ # update the summary last-accessed timestamp summary['accessed'] = time() with open(join(cache_dir, 'summary.json'), 'w') as summary_file: summary_file.write(json.dumps(summary, indent=4, sort_keys=True))
Write the `summary` JSON to `cache_dir`. Updated the accessed timestamp to now before writing.
entailment
def load_target_from_cache(target: Target, build_context) -> (bool, bool): """Load `target` from build cache, restoring cached artifacts & summary. Return (build_cached, test_cached) tuple. `build_cached` is True if target restored successfully. `test_cached` is True if build is cached and test_time metadata is valid. """ cache_dir = build_context.conf.get_cache_dir(target, build_context) if not isdir(cache_dir): logger.debug('No cache dir found for target {}', target.name) return False, False # read summary file and restore relevant fields into target with open(join(cache_dir, 'summary.json'), 'r') as summary_file: summary = json.loads(summary_file.read()) for field in ('build_time', 'test_time', 'created', 'accessed'): target.summary[field] = summary.get(field) # compare artifacts hash if (hash_tree(join(cache_dir, 'artifacts.json')) != summary.get('artifacts_hash', 'no hash')): return False, False # read cached artifacts metadata with open(join(cache_dir, 'artifacts.json'), 'r') as artifacts_meta_file: artifact_desc = json.loads(artifacts_meta_file.read()) # restore all artifacts for type_name, artifact_list in artifact_desc.items(): artifact_type = getattr(AT, type_name) for artifact in artifact_list: # restore artifact to its expected src path if artifact_type not in _NO_CACHE_TYPES: if not restore_artifact( artifact['src'], artifact['hash'], build_context.conf): target.artifacts.reset() return False, False if artifact_type in (AT.docker_image,): # "restore" docker image from local registry image_id = artifact['src'] image_full_name = artifact['dst'] try: tag_docker_image(image_id, image_full_name) except: logger.debug('Docker image with ID {} not found locally', image_id) target.artifacts.reset() return False, False target.image_id = image_id target.artifacts.add( artifact_type, artifact['src'], artifact['dst']) write_summary(summary, cache_dir) # check that the testing cache exists. 
if not isfile(join(cache_dir, 'tested.json')): logger.debug('No testing cache found for target {}', target.name) return True, False # read the testing cache. with open(join(cache_dir, 'tested.json'), 'r') as tested_file: target.tested = json.loads(tested_file.read()) test_key = target.test_hash(build_context) return True, (target.tested.get(test_key) is not None)
Load `target` from build cache, restoring cached artifacts & summary. Return (build_cached, test_cached) tuple. `build_cached` is True if target restored successfully. `test_cached` is True if build is cached and test_time metadata is valid.
entailment
def copy_artifact(src_path: str, artifact_hash: str, conf: Config): """Copy the artifact at `src_path` with hash `artifact_hash` to artifacts cache dir. If an artifact already exists at that location, it is assumed to be identical (since it's based on hash), and the copy is skipped. TODO: pruning policy to limit cache size. """ cache_dir = conf.get_artifacts_cache_dir() if not isdir(cache_dir): makedirs(cache_dir) cached_artifact_path = join(cache_dir, artifact_hash) if isfile(cached_artifact_path) or isdir(cached_artifact_path): logger.debug('Skipping copy of existing cached artifact {} -> {}', src_path, cached_artifact_path) return abs_src_path = join(conf.project_root, src_path) logger.debug('Caching artifact {} under {}', abs_src_path, cached_artifact_path) shutil.copy(abs_src_path, cached_artifact_path)
Copy the artifact at `src_path` with hash `artifact_hash` to artifacts cache dir. If an artifact already exists at that location, it is assumed to be identical (since it's based on hash), and the copy is skipped. TODO: pruning policy to limit cache size.
entailment
def restore_artifact(src_path: str, artifact_hash: str, conf: Config): """Restore the artifact whose hash is `artifact_hash` to `src_path`. Return True if cached artifact is found, valid, and restored successfully. Otherwise return False. """ cache_dir = conf.get_artifacts_cache_dir() if not isdir(cache_dir): return False cached_artifact_path = join(cache_dir, artifact_hash) if isfile(cached_artifact_path) or isdir(cached_artifact_path): # verify cached item hash matches expected hash actual_hash = hash_tree(cached_artifact_path) if actual_hash != artifact_hash: logger.warning( 'Cached artifact {} expected hash {} != actual hash {}', src_path, artifact_hash, actual_hash) rmnode(cached_artifact_path) return False # if something exists in src_path, check if it matches the cached item abs_src_path = join(conf.project_root, src_path) if isfile(abs_src_path) or isdir(abs_src_path): existing_hash = hash_tree(src_path) if existing_hash == artifact_hash: logger.debug('Existing artifact {} matches cached hash {}', src_path, artifact_hash) return True logger.debug('Replacing existing artifact {} with cached one', src_path) rmnode(abs_src_path) logger.debug('Restoring cached artifact {} to {}', artifact_hash, src_path) shutil.copy(cached_artifact_path, abs_src_path) return True logger.debug('No cached artifact for {} with hash {}', src_path, artifact_hash) return False
Restore the artifact whose hash is `artifact_hash` to `src_path`. Return True if cached artifact is found, valid, and restored successfully. Otherwise return False.
entailment
def save_target_in_cache(target: Target, build_context): """Save `target` to build cache for future reuse. The target hash is used to determine its cache location, where the target metadata and artifacts metadata are seriazlied to JSON. In addition, relevant artifacts produced by the target are copied under the artifacts cache dir by their content hash. TODO: pruning policy to limit cache size. """ cache_dir = build_context.conf.get_cache_dir(target, build_context) if isdir(cache_dir): rmtree(cache_dir) makedirs(cache_dir) logger.debug('Saving target metadata in cache under {}', cache_dir) # write target metadata with open(join(cache_dir, 'target.json'), 'w') as meta_file: meta_file.write(target.json(build_context)) # copy artifacts to artifact cache by hash artifacts = target.artifacts.get_all() artifact_hashes = {} for artifact_type, artifact_map in artifacts.items(): if artifact_type in (AT.docker_image,): continue for dst_path, src_path in artifact_map.items(): artifact_hashes[dst_path] = hash_tree(src_path) # not caching "app" artifacts, since they're part # of the source tree if artifact_type not in _NO_CACHE_TYPES: copy_artifact(src_path, artifact_hashes[dst_path], build_context.conf) # serialize target artifacts metadata + hashes artifacts_desc = { artifact_type.name: [{'dst': dst_path, 'src': src_path, 'hash': artifact_hashes.get(dst_path)} for dst_path, src_path in artifact_map.items()] for artifact_type, artifact_map in artifacts.items() } with open(join(cache_dir, 'artifacts.json'), 'w') as artifacts_meta_file: artifacts_meta_file.write(json.dumps(artifacts_desc, indent=4, sort_keys=True)) # copying the summary dict so I can modify it without mutating the target summary = dict(target.summary) summary['name'] = target.name summary['artifacts_hash'] = hash_tree(join(cache_dir, 'artifacts.json')) if summary.get('created') is None: summary['created'] = time() write_summary(summary, cache_dir)
Save `target` to build cache for future reuse. The target hash is used to determine its cache location, where the target metadata and artifacts metadata are seriazlied to JSON. In addition, relevant artifacts produced by the target are copied under the artifacts cache dir by their content hash. TODO: pruning policy to limit cache size.
entailment
def get(self, key): """Return set of descendants of node named `key` in `target_graph`. Returns from cached dict if exists, otherwise compute over the graph and cache results in the dict. """ if key not in self: self[key] = set(get_descendants(self._target_graph, key)) return self[key]
Return set of descendants of node named `key` in `target_graph`. Returns from cached dict if exists, otherwise compute over the graph and cache results in the dict.
entailment
def fatal(msg, *args, **kwargs): """Print a red `msg` to STDERR and exit. To be used in a context of an exception, also prints out the exception. The message is formatted with `args` & `kwargs`. """ exc_str = format_exc() if exc_str.strip() != 'NoneType: None': logger.info('{}', format_exc()) fatal_noexc(msg, *args, **kwargs)
Print a red `msg` to STDERR and exit. To be used in a context of an exception, also prints out the exception. The message is formatted with `args` & `kwargs`.
entailment
def fatal_noexc(msg, *args, **kwargs): """Print a red `msg` to STDERR and exit. The message is formatted with `args` & `kwargs`. """ print(Fore.RED + 'Fatal: ' + msg.format(*args, **kwargs) + Style.RESET_ALL, file=sys.stderr) sys.exit(1)
Print a red `msg` to STDERR and exit. The message is formatted with `args` & `kwargs`.
entailment
def rmnode(path: str): """Forcibly remove file or directory tree at `path`. Fail silently if base dir doesn't exist.""" if isdir(path): rmtree(path) elif isfile(path): os.remove(path)
Forcibly remove file or directory tree at `path`. Fail silently if base dir doesn't exist.
entailment
def link_node(abs_src: str, abs_dest: str, force: bool=False): """Sync source node (file / dir) to destination path using hard links.""" dest_parent_dir = split(abs_dest)[0] if not isdir(dest_parent_dir): # exist_ok=True in case of concurrent creation of the same # parent dir os.makedirs(dest_parent_dir, exist_ok=True) if isfile(abs_src): # sync file by linking it to dest link_func(abs_src, abs_dest, force) elif isdir(abs_src): # sync dir by recursively linking files under it to dest shutil.copytree(abs_src, abs_dest, copy_function=functools.partial(link_func, force=force), ignore=shutil.ignore_patterns('.git')) else: raise FileNotFoundError(abs_src)
Sync source node (file / dir) to destination path using hard links.
entailment
def link_files(files: set, workspace_src_dir: str, common_parent: str, conf): """Sync the list of files and directories in `files` to destination directory specified by `workspace_src_dir`. "Sync" in the sense that every file given in `files` will be hard-linked under `workspace_src_dir` after this function returns, and no other files will exist under `workspace_src_dir`. For directories in `files`, hard-links of contained files are created recursively. All paths in `files`, and the `workspace_src_dir`, must be relative to `conf.project_root`. If `common_parent` is given, and it is a common parent directory of all `files`, then the `commonm_parent` part is truncated from the sync'ed files destination path under `workspace_src_dir`. :raises FileNotFoundError: If `files` contains files or directories that do not exist. :raises ValueError: If `common_parent` is given (not `None`), but is *NOT* a common parent of all `files`. """ norm_dir = normpath(workspace_src_dir) base_dir = '' if common_parent: common_parent = normpath(common_parent) base_dir = commonpath(list(files) + [common_parent]) if base_dir != common_parent: raise ValueError('{} is not the common parent of all target ' 'sources and data'.format(common_parent)) logger.debug( 'Rebasing files in image relative to common parent dir {}', base_dir) num_linked = 0 for src in files: abs_src = join(conf.project_root, src) abs_dest = join(conf.project_root, workspace_src_dir, relpath(src, base_dir)) link_node(abs_src, abs_dest, conf.builders_workspace_dir in src) num_linked += 1 return num_linked
Sync the list of files and directories in `files` to destination directory specified by `workspace_src_dir`. "Sync" in the sense that every file given in `files` will be hard-linked under `workspace_src_dir` after this function returns, and no other files will exist under `workspace_src_dir`. For directories in `files`, hard-links of contained files are created recursively. All paths in `files`, and the `workspace_src_dir`, must be relative to `conf.project_root`. If `common_parent` is given, and it is a common parent directory of all `files`, then the `commonm_parent` part is truncated from the sync'ed files destination path under `workspace_src_dir`. :raises FileNotFoundError: If `files` contains files or directories that do not exist. :raises ValueError: If `common_parent` is given (not `None`), but is *NOT* a common parent of all `files`.
entailment
def norm_proj_path(path, build_module): """Return a normalized path for the `path` observed in `build_module`. The normalized path is "normalized" (in the `os.path.normpath` sense), relative from the project root directory, and OS-native. Supports making references from project root directory by prefixing the path with "//". :raises ValueError: If path references outside the project sandbox. """ if path == '//': return '' if path.startswith('//'): norm = normpath(path[2:]) if norm[0] in ('.', '/', '\\'): raise ValueError("Invalid path: `{}'".format(path)) return norm if path.startswith('/'): raise ValueError("Invalid path: `{}' - use '//' to start from " "project root".format(path)) if build_module == '//': build_module = '' norm = normpath(join(build_module, path)) if norm.startswith('..'): raise ValueError( "Invalid path `{}' - must remain inside project sandbox" .format(path)) return norm.strip('.')
Return a normalized path for the `path` observed in `build_module`. The normalized path is "normalized" (in the `os.path.normpath` sense), relative from the project root directory, and OS-native. Supports making references from project root directory by prefixing the path with "//". :raises ValueError: If path references outside the project sandbox.
entailment
def search_for_parent_dir(start_at: str=None, with_files: set=None, with_dirs: set=None) -> str: """Return absolute path of first parent directory of `start_at` that contains all files `with_files` and all dirs `with_dirs` (including `start_at`). If `start_at` not specified, start at current working directory. :param start_at: Initial path for searching for the project build file. Returns `None` upon reaching FS root without finding a project buildfile. """ if not start_at: start_at = os.path.abspath(os.curdir) if not with_files: with_files = set() if not with_dirs: with_dirs = set() exp_hits = len(with_files) + len(with_dirs) while start_at: num_hits = 0 for entry in scandir(start_at): if ((entry.is_file() and entry.name in with_files) or (entry.is_dir() and entry.name in with_dirs)): num_hits += 1 if num_hits == exp_hits: return start_at cur_level = start_at start_at = os.path.split(cur_level)[0] if os.path.realpath(cur_level) == os.path.realpath(start_at): # looped on root once break
Return absolute path of first parent directory of `start_at` that contains all files `with_files` and all dirs `with_dirs` (including `start_at`). If `start_at` not specified, start at current working directory. :param start_at: Initial path for searching for the project build file. Returns `None` upon reaching FS root without finding a project buildfile.
entailment
def acc_hash(filepath: str, hasher): """Accumulate content of file at `filepath` in `hasher`.""" with open(filepath, 'rb') as f: while True: chunk = f.read(_BUF_SIZE) if not chunk: break hasher.update(chunk)
Accumulate content of file at `filepath` in `hasher`.
entailment
def hash_file(filepath: str) -> str: """Return the hexdigest MD5 hash of content of file at `filepath`.""" md5 = hashlib.md5() acc_hash(filepath, md5) return md5.hexdigest()
Return the hexdigest MD5 hash of content of file at `filepath`.
entailment
def hash_tree(filepath: str) -> str: """Return the hexdigest MD5 hash of file or directory at `filepath`. If file - just hash file content. If directory - walk the directory, and accumulate hashes of all the relative paths + contents of files under the directory. """ if isfile(filepath): return hash_file(filepath) if isdir(filepath): base_dir = filepath md5 = hashlib.md5() for root, dirs, files in walk(base_dir): dirs.sort() for fname in sorted(files): filepath = join(root, fname) # consistent hashing between POSIX & Windows md5.update(relpath(filepath, base_dir) .replace('\\', '/').encode('utf8')) acc_hash(filepath, md5) return md5.hexdigest() return None
Return the hexdigest MD5 hash of file or directory at `filepath`. If file - just hash file content. If directory - walk the directory, and accumulate hashes of all the relative paths + contents of files under the directory.
entailment
def add(self, artifact_type: ArtifactType, src_path: str, dst_path: str=None): """Add an artifact of type `artifact_type` at `src_path`. `src_path` should be the path of the file relative to project root. `dst_path`, if given, is the desired path of the artifact in dependent targets, relative to its base path (by type). """ if dst_path is None: dst_path = src_path other_src_path = self._artifacts[artifact_type].setdefault( dst_path, src_path) if src_path != other_src_path: raise RuntimeError( '{} artifact with dest path {} exists with different src ' 'path: {} != {}'.format(artifact_type, dst_path, src_path, other_src_path))
Add an artifact of type `artifact_type` at `src_path`. `src_path` should be the path of the file relative to project root. `dst_path`, if given, is the desired path of the artifact in dependent targets, relative to its base path (by type).
entailment
def extend(self, artifact_type: ArtifactType, src_paths: list): """Add all `src_paths` as artifact of type `artifact_type`.""" for src_path in src_paths: self.add(artifact_type, src_path, src_path)
Add all `src_paths` as artifact of type `artifact_type`.
entailment
def link_types(self, base_dir: str, types: list, conf: Config) -> int: """Link all artifacts with types `types` under `base_dir` and return the number of linked artifacts.""" num_linked = 0 for kind in types: artifact_map = self._artifacts.get(kind) if not artifact_map: continue num_linked += self._link(join(base_dir, self.type_to_dir[kind]), artifact_map, conf) return num_linked
Link all artifacts with types `types` under `base_dir` and return the number of linked artifacts.
entailment
def link_for_image(self, base_dir: str, conf: Config) -> int: """Link all artifacts required for a Docker image under `base_dir` and return the number of linked artifacts.""" return self.link_types( base_dir, [ArtifactType.app, ArtifactType.binary, ArtifactType.gen_py], conf)
Link all artifacts required for a Docker image under `base_dir` and return the number of linked artifacts.
entailment
def _link(self, base_dir: str, artifact_map: dict, conf: Config): """Link all artifacts in `artifact_map` under `base_dir` and return the number of artifacts linked.""" num_linked = 0 for dst, src in artifact_map.items(): abs_src = join(conf.project_root, src) abs_dest = join(conf.project_root, base_dir, dst) link_node(abs_src, abs_dest) num_linked += 1 return num_linked
Link all artifacts in `artifact_map` under `base_dir` and return the number of artifacts linked.
entailment
def get_readme(): """Read and return the content of the project README file.""" base_dir = path.abspath(path.dirname(__file__)) with open(path.join(base_dir, 'README.md'), encoding='utf-8') as readme_f: return readme_f.read()
Read and return the content of the project README file.
entailment
def args_to_props(target: Target, builder: Builder, args: list, kwargs: dict): """Convert build file `args` and `kwargs` to `target` props. Use builder signature to validate builder usage in build-file, raising appropriate exceptions on signature-mismatches. Use builder signature default values to assign props values to args that were not passed in the build-file call. This function handles only the arg/kwargs-to-prop assignment, including default values when necessary. When it returns, if no exception was raised, it is guaranteed that `target.props` contains all args defined in the builder registered signature, with values taken either from the build-file call, or from default values provided in the signature. Specifically, this function DOES NOT do anything about the arg types defined in the builder signature. :raise TypeError: On signature-call mismatch. """ if len(args) > len(builder.sig): # too many positional arguments supplied - say how many we can take raise TypeError('{}() takes {}, but {} were given' .format(target.builder_name, format_num_positional_arguments(builder), len(args))) # read given args into the matching props according to the signature for arg_name, value in zip(builder.sig.keys(), args): target.props[arg_name] = value # read given kwargs into the named props, asserting matching sig arg names for arg_name, value in kwargs.items(): if arg_name not in builder.sig: raise TypeError("{}() got an unexpected keyword argument '{}'" .format(target.builder_name, arg_name)) if arg_name in target.props: raise TypeError("{}() got multiple values for argument '{}'" .format(target.builder_name, arg_name)) target.props[arg_name] = value # go over signature args, assigning default values to anything that wasn't # assigned from args / kwargs, making sure no positional args are missing missing_args = [] for arg_name, sig_spec in builder.sig.items(): if arg_name not in target.props: if sig_spec.default == Empty: missing_args.append(arg_name) else: 
target.props[arg_name] = sig_spec.default if missing_args: # not enough positional arguments supplied - say which # TODO(itamar): match Python's error more closely (last "and "): # foo() missing 3 required positional arguments: 'a', 'b', and 'c' # TODO(itamar): use inflect raise TypeError('{}() missing {} required positional argument{}: {}' .format(target.builder_name, len(missing_args), 's' if len(missing_args) > 1 else '', ', '.join("'{}'".format(arg) for arg in missing_args))) logger.debug('Got props for target: {}', target)
Convert build file `args` and `kwargs` to `target` props. Use builder signature to validate builder usage in build-file, raising appropriate exceptions on signature-mismatches. Use builder signature default values to assign props values to args that were not passed in the build-file call. This function handles only the arg/kwargs-to-prop assignment, including default values when necessary. When it returns, if no exception was raised, it is guaranteed that `target.props` contains all args defined in the builder registered signature, with values taken either from the build-file call, or from default values provided in the signature. Specifically, this function DOES NOT do anything about the arg types defined in the builder signature. :raise TypeError: On signature-call mismatch.
entailment
def extractor( builder_name: str, builder: Builder, build_file_path: str, build_context) -> types.FunctionType: """Return a target extraction function for a specific builder and a specific build file.""" build_module = to_build_module(build_file_path, build_context.conf) def extract_target(*args, **kwargs): """The actual target extraction function that is executed when any builder function is called in a build file.""" target = Target(builder_name=builder_name) # convert args/kwargs to target.props and handle arg types args_to_props(target, builder, args, kwargs) raw_name = target.props.name handle_typed_args(target, builder, build_module) logger.debug('Extracting target: {}', target) # promote the `name` and `deps` from props to the target instance target.name = target.props.pop('name') target.deps = target.props.pop('deps', []) if target.deps: logger.debug('Got deps for target "{0.name}": {0.deps}', target) # invoke builder hooks on extracted target for hook_name, hook in Plugin.get_hooks_for_builder(builder_name): logger.debug('About to invoke hook {} on target {}', hook_name, target) hook(build_context, target) # save the target in the build context build_context.register_target(target) logger.debug('Registered {}', target) return extract_target
Return a target extraction function for a specific builder and a specific build file.
entailment
def rdopkg_runner(): """ default rdopkg action runner including rdopkg action modules """ aman = ActionManager() # assume all actions.* modules are action modules aman.add_actions_modules(actions) aman.fill_aliases() # additional rdopkg action module logic should go here return ActionRunner(action_manager=aman)
default rdopkg action runner including rdopkg action modules
entailment
def rdopkg(*cargs): """ rdopkg CLI interface Execute rdopkg action with specified arguments and return shell friendly exit code. This is the default high level way to interact with rdopkg. py> rdopkg('new-version', '1.2.3') is equivalent to $> rdopkg new-version 1.2.3 """ runner = rdopkg_runner() return shell.run(runner, cargs=cargs, prog='rdopkg', version=__version__)
rdopkg CLI interface Execute rdopkg action with specified arguments and return shell friendly exit code. This is the default high level way to interact with rdopkg. py> rdopkg('new-version', '1.2.3') is equivalent to $> rdopkg new-version 1.2.3
entailment
def getDynDnsClientForConfig(config, plugins=None): """Instantiate and return a complete and working dyndns client. :param config: a dictionary with configuration keys :param plugins: an object that implements PluginManager """ initparams = {} if "interval" in config: initparams["detect_interval"] = config["interval"] if plugins is not None: initparams["plugins"] = plugins if "updater" in config: for updater_name, updater_options in config["updater"]: initparams["updater"] = get_updater_class(updater_name)(**updater_options) # find class and instantiate the detector: if "detector" in config: detector_name, detector_opts = config["detector"][-1] try: klass = get_detector_class(detector_name) except KeyError as exc: LOG.warning("Invalid change detector configuration: '%s'", detector_name, exc_info=exc) return None thedetector = klass(**detector_opts) initparams["detector"] = thedetector return DynDnsClient(**initparams)
Instantiate and return a complete and working dyndns client. :param config: a dictionary with configuration keys :param plugins: an object that implements PluginManager
entailment
def sync(self): """ Synchronize the registered IP with the detected IP (if needed). This can be expensive, mostly depending on the detector, but also because updating the dynamic ip in itself is costly. Therefore, this method should usually only be called on startup or when the state changes. """ detected_ip = self.detector.detect() if detected_ip is None: LOG.debug("Couldn't detect the current IP using detector %r", self.detector.names()[-1]) # we don't have a value to set it to, so don't update! Still shouldn't happen though elif self.dns.detect() != detected_ip: LOG.info("%s: dns IP '%s' does not match detected IP '%s', updating", self.updater.hostname, self.dns.get_current_value(), detected_ip) self.status = self.updater.update(detected_ip) self.plugins.after_remote_ip_update(detected_ip, self.status) else: self.status = 0 LOG.debug("%s: nothing to do, dns '%s' equals detection '%s'", self.updater.hostname, self.dns.get_current_value(), self.detector.get_current_value())
Synchronize the registered IP with the detected IP (if needed). This can be expensive, mostly depending on the detector, but also because updating the dynamic ip in itself is costly. Therefore, this method should usually only be called on startup or when the state changes.
entailment
def has_state_changed(self): """ Detect changes in offline detector and real DNS value. Detect a change either in the offline detector or a difference between the real DNS value and what the online detector last got. This is efficient, since it only generates minimal dns traffic for online detectors and no traffic at all for offline detectors. :rtype: boolean """ self.lastcheck = time.time() # prefer offline state change detection: if self.detector.can_detect_offline(): self.detector.detect() elif not self.dns.detect() == self.detector.get_current_value(): # The following produces traffic, but probably less traffic # overall than the detector self.detector.detect() if self.detector.has_changed(): LOG.debug("detector changed") return True elif self.dns.has_changed(): LOG.debug("dns changed") return True return False
Detect changes in offline detector and real DNS value. Detect a change either in the offline detector or a difference between the real DNS value and what the online detector last got. This is efficient, since it only generates minimal dns traffic for online detectors and no traffic at all for offline detectors. :rtype: boolean
entailment
def needs_check(self): """ Check if enough time has elapsed to perform a check(). If this time has elapsed, a state change check through has_state_changed() should be performed and eventually a sync(). :rtype: boolean """ if self.lastcheck is None: return True return time.time() - self.lastcheck >= self.ipchangedetection_sleep
Check if enough time has elapsed to perform a check(). If this time has elapsed, a state change check through has_state_changed() should be performed and eventually a sync(). :rtype: boolean
entailment
def needs_sync(self): """ Check if enough time has elapsed to perform a sync(). A call to sync() should be performed every now and then, no matter what has_state_changed() says. This is really just a safety thing to enforce consistency in case the state gets messed up. :rtype: boolean """ if self.lastforce is None: self.lastforce = time.time() return time.time() - self.lastforce >= self.forceipchangedetection_sleep
Check if enough time has elapsed to perform a sync(). A call to sync() should be performed every now and then, no matter what has_state_changed() says. This is really just a safety thing to enforce consistency in case the state gets messed up. :rtype: boolean
entailment
def check(self): """ Check if the detector changed and call sync() accordingly. If the sleep time has elapsed, this method will see if the attached detector has had a state change and call sync() accordingly. """ if self.needs_check(): if self.has_state_changed(): LOG.debug("state changed, syncing...") self.sync() elif self.needs_sync(): LOG.debug("forcing sync after %s seconds", self.forceipchangedetection_sleep) self.lastforce = time.time() self.sync() else: # nothing to be done pass
Check if the detector changed and call sync() accordingly. If the sleep time has elapsed, this method will see if the attached detector has had a state change and call sync() accordingly.
entailment
def _parser_jsonip(text): """Parse response text like the one returned by http://jsonip.com/.""" import json try: return str(json.loads(text).get("ip")) except ValueError as exc: LOG.debug("Text '%s' could not be parsed", exc_info=exc) return None
Parse response text like the one returned by http://jsonip.com/.
entailment
def detect(self): """ Try to contact a remote webservice and parse the returned output. Determine the IP address from the parsed output and return. """ if self.opts_url and self.opts_parser: url = self.opts_url parser = self.opts_parser else: url, parser = choice(self.urls) # noqa: S311 parser = globals().get("_parser_" + parser) theip = _get_ip_from_url(url, parser) if theip is None: LOG.info("Could not detect IP using webcheck! Offline?") self.set_current_value(theip) return theip
Try to contact a remote webservice and parse the returned output. Determine the IP address from the parsed output and return.
entailment
def add_plugin(self, plugin, call): """Add plugin to list of plugins. Will be added if it has the attribute I'm bound to. """ meth = getattr(plugin, call, None) if meth is not None: self.plugins.append((plugin, meth))
Add plugin to list of plugins. Will be added if it has the attribute I'm bound to.
entailment
def listcall(self, *arg, **kw): """Call each plugin sequentially. Return the first result that is not None. """ final_result = None for _, meth in self.plugins: result = meth(*arg, **kw) if final_result is None and result is not None: final_result = result return final_result
Call each plugin sequentially. Return the first result that is not None.
entailment
def add_plugin(self, plugin): """Add the given plugin.""" # allow plugins loaded via entry points to override builtin plugins new_name = self.plugin_name(plugin) self._plugins[:] = [p for p in self._plugins if self.plugin_name(p) != new_name] self._plugins.append(plugin)
Add the given plugin.
entailment
def configure(self, args): """Configure the set of plugins with the given args. After configuration, disabled plugins are removed from the plugins list. """ for plug in self._plugins: plug_name = self.plugin_name(plug) plug.enabled = getattr(args, "plugin_%s" % plug_name, False) if plug.enabled and getattr(plug, "configure", None): if callable(getattr(plug, "configure", None)): plug.configure(args) LOG.debug("Available plugins: %s", self._plugins) self.plugins = [plugin for plugin in self._plugins if getattr(plugin, "enabled", False)] LOG.debug("Enabled plugins: %s", self.plugins)
Configure the set of plugins with the given args. After configuration, disabled plugins are removed from the plugins list.
entailment
def options(self, parser, env): """Register commandline options with the given parser. Implement this method for normal options behavior with protection from OptionConflictErrors. If you override this method and want the default --with-$name option to be registered, be sure to call super(). :param parser: argparse parser object :param env: """ def get_help(plug): """Extract the help docstring from the given plugin.""" import textwrap if plug.__class__.__doc__: # doc sections are often indented; compress the spaces return textwrap.dedent(plug.__class__.__doc__) return "(no help available)" for plug in self._plugins: env_opt = ENV_PREFIX + self.plugin_name(plug).upper() env_opt = env_opt.replace("-", "_") parser.add_argument("--with-%s" % self.plugin_name(plug), action="store_true", dest="plugin_%s" % self.plugin_name(plug), default=env.get(env_opt), help="Enable plugin %s: %s [%s]" % (plug.__class__.__name__, get_help(plug), env_opt))
Register commandline options with the given parser. Implement this method for normal options behavior with protection from OptionConflictErrors. If you override this method and want the default --with-$name option to be registered, be sure to call super(). :param parser: argparse parser object :param env:
entailment
def load_plugins(self): """Load plugins from entry point(s).""" from pkg_resources import iter_entry_points seen = set() for entry_point in self.entry_points: for ep in iter_entry_points(entry_point): if ep.name in seen: continue seen.add(ep.name) try: plugincls = ep.load() except Exception as exc: # never let a plugin load kill us warn("Unable to load plugin %s: %s" % (ep, exc), RuntimeWarning) continue plugin = plugincls() self.add_plugin(plugin) super(EntryPointPluginManager, self).load_plugins()
Load plugins from entry point(s).
entailment
def load_plugins(self): """Load plugins from `dyndnsc.plugins.builtin`.""" from dyndnsc.plugins.builtin import PLUGINS for plugin in PLUGINS: self.add_plugin(plugin()) super(BuiltinPluginManager, self).load_plugins()
Load plugins from `dyndnsc.plugins.builtin`.
entailment
def update(self, ip): """Update the IP on the remote service.""" return self.handler.update_record(name=self._recordname, address=ip)
Update the IP on the remote service.
entailment
def load(self): """(Re)Load config file.""" try: with open(self.config_file) as configfile: self.config = yaml.load(configfile) except TypeError: # no config file (use environment variables) pass if self.config: self.prefix = self.config.get('config_prefix', None) if not self.prefix: if os.getenv(self.config_prefix): self.prefix = os.getenv(self.config_prefix) else: for path in [ os.path.join(self.basepath, self.default_file), os.path.join(self.config_root, self.default_file) ]: if os.path.exists(path): with open(path) as conf: config = yaml.load(conf) prefix = config.get( self.config_prefix.lower(), None ) if prefix: self.prefix = prefix break
(Re)Load config file.
entailment
def get(self, var, section=None, **kwargs): """Retrieve a config var. Return environment variable if it exists (ie [self.prefix + _] + [section + _] + var) otherwise var from config file. If both are null and no default is set a ConfigError will be raised, otherwise default will be returned. :param var: key to look up :param default: default value, must be supplied as keyword """ # default is not a specified keyword argument so we can distinguish # between a default set to None and no default sets if not section and self.section: section = self.section default = kwargs.get('default', None) env_var = "{}{}{}".format( _suffix(self.prefix) if self.prefix else '', _suffix(alphasnake(section)) if section else '', alphasnake(str(var)) ).upper() config = self.config.get(section, {}) if section else self.config result = config.get(var, default) result = os.getenv(env_var, default=result) # no default keyword supplied (and no result) # use is None to allow empty lists etc if result is None and 'default' not in kwargs: msg = "Could not find '{}'".format(var) if section: msg = "{} in section '{}'.".format( msg, section ) msg = "{} Checked environment variable: {}".format(msg, env_var) if self.config_file: msg = "{} and file: {}".format(msg, self.config_file) raise ConfigError(msg) return result
Retrieve a config var. Return environment variable if it exists (ie [self.prefix + _] + [section + _] + var) otherwise var from config file. If both are null and no default is set a ConfigError will be raised, otherwise default will be returned. :param var: key to look up :param default: default value, must be supplied as keyword
entailment
def keys(self, section=None): """Provide dict like keys method""" if not section and self.section: section = self.section config = self.config.get(section, {}) if section else self.config return config.keys()
Provide dict like keys method
entailment
def items(self, section=None): """Provide dict like items method""" if not section and self.section: section = self.section config = self.config.get(section, {}) if section else self.config return config.items()
Provide dict like items method
entailment
def values(self, section=None): """Provide dict like values method""" if not section and self.section: section = self.section config = self.config.get(section, {}) if section else self.config return config.values()
Provide dict like values method
entailment
def _get_filepath(self, filename=None, config_dir=None): """ Get config file. :param filename: name of config file (not path) :param config_dir: dir name prepended to file name. Note: we use e.g. GBR_CONFIG_DIR here, this is the default value in GBR but it is actually self.env_prefix + '_DIR' etc. If config_dir is not supplied it will be set to the value of the environment variable GBR_CONFIG_DIR or None. If filename is not supplied and the environment variable GBR_CONFIG is set and contains a path, its value will be tested to see if a file exists, if so that is returned as the config file otherwise filename will be set to GBR_CONFIG, if it exists, otherwise 'config.yaml'. If a filename is supplied or GBR_CONFIG is not an existing file: If the environment variable GBR_CONFIG_PATH exists the path GBR_CONFIG_PATH/config_dir/filename is checked. If it doesn't exist config/CONFIG_DIR/filename is checked (relative to the root of the (GBR) repo) finally GBR_CONFIG_DEFAULT/CONFIG_DIR/filename is tried If no file is found None will be returned. """ # pylint: disable=no-self-use config_file = None config_dir_env_var = self.env_prefix + '_DIR' if not filename: # Check env vars for config filename = os.getenv(self.env_prefix, default=self.default_file) # contains path so try directly if os.path.dirname(filename) and os.path.exists(filename): config_file = filename if not config_file: # Cannot contain path filename = os.path.basename(filename) if not config_dir: config_dir = os.getenv(config_dir_env_var, default='') for path in [self.basepath, self.config_root]: filepath = os.path.join(path, config_dir, filename) if os.path.exists(filepath): config_file = filepath break return config_file
Get config file. :param filename: name of config file (not path) :param config_dir: dir name prepended to file name. Note: we use e.g. GBR_CONFIG_DIR here, this is the default value in GBR but it is actually self.env_prefix + '_DIR' etc. If config_dir is not supplied it will be set to the value of the environment variable GBR_CONFIG_DIR or None. If filename is not supplied and the environment variable GBR_CONFIG is set and contains a path, its value will be tested to see if a file exists, if so that is returned as the config file otherwise filename will be set to GBR_CONFIG, if it exists, otherwise 'config.yaml'. If a filename is supplied or GBR_CONFIG is not an existing file: If the environment variable GBR_CONFIG_PATH exists the path GBR_CONFIG_PATH/config_dir/filename is checked. If it doesn't exist config/CONFIG_DIR/filename is checked (relative to the root of the (GBR) repo) finally GBR_CONFIG_DEFAULT/CONFIG_DIR/filename is tried If no file is found None will be returned.
entailment
def update(self, ip): """Update the IP on the remote service.""" timeout = 60 LOG.debug("Updating '%s' to '%s' at service '%s'", self.hostname, ip, self._updateurl) params = {"domains": self.hostname.partition(".")[0], "token": self.__token} if ip is None: params["ip"] = "" else: params["ip"] = ip # LOG.debug("Update params: %r", params) req = requests.get(self._updateurl, params=params, headers=constants.REQUEST_HEADERS_DEFAULT, timeout=timeout) LOG.debug("status %i, %s", req.status_code, req.text) # duckdns response codes seem undocumented... if req.status_code == 200: if req.text.startswith("OK"): return ip return req.text return "invalid http status code: %s" % req.status_code
Update the IP on the remote service.
entailment
def resolve(hostname, family=AF_UNSPEC): """ Resolve hostname to one or more IP addresses through the operating system. Resolution is carried out for the given address family. If no address family is specified, only IPv4 and IPv6 addresses are returned. If multiple IP addresses are found, all are returned. :param family: AF_INET or AF_INET6 or AF_UNSPEC (default) :return: tuple of unique IP addresses """ af_ok = (AF_INET, AF_INET6) if family != AF_UNSPEC and family not in af_ok: raise ValueError("Invalid family '%s'" % family) ips = () try: addrinfo = socket.getaddrinfo(hostname, None, family) except socket.gaierror as exc: # EAI_NODATA and EAI_NONAME are expected if this name is not (yet) # present in DNS if exc.errno not in (socket.EAI_NODATA, socket.EAI_NONAME): LOG.debug("socket.getaddrinfo() raised an exception", exc_info=exc) else: if family == AF_UNSPEC: ips = tuple({item[4][0] for item in addrinfo if item[0] in af_ok}) else: ips = tuple({item[4][0] for item in addrinfo}) return ips
Resolve hostname to one or more IP addresses through the operating system. Resolution is carried out for the given address family. If no address family is specified, only IPv4 and IPv6 addresses are returned. If multiple IP addresses are found, all are returned. :param family: AF_INET or AF_INET6 or AF_UNSPEC (default) :return: tuple of unique IP addresses
entailment
def detect(self): """ Resolve the hostname to an IP address through the operating system. Depending on the 'family' option, either ipv4 or ipv6 resolution is carried out. If multiple IP addresses are found, the first one is returned. :return: ip address """ theip = next(iter(resolve(self.opts_hostname, self.opts_family)), None) self.set_current_value(theip) return theip
Resolve the hostname to an IP address through the operating system. Depending on the 'family' option, either ipv4 or ipv6 resolution is carried out. If multiple IP addresses are found, the first one is returned. :return: ip address
entailment
def list_presets(cfg, out=sys.stdout): """Write a human readable list of available presets to out. :param cfg: ConfigParser instance :param out: file object to write to """ for section in cfg.sections(): if section.startswith("preset:"): out.write((section.replace("preset:", "")) + os.linesep) for k, v in cfg.items(section): out.write("\t%s = %s" % (k, v) + os.linesep)
Write a human readable list of available presets to out. :param cfg: ConfigParser instance :param out: file object to write to
entailment
def create_argparser(): """Instantiate an `argparse.ArgumentParser`. Adds all basic cli options including default values. """ parser = argparse.ArgumentParser() arg_defaults = { "daemon": False, "loop": False, "listpresets": False, "config": None, "debug": False, "sleeptime": 300, "version": False, "verbose_count": 0 } # add generic client options to the CLI: parser.add_argument("-c", "--config", dest="config", help="config file", default=arg_defaults["config"]) parser.add_argument("--list-presets", dest="listpresets", help="list all available presets", action="store_true", default=arg_defaults["listpresets"]) parser.add_argument("-d", "--daemon", dest="daemon", help="go into daemon mode (implies --loop)", action="store_true", default=arg_defaults["daemon"]) parser.add_argument("--debug", dest="debug", help="increase logging level to DEBUG (DEPRECATED, please use -vvv)", action="store_true", default=arg_defaults["debug"]) parser.add_argument("--loop", dest="loop", help="loop forever (default is to update once)", action="store_true", default=arg_defaults["loop"]) parser.add_argument("--sleeptime", dest="sleeptime", help="how long to sleep between checks in seconds", default=arg_defaults["sleeptime"]) parser.add_argument("--version", dest="version", help="show version and exit", action="store_true", default=arg_defaults["version"]) parser.add_argument("-v", "--verbose", dest="verbose_count", action="count", default=arg_defaults["verbose_count"], help="increases log verbosity for each occurrence") return parser, arg_defaults
Instantiate an `argparse.ArgumentParser`. Adds all basic cli options including default values.
entailment
def run_forever(dyndnsclients): """ Run an endless loop accross the give dynamic dns clients. :param dyndnsclients: list of DynDnsClients """ while True: try: # Do small sleeps in the main loop, needs_check() is cheap and does # the rest. time.sleep(15) for dyndnsclient in dyndnsclients: dyndnsclient.check() except (KeyboardInterrupt,): break except (Exception,) as exc: LOG.critical("An exception occurred in the dyndns loop", exc_info=exc) return 0
Run an endless loop accross the give dynamic dns clients. :param dyndnsclients: list of DynDnsClients
entailment
def main(): """ Run the main CLI program. Initializes the stack, parses command line arguments, and fires requested logic. """ plugins = DefaultPluginManager() plugins.load_plugins() parser, _ = create_argparser() # add the updater protocol options to the CLI: for kls in updater_classes(): kls.register_arguments(parser) for kls in detector_classes(): kls.register_arguments(parser) # add the plugin options to the CLI: from os import environ plugins.options(parser, environ) args = parser.parse_args() if args.debug: args.verbose_count = 5 # some high number log_level = max(int(logging.WARNING / 10) - args.verbose_count, 0) * 10 # print(log_level) logging.basicConfig(level=log_level, format="%(levelname)s %(message)s") # logging.debug("args %r", args) if args.version: from . import __version__ print("dyndnsc %s" % __version__) # noqa return 0 # silence 'requests' logging requests_log = logging.getLogger("requests") requests_log.setLevel(logging.WARNING) logging.debug(parser) cfg = get_configuration(args.config) if args.listpresets: list_presets(cfg) return 0 if args.config: collected_configs = collect_config(cfg) else: parsed_args = parse_cmdline_args(args, updater_classes().union(detector_classes())) logging.debug("parsed_args %r", parsed_args) collected_configs = { "cmdline": { "interval": int(args.sleeptime) } } collected_configs["cmdline"].update(parsed_args) plugins.configure(args) plugins.initialize() logging.debug("collected_configs: %r", collected_configs) dyndnsclients = [] for thisconfig in collected_configs: logging.debug("Initializing client for '%s'", thisconfig) # done with options, bring on the dancing girls dyndnsclient = getDynDnsClientForConfig( collected_configs[thisconfig], plugins=plugins) if dyndnsclient is None: return 1 # do an initial synchronization, before going into endless loop: dyndnsclient.sync() dyndnsclients.append(dyndnsclient) run_forever_callable = partial(run_forever, dyndnsclients) if args.daemon: import daemonocle daemon = 
daemonocle.Daemon(worker=run_forever_callable) daemon.do_action("start") args.loop = True if args.loop: run_forever_callable() return 0
Run the main CLI program. Initializes the stack, parses command line arguments, and fires requested logic.
entailment
def tidy_ssh_user(url=None, user=None): """make sure a git repo ssh:// url has a user set""" if url and url.startswith('ssh://'): # is there a user already ? match = re.compile('ssh://([^@]+)@.+').match(url) if match: ssh_user = match.group(1) if user and ssh_user != user: # assume prevalence of argument url = url.replace(re.escape(ssh_user) + '@', user + '@') elif user: url = 'ssh://' + \ user + '@' + \ url[len('ssh://'):] return url
make sure a git repo ssh:// url has a user set
entailment
def get_commit_bzs(self, from_revision, to_revision=None): """ Return a list of tuples, one per commit. Each tuple is (sha1, subject, bz_list). bz_list is a (possibly zero-length) list of numbers. """ rng = self.rev_range(from_revision, to_revision) GIT_COMMIT_FIELDS = ['id', 'subject', 'body'] GIT_LOG_FORMAT = ['%h', '%s', '%b'] GIT_LOG_FORMAT = '%x1f'.join(GIT_LOG_FORMAT) + '%x1e' log_out = self('log', '--format=%s' % GIT_LOG_FORMAT, rng, log_cmd=False, fatal=False) if not log_out: return [] log = log_out.strip('\n\x1e').split("\x1e") log = [row.strip('\n\t ').split("\x1f") for row in log] log = [dict(zip(GIT_COMMIT_FIELDS, row)) for row in log] result = [] for commit in log: bzs = search_bug_references(commit['subject']) bzs.extend(search_bug_references(commit['body'])) result.append((commit['id'], commit['subject'], bzs)) return result
Return a list of tuples, one per commit. Each tuple is (sha1, subject, bz_list). bz_list is a (possibly zero-length) list of numbers.
entailment
def config_get(self, param, default=None): '''Return the value of a git configuration option. This will return the value of the default parameter (which defaults to None) if the given option does not exist.''' try: return self("config", "--get", param, log_fail=False, log_cmd=False) except exception.CommandFailed: return default
Return the value of a git configuration option. This will return the value of the default parameter (which defaults to None) if the given option does not exist.
entailment
def get_configuration(config_file=None): """Return an initialized ConfigParser. If no config filename is presented, `DEFAULT_USER_INI` is used if present. Also reads the built-in presets. :param config_file: string path """ parser = configparser.ConfigParser() if config_file is None: # fallback to default user config file config_file = os.path.join(os.getenv("HOME"), DEFAULT_USER_INI) if not os.path.isfile(config_file): config_file = None else: if not os.path.isfile(config_file): raise ValueError("%s is not a file" % config_file) configs = [get_filename(PRESETS_INI)] if config_file: configs.append(config_file) LOG.debug("Attempting to read configuration from %r", configs) read_configs = parser.read(configs) LOG.debug("Successfully read configuration from %r", read_configs) LOG.debug("config file sections: %r", parser.sections()) return parser
Return an initialized ConfigParser. If no config filename is presented, `DEFAULT_USER_INI` is used if present. Also reads the built-in presets. :param config_file: string path
entailment
def _iraw_client_configs(cfg): """ Generate (client_name, client_cfg_dict) tuples from the configuration. Conflates the presets and removes traces of the preset configuration so that the returned dict can be used directly on a dyndnsc factory. :param cfg: ConfigParser """ client_names = cfg.get("dyndnsc", "configs").split(",") _preset_prefix = "preset:" _use_preset = "use_preset" for client_name in (x.strip() for x in client_names if x.strip()): client_cfg_dict = dict(cfg.items(client_name)) if cfg.has_option(client_name, _use_preset): prf = dict( cfg.items(_preset_prefix + cfg.get(client_name, _use_preset))) prf.update(client_cfg_dict) client_cfg_dict = prf else: # raw config with NO preset in use, so no updating of dict pass logging.debug("raw config for '%s': %r", client_name, client_cfg_dict) if _use_preset in client_cfg_dict: del client_cfg_dict[_use_preset] yield client_name, client_cfg_dict
Generate (client_name, client_cfg_dict) tuples from the configuration. Conflates the presets and removes traces of the preset configuration so that the returned dict can be used directly on a dyndnsc factory. :param cfg: ConfigParser
entailment
def collect_config(cfg): """ Construct configuration dictionary from configparser. Resolves presets and returns a dictionary containing: .. code-block:: bash { "client_name": { "detector": ("detector_name", detector_opts), "updater": [ ("updater_name", updater_opts), ... ] }, ... } :param cfg: ConfigParser """ collected_configs = {} _updater_str = "updater" _detector_str = "detector" _dash = "-" for client_name, client_cfg_dict in _iraw_client_configs(cfg): detector_name = None detector_options = {} updater_name = None updater_options = {} collected_config = {} for k in client_cfg_dict: if k.startswith(_detector_str + _dash): detector_options[ k.replace(_detector_str + _dash, "")] = client_cfg_dict[k] elif k == _updater_str: updater_name = client_cfg_dict.get(k) elif k == _detector_str: detector_name = client_cfg_dict.get(k) elif k.startswith(_updater_str + _dash): updater_options[ k.replace(_updater_str + _dash, "")] = client_cfg_dict[k] else: # options passed "as is" to the dyndnsc client collected_config[k] = client_cfg_dict[k] collected_config[_detector_str] = [(detector_name, detector_options)] collected_config[_updater_str] = [(updater_name, updater_options)] collected_configs[client_name] = collected_config return collected_configs
Construct configuration dictionary from configparser. Resolves presets and returns a dictionary containing: .. code-block:: bash { "client_name": { "detector": ("detector_name", detector_opts), "updater": [ ("updater_name", updater_opts), ... ] }, ... } :param cfg: ConfigParser
entailment
def detect(self): """Detect and return the IP address.""" if PY3: # py23 import subprocess # noqa: S404 @UnresolvedImport pylint: disable=import-error else: import commands as subprocess # @UnresolvedImport pylint: disable=import-error try: theip = subprocess.getoutput(self.opts_command) # noqa: S605 except Exception: theip = None self.set_current_value(theip) return theip
Detect and return the IP address.
entailment
def find_ip(family=AF_INET, flavour="opendns"): """Find the publicly visible IP address of the current system. This uses public DNS infrastructure that implement a special DNS "hack" to return the IP address of the requester rather than some other address. :param family: address family, optional, default AF_INET (ipv4) :param flavour: selector for public infrastructure provider, optional """ flavours = { "opendns": { AF_INET: { "@": ("resolver1.opendns.com", "resolver2.opendns.com"), "qname": "myip.opendns.com", "rdtype": "A", }, AF_INET6: { "@": ("resolver1.ipv6-sandbox.opendns.com", "resolver2.ipv6-sandbox.opendns.com"), "qname": "myip.opendns.com", "rdtype": "AAAA", }, }, } flavour = flavours["opendns"] resolver = dns.resolver.Resolver() # specify the custom nameservers to be used (as IPs): resolver.nameservers = [next(iter(resolve(h, family=family))) for h in flavour[family]["@"]] answers = resolver.query(qname=flavour[family]["qname"], rdtype=flavour[family]["rdtype"]) for rdata in answers: return rdata.address return None
Find the publicly visible IP address of the current system. This uses public DNS infrastructure that implement a special DNS "hack" to return the IP address of the requester rather than some other address. :param family: address family, optional, default AF_INET (ipv4) :param flavour: selector for public infrastructure provider, optional
entailment
def detect(self): """ Detect the WAN IP of the current process through DNS. Depending on the 'family' option, either ipv4 or ipv6 resolution is carried out. :return: ip address """ theip = find_ip(family=self.opts_family) self.set_current_value(theip) return theip
Detect the WAN IP of the current process through DNS. Depending on the 'family' option, either ipv4 or ipv6 resolution is carried out. :return: ip address
entailment
def set_current_value(self, value): """Set the detected IP in the current run (if any).""" self._oldvalue = self.get_current_value() self._currentvalue = value if self._oldvalue != value: # self.notify_observers("new_ip_detected", {"ip": value}) LOG.debug("%s.set_current_value(%s)", self.__class__.__name__, value) return value
Set the detected IP in the current run (if any).
entailment
def parse_cmdline_args(args, classes): """ Parse all updater and detector related arguments from args. Returns a list of ("name", { "k": "v"}) :param args: argparse arguments """ if args is None: raise ValueError("args must not be None") parsed_args = {} for kls in classes: prefix = kls.configuration_key_prefix() name = kls.configuration_key if getattr(args, "%s_%s" % (prefix, name), False): logging.debug( "Gathering initargs for '%s.%s'", prefix, name) initargs = {} for arg_name in kls.init_argnames(): val = getattr(args, "%s_%s_%s" % (prefix, name, arg_name)) if val is not None: initargs[arg_name] = val if prefix not in parsed_args: parsed_args[prefix] = [] parsed_args[prefix].append((name, initargs)) return parsed_args
Parse all updater and detector related arguments from args. Returns a list of ("name", { "k": "v"}) :param args: argparse arguments
entailment
def register_arguments(cls, parser): """Register command line options. Implement this method for normal options behavior with protection from OptionConflictErrors. If you override this method and want the default --$name option(s) to be registered, be sure to call super(). """ if hasattr(cls, "_dont_register_arguments"): return prefix = cls.configuration_key_prefix() cfgkey = cls.configuration_key parser.add_argument("--%s-%s" % (prefix, cfgkey), action="store_true", dest="%s_%s" % (prefix, cfgkey), default=False, help="%s: %s" % (cls.__name__, cls.help())) args = cls.init_argnames() defaults = cls._init_argdefaults() for arg in args[0:len(args) - len(defaults)]: parser.add_argument("--%s-%s-%s" % (prefix, cfgkey, arg), dest="%s_%s_%s" % (prefix, cfgkey, arg), help="") for i, arg in enumerate(args[len(args) - len(defaults):]): parser.add_argument("--%s-%s-%s" % (prefix, cfgkey, arg), dest="%s_%s_%s" % (prefix, cfgkey, arg), default=defaults[i], help="default: %(default)s")
Register command line options. Implement this method for normal options behavior with protection from OptionConflictErrors. If you override this method and want the default --$name option(s) to be registered, be sure to call super().
entailment
def tag_patches_branch(package, local_patches_branch, patches_branch, force=False, push=False): """ Tag the local_patches_branch with this package's NVR. """ vr = specfile.Spec().get_vr(epoch=False) nvr_tag = package + '-' + vr tag_cmd = ['tag', nvr_tag, local_patches_branch] if force: tag_cmd.append('-f') git(*tag_cmd) if push: patches_remote = patches_branch.partition('/')[0] git('push', patches_remote, nvr_tag) else: print('Not pushing tag. Run "git push patches %s" by hand.' % nvr_tag)
Tag the local_patches_branch with this package's NVR.
entailment
def load_class(module_name, class_name): """Return class object specified by module name and class name. Return None if module failed to be imported. :param module_name: string module name :param class_name: string class name """ try: plugmod = import_module(module_name) except Exception as exc: warn("Importing built-in plugin %s.%s raised an exception: %r" % (module_name, class_name, repr(exc)), ImportWarning) return None else: return getattr(plugmod, class_name)
Return class object specified by module name and class name. Return None if module failed to be imported. :param module_name: string module name :param class_name: string class name
entailment
def find_class(name, classes): """Return class in ``classes`` identified by configuration key ``name``.""" name = name.lower() cls = next((c for c in classes if c.configuration_key == name), None) if cls is None: raise ValueError("No class named '%s' could be found" % name) return cls
Return class in ``classes`` identified by configuration key ``name``.
entailment