Dataset schema: _id (string, 2-7 chars), title (string, 1-88 chars), partition (string, 3 classes), text (string, 75-19.8k chars), language (string, 1 class), meta_information (dict).
q24400
StrategyMixin._install_wrappers
train
def _install_wrappers(self):
    """
    Install our PluginLoader monkey patches and update global variables
    with references to the real functions.
    """
    global action_loader__get
    action_loader__get = ansible_mitogen.loaders.action_loader.get
    ansible_mitogen.loaders.action_loader.get = wrap_action_loader__get

    global connection_loader__get
    connection_loader__get = ansible_mitogen.loaders.connection_loader.get
    ansible_mitogen.loaders.connection_loader.get = wrap_connection_loader__get

    global worker__run
    worker__run = ansible.executor.process.worker.WorkerProcess.run
    ansible.executor.process.worker.WorkerProcess.run = wrap_worker__run
python
{ "resource": "" }
q24401
StrategyMixin._remove_wrappers
train
def _remove_wrappers(self):
    """
    Uninstall the PluginLoader monkey patches.
    """
    ansible_mitogen.loaders.action_loader.get = action_loader__get
    ansible_mitogen.loaders.connection_loader.get = connection_loader__get
    ansible.executor.process.worker.WorkerProcess.run = worker__run
python
{ "resource": "" }
q24402
StrategyMixin._add_plugin_paths
train
def _add_plugin_paths(self):
    """
    Add the Mitogen plug-in directories to the ModuleLoader path, avoiding
    the need for manual configuration.
    """
    base_dir = os.path.join(os.path.dirname(__file__), 'plugins')
    ansible_mitogen.loaders.connection_loader.add_directory(
        os.path.join(base_dir, 'connection')
    )
    ansible_mitogen.loaders.action_loader.add_directory(
        os.path.join(base_dir, 'action')
    )
python
{ "resource": "" }
q24403
StrategyMixin._queue_task
train
def _queue_task(self, host, task, task_vars, play_context):
    """
    Many PluginLoader caches are defective as they are only populated in
    the ephemeral WorkerProcess. Touch each plug-in path before forking to
    ensure all workers receive a hot cache.
    """
    ansible_mitogen.loaders.module_loader.find_plugin(
        name=task.action,
        mod_type='',
    )
    ansible_mitogen.loaders.connection_loader.get(
        name=play_context.connection,
        class_only=True,
    )
    ansible_mitogen.loaders.action_loader.get(
        name=task.action,
        class_only=True,
    )
    return super(StrategyMixin, self)._queue_task(
        host=host,
        task=task,
        task_vars=task_vars,
        play_context=play_context,
    )
python
{ "resource": "" }
q24404
expose
train
def expose(policy):
    """
    Annotate a method to permit access to contexts matching an authorization
    policy. The annotation may be specified multiple times. Methods lacking
    any authorization policy are not accessible. ::

        @mitogen.service.expose(policy=mitogen.service.AllowParents())
        def unsafe_operation(self):
            ...

    :param mitogen.service.Policy policy:
        The policy to require.
    """
    def wrapper(func):
        func.mitogen_service__policies = (
            [policy] +
            getattr(func, 'mitogen_service__policies', [])
        )
        return func
    return wrapper
python
{ "resource": "" }
q24405
PushFileService.get
train
def get(self, path):
    """
    Fetch a file from the cache.
    """
    assert isinstance(path, mitogen.core.UnicodeType)
    self._lock.acquire()
    try:
        if path in self._cache:
            return self._cache[path]
        latch = mitogen.core.Latch()
        waiters = self._waiters.setdefault(path, [])
        waiters.append(lambda: latch.put(None))
    finally:
        self._lock.release()

    LOG.debug('%r.get(%r) waiting for uncached file to arrive', self, path)
    latch.get()
    LOG.debug('%r.get(%r) -> %r', self, path, self._cache[path])
    return self._cache[path]
python
{ "resource": "" }
q24406
PushFileService.propagate_paths_and_modules
train
def propagate_paths_and_modules(self, context, paths, modules):
    """
    One size fits all method to ensure a target context has been preloaded
    with a set of small files and Python modules.
    """
    for path in paths:
        self.propagate_to(context, mitogen.core.to_text(path))
    self.router.responder.forward_modules(context, modules)
python
{ "resource": "" }
q24407
FileService.register
train
def register(self, path):
    """
    Authorize a path for access by children. Repeated calls with the same
    path have no effect.

    :param str path:
        File path.
    """
    if path not in self._paths:
        LOG.debug('%r: registering %r', self, path)
        self._paths.add(path)
python
{ "resource": "" }
q24408
FileService.register_prefix
train
def register_prefix(self, path):
    """
    Authorize a path and any subpaths for access by children. Repeated
    calls with the same path have no effect.

    :param str path:
        File path.
    """
    if path not in self._prefixes:
        LOG.debug('%r: registering prefix %r', self, path)
        self._prefixes.add(path)
python
{ "resource": "" }
q24409
FileService.fetch
train
def fetch(self, path, sender, msg):
    """
    Start a transfer for a registered path.

    :param str path:
        File path.
    :param mitogen.core.Sender sender:
        Sender to receive file data.
    :returns:
        Dict containing the file metadata:

        * ``size``: File size in bytes.
        * ``mode``: Integer file mode.
        * ``owner``: Owner account name on host machine.
        * ``group``: Owner group name on host machine.
        * ``mtime``: Floating point modification time.
        * ``ctime``: Floating point change time.
    :raises Error:
        Unregistered path, or Sender did not match requestee context.
    """
    if path not in self._paths and not self._prefix_is_authorized(path):
        msg.reply(mitogen.core.CallError(
            Error(self.unregistered_msg % (path,))
        ))
        return

    if msg.src_id != sender.context.context_id:
        msg.reply(mitogen.core.CallError(
            Error(self.context_mismatch_msg)
        ))
        return

    LOG.debug('Serving %r', path)

    # Response must arrive first so requestee can begin receive loop,
    # otherwise first ack won't arrive until all pending chunks were
    # delivered. In that case max BDP would always be 128KiB, aka. max
    # ~10Mbit/sec over a 100ms link.
    try:
        fp = open(path, 'rb', self.IO_SIZE)
        msg.reply(self._generate_stat(path))
    except IOError:
        msg.reply(mitogen.core.CallError(
            sys.exc_info()[1]
        ))
        return

    stream = self.router.stream_by_id(sender.context.context_id)
    state = self._state_by_stream.setdefault(stream, FileStreamState())
    state.lock.acquire()
    try:
        state.jobs.append((sender, fp))
        self._schedule_pending_unlocked(state)
    finally:
        state.lock.release()
python
{ "resource": "" }
q24410
FileService.acknowledge
train
def acknowledge(self, size, msg):
    """
    Acknowledge bytes received by a transfer target, scheduling new chunks
    to keep the window full. This should be called for every chunk received
    by the target.
    """
    stream = self.router.stream_by_id(msg.src_id)
    state = self._state_by_stream[stream]
    state.lock.acquire()
    try:
        if state.unacked < size:
            LOG.error('%r.acknowledge(src_id %d): unacked=%d < size %d',
                      self, msg.src_id, state.unacked, size)
        state.unacked -= min(state.unacked, size)
        self._schedule_pending_unlocked(state)
    finally:
        state.lock.release()
python
{ "resource": "" }
q24411
get_subclasses
train
def get_subclasses(klass):
    """
    Rather than statically import every interesting subclass, forcing it all
    to be transferred and potentially disrupting the debugged environment,
    enumerate only those loaded in memory. Also returns the original class.
    """
    stack = [klass]
    seen = set()
    while stack:
        klass = stack.pop()
        seen.add(klass)
        stack.extend(klass.__subclasses__())
    return seen
python
{ "resource": "" }
q24412
simplegeneric
train
def simplegeneric(func):
    """Make a trivial single-dispatch generic function"""
    registry = {}

    def wrapper(*args, **kw):
        ob = args[0]
        try:
            cls = ob.__class__
        except AttributeError:
            cls = type(ob)
        try:
            mro = cls.__mro__
        except AttributeError:
            try:
                class cls(cls, object):
                    pass
                mro = cls.__mro__[1:]
            except TypeError:
                mro = object,   # must be an ExtensionClass or some such  :(
        for t in mro:
            if t in registry:
                return registry[t](*args, **kw)
        else:
            return func(*args, **kw)

    try:
        wrapper.__name__ = func.__name__
    except (TypeError, AttributeError):
        pass    # Python 2.3 doesn't allow functions to be renamed

    def register(typ, func=None):
        if func is None:
            return lambda f: register(typ, f)
        registry[typ] = func
        return func

    wrapper.__dict__ = func.__dict__
    wrapper.__doc__ = func.__doc__
    wrapper.register = register
    return wrapper
python
{ "resource": "" }
q24413
get_importer
train
def get_importer(path_item):
    """Retrieve a PEP 302 importer for the given path item

    The returned importer is cached in sys.path_importer_cache
    if it was newly created by a path hook.

    If there is no importer, a wrapper around the basic import
    machinery is returned. This wrapper is never inserted into the
    importer cache (None is inserted instead).

    The cache (or part of it) can be cleared manually if a
    rescan of sys.path_hooks is necessary.
    """
    try:
        importer = sys.path_importer_cache[path_item]
    except KeyError:
        for path_hook in sys.path_hooks:
            try:
                importer = path_hook(path_item)
                break
            except ImportError:
                pass
        else:
            importer = None
        sys.path_importer_cache.setdefault(path_item, importer)

    if importer is None:
        try:
            importer = ImpImporter(path_item)
        except ImportError:
            importer = None
    return importer
python
{ "resource": "" }
q24414
iter_importers
train
def iter_importers(fullname=""):
    """Yield PEP 302 importers for the given module name

    If fullname contains a '.', the importers will be for the package
    containing fullname, otherwise they will be importers for sys.meta_path,
    sys.path, and Python's "classic" import machinery, in that order.  If
    the named module is in a package, that package is imported as a side
    effect of invoking this function.

    Non PEP 302 mechanisms (e.g. the Windows registry) used by the standard
    import machinery to find files in alternative locations are partially
    supported, but are searched AFTER sys.path. Normally, these locations
    are searched BEFORE sys.path, preventing sys.path entries from shadowing
    them.

    For this to cause a visible difference in behaviour, there must be a
    module or package name that is accessible via both sys.path and one of
    the non PEP 302 file system mechanisms. In this case, the emulation will
    find the former version, while the builtin import mechanism will find
    the latter.

    Items of the following types can be affected by this discrepancy:
    imp.C_EXTENSION, imp.PY_SOURCE, imp.PY_COMPILED, imp.PKG_DIRECTORY
    """
    if fullname.startswith('.'):
        raise ImportError("Relative module names not supported")
    if '.' in fullname:
        # Get the containing package's __path__
        pkg = '.'.join(fullname.split('.')[:-1])
        if pkg not in sys.modules:
            __import__(pkg)
        path = getattr(sys.modules[pkg], '__path__', None) or []
    else:
        for importer in sys.meta_path:
            yield importer
        path = sys.path
    for item in path:
        yield get_importer(item)
    if '.' not in fullname:
        yield ImpImporter()
python
{ "resource": "" }
q24415
get_loader
train
def get_loader(module_or_name):
    """Get a PEP 302 "loader" object for module_or_name

    If the module or package is accessible via the normal import mechanism,
    a wrapper around the relevant part of that machinery is returned.
    Returns None if the module cannot be found or imported.  If the named
    module is not already imported, its containing package (if any) is
    imported, in order to establish the package __path__.

    This function uses iter_importers(), and is thus subject to the same
    limitations regarding platform-specific special import locations such
    as the Windows registry.
    """
    if module_or_name in sys.modules:
        module_or_name = sys.modules[module_or_name]
    if isinstance(module_or_name, ModuleType):
        module = module_or_name
        loader = getattr(module, '__loader__', None)
        if loader is not None:
            return loader
        fullname = module.__name__
    else:
        fullname = module_or_name
    return find_loader(fullname)
python
{ "resource": "" }
q24416
find_loader
train
def find_loader(fullname):
    """Find a PEP 302 "loader" object for fullname

    If fullname contains dots, path must be the containing package's
    __path__. Returns None if the module cannot be found or imported.

    This function uses iter_importers(), and is thus subject to the same
    limitations regarding platform-specific special import locations such
    as the Windows registry.
    """
    for importer in iter_importers(fullname):
        loader = importer.find_module(fullname)
        if loader is not None:
            return loader
    return None
python
{ "resource": "" }
q24417
get_data
train
def get_data(package, resource):
    """Get a resource from a package.

    This is a wrapper round the PEP 302 loader get_data API. The package
    argument should be the name of a package, in standard module format
    (foo.bar). The resource argument should be in the form of a relative
    filename, using '/' as the path separator. The parent directory name
    '..' is not allowed, and nor is a rooted name (starting with a '/').

    The function returns a binary string, which is the contents of the
    specified resource.

    For packages located in the filesystem, which have already been
    imported, this is the rough equivalent of:

        d = os.path.dirname(sys.modules[package].__file__)
        data = open(os.path.join(d, resource), 'rb').read()

    If the package cannot be located or loaded, or it uses a PEP 302 loader
    which does not support get_data(), then None is returned.
    """
    loader = get_loader(package)
    if loader is None or not hasattr(loader, 'get_data'):
        return None
    mod = sys.modules.get(package) or loader.load_module(package)
    if mod is None or not hasattr(mod, '__file__'):
        return None

    # Modify the resource name to be compatible with the loader.get_data
    # signature - an os.path format "filename" starting with the dirname of
    # the package's __file__
    parts = resource.split('/')
    parts.insert(0, os.path.dirname(mod.__file__))
    resource_name = os.path.join(*parts)
    return loader.get_data(resource_name)
python
{ "resource": "" }
q24418
invoke
train
def invoke(invocation):
    """
    Find a Planner subclass corresponding to `invocation` and use it to
    invoke the module.

    :param Invocation invocation:
    :returns:
        Module return dict.
    :raises ansible.errors.AnsibleError:
        Unrecognized/unsupported module type.
    """
    (invocation.module_path,
     invocation.module_source) = get_module_data(invocation.module_name)
    planner = _get_planner(invocation)

    if invocation.wrap_async:
        response = _invoke_async_task(invocation, planner)
    elif planner.should_fork():
        response = _invoke_isolated_task(invocation, planner)
    else:
        _propagate_deps(invocation, planner, invocation.connection.context)
        response = invocation.connection.get_chain().call(
            ansible_mitogen.target.run_module,
            kwargs=planner.get_kwargs(),
        )

    return invocation.action._postprocess_response(response)
python
{ "resource": "" }
q24419
getenv_int
train
def getenv_int(key, default=0):
    """
    Get an integer-valued environment variable `key`, if it exists and
    parses as an integer, otherwise return `default`.
    """
    try:
        return int(os.environ.get(key, str(default)))
    except ValueError:
        return default
python
{ "resource": "" }
q24420
MuxProcess.start
train
def start(cls, _init_logging=True):
    """
    Arrange for the subprocess to be started, if it is not already running.
    The parent process picks a UNIX socket path the child will use prior to
    fork, creates a socketpair used essentially as a semaphore, then blocks
    waiting for the child to indicate the UNIX socket is ready for use.

    :param bool _init_logging:
        For testing, if :data:`False`, don't initialize logging.
    """
    if cls.worker_sock is not None:
        return

    if faulthandler is not None:
        faulthandler.enable()

    mitogen.utils.setup_gil()
    cls.unix_listener_path = mitogen.unix.make_socket_path()
    cls.worker_sock, cls.child_sock = socket.socketpair()
    atexit.register(lambda: clean_shutdown(cls.worker_sock))
    mitogen.core.set_cloexec(cls.worker_sock.fileno())
    mitogen.core.set_cloexec(cls.child_sock.fileno())

    cls.profiling = os.environ.get('MITOGEN_PROFILING') is not None
    if cls.profiling:
        mitogen.core.enable_profiling()
    if _init_logging:
        ansible_mitogen.logging.setup()

    cls.original_env = dict(os.environ)
    cls.child_pid = os.fork()
    if cls.child_pid:
        save_pid('controller')
        ansible_mitogen.logging.set_process_name('top')
        ansible_mitogen.affinity.policy.assign_controller()
        cls.child_sock.close()
        cls.child_sock = None
        mitogen.core.io_op(cls.worker_sock.recv, 1)
    else:
        save_pid('mux')
        ansible_mitogen.logging.set_process_name('mux')
        ansible_mitogen.affinity.policy.assign_muxprocess()
        cls.worker_sock.close()
        cls.worker_sock = None
        self = cls()
        self.worker_main()
python
{ "resource": "" }
q24421
MuxProcess._setup_master
train
def _setup_master(self):
    """
    Construct a Router, Broker, and mitogen.unix listener.
    """
    self.broker = mitogen.master.Broker(install_watcher=False)
    self.router = mitogen.master.Router(
        broker=self.broker,
        max_message_size=4096 * 1048576,
    )
    self._setup_responder(self.router.responder)
    mitogen.core.listen(self.broker, 'shutdown', self.on_broker_shutdown)
    mitogen.core.listen(self.broker, 'exit', self.on_broker_exit)
    self.listener = mitogen.unix.Listener(
        router=self.router,
        path=self.unix_listener_path,
        backlog=C.DEFAULT_FORKS,
    )
    self._enable_router_debug()
    self._enable_stack_dumps()
python
{ "resource": "" }
q24422
MuxProcess._setup_services
train
def _setup_services(self):
    """
    Construct a ContextService and a thread to service requests for it
    arriving from worker processes.
    """
    self.pool = mitogen.service.Pool(
        router=self.router,
        services=[
            mitogen.service.FileService(router=self.router),
            mitogen.service.PushFileService(router=self.router),
            ansible_mitogen.services.ContextService(self.router),
            ansible_mitogen.services.ModuleDepService(self.router),
        ],
        size=getenv_int('MITOGEN_POOL_SIZE', default=32),
    )
    LOG.debug('Service pool configured: size=%d', self.pool.size)
python
{ "resource": "" }
q24423
MuxProcess.on_broker_exit
train
def on_broker_exit(self):
    """
    Respond to the broker thread being about to exit by sending SIGTERM to
    ourselves. In future this should gracefully join the pool, but TERM is
    fine for now.
    """
    if not self.profiling:
        # In normal operation we presently kill the process because there is
        # not yet any way to cancel connect(). When profiling, threads
        # including the broker must shut down gracefully, otherwise pstats
        # won't be written.
        os.kill(os.getpid(), signal.SIGTERM)
python
{ "resource": "" }
q24424
utf8
train
def utf8(s):
    """
    Coerce an object to bytes if it is Unicode.
    """
    if isinstance(s, mitogen.core.UnicodeType):
        s = s.encode('utf-8')
    return s
python
{ "resource": "" }
q24425
EnvironmentFileWatcher._remove_existing
train
def _remove_existing(self):
    """
    When a change is detected, remove keys that existed in the old file.
    """
    for key in self._keys:
        if key in os.environ:
            LOG.debug('%r: removing old key %r', self, key)
            del os.environ[key]
    self._keys = []
python
{ "resource": "" }
q24426
ProgramRunner._get_program
train
def _get_program(self):
    """
    Fetch the module binary from the master if necessary.
    """
    return ansible_mitogen.target.get_small_file(
        context=self.service_context,
        path=self.path,
    )
python
{ "resource": "" }
q24427
ProgramRunner.revert
train
def revert(self):
    """
    Delete the temporary program file.
    """
    if self.program_fp:
        self.program_fp.close()
    super(ProgramRunner, self).revert()
python
{ "resource": "" }
q24428
ScriptRunner._rewrite_source
train
def _rewrite_source(self, s):
    """
    Mutate the source according to the per-task parameters.
    """
    # While Ansible rewrites the #! using ansible_*_interpreter, it is
    # never actually used to execute the script, instead it is a shell
    # fragment consumed by shell/__init__.py::build_module_command().
    new = [b('#!') + utf8(self.interpreter_fragment)]
    if self.is_python:
        new.append(self.b_ENCODING_STRING)
    _, _, rest = bytes_partition(s, b('\n'))
    new.append(rest)
    return b('\n').join(new)
python
{ "resource": "" }
q24429
_connect_ssh
train
def _connect_ssh(spec):
    """
    Return ContextService arguments for an SSH connection.
    """
    if C.HOST_KEY_CHECKING:
        check_host_keys = 'enforce'
    else:
        check_host_keys = 'ignore'

    # #334: tilde-expand private_key_file to avoid implementation difference
    # between Python and OpenSSH.
    private_key_file = spec.private_key_file()
    if private_key_file is not None:
        private_key_file = os.path.expanduser(private_key_file)

    return {
        'method': 'ssh',
        'kwargs': {
            'check_host_keys': check_host_keys,
            'hostname': spec.remote_addr(),
            'username': spec.remote_user(),
            'compression': convert_bool(
                default(spec.mitogen_ssh_compression(), True)
            ),
            'password': spec.password(),
            'port': spec.port(),
            'python_path': spec.python_path(),
            'identity_file': private_key_file,
            'identities_only': False,
            'ssh_path': spec.ssh_executable(),
            'connect_timeout': spec.ansible_ssh_timeout(),
            'ssh_args': spec.ssh_args(),
            'ssh_debug_level': spec.mitogen_ssh_debug_level(),
            'remote_name': get_remote_name(spec),
        }
    }
python
{ "resource": "" }
q24430
_connect_docker
train
def _connect_docker(spec):
    """
    Return ContextService arguments for a Docker connection.
    """
    return {
        'method': 'docker',
        'kwargs': {
            'username': spec.remote_user(),
            'container': spec.remote_addr(),
            'python_path': spec.python_path(),
            'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
            'remote_name': get_remote_name(spec),
        }
    }
python
{ "resource": "" }
q24431
_connect_kubectl
train
def _connect_kubectl(spec):
    """
    Return ContextService arguments for a Kubernetes connection.
    """
    return {
        'method': 'kubectl',
        'kwargs': {
            'pod': spec.remote_addr(),
            'python_path': spec.python_path(),
            'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
            'kubectl_path': spec.mitogen_kubectl_path(),
            'kubectl_args': spec.extra_args(),
            'remote_name': get_remote_name(spec),
        }
    }
python
{ "resource": "" }
q24432
_connect_jail
train
def _connect_jail(spec):
    """
    Return ContextService arguments for a FreeBSD jail connection.
    """
    return {
        'method': 'jail',
        'kwargs': {
            'username': spec.remote_user(),
            'container': spec.remote_addr(),
            'python_path': spec.python_path(),
            'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
            'remote_name': get_remote_name(spec),
        }
    }
python
{ "resource": "" }
q24433
_connect_lxc
train
def _connect_lxc(spec):
    """
    Return ContextService arguments for an LXC Classic container connection.
    """
    return {
        'method': 'lxc',
        'kwargs': {
            'container': spec.remote_addr(),
            'python_path': spec.python_path(),
            'lxc_attach_path': spec.mitogen_lxc_attach_path(),
            'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
            'remote_name': get_remote_name(spec),
        }
    }
python
{ "resource": "" }
q24434
_connect_lxd
train
def _connect_lxd(spec):
    """
    Return ContextService arguments for an LXD container connection.
    """
    return {
        'method': 'lxd',
        'kwargs': {
            'container': spec.remote_addr(),
            'python_path': spec.python_path(),
            'lxc_path': spec.mitogen_lxc_path(),
            'connect_timeout': spec.ansible_ssh_timeout() or spec.timeout(),
            'remote_name': get_remote_name(spec),
        }
    }
python
{ "resource": "" }
q24435
_connect_setns
train
def _connect_setns(spec, kind=None):
    """
    Return ContextService arguments for a mitogen_setns connection.
    """
    return {
        'method': 'setns',
        'kwargs': {
            'container': spec.remote_addr(),
            'username': spec.remote_user(),
            'python_path': spec.python_path(),
            'kind': kind or spec.mitogen_kind(),
            'docker_path': spec.mitogen_docker_path(),
            'lxc_path': spec.mitogen_lxc_path(),
            'lxc_info_path': spec.mitogen_lxc_info_path(),
            'machinectl_path': spec.mitogen_machinectl_path(),
        }
    }
python
{ "resource": "" }
q24436
_connect_su
train
def _connect_su(spec):
    """
    Return ContextService arguments for su as a become method.
    """
    return {
        'method': 'su',
        'enable_lru': True,
        'kwargs': {
            'username': spec.become_user(),
            'password': spec.become_pass(),
            'python_path': spec.python_path(),
            'su_path': spec.become_exe(),
            'connect_timeout': spec.timeout(),
            'remote_name': get_remote_name(spec),
        }
    }
python
{ "resource": "" }
q24437
_connect_sudo
train
def _connect_sudo(spec):
    """
    Return ContextService arguments for sudo as a become method.
    """
    return {
        'method': 'sudo',
        'enable_lru': True,
        'kwargs': {
            'username': spec.become_user(),
            'password': spec.become_pass(),
            'python_path': spec.python_path(),
            'sudo_path': spec.become_exe(),
            'connect_timeout': spec.timeout(),
            'sudo_args': spec.sudo_args(),
            'remote_name': get_remote_name(spec),
        }
    }
python
{ "resource": "" }
q24438
_connect_mitogen_su
train
def _connect_mitogen_su(spec):
    """
    Return ContextService arguments for su as a first class connection.
    """
    return {
        'method': 'su',
        'kwargs': {
            'username': spec.remote_user(),
            'password': spec.password(),
            'python_path': spec.python_path(),
            'su_path': spec.become_exe(),
            'connect_timeout': spec.timeout(),
            'remote_name': get_remote_name(spec),
        }
    }
python
{ "resource": "" }
q24439
_connect_mitogen_sudo
train
def _connect_mitogen_sudo(spec):
    """
    Return ContextService arguments for sudo as a first class connection.
    """
    return {
        'method': 'sudo',
        'kwargs': {
            'username': spec.remote_user(),
            'password': spec.password(),
            'python_path': spec.python_path(),
            'sudo_path': spec.become_exe(),
            'connect_timeout': spec.timeout(),
            'sudo_args': spec.sudo_args(),
            'remote_name': get_remote_name(spec),
        }
    }
python
{ "resource": "" }
q24440
_connect_mitogen_doas
train
def _connect_mitogen_doas(spec):
    """
    Return ContextService arguments for doas as a first class connection.
    """
    return {
        'method': 'doas',
        'kwargs': {
            'username': spec.remote_user(),
            'password': spec.password(),
            'python_path': spec.python_path(),
            'doas_path': spec.become_exe(),
            'connect_timeout': spec.timeout(),
            'remote_name': get_remote_name(spec),
        }
    }
python
{ "resource": "" }
q24441
Connection.on_action_run
train
def on_action_run(self, task_vars, delegate_to_hostname, loader_basedir):
    """
    Invoked by ActionModuleMixin to indicate a new task is about to start
    executing. We use the opportunity to grab relevant bits from the
    task-specific data.

    :param dict task_vars:
        Task variable dictionary.
    :param str delegate_to_hostname:
        :data:`None`, or the template-expanded inventory hostname this task
        is being delegated to. A similar variable exists on PlayContext when
        ``delegate_to:`` is active, however it is unexpanded.
    :param str loader_basedir:
        Loader base directory; see :attr:`loader_basedir`.
    """
    self.inventory_hostname = task_vars['inventory_hostname']
    self._task_vars = task_vars
    self.host_vars = task_vars['hostvars']
    self.delegate_to_hostname = delegate_to_hostname
    self.loader_basedir = loader_basedir
    self._mitogen_reset(mode='put')
python
{ "resource": "" }
q24442
Connection.get_task_var
train
def get_task_var(self, key, default=None):
    """
    Fetch the value of a task variable related to connection configuration,
    or, if delegate_to is active, fetch the same variable via HostVars for
    the delegated-to machine.

    When running with delegate_to, Ansible tasks have variables associated
    with the original machine, not the delegated-to machine, therefore it
    does not make sense to extract connection-related configuration for the
    delegated-to machine from them.
    """
    if self._task_vars:
        if self.delegate_to_hostname is None:
            if key in self._task_vars:
                return self._task_vars[key]
        else:
            delegated_vars = self._task_vars['ansible_delegated_vars']
            if self.delegate_to_hostname in delegated_vars:
                task_vars = delegated_vars[self.delegate_to_hostname]
                if key in task_vars:
                    return task_vars[key]

    return default
python
{ "resource": "" }
q24443
Connection._connect_broker
train
def _connect_broker(self):
    """
    Establish a reference to the Broker, Router and parent context used for
    connections.
    """
    if not self.broker:
        self.broker = mitogen.master.Broker()
        self.router, self.parent = mitogen.unix.connect(
            path=ansible_mitogen.process.MuxProcess.unix_listener_path,
            broker=self.broker,
        )
python
{ "resource": "" }
q24444
Connection._build_stack
train
def _build_stack(self):
    """
    Construct a list of dictionaries representing the connection
    configuration between the controller and the target. This is
    additionally used by the integration tests "mitogen_get_stack" action
    to fetch the would-be connection configuration.
    """
    return self._stack_from_spec(
        ansible_mitogen.transport_config.PlayContextSpec(
            connection=self,
            play_context=self._play_context,
            transport=self.transport,
            inventory_name=self.inventory_hostname,
        )
    )
python
{ "resource": "" }
q24445
Connection._connect_stack
train
def _connect_stack(self, stack):
    """
    Pass `stack` to ContextService, requesting a copy of the context object
    representing the last tuple element. If no connection exists yet,
    ContextService will recursively establish it before returning it or
    throwing an error.

    See :meth:`ansible_mitogen.services.ContextService.get` docstring for
    description of the returned dictionary.
    """
    try:
        dct = self.parent.call_service(
            service_name='ansible_mitogen.services.ContextService',
            method_name='get',
            stack=mitogen.utils.cast(list(stack)),
        )
    except mitogen.core.CallError:
        LOG.warning('Connection failed; stack configuration was:\n%s',
                    pprint.pformat(stack))
        raise

    if dct['msg']:
        if dct['method_name'] in self.become_methods:
            raise ansible.errors.AnsibleModuleError(dct['msg'])
        raise ansible.errors.AnsibleConnectionFailure(dct['msg'])

    self.context = dct['context']
    self.chain = CallChain(self, self.context, pipelined=True)
    if self._play_context.become:
        self.login_context = dct['via']
    else:
        self.login_context = self.context

    self.init_child_result = dct['init_child_result']
python
{ "resource": "" }
q24446
Connection._connect
train
def _connect(self):
    """
    Establish a connection to the master process's UNIX listener socket,
    constructing a mitogen.master.Router to communicate with the master,
    and a mitogen.parent.Context to represent it.

    Depending on the original transport we should emulate, trigger one of
    the _connect_*() service calls defined above to cause the master
    process to establish the real connection on our behalf, or return a
    reference to the existing one.
    """
    if self.connected:
        return

    self._connect_broker()
    stack = self._build_stack()
    self._connect_stack(stack)
python
{ "resource": "" }
q24447
Connection.reset
train
def reset(self):
    """
    Explicitly terminate the connection to the remote host. This discards
    any local state we hold for the connection, returns the Connection to
    the 'disconnected' state, and informs ContextService the connection is
    bad somehow, and should be shut down and discarded.
    """
    if self._task_vars is None:
        self._reset_find_task_vars()

    if self._play_context.remote_addr is None:
        # <2.5.6 incorrectly populate PlayContext for reset_connection
        # https://github.com/ansible/ansible/issues/27520
        raise ansible.errors.AnsibleConnectionFailure(
            self.reset_compat_msg
        )

    self._connect()
    self._mitogen_reset(mode='reset')
    self._shutdown_broker()
python
{ "resource": "" }
q24448
Connection.spawn_isolated_child
train
def spawn_isolated_child(self):
    """
    Fork or launch a new child off the target context.

    :returns:
        mitogen.core.Context of the new child.
    """
    return self.get_chain(use_fork=True).call(
        ansible_mitogen.target.spawn_isolated_child
    )
python
{ "resource": "" }
q24449
write_all
train
def write_all(fd, s, deadline=None):
    """Arrange for all of bytestring `s` to be written to the file
    descriptor `fd`.

    :param int fd:
        File descriptor to write to.
    :param bytes s:
        Bytestring to write to file descriptor.
    :param float deadline:
        If not :data:`None`, absolute UNIX timestamp after which timeout
        should occur.

    :raises mitogen.core.TimeoutError:
        Bytestring could not be written entirely before deadline was
        exceeded.
    :raises mitogen.parent.EofError:
        Stream indicated EOF, suggesting the child process has exited.
    :raises mitogen.core.StreamError:
        File descriptor was disconnected before write could complete.
    """
    timeout = None
    written = 0
    poller = PREFERRED_POLLER()
    poller.start_transmit(fd)

    try:
        while written < len(s):
            if deadline is not None:
                timeout = max(0, deadline - time.time())
            if timeout == 0:
                raise mitogen.core.TimeoutError('write timed out')

            if mitogen.core.PY3:
                window = memoryview(s)[written:]
            else:
                window = buffer(s, written)

            for fd in poller.poll(timeout):
                n, disconnected = mitogen.core.io_op(os.write, fd, window)
                if disconnected:
                    raise EofError('EOF on stream during write')
                written += n
    finally:
        poller.close()
python
{ "resource": "" }
q24450
_upgrade_broker
train
def _upgrade_broker(broker):
    """
    Extract the poller state from Broker and replace it with the industrial
    strength poller for this OS. Must run on the Broker thread.
    """
    # This function is deadly! The act of calling start_receive() generates
    # log messages which must be silenced as the upgrade progresses,
    # otherwise the poller state will change as it is copied, resulting in
    # write fds that are lost. (Due to LogHandler->Router->Stream->Broker->
    # Poller, where Stream only calls start_transmit() when transitioning
    # from empty to non-empty buffer. If the start_transmit() is lost,
    # writes from the child hang permanently).
    root = logging.getLogger()
    old_level = root.level
    root.setLevel(logging.CRITICAL)

    old = broker.poller
    new = PREFERRED_POLLER()
    for fd, data in old.readers:
        new.start_receive(fd, data)
    for fd, data in old.writers:
        new.start_transmit(fd, data)

    old.close()
    broker.poller = new
    root.setLevel(old_level)
    LOG.debug('replaced %r with %r (new: %d readers, %d writers; '
              'old: %d readers, %d writers)', old, new,
              len(new.readers), len(new.writers),
              len(old.readers), len(old.writers))
python
{ "resource": "" }
q24451
stream_by_method_name
train
def stream_by_method_name(name):
    """
    Given the name of a Mitogen connection method, import its implementation
    module and return its Stream subclass.
    """
    if name == u'local':
        name = u'parent'
    module = mitogen.core.import_module(u'mitogen.' + name)
    return module.Stream
python
{ "resource": "" }
q24452
PartialZlib.append
train
def append(self, s):
    """
    Append the bytestring `s` to the compressor state and return the
    final compressed output.
    """
    if self._compressor is None:
        return zlib.compress(self.s + s, 9)
    else:
        compressor = self._compressor.copy()
        out = self._out
        out += compressor.compress(s)
        return out + compressor.flush()
python
{ "resource": "" }
q24453
Stream.construct
train
def construct(self, max_message_size, remote_name=None, python_path=None,
              debug=False, connect_timeout=None, profiling=False,
              unidirectional=False, old_router=None, **kwargs):
    """Get the named context running on the local machine, creating it if
    it does not exist."""
    super(Stream, self).construct(**kwargs)
    self.max_message_size = max_message_size
    if python_path:
        self.python_path = python_path
    if connect_timeout:
        self.connect_timeout = connect_timeout
    if remote_name is None:
        remote_name = get_default_remote_name()
    if '/' in remote_name or '\\' in remote_name:
        raise ValueError('remote_name= cannot contain slashes')
    self.remote_name = remote_name
    self.debug = debug
    self.profiling = profiling
    self.unidirectional = unidirectional
    self.connect_deadline = time.time() + self.connect_timeout
python
{ "resource": "" }
q24454
Stream.on_shutdown
train
def on_shutdown(self, broker):
    """Request the slave gracefully shut itself down."""
    LOG.debug('%r closing CALL_FUNCTION channel', self)
    self._send(
        mitogen.core.Message(
            src_id=mitogen.context_id,
            dst_id=self.remote_id,
            handle=mitogen.core.SHUTDOWN,
        )
    )
python
{ "resource": "" }
q24455
Stream._reap_child
train
def _reap_child(self):
    """
    Reap the child process during disconnection.
    """
    if self.detached and self.child_is_immediate_subprocess:
        LOG.debug('%r: immediate child is detached, won\'t reap it', self)
        return

    if self.profiling:
        LOG.info('%r: won\'t kill child because profiling=True', self)
        return

    if self._reaped:
        # on_disconnect() may be invoked more than once, for example, if
        # there is still a pending message to be sent after the first
        # on_disconnect() call.
        return

    try:
        pid, status = os.waitpid(self.pid, os.WNOHANG)
    except OSError:
        e = sys.exc_info()[1]
        if e.args[0] == errno.ECHILD:
            LOG.warn('%r: waitpid(%r) produced ECHILD', self, self.pid)
            return
        raise

    self._reaped = True
    if pid:
        LOG.debug('%r: PID %d %s', self, pid, wstatus_to_str(status))
        return

    if not self._router.profiling:
        # For processes like sudo we cannot actually send sudo a signal,
        # because it is setuid, so this is best-effort only.
        LOG.debug('%r: child process still alive, sending SIGTERM', self)
        try:
            os.kill(self.pid, signal.SIGTERM)
        except OSError:
            e = sys.exc_info()[1]
            if e.args[0] != errno.EPERM:
                raise
python
{ "resource": "" }
q24456
Stream._adorn_eof_error
train
def _adorn_eof_error(self, e):
    """
    Used by subclasses to provide additional information in the case of a
    failed connection.
    """
    if self.eof_error_hint:
        e.args = ('%s\n\n%s' % (e.args[0], self.eof_error_hint),)
python
{ "resource": "" }
q24457
CallChain.reset
train
def reset(self):
    """
    Instruct the target to forget any related exception.
    """
    if not self.chain_id:
        return

    saved, self.chain_id = self.chain_id, None
    try:
        self.call_no_reply(mitogen.core.Dispatcher.forget_chain, saved)
    finally:
        self.chain_id = saved
python
{ "resource": "" }
q24458
Context.shutdown
train
def shutdown(self, wait=False):
    """
    Arrange for the context to receive a ``SHUTDOWN`` message, triggering
    graceful shutdown.

    Due to a lack of support for timers, no attempt is made yet to force
    terminate a hung context using this method. This will be fixed shortly.

    :param bool wait:
        If :data:`True`, block the calling thread until the context has
        completely terminated.
    :returns:
        If `wait` is :data:`False`, returns a :class:`mitogen.core.Latch`
        whose :meth:`get() <mitogen.core.Latch.get>` method returns
        :data:`None` when shutdown completes. The `timeout` parameter may
        be used to implement graceful timeouts.
    """
    LOG.debug('%r.shutdown() sending SHUTDOWN', self)
    latch = mitogen.core.Latch()
    mitogen.core.listen(self, 'disconnect', lambda: latch.put(None))
    self.send(
        mitogen.core.Message(
            handle=mitogen.core.SHUTDOWN,
        )
    )

    if wait:
        latch.get()
    else:
        return latch
python
{ "resource": "" }
q24459
RouteMonitor._send_one
train
def _send_one(self, stream, handle, target_id, name):
    """
    Compose and send an update message on a stream.

    :param mitogen.core.Stream stream:
        Stream to send it on.
    :param int handle:
        :data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
    :param int target_id:
        ID of the connecting or disconnecting context.
    :param str name:
        Context name or :data:`None`.
    """
    if not stream:
        # We may not have a stream during shutdown.
        return

    data = str(target_id)
    if name:
        data = '%s:%s' % (target_id, name)
    stream.send(
        mitogen.core.Message(
            handle=handle,
            data=data.encode('utf-8'),
            dst_id=stream.remote_id,
        )
    )
python
{ "resource": "" }
q24460
RouteMonitor._propagate_up
train
def _propagate_up(self, handle, target_id, name=None):
    """
    In a non-master context, propagate an update towards the master.

    :param int handle:
        :data:`mitogen.core.ADD_ROUTE` or :data:`mitogen.core.DEL_ROUTE`
    :param int target_id:
        ID of the connecting or disconnecting context.
    :param str name:
        For :data:`mitogen.core.ADD_ROUTE`, the name of the new context
        assigned by its parent. This is used by parents to assign the
        :attr:`mitogen.core.Context.name` attribute.
    """
    if self.parent:
        stream = self.router.stream_by_id(self.parent.context_id)
        self._send_one(stream, handle, target_id, name)
python
{ "resource": "" }
q24461
RouteMonitor._on_stream_disconnect
train
def _on_stream_disconnect(self, stream):
    """
    Respond to disconnection of a local stream by propagating DEL_ROUTE for
    any contexts we know were attached to it.
    """
    # During a stream crash it is possible for disconnect signal to fire
    # twice, in which case ignore the second instance.
    routes = self._routes_by_stream.pop(stream, None)
    if routes is None:
        return

    LOG.debug('%r: %r is gone; propagating DEL_ROUTE for %r',
              self, stream, routes)
    for target_id in routes:
        self.router.del_route(target_id)
        self._propagate_up(mitogen.core.DEL_ROUTE, target_id)
        self._propagate_down(mitogen.core.DEL_ROUTE, target_id)

        context = self.router.context_by_id(target_id, create=False)
        if context:
            mitogen.core.fire(context, 'disconnect')
python
{ "resource": "" }
q24462
Router.get_streams
train
def get_streams(self):
    """
    Return a snapshot of all streams in existence at time of call.
    """
    self._write_lock.acquire()
    try:
        # Materialize the view while the lock is held; returning the lazy
        # iterator would let it escape the critical section.
        return list(itervalues(self._stream_by_id))
    finally:
        self._write_lock.release()
python
{ "resource": "" }
q24463
reset_logging_framework
train
def reset_logging_framework():
    """
    After fork, ensure any logging.Handler locks are recreated, as a variety
    of threads in the parent may have been using the logging package at the
    moment of fork.

    It is not possible to solve this problem in general; see
    https://github.com/dw/mitogen/issues/150 for a full discussion.
    """
    logging._lock = threading.RLock()

    # The root logger does not appear in the loggerDict.
    for name in [None] + list(logging.Logger.manager.loggerDict):
        for handler in logging.getLogger(name).handlers:
            handler.createLock()

    root = logging.getLogger()
    root.handlers = [
        handler
        for handler in root.handlers
        if not isinstance(handler, mitogen.core.LogHandler)
    ]
python
{ "resource": "" }
q24464
on_fork
train
def on_fork():
    """
    Should be called by any program integrating Mitogen each time the
    process is forked, in the context of the new child.
    """
    reset_logging_framework()  # Must be first!
    fixup_prngs()
    mitogen.core.Latch._on_fork()
    mitogen.core.Side._on_fork()
    mitogen.core.ExternalContext.service_stub_lock = threading.Lock()

    mitogen__service = sys.modules.get('mitogen.service')
    if mitogen__service:
        mitogen__service._pool_lock = threading.Lock()
python
{ "resource": "" }
q24465
main
train
def main(log_level='INFO', profiling=_default_profiling):
    """
    Convenience decorator primarily useful for writing discardable test
    scripts.

    In the master process, when `func` is defined in the :mod:`__main__`
    module, arranges for `func(router)` to be invoked immediately, with
    :py:class:`mitogen.master.Router` construction and destruction handled
    just as in :py:func:`mitogen.utils.run_with_router`. In slaves, this
    function does nothing.

    :param str log_level:
        Logging package level to configure via
        :py:func:`mitogen.utils.log_to_file`.

    :param bool profiling:
        If :py:data:`True`, equivalent to setting
        :py:attr:`mitogen.master.Router.profiling` prior to router
        construction. This causes ``/tmp`` files to be created everywhere at
        the end of a successful run with :py:mod:`cProfile` output for every
        thread.

    Example:

    ::

        import mitogen
        import requests

        def get_url(url):
            return requests.get(url).text

        @mitogen.main()
        def main(router):
            z = router.ssh(hostname='k3')
            print(z.call(get_url, 'https://example.org/'))
    """
    def wrapper(func):
        if func.__module__ != '__main__':
            return func

        import mitogen.parent
        import mitogen.utils as utils
        if profiling:
            mitogen.core.enable_profiling()
            mitogen.master.Router.profiling = profiling
        utils.log_to_file(level=log_level)
        return mitogen.core._profile_hook(
            'app.main',
            utils.run_with_router,
            func,
        )
    return wrapper
python
{ "resource": "" }
q24466
get_small_file
train
def get_small_file(context, path):
    """
    Basic in-memory caching module fetcher. This generates one roundtrip for
    every previously unseen file, so it is only a temporary solution.

    :param context:
        Context we should direct FileService requests to. For now (and
        probably forever) this is just the top-level Mitogen connection
        manager process.
    :param path:
        Path to fetch from FileService, must previously have been registered
        by a privileged context using the `register` command.
    :returns:
        Bytestring file data.
    """
    pool = mitogen.service.get_or_create_pool(router=context.router)
    service = pool.get_service(u'mitogen.service.PushFileService')
    return service.get(path)
python
{ "resource": "" }
q24467
spawn_isolated_child
train
def spawn_isolated_child(econtext):
    """
    For helper functions executed in the fork parent context, arrange for
    the context's router to be upgraded as necessary and for a new child to
    be prepared.

    The actual fork occurs from the 'virginal fork parent', which does not
    have any Ansible modules loaded prior to fork, to avoid conflicts
    resulting from custom module_utils paths.
    """
    mitogen.parent.upgrade_router(econtext)
    if FORK_SUPPORTED:
        context = econtext.router.fork()
    else:
        context = econtext.router.local()
    LOG.debug('create_fork_child() -> %r', context)
    return context
python
{ "resource": "" }
q24468
write_path
train
def write_path(path, s, owner=None, group=None, mode=None,
               utimes=None, sync=False):
    """
    Writes bytes `s` to a filesystem `path`.
    """
    path = os.path.abspath(path)
    fd, tmp_path = tempfile.mkstemp(suffix='.tmp',
                                    prefix='.ansible_mitogen_transfer-',
                                    dir=os.path.dirname(path))
    fp = os.fdopen(fd, 'wb', mitogen.core.CHUNK_SIZE)
    LOG.debug('write_path(path=%r) temporary file: %s', path, tmp_path)

    try:
        try:
            if mode:
                set_file_mode(tmp_path, mode, fd=fp.fileno())
            if owner or group:
                set_file_owner(tmp_path, owner, group, fd=fp.fileno())
            fp.write(s)
            if sync:
                # Flush and fsync while the descriptor is still open;
                # fp.fileno() is invalid after close().
                fp.flush()
                os.fsync(fp.fileno())
        finally:
            fp.close()
        os.rename(tmp_path, path)
    except BaseException:
        os.unlink(tmp_path)
        raise

    if utimes:
        os.utime(path, utimes)
python
{ "resource": "" }
q24469
AsyncRunner._update
train
def _update(self, dct):
    """
    Update an async job status file.
    """
    LOG.info('%r._update(%r, %r)', self, self.job_id, dct)
    dct.setdefault('ansible_job_id', self.job_id)
    dct.setdefault('data', '')

    fp = open(self.path + '.tmp', 'w')
    try:
        fp.write(json.dumps(dct))
    finally:
        fp.close()
    os.rename(self.path + '.tmp', self.path)
python
{ "resource": "" }
q24470
ActionModuleMixin._make_tmp_path
train
def _make_tmp_path(self, remote_user=None):
    """
    Create a temporary subdirectory as a child of the temporary directory
    managed by the remote interpreter.
    """
    LOG.debug('_make_tmp_path(remote_user=%r)', remote_user)
    path = self._generate_tmp_path()
    LOG.debug('Temporary directory: %r', path)
    self._connection.get_chain().call_no_reply(os.mkdir, path)
    self._connection._shell.tmpdir = path
    return path
python
{ "resource": "" }
q24471
ActionModuleMixin._fixup_perms2
train
def _fixup_perms2(self, remote_paths, remote_user=None, execute=True):
    """
    Mitogen always executes ActionBase helper methods in the context of the
    target user account, so it is never necessary to modify permissions
    except to ensure the execute bit is set if requested.
    """
    LOG.debug('_fixup_perms2(%r, remote_user=%r, execute=%r)',
              remote_paths, remote_user, execute)
    if execute and self._task.action not in self.FIXUP_PERMS_RED_HERRING:
        return self._remote_chmod(remote_paths, mode='u+x')
    return self.COMMAND_RESULT.copy()
python
{ "resource": "" }
q24472
minimize_source
train
def minimize_source(source):
    """Remove comments and docstrings from Python `source`, preserving line
    numbers and syntax of empty blocks.

    :param str source:
        The source to minimize.

    :returns str:
        The minimized source.
    """
    source = mitogen.core.to_text(source)
    tokens = tokenize.generate_tokens(StringIO(source).readline)
    tokens = strip_comments(tokens)
    tokens = strip_docstrings(tokens)
    tokens = reindent(tokens)
    return tokenize.untokenize(tokens)
python
{ "resource": "" }
q24473
strip_comments
train
def strip_comments(tokens):
    """Drop comment tokens from a `tokenize` stream.

    Comments on lines 1-2 are kept, to preserve hashbang and encoding.
    Trailing whitespace is removed from all lines.
    """
    prev_typ = None
    prev_end_col = 0
    for typ, tok, (start_row, start_col), (end_row, end_col), line in tokens:
        if typ in (tokenize.NL, tokenize.NEWLINE):
            if prev_typ in (tokenize.NL, tokenize.NEWLINE):
                start_col = 0
            else:
                start_col = prev_end_col
            end_col = start_col + 1
        elif typ == tokenize.COMMENT and start_row > 2:
            continue
        prev_typ = typ
        prev_end_col = end_col
        yield typ, tok, (start_row, start_col), (end_row, end_col), line
python
{ "resource": "" }
q24474
strip_docstrings
train
def strip_docstrings(tokens):
    """Replace docstring tokens with NL tokens in a `tokenize` stream.

    Any STRING token not part of an expression is deemed a docstring.
    Indented docstrings are not yet recognised.
    """
    stack = []
    state = 'wait_string'
    for t in tokens:
        typ = t[0]
        if state == 'wait_string':
            if typ in (tokenize.NL, tokenize.COMMENT):
                yield t
            elif typ in (tokenize.DEDENT, tokenize.INDENT, tokenize.STRING):
                stack.append(t)
            elif typ == tokenize.NEWLINE:
                stack.append(t)
                start_line, end_line = stack[0][2][0], stack[-1][3][0]+1
                for i in range(start_line, end_line):
                    yield tokenize.NL, '\n', (i, 0), (i, 1), '\n'
                for t in stack:
                    if t[0] in (tokenize.DEDENT, tokenize.INDENT):
                        yield (t[0], t[1],
                               (i+1, t[2][1]), (i+1, t[3][1]), t[4])
                del stack[:]
            else:
                stack.append(t)
                for t in stack:
                    yield t
                del stack[:]
                state = 'wait_newline'
        elif state == 'wait_newline':
            if typ == tokenize.NEWLINE:
                state = 'wait_string'
            yield t
python
{ "resource": "" }
q24475
reindent
train
def reindent(tokens, indent=' '):
    """Replace existing indentation in a token stream with `indent`."""
    old_levels = []
    old_level = 0
    new_level = 0
    for typ, tok, (start_row, start_col), (end_row, end_col), line in tokens:
        if typ == tokenize.INDENT:
            old_levels.append(old_level)
            old_level = len(tok)
            new_level += 1
            tok = indent * new_level
        elif typ == tokenize.DEDENT:
            old_level = old_levels.pop()
            new_level -= 1
        start_col = max(0, start_col - old_level + new_level)
        if start_row == end_row:
            end_col = start_col + len(tok)
        yield typ, tok, (start_row, start_col), (end_row, end_col), line
python
{ "resource": "" }
q24476
get_file_contents
train
def get_file_contents(path):
    """
    Get the contents of a file.
    """
    with open(path, 'rb') as fp:
        # mitogen.core.Blob() is a bytes subclass with a repr() that returns
        # a summary of the blob, rather than the raw blob data. This makes
        # logging output *much* nicer. Unlike most custom types, blobs can
        # be serialized.
        return mitogen.core.Blob(fp.read())
python
{ "resource": "" }
q24477
streamy_download_file
train
def streamy_download_file(context, path):
    """
    Fetch a file from the FileService hosted by `context`.
    """
    bio = io.BytesIO()

    # FileService.get() is not actually an exposed service method, it's just
    # a classmethod that wraps up the complicated dance of implementing the
    # transfer.
    ok, metadata = mitogen.service.FileService.get(context, path, bio)

    return {
        'success': ok,
        'metadata': metadata,
        'size': len(bio.getvalue()),
    }
python
{ "resource": "" }
q24478
get_password_hash
train
def get_password_hash(username):
    """
    Fetch a user's password hash.
    """
    try:
        h = spwd.getspnam(username)
    except KeyError:
        return None

    # mitogen.core.Secret() is a Unicode subclass with a repr() that hides
    # the secret data. This keeps secret stuff out of logs. Like blobs,
    # secrets can also be serialized.
    return mitogen.core.Secret(h)
python
{ "resource": "" }
q24479
work_on_machine
train
def work_on_machine(context):
    """
    Do stuff to a remote context.
    """
    print("Created context. Context ID is", context.context_id)

    # You don't need to understand any/all of this, but it's helpful to grok
    # the whole chain:
    #
    # - Context.call() is a light wrapper around .call_async(), the wrapper
    #   simply blocks the caller until a reply arrives.
    # - .call_async() serializes the call signature into a message and
    #   passes it to .send_async()
    # - .send_async() creates a mitogen.core.Receiver() on the local router.
    #   The receiver constructor uses Router.add_handle() to allocate a
    #   'reply_to' handle and install a callback function that wakes the
    #   receiver when a reply message arrives.
    # - .send_async() puts the reply handle in Message.reply_to field and
    #   passes it to .send()
    # - Context.send() stamps the destination context ID into the
    #   Message.dst_id field and passes it to Router.route()
    # - Router.route() uses Broker.defer() to schedule _async_route(msg)
    #   on the Broker thread.
    # [broker thread]
    # - The broker thread wakes and calls _async_route(msg)
    # - Router._async_route() notices 'dst_id' is for a remote context and
    #   looks up the stream on which messages for dst_id should be sent
    #   (may be direct connection or not), and calls Stream.send()
    # - Stream.send() packs the message into a bytestring, appends it to
    #   Stream._output_buf, and calls Broker.start_transmit()
    # - Broker finishes work, reenters IO loop. IO loop wakes due to
    #   writeable stream.
    # - Stream.on_transmit() writes the full/partial buffer to SSH, calls
    #   stop_transmit() to mark the stream unwriteable once _output_buf is
    #   empty.
    # - Broker IO loop sleeps, no readers/writers.
    # - Broker wakes due to SSH stream readable.
    # - Stream.on_receive() called, reads the reply message, converts it to
    #   a Message and passes it to Router._async_route().
    # - Router._async_route() notices message is for local context, looks up
    #   target handle in the .add_handle() registry.
    # - Receiver._on_receive() called, appends message to receiver queue.
    # [main thread]
    # - Receiver.get() used to block the original Context.call() wakes and
    #   pops the message from the queue.
    # - Message data (pickled return value) is deserialized and returned to
    #   the caller.
    print("It's running on the local machine. Its PID is",
          context.call(os.getpid))

    # Now let's call a function defined in this module. On receiving the
    # function call request, the child attempts to import __main__, which is
    # initially missing, causing the importer in the child to request it
    # from its parent. That causes _this script_ to be sent as the module
    # source over the wire.
    print("Calling md5sum(/etc/passwd) in the child:",
          context.call(md5sum, '/etc/passwd'))

    # Now let's "transfer" a file. The simplest way to do this is calling a
    # function that returns the file data, which is totally fine for small
    # files.
    print("Download /etc/passwd via function call: %d bytes" % (
        len(context.call(get_file_contents, '/etc/passwd'))
    ))

    # And using function calls, in the other direction:
    print("Upload /tmp/blah via function call: %s" % (
        context.call(put_file_contents, '/tmp/blah', b'blah!'),
    ))

    # Now let's transfer what might be a big file. The problem with big
    # files is that they may not fit in RAM. This uses
    # mitogen.services.FileService to implement streamy file transfer
    # instead. The sender must have a 'service pool' running that will host
    # FileService. First let's do the 'upload' direction, where the master
    # hosts FileService.

    # Steals the 'Router' reference from the context object. In a real app
    # the pool would be constructed once at startup, this is just demo code.
    file_service = mitogen.service.FileService(context.router)

    # Start the pool.
    pool = mitogen.service.Pool(context.router, services=[file_service])

    # Grant access to a file on the local disk from unprivileged contexts.
    # .register() is also exposed as a service method -- you can call it on
    # a child context from any more privileged context.
    file_service.register('/etc/passwd')

    # Now call our wrapper function that knows how to handle the transfer.
    # In a real app, this wrapper might also set ownership/modes or do any
    # other app-specific stuff relating to the file that was transferred.
    print("Streamy upload /etc/passwd: remote result: %s" % (
        context.call(
            streamy_download_file,
            # To avoid hard-wiring streamy_download_file(), we want to pass
            # it a Context object that hosts the file service it should
            # request files from. Router.myself() returns a Context
            # referring to this process.
            context=context.router.myself(),
            path='/etc/passwd',
        ),
    ))

    # Shut down the pool now we're done with it, else app will hang at
    # exit. Once again, this should only happen once at app startup/exit,
    # not for every file transfer!
    pool.stop(join=True)

    # Now let's do the same thing but in reverse: we use FileService on the
    # remote to download a file. This uses context.call_service(), which
    # invokes a special code path that causes auto-initialization of a
    # thread pool in the target, and auto-construction of the target
    # service, but only if the service call was made by a more privileged
    # context. We could write a helper function that runs in the remote to
    # do all that by hand, but the library handles it for us.

    # Make the file accessible. A future FileService could avoid the need
    # for this for privileged contexts.
    context.call_service(
        service_name=mitogen.service.FileService,
        method_name='register',
        path='/etc/passwd'
    )

    # Now we can use our streamy_download_file() function in reverse --
    # running it from this process and having it fetch from the remote
    # process:
    print("Streamy download /etc/passwd: result: %s" % (
        streamy_download_file(context, '/etc/passwd'),
    ))
python
{ "resource": "" }
q24480
parse_script_interpreter
train
def parse_script_interpreter(source):
    """
    Parse the script interpreter portion of a UNIX hashbang using the rules
    Linux uses.

    :param str source:
        String like "/usr/bin/env python".

    :returns:
        Tuple of `(interpreter, arg)`, where `interpreter` is the script
        interpreter and `arg` is its sole argument if present, otherwise
        :py:data:`None`.
    """
    # Find terminating newline. Assume last byte of binprm_buf if absent.
    nl = source.find(b'\n', 0, 128)
    if nl == -1:
        nl = min(128, len(source))

    # Split once on the first run of whitespace. If no whitespace exists,
    # bits just contains the interpreter filename.
    bits = source[0:nl].strip().split(None, 1)
    if len(bits) == 1:
        return mitogen.core.to_text(bits[0]), None
    return mitogen.core.to_text(bits[0]), mitogen.core.to_text(bits[1])
python
{ "resource": "" }
q24481
quantize
train
def quantize(image, bits_per_channel=None):
    '''Reduces the number of bits per channel in the given image.'''
    if bits_per_channel is None:
        bits_per_channel = 6

    assert image.dtype == np.uint8

    shift = 8-bits_per_channel
    halfbin = (1 << shift) >> 1

    return ((image.astype(int) >> shift) << shift) + halfbin
python
{ "resource": "" }
q24482
pack_rgb
train
def pack_rgb(rgb):
    '''Packs 24-bit RGB triples into a single integer, works on both
    arrays and tuples.'''
    orig_shape = None

    if isinstance(rgb, np.ndarray):
        assert rgb.shape[-1] == 3
        orig_shape = rgb.shape[:-1]
    else:
        assert len(rgb) == 3
        rgb = np.array(rgb)

    rgb = rgb.astype(int).reshape((-1, 3))

    packed = (rgb[:, 0] << 16 |
              rgb[:, 1] << 8 |
              rgb[:, 2])

    if orig_shape is None:
        return packed
    else:
        return packed.reshape(orig_shape)
python
{ "resource": "" }
q24483
unpack_rgb
train
def unpack_rgb(packed):
    '''Unpacks a single integer or array of integers into one or more
    24-bit RGB values.

    '''

    orig_shape = None

    if isinstance(packed, np.ndarray):
        assert packed.dtype == int
        orig_shape = packed.shape
        packed = packed.reshape((-1, 1))

    rgb = ((packed >> 16) & 0xff,
           (packed >> 8) & 0xff,
           (packed) & 0xff)

    if orig_shape is None:
        return rgb
    else:
        return np.hstack(rgb).reshape(orig_shape + (3,))
python
{ "resource": "" }
q24484
get_bg_color
train
def get_bg_color(image, bits_per_channel=None):
    '''Obtains the background color from an image or array of RGB colors
    by grouping similar colors into bins and finding the most frequent
    one.

    '''

    assert image.shape[-1] == 3

    quantized = quantize(image, bits_per_channel).astype(int)
    packed = pack_rgb(quantized)

    unique, counts = np.unique(packed, return_counts=True)

    packed_mode = unique[counts.argmax()]

    return unpack_rgb(packed_mode)
python
{ "resource": "" }
q24485
rgb_to_sv
train
def rgb_to_sv(rgb):
    '''Convert an RGB image or array of RGB colors to saturation and
    value, returning each one as a separate 32-bit floating point array or
    value.

    '''

    if not isinstance(rgb, np.ndarray):
        rgb = np.array(rgb)

    axis = len(rgb.shape) - 1
    cmax = rgb.max(axis=axis).astype(np.float32)
    cmin = rgb.min(axis=axis).astype(np.float32)
    delta = cmax - cmin

    saturation = delta.astype(np.float32) / cmax.astype(np.float32)
    saturation = np.where(cmax == 0, 0, saturation)

    value = cmax / 255.0

    return saturation, value
python
{ "resource": "" }
q24486
postprocess
train
def postprocess(output_filename, options):
    '''Runs the postprocessing command on the file provided.'''

    assert options.postprocess_cmd

    base, _ = os.path.splitext(output_filename)
    post_filename = base + options.postprocess_ext

    cmd = options.postprocess_cmd
    cmd = cmd.replace('%i', output_filename)
    cmd = cmd.replace('%o', post_filename)
    cmd = cmd.replace('%e', options.postprocess_ext)

    subprocess_args = shlex.split(cmd)

    if os.path.exists(post_filename):
        os.unlink(post_filename)

    if not options.quiet:
        print(' running "{}"...'.format(cmd), end=' ')
        sys.stdout.flush()

    try:
        result = subprocess.call(subprocess_args)
        before = os.stat(output_filename).st_size
        after = os.stat(post_filename).st_size
    except OSError:
        result = -1

    if result == 0:
        if not options.quiet:
            print('{:.1f}% reduction'.format(
                100 * (1.0 - float(after) / before)))
        return post_filename
    else:
        sys.stderr.write('warning: postprocessing failed!\n')
        return None
python
{ "resource": "" }
q24487
load
train
def load(input_filename):
    '''Load an image with Pillow and convert it to numpy array. Also
    returns the image DPI in x and y as a tuple.'''

    try:
        pil_img = Image.open(input_filename)
    except IOError:
        sys.stderr.write('warning: error opening {}\n'.format(
            input_filename))
        return None, None

    if pil_img.mode != 'RGB':
        pil_img = pil_img.convert('RGB')

    if 'dpi' in pil_img.info:
        dpi = pil_img.info['dpi']
    else:
        dpi = (300, 300)

    img = np.array(pil_img)

    return img, dpi
python
{ "resource": "" }
q24488
sample_pixels
train
def sample_pixels(img, options):
    '''Pick a fixed percentage of pixels in the image, returned in random
    order.'''

    pixels = img.reshape((-1, 3))
    num_pixels = pixels.shape[0]
    num_samples = int(num_pixels * options.sample_fraction)

    idx = np.arange(num_pixels)
    np.random.shuffle(idx)

    return pixels[idx[:num_samples]]
python
{ "resource": "" }
q24489
get_fg_mask
train
def get_fg_mask(bg_color, samples, options):
    '''Determine whether each pixel in a set of samples is foreground by
    comparing it to the background color. A pixel is classified as a
    foreground pixel if either its value or saturation differs from the
    background by a threshold.'''

    s_bg, v_bg = rgb_to_sv(bg_color)
    s_samples, v_samples = rgb_to_sv(samples)

    s_diff = np.abs(s_bg - s_samples)
    v_diff = np.abs(v_bg - v_samples)

    return ((v_diff >= options.value_threshold) |
            (s_diff >= options.sat_threshold))
python
{ "resource": "" }
q24490
get_palette
train
def get_palette(samples, options, return_mask=False, kmeans_iter=40):
    '''Extract the palette for the set of sampled RGB values. The first
    palette entry is always the background color; the rest are determined
    from foreground pixels by running K-means clustering. Returns the
    palette, as well as a mask corresponding to the foreground pixels.

    '''

    if not options.quiet:
        print('  getting palette...')

    bg_color = get_bg_color(samples, 6)

    fg_mask = get_fg_mask(bg_color, samples, options)

    centers, _ = kmeans(samples[fg_mask].astype(np.float32),
                        options.num_colors - 1,
                        iter=kmeans_iter)

    palette = np.vstack((bg_color, centers)).astype(np.uint8)

    if not return_mask:
        return palette
    else:
        return palette, fg_mask
python
{ "resource": "" }
q24491
apply_palette
train
def apply_palette(img, palette, options):
    '''Apply the palette to the given image. The first step is to set all
    background pixels to the background color; then, nearest-neighbor
    matching is used to map each foreground color to the closest one in
    the palette.

    '''

    if not options.quiet:
        print('  applying palette...')

    bg_color = palette[0]

    fg_mask = get_fg_mask(bg_color, img, options)

    orig_shape = img.shape

    pixels = img.reshape((-1, 3))
    fg_mask = fg_mask.flatten()

    num_pixels = pixels.shape[0]

    labels = np.zeros(num_pixels, dtype=np.uint8)

    labels[fg_mask], _ = vq(pixels[fg_mask], palette)

    return labels.reshape(orig_shape[:-1])
python
{ "resource": "" }
q24492
get_global_palette
train
def get_global_palette(filenames, options):
    '''Fetch the global palette for a series of input files by merging
    their samples together into one large array.

    '''

    input_filenames = []

    all_samples = []

    if not options.quiet:
        print('building global palette...')

    for input_filename in filenames:
        img, _ = load(input_filename)
        if img is None:
            continue

        if not options.quiet:
            print('  processing {}...'.format(input_filename))

        samples = sample_pixels(img, options)
        input_filenames.append(input_filename)
        all_samples.append(samples)

    num_inputs = len(input_filenames)

    all_samples = [s[:int(round(float(s.shape[0]) / num_inputs))]
                   for s in all_samples]

    all_samples = np.vstack(tuple(all_samples))

    global_palette = get_palette(all_samples, options)

    if not options.quiet:
        print('  done\n')

    return input_filenames, global_palette
python
{ "resource": "" }
q24493
emit_pdf
train
def emit_pdf(outputs, options):
    '''Runs the PDF conversion command to generate the PDF.'''

    cmd = options.pdf_cmd
    cmd = cmd.replace('%o', options.pdfname)
    if len(outputs) > 2:
        cmd_print = cmd.replace('%i', ' '.join(outputs[:2] + ['...']))
    else:
        cmd_print = cmd.replace('%i', ' '.join(outputs))
    cmd = cmd.replace('%i', ' '.join(outputs))

    if not options.quiet:
        print('running PDF command "{}"...'.format(cmd_print))

    try:
        result = subprocess.call(shlex.split(cmd))
    except OSError:
        result = -1

    if result == 0:
        if not options.quiet:
            print('  wrote', options.pdfname)
    else:
        sys.stderr.write('warning: PDF command failed\n')
python
{ "resource": "" }
q24494
notescan_main
train
def notescan_main(options):
    '''Main function for this program when run as script.'''

    filenames = get_filenames(options)

    outputs = []

    do_global = options.global_palette and len(filenames) > 1

    if do_global:
        filenames, palette = get_global_palette(filenames, options)

    do_postprocess = bool(options.postprocess_cmd)

    for input_filename in filenames:

        img, dpi = load(input_filename)
        if img is None:
            continue

        output_filename = '{}{:04d}.png'.format(
            options.basename, len(outputs))

        if not options.quiet:
            print('opened', input_filename)

        if not do_global:
            samples = sample_pixels(img, options)
            palette = get_palette(samples, options)

        labels = apply_palette(img, palette, options)

        save(output_filename, labels, palette, dpi, options)

        if do_postprocess:
            post_filename = postprocess(output_filename, options)
            if post_filename:
                output_filename = post_filename
            else:
                do_postprocess = False

        outputs.append(output_filename)

        if not options.quiet:
            print('  done\n')

    emit_pdf(outputs, options)
python
{ "resource": "" }
q24495
generic_type_name
train
def generic_type_name(v):
    """
    Return a descriptive type name that isn't Python specific. For example,
    an int type will return 'integer' rather than 'int'.
    """
    if isinstance(v, AstExampleRef):
        return "reference"
    elif isinstance(v, numbers.Integral):
        # Must come before real numbers check since integrals are reals too
        return 'integer'
    elif isinstance(v, numbers.Real):
        return 'float'
    elif isinstance(v, (tuple, list)):
        return 'list'
    elif isinstance(v, six.string_types):
        return 'string'
    elif v is None:
        return 'null'
    else:
        return type(v).__name__
python
{ "resource": "" }
q24496
unwrap
train
def unwrap(data_type):
    """
    Convenience method to unwrap all Aliases and Nullables from around a
    DataType. This checks for nullable wrapping aliases, as well as aliases
    wrapping nullables.

    Args:
        data_type (DataType): The target to unwrap.

    Return:
        Tuple[DataType, bool, bool]: The underlying data type; a bool that is
            set if a nullable was present; a bool that is set if an alias was
            present.
    """
    unwrapped_nullable = False
    unwrapped_alias = False
    while is_alias(data_type) or is_nullable_type(data_type):
        if is_nullable_type(data_type):
            unwrapped_nullable = True
        if is_alias(data_type):
            unwrapped_alias = True
        data_type = data_type.data_type
    return data_type, unwrapped_nullable, unwrapped_alias
python
{ "resource": "" }
q24497
get_custom_annotations_for_alias
train
def get_custom_annotations_for_alias(data_type):
    """
    Given a Stone data type, returns all custom annotations applied to it.
    """
    # annotations can only be applied to Aliases, but they can be wrapped in
    # Nullable. also, Aliases pointing to other Aliases don't automatically
    # inherit their custom annotations, so we might have to traverse.
    result = []
    data_type, _ = unwrap_nullable(data_type)
    while is_alias(data_type):
        result.extend(data_type.custom_annotations)
        data_type, _ = unwrap_nullable(data_type.data_type)
    return result
python
{ "resource": "" }
q24498
get_custom_annotations_recursive
train
def get_custom_annotations_recursive(data_type):
    """
    Given a Stone data type, returns all custom annotations applied to any of
    its members, as well as submembers, ..., to an arbitrary depth.
    """
    # because Stone structs can contain references to themselves (or otherwise
    # be cyclical), we need to keep track of the data types we've already seen
    data_types_seen = set()

    def recurse(data_type):
        if data_type in data_types_seen:
            return
        data_types_seen.add(data_type)

        dt, _, _ = unwrap(data_type)
        if is_struct_type(dt) or is_union_type(dt):
            for field in dt.fields:
                for annotation in recurse(field.data_type):
                    yield annotation
                for annotation in field.custom_annotations:
                    yield annotation
        elif is_list_type(dt):
            for annotation in recurse(dt.data_type):
                yield annotation
        elif is_map_type(dt):
            for annotation in recurse(dt.value_data_type):
                yield annotation
        for annotation in get_custom_annotations_for_alias(data_type):
            yield annotation

    return recurse(data_type)
python
{ "resource": "" }
q24499
UserDefined.has_documented_type_or_fields
train
def has_documented_type_or_fields(self, include_inherited_fields=False):
    """Returns whether this type, or any of its fields, are documented.

    Use this when deciding whether to create a block of documentation for
    this type.
    """
    if self.doc:
        return True
    else:
        return self.has_documented_fields(include_inherited_fields)
python
{ "resource": "" }