_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q46000
ScssInspector._get_recursive_dependancies
train
def _get_recursive_dependancies(self, dependencies_map, sourcepath, recursive=True): """ Return all dependencies of a source, recursively searching through its dependencies. This is a common method used by ``children`` and ``parents`` methods. Args: dependencies_map (dict): Internal buffer (internal buffers ``_CHILDREN_MAP`` or ``_PARENTS_MAP``) to use for searching. sourcepath (str): Source file path to start searching for dependencies. Keyword Arguments: recursive (bool): Switch to enable recursive finding (if True). Default to True. Raises: CircularImport: If circular error is detected from a source. Returns: set: List of dependencies paths. """ # Direct dependencies collected = set([]) collected.update(dependencies_map.get(sourcepath, [])) # Sequence of 'dependencies_map' items to explore sequence = collected.copy() # Exploration list walkthrough = [] # Recursive search starting from direct dependencies if recursive: while True: if not sequence: break item = sequence.pop() # Add current source to the explorated source list walkthrough.append(item) # Current item children current_item_dependancies = dependencies_map.get(item, []) for dependency in current_item_dependancies: # Allready visited item, ignore and continue to the new # item if dependency in walkthrough: continue # Unvisited item yet, add its children to dependencies and # item to explore else: collected.add(dependency) sequence.add(dependency) # Sourcepath has allready been visited but present itself # again, assume it's a circular import if sourcepath in walkthrough: msg = "A circular import has occured by '{}'" raise CircularImport(msg.format(current_item_dependancies)) # No more item to explore, break loop if not sequence: break return collected
python
{ "resource": "" }
q46001
ScssInspector.children
train
def children(self, sourcepath, recursive=True):
    """
    Recursively find all children that are imported from the given source
    path.

    Args:
        sourcepath (str): Source file path to search for.

    Keyword Arguments:
        recursive (bool): Switch to enabled recursive finding (if True).
            Default to True.

    Returns:
        set: List of finded parents path.
    """
    # Bug fix: forward the caller's ``recursive`` flag instead of
    # hardcoding ``True`` (non-recursive lookups were impossible).
    return self._get_recursive_dependancies(
        self._CHILDREN_MAP,
        sourcepath,
        recursive=recursive
    )
python
{ "resource": "" }
q46002
ScssInspector.parents
train
def parents(self, sourcepath, recursive=True):
    """
    Recursively find all parents that import the given source path.

    Args:
        sourcepath (str): Source file path to search for.

    Keyword Arguments:
        recursive (bool): Switch to enabled recursive finding (if True).
            Default to True.

    Returns:
        set: List of finded parents path.
    """
    # Bug fix: forward the caller's ``recursive`` flag instead of
    # hardcoding ``True`` (non-recursive lookups were impossible).
    return self._get_recursive_dependancies(
        self._PARENTS_MAP,
        sourcepath,
        recursive=recursive
    )
python
{ "resource": "" }
q46003
AWSInfo.show_cloudwatch_logs
train
def show_cloudwatch_logs(self, count=10, grp_name=None):
    """
    Show ``count`` latest CloudWatch Logs entries for our lambda function.

    :param count: number of log entries to show
    :type count: int
    :param grp_name: CloudWatch log group name; defaults to the
        ``/aws/lambda/<func_name>`` group for the configured function
    :type grp_name: str
    """
    if grp_name is None:
        # Default log group created by Lambda for the configured function.
        grp_name = '/aws/lambda/%s' % self.config.func_name
    logger.debug('Log Group Name: %s', grp_name)
    logger.debug('Connecting to AWS Logs API')
    conn = client('logs')
    logger.debug('Getting log streams')
    streams = conn.describe_log_streams(
        logGroupName=grp_name,
        orderBy='LastEventTime',
        descending=True,
        limit=count  # at worst, we have 1 event per stream
    )
    logger.debug('Found %d log streams', len(streams['logStreams']))
    shown = 0
    # Walk the newest streams, drawing events from each until ``count``
    # entries have been displayed in total.
    for stream in streams['logStreams']:
        if (count - shown) < 1:
            break
        shown += self._show_log_stream(conn, grp_name,
                                       stream['logStreamName'],
                                       (count - shown))
python
{ "resource": "" }
q46004
AWSInfo._show_log_stream
train
def _show_log_stream(self, conn, grp_name, stream_name, max_count=10):
    """
    Show up to ``max`` events from a specified log stream; return the
    number of events shown.

    :param conn: AWS Logs API connection
    :type conn: :py:class:`botocore:CloudWatchLogs.Client`
    :param grp_name: log group name
    :type grp_name: str
    :param stream_name: log stream name
    :type stream_name: str
    :param max_count: maximum number of events to show
    :type max_count: int
    :return: count of events shown
    :rtype: int
    """
    logger.debug('Showing up to %d events from stream %s',
                 max_count, stream_name)
    events = conn.get_log_events(
        logGroupName=grp_name,
        logStreamName=stream_name,
        limit=max_count,
        startFromHead=False  # newest events first
    )
    if len(events['events']) > 0:
        # Only print the stream header when there is something to show.
        print('## Log Group \'%s\'; Log Stream \'%s\'' % (
            grp_name, stream_name))
    shown = 0
    for evt in events['events']:
        if shown >= max_count:
            break
        shown += 1
        # CloudWatch timestamps are in milliseconds since the epoch.
        dt = datetime.fromtimestamp(evt['timestamp'] / 1000.0)
        print("%s => %s" % (dt, evt['message'].strip()))
    logger.debug('displayed %d events from stream', shown)
    return shown
python
{ "resource": "" }
q46005
AWSInfo._url_for_queue
train
def _url_for_queue(self, conn, name): """ Given a queue name, return the URL for it. :param conn: SQS API connection :type conn: :py:class:`botocore:SQS.Client` :param name: queue name, or None for all queues in config. :type name: str :return: queue URL :rtype: str """ res = conn.get_queue_url(QueueName=name) return res['QueueUrl']
python
{ "resource": "" }
q46006
AWSInfo._delete_msg
train
def _delete_msg(self, conn, queue_url, receipt_handle):
    """
    Delete the message specified by ``receipt_handle`` in the queue
    specified by ``queue_url``.

    :param conn: SQS API connection
    :type conn: :py:class:`botocore:SQS.Client`
    :param queue_url: queue URL to delete the message from
    :type queue_url: str
    :param receipt_handle: message receipt handle
    :type receipt_handle: str
    """
    resp = conn.delete_message(QueueUrl=queue_url,
                               ReceiptHandle=receipt_handle)
    # Best-effort: on a non-200 response just log an error and return,
    # rather than raising.
    if resp['ResponseMetadata']['HTTPStatusCode'] != 200:
        logger.error('Error: message with receipt handle %s in queue %s '
                     'was not successfully deleted (HTTP %s)',
                     receipt_handle, queue_url,
                     resp['ResponseMetadata']['HTTPStatusCode'])
        return
    logger.info('Message with receipt handle %s deleted from queue %s',
                receipt_handle, queue_url)
python
{ "resource": "" }
q46007
AWSInfo._show_one_queue
train
def _show_one_queue(self, conn, name, count, delete=False):
    """
    Show ``count`` messages from the specified SQS queue.

    :param conn: SQS API connection
    :type conn: :py:class:`botocore:SQS.Client`
    :param name: queue name, or None for all queues in config.
    :type name: str
    :param count: maximum number of messages to get from queue
    :type count: int
    :param delete: whether or not to delete messages after receipt
    :type delete: bool
    """
    url = self._url_for_queue(conn, name)
    logger.debug("Queue '%s' url: %s", name, url)
    logger.warning('Receiving %d messages from queue\'%s\'; this may take '
                   'up to 20 seconds.', count, name)
    if not delete:
        # Received messages get a 60s visibility timeout below, so they
        # temporarily disappear from the queue even when not deleted.
        logger.warning("WARNING: Displayed messages will be invisible in "
                       "queue for 60 seconds!")
    seen_ids = []
    all_msgs = []
    empty_polls = 0
    # continue getting messages until we get 2 empty polls in a row
    while empty_polls < 2 and len(all_msgs) < count:
        logger.debug('Polling queue %s for messages (empty_polls=%d)',
                     name, empty_polls)
        msgs = conn.receive_message(
            QueueUrl=url,
            AttributeNames=['All'],
            MessageAttributeNames=['All'],
            MaxNumberOfMessages=count,
            VisibilityTimeout=60,
            WaitTimeSeconds=20  # long polling
        )
        if 'Messages' in msgs and len(msgs['Messages']) > 0:
            empty_polls = 0
            logger.debug("Queue %s - got %d messages", name,
                         len(msgs['Messages']))
            # SQS may redeliver a message across polls; de-duplicate on
            # MessageId so each message is collected only once.
            for m in msgs['Messages']:
                if m['MessageId'] in seen_ids:
                    continue
                seen_ids.append(m['MessageId'])
                all_msgs.append(m)
            continue
        # no messages found
        logger.debug('Queue %s - got no messages', name)
        empty_polls += 1
    logger.debug('received %d messages', len(all_msgs))
    if len(all_msgs) == 0:
        print('=> Queue \'%s\' appears empty.' % name)
        return
    print("=> Queue '%s' (%s)" % (name, url))
    # Polling may have collected more than requested; trim to ``count``.
    if len(all_msgs) > count:
        all_msgs = all_msgs[:count]
    for m in all_msgs:
        # Pretty-print JSON message bodies when possible; leave the raw
        # string body in place otherwise.
        try:
            m['Body'] = json.loads(m['Body'])
        except Exception:
            pass
        print(pretty_json(m))
        if delete:
            self._delete_msg(conn, url, m['ReceiptHandle'])
python
{ "resource": "" }
q46008
AWSInfo._all_queue_names
train
def _all_queue_names(self): """ Return a list of all unique queue names in our config. :return: list of all queue names (str) :rtype: :std:term:`list` """ queues = set() endpoints = self.config.get('endpoints') for e in endpoints: for q in endpoints[e]['queues']: queues.add(q) return sorted(queues)
python
{ "resource": "" }
q46009
AWSInfo.show_queue
train
def show_queue(self, name=None, count=10, delete=False):
    """
    Show up to ``count`` messages from the queue named ``name``. If
    ``name`` is None, show for each queue in our config. If ``delete``
    is True, delete the messages after showing them.

    :param name: queue name, or None for all queues in config.
    :type name: str
    :param count: maximum number of messages to get from queue
    :type count: int
    :param delete: whether or not to delete messages after receipt
    :type delete: bool
    """
    logger.debug('Connecting to SQS API')
    conn = client('sqs')
    if name is not None:
        queues = [name]
    else:
        # Bug fix: the method must be *called*; iterating the bound
        # method object itself raised a TypeError.
        queues = self._all_queue_names()
    for q_name in queues:
        # Keep going over remaining queues even if one fails.
        try:
            self._show_one_queue(conn, q_name, count, delete=delete)
        except Exception:
            logger.error("Error showing queue '%s'", q_name, exc_info=1)
python
{ "resource": "" }
q46010
AWSInfo.get_api_id
train
def get_api_id(self):
    """
    Return the API ID.

    :return: API ID
    :rtype: str
    :raises Exception: if no ReST API matching the configured function
        name is found
    """
    logger.debug('Connecting to AWS apigateway API')
    conn = client('apigateway')
    apis = conn.get_rest_apis()
    api_id = None
    # Find the first ReST API whose name matches our function name.
    # NOTE(review): get_rest_apis may paginate; only the first page is
    # inspected here -- confirm whether pagination matters for this use.
    for api in apis['items']:
        if api['name'] == self.config.func_name:
            api_id = api['id']
            logger.debug('Found API id: %s', api_id)
            break
    if api_id is None:
        raise Exception('Unable to find ReST API named %s'
                        % self.config.func_name)
    return api_id
python
{ "resource": "" }
q46011
AWSInfo._add_method_setting
train
def _add_method_setting(self, conn, api_id, stage_name, path, key, value, op):
    """
    Update a single method setting on the specified stage. This uses the
    'add' operation to PATCH the resource.

    :param conn: APIGateway API connection
    :type conn: :py:class:`botocore:APIGateway.Client`
    :param api_id: ReST API ID
    :type api_id: str
    :param stage_name: stage name
    :type stage_name: str
    :param path: path to patch (see https://docs.aws.amazon.com/apigateway/\
api-reference/resource/stage/#methodSettings)
    :type path: str
    :param key: the dictionary key this should update
    :type key: str
    :param value: new value to set
    :param op: PATCH operation to perform, 'add' or 'replace'
    :type op: str
    """
    logger.debug('update_stage PATCH %s on %s; value=%s',
                 op, path, str(value))
    res = conn.update_stage(
        restApiId=api_id,
        stageName=stage_name,
        patchOperations=[
            {
                'op': op,
                'path': path,
                # The API only accepts string values in patch operations.
                'value': str(value)
            }
        ]
    )
    # Verify the PATCH took effect by reading back the '*/*' (all
    # methods) settings from the response.
    if res['methodSettings']['*/*'][key] != value:
        logger.error('methodSettings PATCH expected to update %s to %s,'
                     'but instead found value as %s', key, value,
                     res['methodSettings']['*/*'][key])
    else:
        logger.info('Successfully updated methodSetting %s to %s',
                    key, value)
python
{ "resource": "" }
q46012
initialize_repository
train
def initialize_repository(path, spor_dir='.spor'):
    """Initialize a spor repository in `path` if one doesn't already exist.

    Args:
        path: Path to any file or directory within the repository.
        spor_dir: The name of the directory containing spor data.

    Returns:
        A `Repository` instance.

    Raises:
        ValueError: A repository already exists at `path`.
    """
    root = pathlib.Path(path)
    data_dir = root / spor_dir

    if data_dir.exists():
        raise ValueError(
            'spor directory already exists: {}'.format(data_dir))

    data_dir.mkdir()
    return Repository(root, spor_dir)
python
{ "resource": "" }
q46013
open_repository
train
def open_repository(path, spor_dir='.spor'):
    """Open an existing repository.

    Args:
        path: Path to any file or directory within the repository.
        spor_dir: The name of the directory containing spor data.

    Returns:
        A `Repository` instance.

    Raises:
        ValueError: No repository is found.
    """
    # Locate the repository root dominating `path`, then wrap it.
    return Repository(_find_root_dir(path, spor_dir), spor_dir)
python
{ "resource": "" }
q46014
_find_root_dir
train
def _find_root_dir(path, spor_dir): """Search for a spor repo containing `path`. This searches for `spor_dir` in directories dominating `path`. If a directory containing `spor_dir` is found, then that directory is returned as a `pathlib.Path`. Returns: The dominating directory containing `spor_dir` as a `pathlib.Path`. Raises: ValueError: No repository is found. """ start_path = pathlib.Path(os.getcwd() if path is None else path) paths = [start_path] + list(start_path.parents) for path in paths: data_dir = path / spor_dir if data_dir.exists() and data_dir.is_dir(): return path raise ValueError('No spor repository found')
python
{ "resource": "" }
q46015
Repository.add
train
def add(self, anchor):
    """Add a new anchor to the repository.

    This will create a new ID for the anchor and provision new storage
    for it.

    Returns:
        The storage ID for the Anchor which can be used to retrieve the
        anchor later.
    """
    # Random hex ID doubles as the storage file name.
    anchor_id = uuid.uuid4().hex
    with self._anchor_path(anchor_id).open(mode='wt') as handle:
        save_anchor(handle, anchor, self.root)
    return anchor_id
python
{ "resource": "" }
q46016
Repository._anchor_path
train
def _anchor_path(self, anchor_id): "Absolute path to the data file for `anchor_id`." file_name = '{}.yml'.format(anchor_id) file_path = self._spor_dir / file_name return file_path
python
{ "resource": "" }
q46017
log
train
def log(**data):
    """RPC method for logging events.

    Builds a history entry from the request params and persists it.

    Return None
    """
    params = data["params"]
    # Copy only the fields the history schema expects.
    entry = {
        field: params[field]
        for field in ("module", "event", "timestamp", "arguments")
    }
    # Call create metod for writing data to database
    history.create(entry)
python
{ "resource": "" }
q46018
HistoryHandler.post
train
def post(self):
    """Accepts jsorpc post request.

    Retrieves data from request body.
    Calls log method for writung data to database
    """
    # Decode the raw request body and parse it as JSON params.
    data = json.loads(self.request.body.decode())
    # Dispatch a JSON-RPC 'log' call to the registered log method.
    # NOTE(review): the dispatch response is never written back to the
    # client -- confirm whether this is intended (fire-and-forget) or if
    # self.write(response) is missing.
    response = dispatch([log],{'jsonrpc': '2.0', 'method': 'log', 'params': data, 'id': 1})
python
{ "resource": "" }
q46019
MMCAlign.start_optimisation
train
def start_optimisation(self, rounds: int, max_angle: float,
                       max_distance: float, temp: float=298.15,
                       stop_when=None, verbose=None):
    """Starts the loop fitting protocol.

    Parameters
    ----------
    rounds : int
        The number of Monte Carlo moves to be evaluated.
    max_angle : float
        The maximum variation in rotation that can moved per step.
    max_distance : float
        The maximum distance the can be moved per step.
    temp : float, optional
        Temperature used during fitting process.
    stop_when : float, optional
        Stops fitting when energy is less than or equal to this value.
    verbose : optional
        Passed through to the MMC loop to control progress output.
        NOTE(review): the default here is None while ``_mmc_loop``
        defaults verbose to True; passing None through disables output --
        confirm this is intended.
    """
    # Score the starting pose so the loop has a baseline energy.
    self._generate_initial_score()
    self._mmc_loop(rounds, max_angle, max_distance, temp=temp,
                   stop_when=stop_when, verbose=verbose)
    return
python
{ "resource": "" }
q46020
MMCAlign._generate_initial_score
train
def _generate_initial_score(self): """Runs the evaluation function for the initial pose.""" self.current_energy = self.eval_fn(self.polypeptide, *self.eval_args) self.best_energy = copy.deepcopy(self.current_energy) self.best_model = copy.deepcopy(self.polypeptide) return
python
{ "resource": "" }
q46021
MMCAlign._mmc_loop
train
def _mmc_loop(self, rounds, max_angle, max_distance, temp=298.15,
              stop_when=None, verbose=True):
    """The main Metropolis Monte Carlo loop.

    Each round proposes a random rigid-body move (rotation or
    translation) on a copy of the polypeptide, evaluates it, and accepts
    or rejects it with the Metropolis criterion at temperature ``temp``.
    """
    current_round = 0
    while current_round < rounds:
        # Work on a copy so a rejected move leaves the model untouched.
        working_model = copy.deepcopy(self.polypeptide)
        random_vector = unit_vector(numpy.random.uniform(-1, 1, size=3))
        # Rotations are proposed 3x more often than translations.
        mode = random.choice(['rotate', 'rotate', 'rotate', 'translate'])
        if mode == 'rotate':
            random_angle = numpy.random.rand() * max_angle
            working_model.rotate(random_angle, random_vector,
                                 working_model.centre_of_mass)
        else:
            random_translation = random_vector * (
                numpy.random.rand() * max_distance)
            working_model.translate(random_translation)
        proposed_energy = self.eval_fn(working_model, *self.eval_args)
        move_accepted = self.check_move(proposed_energy,
                                        self.current_energy, t=temp)
        if move_accepted:
            self.current_energy = proposed_energy
            # Track the best-scoring model seen so far separately from
            # the current (possibly worse) accepted state.
            if self.current_energy < self.best_energy:
                self.polypeptide = working_model
                self.best_energy = copy.deepcopy(self.current_energy)
                self.best_model = copy.deepcopy(working_model)
        if verbose:
            # '\r' keeps the progress report on a single console line.
            sys.stdout.write(
                '\rRound: {}, Current RMSD: {}, Proposed RMSD: {} '
                '(best {}), {}. '
                .format(current_round, self.float_f(self.current_energy),
                        self.float_f(proposed_energy), self.float_f(
                            self.best_energy),
                        "ACCEPTED" if move_accepted else "DECLINED")
            )
            sys.stdout.flush()
        current_round += 1
        # Optional early exit once the target energy is reached.
        if stop_when:
            if self.best_energy <= stop_when:
                break
    return
python
{ "resource": "" }
q46022
MMCAlign.check_move
train
def check_move(new, old, t):
    """Determines if a model will be accepted."""
    # Zero or negative temperature: never accept a move.
    if (t <= 0) or numpy.isclose(t, 0.0):
        return False
    K_BOLTZ = 1.9872041E-003  # kcal/mol.K
    # Lower energy is always accepted; otherwise use the Metropolis
    # criterion with Boltzmann probability exp(-dE / kT).
    if new < old:
        return True
    move_prob = math.exp(-(new - old) / (K_BOLTZ * t))
    return move_prob > random.uniform(0, 1)
python
{ "resource": "" }
q46023
Discover.scan_backends
train
def scan_backends(self, backends):
    """
    From given backends create and return engine, filename and extension
    indexes.

    Arguments:
        backends (list): List of backend engines to scan. Order does
            matter since resulted indexes are stored in an
            ``OrderedDict``. So discovering will stop its job if it
            meets the first item.

    Returns:
        tuple: Engine, filename and extension indexes where:

        * Engines are indexed on their kind name with their backend
          object as value;
        * Filenames are indexed on their filename with engine kind name
          as value;
        * Extensions are indexed on their extension with engine kind
          name as value;
    """
    # Build the three lookup tables in a single pass, preserving the
    # registration order of the backends.
    engines = OrderedDict(
        (backend._kind_name, backend) for backend in backends)
    filenames = OrderedDict(
        (backend._default_filename, backend._kind_name)
        for backend in backends)
    extensions = OrderedDict(
        (backend._file_extension, backend._kind_name)
        for backend in backends)
    return engines, filenames, extensions
python
{ "resource": "" }
q46024
Discover.get_engine
train
def get_engine(self, filepath, kind=None):
    """
    From given filepath try to discover which backend format to use.

    Discovering is pretty naive as it find format from file extension.

    Args:
        filepath (str): Settings filepath or filename.

    Keyword Arguments:
        kind (str): A format name to enforce a specific backend. Can be
            any value from attribute ``_kind_name`` of available backend
            engines.

    Raises:
        boussole.exceptions.SettingsDiscoveryError: If extension is
            unknowed or if given format name is unknowed.

    Returns:
        object: Backend engine class.
    """
    # Explicit kind wins; validate it against registered engines.
    if kind:
        if kind not in self.engines:
            msg = "Given settings format is unknow: {}"
            raise SettingsDiscoveryError(msg.format(kind))
        return self.engines[kind]

    # Otherwise discover the kind from the file extension.
    extension = os.path.splitext(filepath)[1]
    if not extension:
        msg = ("Unable to discover settings format from an empty file "
               "extension: {}")
        raise SettingsDiscoveryError(msg.format(filepath))
    suffix = extension[1:]
    if suffix not in self.extensions:
        msg = ("Settings file extension is unknowed from available "
               "backends: {}")
        raise SettingsDiscoveryError(msg.format(filepath))
    return self.engines[self.extensions[suffix]]
python
{ "resource": "" }
q46025
Discover.guess_filename
train
def guess_filename(self, basedir, kind=None):
    """
    Try to find existing settings filename from base directory using
    default filename from available engines.

    First finded filename from available engines win. So registred
    engines order matter.

    Arguments:
        basedir (string): Directory path where to search for.

    Keyword Arguments:
        kind (string): Backend engine kind name to search for default
            settings filename. If not given, search will be made for
            default settings filename from all available backend
            engines.

    Returns:
        tuple: Absolute filepath and backend engine class.
    """
    # Explicit kind: only probe that engine's default filename.
    if kind:
        candidate = os.path.join(basedir,
                                 self.engines[kind]._default_filename)
        if os.path.exists(candidate):
            return candidate, self.engines[kind]

    # Probe every registered default filename, in registration order.
    for filename, kind_name in self.filenames.items():
        candidate = os.path.join(basedir, filename)
        if os.path.exists(candidate):
            return candidate, self.engines[kind_name]

    msg = "Unable to find any settings in directory: {}"
    raise SettingsDiscoveryError(msg.format(basedir))
python
{ "resource": "" }
q46026
Discover.search
train
def search(self, filepath=None, basedir=None, kind=None): """ Search for a settings file. Keyword Arguments: filepath (string): Path to a config file, either absolute or relative. If absolute set its directory as basedir (omitting given basedir argument). If relative join it to basedir. basedir (string): Directory path where to search for. kind (string): Backend engine kind name (value of attribute ``_kind_name``) to help discovering with empty or relative filepath. Also if explicit absolute filepath is given, this will enforce the backend engine (such as yaml kind will be forced for a ``foo.json`` file). Returns: tuple: Absolute filepath and backend engine class. """ # None values would cause trouble with path joining if filepath is None: filepath = '' if basedir is None: basedir = '.' if not basedir and not filepath: msg = "Either basedir or filepath is required for discovering" raise SettingsDiscoveryError(msg) if kind and kind not in self.engines: msg = "Given settings format is unknow: {}" raise SettingsDiscoveryError(msg.format(kind)) # Implicit filename to find from backend if not filepath: filename, engine = self.guess_filename(basedir, kind) filepath = os.path.join(basedir, filename) # Explicit filename dont have to search for default backend file and # blindly force given backend if any else: if os.path.isabs(filepath): basedir, filename = os.path.split(filepath) else: filepath = os.path.join(basedir, filepath) if not os.path.exists(filepath): msg = "Given settings file does not exists: {}" raise SettingsDiscoveryError(msg.format(filepath)) engine = self.get_engine(filepath, kind) return filepath, engine
python
{ "resource": "" }
q46027
create
train
def create(python, env_dir, system, prompt, bare, virtualenv_py=None):
    """Main entry point to use this as a module.

    Dispatches to the in-process creator when the requested interpreter
    is the running one (or unspecified), otherwise spawns the target
    Python to build the environment.
    """
    common = dict(
        env_dir=env_dir,
        system=system,
        prompt=prompt,
        bare=bare,
        virtualenv_py=virtualenv_py,
    )
    if not python or python == sys.executable:
        _create_with_this(**common)
    else:
        _create_with_python(python=python, **common)
python
{ "resource": "" }
q46028
event.matches
train
def matches(self, client, event_data):
    """True if all filters are matching."""
    # Empty filter list matches everything, same as the loop form.
    return all(check(client, event_data) for check in self.filters)
python
{ "resource": "" }
q46029
event.filter_events
train
def filter_events(cls, client, event_data):
    """Filter registered events and yield them."""
    # Yield only those registered events whose filters all match.
    for candidate in cls.events:
        if candidate.matches(client, event_data):
            yield candidate
python
{ "resource": "" }
q46030
event.filter_callbacks
train
def filter_callbacks(cls, client, event_data):
    """Filter registered events and yield all of their callbacks."""
    # Flatten the callbacks of every matching event, in order.
    for matching_event in cls.filter_events(client, event_data):
        yield from matching_event.callbacks
python
{ "resource": "" }
q46031
ImportProcessor.validate_parameters
train
def validate_parameters(self):
    """Validate that the parameters are correctly specified."""
    # Reject the first parameter that is not among the known ones.
    for param in self.params:
        if param not in self.known_params:
            raise errors.UnknownParameter(param, self.known_params)
python
{ "resource": "" }
q46032
get_api_id
train
def get_api_id(config, args):
    """
    Get the API ID from Terraform, or from AWS if that fails.

    :param config: configuration
    :type config: :py:class:`~.Config`
    :param args: command line arguments
    :type args: :py:class:`argparse.Namespace`
    :return: API Gateway ID
    :rtype: str
    """
    # Prefer the Terraform state output; fall back to querying AWS
    # directly on any failure (missing state, missing output, etc.).
    try:
        logger.debug('Trying to get Terraform rest_api_id output')
        runner = TerraformRunner(config, args.tf_path)
        outputs = runner._get_outputs()
        depl_id = outputs['rest_api_id']
        logger.debug("Terraform rest_api_id output: '%s'", depl_id)
    except Exception:
        logger.info('Unable to find API rest_api_id from Terraform state;'
                    ' querying AWS.', exc_info=1)
        aws = AWSInfo(config)
        depl_id = aws.get_api_id()
        logger.debug("AWS API ID: '%s'", depl_id)
    return depl_id
python
{ "resource": "" }
q46033
cmdline
train
def cmdline(argv=sys.argv[1:]):
    """
    Script for merging different collections of stop words.

    :param argv: command line arguments (defaults to ``sys.argv[1:]``,
        captured once at import time)
    """
    parser = ArgumentParser(
        description='Create and merge collections of stop words')
    parser.add_argument(
        'language', help='The language used in the collection')
    parser.add_argument('sources', metavar='FILE', nargs='+',
                        help='Source files to parse')
    options = parser.parse_args(argv)
    factory = StopWordFactory()
    language = options.language
    # fail_safe=True: start from an empty collection when none exists yet.
    stop_words = factory.get_stop_words(language, fail_safe=True)
    # Merge each source file into the existing collection.
    for filename in options.sources:
        stop_words += StopWord(language, factory.read_collection(filename))
    # Persist the merged collection under the language's canonical name.
    filename = factory.get_collection_filename(stop_words.language)
    factory.write_collection(filename, stop_words.collection)
python
{ "resource": "" }
q46034
authenticate
train
def authenticate(api_key, api_url, **kwargs):
    """Returns a muddle instance, with API key and url set for requests."""
    instance = Muddle(**kwargs)
    # Login.
    instance.authenticate(api_key, api_url)
    return instance
python
{ "resource": "" }
q46035
ConfigManager.get_job_config
train
def get_job_config(conf):
    """ Extract handler names from job_conf.xml """
    # Each <handler id="..."> under <handlers> becomes one service entry.
    root = elementtree.parse(conf).getroot()
    return [
        {'service_name': handler.attrib['id']}
        for handler in root.find('handlers')
    ]
python
{ "resource": "" }
q46036
ConfigManager.__load_state
train
def __load_state(self):
    """
    Read persisted state from the JSON statefile.

    If the statefile does not exist yet, an empty one is created first.

    :return: the persisted state wrapped in :class:`ConfigState`
    :raises OSError: for any filesystem error other than a missing file
    """
    try:
        with open(self.config_state_path) as fh:
            return ConfigState(json.load(fh))
    except (OSError, IOError) as exc:
        if exc.errno == errno.ENOENT:
            # Statefile missing: create an empty one and load it.
            self.__dump_state({})
            # Bug fix: wrap in ConfigState like the normal path above,
            # instead of returning a bare dict (and close the handle).
            with open(self.config_state_path) as fh:
                return ConfigState(json.load(fh))
        raise
python
{ "resource": "" }
q46037
ConfigManager._deregister_config_file
train
def _deregister_config_file(self, key): """ Deregister a previously registered config file. The caller should ensure that it was previously registered. """ state = self.__load_state() if 'remove_configs' not in state: state['remove_configs'] = {} state['remove_configs'][key] = (state['config_files'].pop(key)) self.__dump_state(state)
python
{ "resource": "" }
q46038
ConfigManager._purge_config_file
train
def _purge_config_file(self, key): """ Forget a previously deregister config file. The caller should ensure that it was previously deregistered. """ state = self.__load_state() del state['remove_configs'][key] self.__dump_state(state)
python
{ "resource": "" }
q46039
ConfigManager.register_config_changes
train
def register_config_changes(self, configs, meta_changes):
    """ Persist config changes to the JSON state file. When a config
    changes, a process manager may perform certain actions based on
    these changes. This method can be called once the actions are
    complete.

    :param configs: mapping of config file path to pending config
        changes (keys like ``update_attribs``, ``update_services``,
        ``remove_services`` are consumed and folded into the persisted
        form)
    :param meta_changes: mapping holding ``remove_configs``, whose keys
        are config files to purge from the removal queue
    """
    # Config files whose removal has been acted upon can be forgotten.
    for config_file in meta_changes['remove_configs'].keys():
        self._purge_config_file(config_file)
    for config_file, config in configs.items():
        # Fold the pending 'update_*' entries into their persisted keys.
        if 'update_attribs' in config:
            config['attribs'] = config.pop('update_attribs')
        if 'update_instance_name' in config:
            config['instance_name'] = config.pop('update_instance_name')
        if 'update_services' in config or 'remove_services' in config:
            remove = config.pop('remove_services', [])
            services = config.pop('update_services', [])
            # need to prevent old service defs from overwriting new ones
            for service in config['services']:
                if service not in remove and service not in services:
                    services.append(service)
            config['services'] = services
        self._register_config_file(config_file, config)
python
{ "resource": "" }
q46040
ConfigManager.get_registered_configs
train
def get_registered_configs(self, instances=None):
    """
    Return the persisted values of all config files registered with the
    config manager.

    :param instances: optional collection of instance names; when given,
        only configs whose ``instance_name`` is in this collection are
        returned
    :return: mapping of config file path to persisted config data
    :rtype: dict
    """
    configs = self.state.get('config_files', {})
    if instances is not None:
        # Bug fix: build a filtered copy instead of popping entries
        # while iterating ``configs.items()`` -- mutating a dict during
        # iteration raises RuntimeError in Python 3, and popping would
        # also destructively modify the in-memory persisted state.
        configs = {
            config_file: config
            for config_file, config in configs.items()
            if config['instance_name'] in instances
        }
    return configs
python
{ "resource": "" }
q46041
ConfigManager.get_registered_instances
train
def get_registered_instances(self, include_removed=False):
    """
    Return the persisted names of all instances across all registered
    configs.

    :param include_removed: also include instances from configs queued
        for removal
    :type include_removed: bool
    :return: unique instance names, in first-seen order
    :rtype: list
    """
    rval = []
    # Bug fix: materialize the dict view into a list -- dict.values()
    # returns a view object in Python 3, which has no .extend() method.
    configs = list(self.state.get('config_files', {}).values())
    if include_removed:
        configs.extend(self.state.get('remove_configs', {}).values())
    for config in configs:
        if config['instance_name'] not in rval:
            rval.append(config['instance_name'])
    return rval
python
{ "resource": "" }
q46042
main
train
async def main():
    """`sublemon` library example!

    Runs the same pair of workloads under increasing subprocess
    concurrency limits and prints the elapsed time for each.
    """
    for c in (1, 2, 4,):
        async with Sublemon(max_concurrency=c) as s:
            start = time.perf_counter()
            # Run both workloads concurrently under the shared limiter.
            await asyncio.gather(one(s), two(s))
            end = time.perf_counter()
            print('Limiting to', c, 'concurrent subprocess(es) took',
                  end-start, 'seconds\n')
python
{ "resource": "" }
q46043
catalog_to_moc
train
def catalog_to_moc(catalog, radius, order, **kwargs):
    """
    Convert a catalog to a MOC.

    The catalog is given as an Astropy SkyCoord object containing
    multiple coordinates.  The radius of catalog entries can be given
    as an Astropy Quantity (with units), otherwise it is assumed to be
    in arcseconds.

    Any additional keyword arguments are passed on to `catalog_to_cells`.
    """
    # Build the cell collection for the catalog, then wrap it in a new
    # catalog-type MOC (skipping validation since the cells are fresh).
    moc = MOC(moctype='CATALOG')
    moc.add(order, catalog_to_cells(catalog, radius, order, **kwargs),
            no_validation=True)
    return moc
python
{ "resource": "" }
q46044
_catalog_to_cells_neighbor
train
def _catalog_to_cells_neighbor(catalog, radius, order):
    """
    Convert a catalog to a list of cells.

    This is the original implementation of the `catalog_to_cells`
    function which does not make use of the Healpy `query_disc`
    routine.

    Note: this function uses a simple flood-filling approach and is
    very slow, especially when used with a large radius for catalog
    objects or a high resolution order.
    """
    # Radius without units is assumed to be in arcseconds.
    if not isinstance(radius, Quantity):
        radius = radius * arcsecond

    nside = 2 ** order

    # Ensure catalog is in ICRS coordinates.
    catalog = catalog.icrs

    # Determine central cell for each catalog entry.
    phi = catalog.ra.radian
    theta = (pi / 2) - catalog.dec.radian

    cells = np.unique(ang2pix(nside, theta, phi, nest=True))

    # Iteratively consider the neighbors of cells within our
    # catalog regions.
    new_cells = cells
    rejected = np.array((), dtype=np.int64)
    while True:
        # Find new valid neighboring cells which we didn't already
        # consider.  (-1 marks a missing neighbor in healpy's output.)
        neighbors = np.unique(np.ravel(
            get_all_neighbours(nside, new_cells, nest=True)))

        neighbors = np.extract(
            [(x != -1) and (x not in cells) and (x not in rejected)
             for x in neighbors],
            neighbors)

        # Get the coordinates of each of these neighbors and compare them
        # to the catalog entries.
        (theta, phi) = pix2ang(nside, neighbors, nest=True)
        coords = SkyCoord(phi, (pi / 2) - theta, frame='icrs', unit='rad')

        (idx, sep2d, dist3d) = coords.match_to_catalog_sky(catalog)

        within_range = (sep2d < radius)

        # If we didn't find any new cells within range,
        # end the iterative process.
        if not np.any(within_range):
            break

        # Accept in-range neighbors as the frontier of the next
        # iteration; remember rejected ones so they are not re-checked.
        new_cells = neighbors[within_range]
        cells = np.concatenate((cells, new_cells))
        rejected = np.concatenate((
            rejected, neighbors[np.logical_not(within_range)]))

    return cells
python
{ "resource": "" }
q46045
catalog_to_cells
train
def catalog_to_cells(catalog, radius, order, include_fallback=True, **kwargs):
    """
    Convert a catalog to a set of cells.

    This function is intended to be used via `catalog_to_moc` but
    is available for separate usage.  It takes the same arguments as
    that function.

    This function uses the Healpy `query_disc` function to get a list
    of cells for each item in the catalog in turn.  Additional keyword
    arguments, if specified, are passed to `query_disc`.  This can
    include, for example, `inclusive` (set to `True` to include cells
    overlapping the radius as well as those with centers within it) and
    `fact` (to control sampling when `inclusive` is specified).

    If cells at the given order are bigger than the given radius, then
    `query_disc` may find none inside the radius.  In this case,
    if `include_fallback` is `True` (the default), the cell at each
    position is included.

    If the given radius is zero (or smaller) then Healpy `query_disc`
    is not used -- instead the fallback position is used automatically.

    :return: set of nested-scheme cell indices (plain Python ints).
    """

    nside = 2 ** order

    # Ensure catalog is in ICRS coordinates.
    catalog = catalog.icrs

    # Ensure radius is in radians.
    if isinstance(radius, Quantity):
        radius = radius.to(radian).value
    else:
        # Bare numbers are arcseconds: convert to radians.
        radius = radius * pi / (180.0 * 3600.0)

    # Convert coordinates to position vectors.
    phi = catalog.ra.radian
    theta = (pi / 2) - catalog.dec.radian

    vectors = ang2vec(theta, phi)

    # Ensure we can iterate over vectors (it might be a single position).
    if catalog.isscalar:
        vectors = [vectors]

    # Query for a list of cells for each catalog position.
    cells = set()

    for vector in vectors:
        if radius > 0.0:
            # Try "disc" query.
            vector_cells = query_disc(
                nside, vector, radius, nest=True, **kwargs)

            if vector_cells.size > 0:
                cells.update(vector_cells.tolist())
                continue

            elif not include_fallback:
                continue

        # The query didn't find anything -- include the cell at the
        # given position at least.
        cell = vec2pix(nside, vector[0], vector[1], vector[2], nest=True)

        cells.add(cell.item())

    return cells
python
{ "resource": "" }
q46046
read_ascii_catalog
train
def read_ascii_catalog(filename, format_, unit=None):
    """
    Read an ASCII catalog file using Astropy.

    This routine is used by pymoctool to load coordinates from a
    catalog file in order to generate a MOC representation.

    Columns named ``RA``/``Dec`` are read as ICRS coordinates (default
    units: hours, degrees); otherwise columns named ``Lon``/``Lat`` are
    read as galactic coordinates (default units: degrees, degrees).
    ``unit`` may be given to override the defaults.

    :raises Exception: if neither pair of columns is present.
    """

    catalog = ascii.read(filename, format=format_)

    columns = catalog.columns

    if 'RA' in columns and 'Dec' in columns:
        if unit is None:
            unit = (hour, degree)

        coords = SkyCoord(catalog['RA'],
                          catalog['Dec'],
                          unit=unit,
                          frame='icrs')

    elif 'Lat' in columns and 'Lon' in columns:
        if unit is None:
            unit = (degree, degree)

        coords = SkyCoord(catalog['Lon'],
                          catalog['Lat'],
                          unit=unit,
                          frame='galactic')

    else:
        raise Exception('columns RA,Dec or Lon,Lat not found')

    return coords
python
{ "resource": "" }
q46047
ThreadFixAPI._build_list_params
train
def _build_list_params(param_name, key, values): """Builds a list of POST parameters from a list or single value.""" params = {} if hasattr(values, '__iter__'): index = 0 for value in values: params[str(param_name) + '[' + str(index) + '].' + str(key)] = str(value) index += 1 else: params[str(param_name) + '[0].' + str(key)] = str(values) return params
python
{ "resource": "" }
q46048
ThreadFixAPI._request
train
def _request(self, method, url, params=None, files=None):
    """Common handler for all HTTP requests.

    Sends the request with the API key added to the parameters and
    normalises every outcome -- including network failures -- into a
    ThreadFixResponse object instead of raising.

    :param method: HTTP verb, e.g. 'GET' or 'POST'.
    :param url: path appended to ``self.host``.
    :param params: optional dict of request parameters.
    :param files: optional files for multipart upload.
    :return: ThreadFixResponse (``success`` is False on any failure).
    """
    if not params:
        params = {}

    # Authentication is passed as a request parameter, not a header.
    params['apiKey'] = self.api_key

    headers = {
        'User-Agent': self.user_agent,
        'Accept': 'application/json'
    }

    try:
        if self.debug:
            print(method + ' ' + url)
            print(params)

        response = requests.request(method=method, url=self.host + url,
                                    params=params, files=files,
                                    headers=headers, timeout=self.timeout,
                                    verify=self.verify_ssl,
                                    cert=self.cert)

        if self.debug:
            print(response.status_code)
            print(response.text)

        try:
            # The ThreadFix API wraps every payload in a standard
            # envelope: message / success / responseCode / object.
            json_response = response.json()

            message = json_response['message']
            success = json_response['success']
            response_code = json_response['responseCode']
            data = json_response['object']

            return ThreadFixResponse(message=message, success=success,
                                     response_code=response_code, data=data)
        except ValueError:
            # Body was not valid JSON.
            return ThreadFixResponse(
                message='JSON response could not be decoded.',
                success=False)
    except requests.exceptions.SSLError:
        return ThreadFixResponse(message='An SSL error occurred.',
                                 success=False)
    except requests.exceptions.ConnectionError:
        return ThreadFixResponse(message='A connection error occurred.',
                                 success=False)
    except requests.exceptions.Timeout:
        return ThreadFixResponse(
            message='The request timed out after ' + str(self.timeout) +
                    ' seconds.',
            success=False)
    except requests.exceptions.RequestException:
        return ThreadFixResponse(
            message='There was an error while handling the request.',
            success=False)
python
{ "resource": "" }
q46049
ThreadFixResponse.data_json
train
def data_json(self, pretty=False):
    """Serialise the response data to a JSON string.

    :param pretty: when True, produce an indented, key-sorted document.
    """
    if not pretty:
        return json.dumps(self.data)
    return json.dumps(self.data, sort_keys=True, indent=4,
                      separators=(',', ': '))
python
{ "resource": "" }
q46050
get_anchor_diff
train
def get_anchor_diff(anchor):
    """Compute the difference between an anchor and the current state
    of its source file.

    A fresh anchor is rebuilt from the same file/offset/width and its
    context is compared against the stored one.

    Returns:
        A tuple of diff lines.  If there is no difference, then this
        returns an empty tuple.
    """
    # Rebuild the anchor from the file as it exists now.  Everything
    # except the context is derived from the stored anchor, so only
    # the context can differ.
    new_anchor = make_anchor(
        file_path=anchor.file_path,
        offset=anchor.context.offset,
        width=len(anchor.context.topic),
        context_width=anchor.context.width,
        metadata=anchor.metadata)

    # Sanity checks: only the context may have changed.
    assert anchor.file_path == new_anchor.file_path
    assert anchor.context.offset == new_anchor.context.offset
    assert len(anchor.context.topic) == len(new_anchor.context.topic)
    assert anchor.metadata == new_anchor.metadata

    return tuple(
        _context_diff(
            anchor.file_path,
            anchor.context,
            new_anchor.context))
python
{ "resource": "" }
q46051
make_anchor
train
def make_anchor(file_path: pathlib.Path,
                offset: int,
                width: int,
                context_width: int,
                metadata,
                encoding: str = 'utf-8',
                handle=None):
    """Construct a new `Anchor`.

    Args:
        file_path: The absolute path to the target file for the anchor.
        offset: The offset of the anchored text in codepoints in
            `file_path`'s contents.
        width: The width in codepoints of the anchored text.
        context_width: The width in codepoints of context on either
            side of the anchor.
        metadata: The metadata to attach to the anchor. Must be
            json-serializeable.
        encoding: The encoding of the contents of `file_path`.
        handle: If not `None`, this is a file-like object the contents
            of which are used to calculate the context of the anchor.
            If `None`, then the file indicated by `file_path` is opened
            instead.

    Raises:
        ValueError: `width` characters can't be read at `offset`.
        ValueError: `file_path` is not absolute.
    """
    @contextmanager
    def get_handle():
        # Either reuse the caller-supplied file object, or open the
        # target file ourselves (closing it afterwards).
        if handle is None:
            with file_path.open(mode='rt', encoding=encoding) as fp:
                yield fp
        else:
            yield handle

    with get_handle() as fp:
        context = _make_context(fp, offset, width, context_width)

    return Anchor(
        file_path=file_path,
        encoding=encoding,
        context=context,
        metadata=metadata)
python
{ "resource": "" }
q46052
Encoder.rgba_to_int
train
def rgba_to_int(cls, red, green, blue, alpha):
    """Pack red/green/blue/alpha components into a signed 32-bit
    RGBA-encoded integer.

    Returns None if any of red, green or blue are None.  A missing
    alpha defaults to fully opaque (255).

    :return: Integer
    :rtype: int
    """
    red = unwrap(red)
    green = unwrap(green)
    blue = unwrap(blue)
    alpha = unwrap(alpha)

    if red is None or green is None or blue is None:
        return None
    if alpha is None:
        alpha = 255

    # Pack the four bytes: R in the top byte down to A at the bottom.
    packed = (red << 24) + (green << 16) + (blue << 8) + alpha

    # Reinterpret the result as a signed 32-bit integer.
    if packed > 0x7FFFFFFF:
        packed -= 0x100000000

    return packed
python
{ "resource": "" }
q46053
amerge
train
async def amerge(*agens) -> AsyncGenerator[Any, None]:
    """Thin wrapper around aiostream.stream.merge.

    Merges several async generators into one, yielding items as they
    become available from any of them.  Requires the third-party
    `aiostream` package (`stream` is imported at module level).
    """
    xs = stream.merge(*agens)
    # aiostream requires entering the stream's context before iterating.
    async with xs.stream() as streamer:
        async for x in streamer:
            yield x
python
{ "resource": "" }
q46054
crossplat_loop_run
train
def crossplat_loop_run(coro) -> Any:
    """Cross-platform method for running a subprocess-spawning coroutine.

    On Windows a ProactorEventLoop is created explicitly (presumably
    because subprocess support there requires the proactor loop --
    confirm against the target Python version) and the SIGINT handler
    is reset so Ctrl+C behaves normally.  Elsewhere a fresh default
    event loop is created and installed.  The loop is closed once the
    coroutine completes.

    :return: the coroutine's result.
    """
    if sys.platform == 'win32':
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        loop = asyncio.ProactorEventLoop()
    else:
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
    # closing() guarantees loop.close() even if the coroutine raises.
    with contextlib.closing(loop):
        return loop.run_until_complete(coro)
python
{ "resource": "" }
q46055
parse_args
train
def parse_args(args):
    '''
    Parse a list of command-line argument strings.

    Taking the argument list as a parameter keeps this testable, see
    http://stackoverflow.com/questions/18160078/
    how-do-you-write-tests-for-the-argparse-portion-of-a-python-module
    '''
    parser = argparse.ArgumentParser()
    parser.add_argument('config_file', nargs='?', default=None,
                        help='Configuration yaml file')
    parser.add_argument(
        '--log', '-l', default='INFO',
        help='Logging level (e.g. DEBUG, INFO, WARNING, ERROR, CRITICAL)')
    parsed = parser.parse_args(args)

    if parsed.config_file:
        return parsed

    # parser.error() prints the message and exits.
    parser.error("You have to specify "
                 "a configuration file")
python
{ "resource": "" }
q46056
_factory
train
def _factory(importname, base_class_type, path=None, *args, **kargs):
    '''
    Load a module of a given base class type

    Parameter
    --------
    importname: string
        Name of the module, e.g. converter
    base_class_type: class type
        E.g converter
    path: Absolute path of the module
        Needed for extensions. If not given module is in
        online_monitor package
    *args, **kargs: Arguments to pass to the object init

    Return
    ------
    Object of given base class type
    '''
    def is_base_class(item):
        # Only classes defined in the loaded module itself (not ones it
        # merely imports) count as candidates.
        return isclass(item) and item.__module__ == importname

    if path:
        # Needed to find the module in forked processes; if you know a better
        # way tell me!
        sys.path.append(path)
        # Absolute full path of python module
        absolute_path = os.path.join(path, importname) + '.py'
        module = imp.load_source(importname, absolute_path)
    else:
        module = import_module(importname)

    # Get the defined base class in the loaded module to be name independent
    clsmembers = getmembers(module, is_base_class)
    if not len(clsmembers):
        raise ValueError('Found no matching class in %s.' % importname)
    else:
        # NOTE(review): if several classes are defined in the module,
        # the first one returned by getmembers (alphabetical) wins.
        cls = clsmembers[0][1]
    return cls(*args, **kargs)
python
{ "resource": "" }
q46057
json_numpy_obj_hook
train
def json_numpy_obj_hook(dct):
    """Decodes a previously encoded numpy ndarray with proper shape and dtype.
    And decompresses the data with blosc (when blosc is available).

    Counterpart of NumpyEncoder; intended as the ``object_hook`` for
    ``json.loads``.

    :param dct: (dict) json encoded ndarray
    :return: (ndarray) if input was an encoded ndarray, otherwise the
        dict is passed through unchanged
    """
    if isinstance(dct, dict) and '__ndarray__' in dct:
        array = dct['__ndarray__']
        # http://stackoverflow.com/questions/24369666/typeerror-b1-is-not-json-serializable
        # b64decode wants bytes on Python 3.
        if sys.version_info >= (3, 0):
            array = array.encode('utf-8')
        data = base64.b64decode(array)
        if has_blosc:
            data = blosc.decompress(data)
        try:
            # Structured dtypes were serialised via str(); rebuild them
            # from their literal representation.
            dtype = np.dtype(ast.literal_eval(dct['dtype']))
        except ValueError:
            # If the array is not a recarray
            dtype = dct['dtype']
        return np.frombuffer(data, dtype).reshape(dct['shape'])
    return dct
python
{ "resource": "" }
q46058
NumpyEncoder.default
train
def default(self, obj):
    """If input object is an ndarray it will be converted into a dict
    holding dtype, shape and the data, base64 encoded and blosc
    compressed (when blosc is available).

    Any other object falls back to the standard JSONEncoder handling.
    """
    if isinstance(obj, np.ndarray):
        if obj.flags['C_CONTIGUOUS']:
            obj_data = obj.data
        else:
            # Raw buffer access requires a C-contiguous copy.
            cont_obj = np.ascontiguousarray(obj)
            assert(cont_obj.flags['C_CONTIGUOUS'])
            obj_data = cont_obj.data
        if has_blosc:
            obj_data = blosc.compress(obj_data, typesize=8)
        data_b64 = base64.b64encode(obj_data)
        # http://stackoverflow.com/questions/24369666/typeerror-b1-is-not-json-serializable
        if sys.version_info >= (3, 0):
            data_b64 = data_b64.decode('utf-8')
        return dict(__ndarray__=data_b64,
                    dtype=str(obj.dtype),
                    shape=obj.shape)
    return json.JSONEncoder.default(self, obj)
python
{ "resource": "" }
q46059
extract_all_ss_dssp
train
def extract_all_ss_dssp(in_dssp, path=True):
    """Uses DSSP to extract secondary structure information on every
    residue.

    Parameters
    ----------
    in_dssp : str
        Path to a DSSP file, or DSSP output itself (see `path`).
    path : bool, optional
        Indicates if `in_dssp` is a path or a string of DSSP output.

    Returns
    -------
    dssp_residues : [tuple]
        Each tuple contains:
        [0] int residue number, [1] str secondary structure type,
        [2] str chain identifier, [3] str residue type,
        [4] float phi torsion angle, [5] float psi torsion angle,
        [6] int DSSP solvent accessibility
    """
    if path:
        with open(in_dssp, 'r') as inf:
            dssp_out = inf.read()
    else:
        dssp_out = in_dssp[:]

    records = []
    in_table = False
    for line in dssp_out.splitlines():
        if not in_table:
            # Residue records start after the '#'-headed column header.
            if line[2] == '#':
                in_table = True
            continue
        try:
            # Fixed-column DSSP record layout.
            records.append((
                int(line[5:10].strip()),      # residue number
                line[16],                     # secondary structure type
                line[10:12].strip(),          # chain id
                line[13],                     # residue one-letter code
                float(line[103:109].strip()), # phi
                float(line[109:116].strip()), # psi
                int(line[35:38].strip()),     # solvent accessibility
            ))
        except ValueError:
            # Non-residue rows (e.g. chain breaks) fail to parse; skip.
            pass
    return records
python
{ "resource": "" }
q46060
tag_dssp_data
train
def tag_dssp_data(assembly, loop_assignments=(' ', 'B', 'S', 'T')):
    """Adds output data from DSSP to an Assembly.

    A dictionary will be added to the `tags` dictionary of each
    residue called `dssp_data`, which contains the secondary
    structure definition, solvent accessibility phi and psi values
    from DSSP. A list of regions of continuous secondary assignments
    will also be added to each `Polypeptide`.

    The tags are added in place, so nothing is returned from this
    function.

    Parameters
    ----------
    assembly : ampal.Assembly
        An Assembly containing some protein.
    loop_assignments : tuple or list
        A tuple containing the DSSP secondary structure identifiers to
        that are classed as loop regions.
    """
    # Run DSSP on the in-memory PDB string rather than a file on disk.
    dssp_out = run_dssp(assembly.pdb, path=False)
    dssp_data = extract_all_ss_dssp(dssp_out, path=False)
    for record in dssp_data:
        rnum, sstype, chid, _, phi, psi, sacc = record
        assembly[chid][str(rnum)].tags['dssp_data'] = {
            'ss_definition': sstype,
            'solvent_accessibility': sacc,
            'phi': phi,
            'psi': psi
        }
    ss_regions = find_ss_regions(dssp_data, loop_assignments)
    for region in ss_regions:
        chain = region[0][2]
        # Collapse all loop-like assignments onto the single ' ' label.
        ss_type = ' ' if region[0][1] in loop_assignments else region[0][1]
        first_residue = str(region[0][0])
        last_residue = str(region[-1][0])
        if not 'ss_regions' in assembly[chain].tags:
            assembly[chain].tags['ss_regions'] = []
        assembly[chain].tags['ss_regions'].append(
            (first_residue, last_residue, ss_type))
    return
python
{ "resource": "" }
q46061
get_ss_regions
train
def get_ss_regions(assembly, ss_types):
    """Returns an Assembly containing Polymers for each region of
    structure.

    Requires `ampal.dssp.tag_dssp_data` to have been run on the
    assembly first (it reads the 'ss_regions' chain tags).

    Parameters
    ----------
    assembly : ampal.Assembly
        `Assembly` object to be searched secondary structure regions.
    ss_types : list
        List of secondary structure tags to be separate i.e. ['H']
        would return helices, ['H', 'E'] would return helices and
        strands.

    Returns
    -------
    fragments : Assembly
        `Assembly` containing a `Polymer` for each region of specified
        secondary structure.

    Raises
    ------
    ValueError
        If no chain carries 'ss_regions' tags, or if no region matches
        the requested types.
    """
    if not any(map(lambda x: 'ss_regions' in x.tags, assembly)):
        raise ValueError(
            'This assembly does not have any tagged secondary structure '
            'regions. Use `ampal.dssp.tag_dssp_data` to add the tags.'
        )
    fragments = Assembly()
    for polypeptide in assembly:
        if 'ss_regions' in polypeptide.tags:
            for start, end, ss_type in polypeptide.tags['ss_regions']:
                if ss_type in ss_types:
                    fragment = polypeptide.get_slice_from_res_id(start, end)
                    fragments.append(fragment)
    if not fragments:
        raise ValueError('No regions matching that secondary structure type'
                         ' have been found. Use standard DSSP labels.')
    return fragments
python
{ "resource": "" }
q46062
snaql_migration
train
def snaql_migration(ctx, db_uri, migrations, app, config):
    """
    Lightweight SQL Schema migration tool based on Snaql queries

    Either a config file, or the full (db-uri, migrations, app) triple
    must be supplied on the command line.  The parsed configuration and
    an open database wrapper are stashed on the click context object
    for the subcommands.
    """
    if config:
        migrations_config = _parse_config(config)
    else:
        if db_uri and migrations and app:
            migrations_config = _generate_config(db_uri, migrations, app)
        else:
            raise click.ClickException('If --config is not set, then --db-uri, --migrations and --app must be provided')

    ctx.obj = {
        'config': migrations_config
    }

    try:
        ctx.obj['db'] = DBWrapper(ctx.obj['config']['db_uri'])
    except Exception as e:
        # Surface the underlying connection failure as a CLI error.
        raise click.ClickException('Unable to connect to database, exception is "{0}"'.format(str(e)))
python
{ "resource": "" }
q46063
show
train
def show(ctx):
    """
    Show migrations list

    Prints every configured app with its migrations, marking those
    already applied according to the migrations table in the database.
    """
    for app_name, app in ctx.obj['config']['apps'].items():
        click.echo(click.style(app_name, fg='green', bold=True))
        for migration in app['migrations']:
            applied = ctx.obj['db'].is_migration_applied(app_name, migration)
            click.echo('  {0} {1}'.format(migration,
                                          click.style('(applied)', bold=True) if applied else ''))
python
{ "resource": "" }
q46064
BaseHandler.bundle
train
def bundle(self, name: str) -> models.Bundle:
    """Fetch a bundle from the store.

    Args:
        name: unique name of the bundle.

    Returns:
        The first matching ``Bundle`` record, or ``None``.
    """
    return self.Bundle.filter_by(name=name).first()
python
{ "resource": "" }
q46065
BaseHandler.version
train
def version(self, bundle: str, date: dt.datetime) -> models.Version:
    """Fetch a version from the store.

    Looks up the version of the named bundle created at exactly
    ``date``; returns ``None`` when no such version exists.
    """
    return (self.Version.query
            .join(models.Version.bundle)
            .filter(models.Bundle.name == bundle,
                    models.Version.created_at == date)
            .first())
python
{ "resource": "" }
q46066
BaseHandler.tag
train
def tag(self, name: str) -> models.Tag:
    """Fetch a tag from the database.

    Returns the first ``Tag`` with the given name, or ``None``.
    """
    return self.Tag.filter_by(name=name).first()
python
{ "resource": "" }
q46067
BaseHandler.new_bundle
train
def new_bundle(self, name: str, created_at: dt.datetime=None) -> models.Bundle:
    """Create a new file bundle.

    The record is only instantiated here; it is not added to the
    session or committed (presumably the caller persists it -- confirm
    against call sites).
    """
    new_bundle = self.Bundle(name=name, created_at=created_at)
    return new_bundle
python
{ "resource": "" }
q46068
BaseHandler.new_version
train
def new_version(self, created_at: dt.datetime, expires_at: dt.datetime=None) -> models.Version:
    """Create a new bundle version.

    The record is only instantiated here, not added to the session
    (presumably the caller persists it -- confirm against call sites).
    """
    new_version = self.Version(created_at=created_at, expires_at=expires_at)
    return new_version
python
{ "resource": "" }
q46069
BaseHandler.new_file
train
def new_file(self, path: str, checksum: str=None, to_archive: bool=False,
             tags: List[models.Tag]=None) -> models.File:
    """Create a new file record (not added to the session here).

    Args:
        path: path of the file on disk.
        checksum: optional checksum of the file contents.
        to_archive: whether the file is marked for archiving.
        tags: optional list of ``Tag`` records to attach.
    """
    new_file = self.File(path=path, checksum=checksum,
                         to_archive=to_archive, tags=tags)
    return new_file
python
{ "resource": "" }
q46070
BaseHandler.new_tag
train
def new_tag(self, name: str, category: str=None) -> models.Tag:
    """Create a new tag record (not added to the session here).

    Args:
        name: tag name.
        category: optional tag category.
    """
    new_tag = self.Tag(name=name, category=category)
    return new_tag
python
{ "resource": "" }
q46071
BaseHandler.files
train
def files(self, *, bundle: str=None, tags: List[str]=None,
          version: int=None, path: str=None) -> models.File:
    """Fetch files from the store.

    All keyword filters are optional and combined with AND:

    Args:
        bundle: restrict to files belonging to the named bundle.
        tags: require files to carry ALL of the given tag names.
        version: restrict to files of a specific version id.
        path: restrict to an exact file path.

    Returns:
        A SQLAlchemy query over matching ``File`` records (not yet
        executed).
    """
    query = self.File.query
    if bundle:
        query = (query.join(self.File.version, self.Version.bundle)
                      .filter(self.Bundle.name == bundle))

    if tags:
        # require records to match ALL tags: group per file and keep
        # only rows whose matched-tag count equals the number requested
        query = (
            query.join(self.File.tags)
            .filter(self.Tag.name.in_(tags))
            .group_by(models.File.id)
            .having(func.count(models.Tag.name) == len(tags))
        )

    if version:
        query = query.join(self.File.version).filter(self.Version.id == version)

    if path:
        query = query.filter_by(path=path)

    return query
python
{ "resource": "" }
q46072
BaseHandler.files_before
train
def files_before(self, *, bundle: str=None, tags: List[str]=None,
                 before: str=None) -> models.File:
    """Fetch files created before a given date from the store.

    Args:
        bundle: restrict to files in the named bundle.
        tags: require files to match all given tags.
        before: date string; only files whose version was created
            strictly before this date are kept.

    Returns:
        A SQLAlchemy query over matching ``File`` records.
    """
    query = self.files(tags=tags, bundle=bundle)
    if before:
        # parse_date turns the human-readable string into a datetime.
        before_dt = parse_date(before)
        query = query.join(models.Version).filter(models.Version.created_at < before_dt)
    return query
python
{ "resource": "" }
q46073
BaseHandler.files_ondisk
train
def files_ondisk(self, file_objs: models.File) -> set:
    """Return the subset of *file_objs* whose path exists on disk.

    Note: the previous docstring claimed the opposite ("files that are
    not on disk"); the code filters for files that ARE present.

    Args:
        file_objs: iterable of ``File`` records with a ``full_path``.

    Returns:
        set: the file objects whose ``full_path`` is an existing file.
    """
    return {file_obj for file_obj in file_objs
            if Path(file_obj.full_path).is_file()}
python
{ "resource": "" }
q46074
LineBasedParser.readline
train
def readline(self):
    """Get the next line including the newline or '' on EOF."""
    self.lineno += 1
    # Lines pushed back via push_line() take priority over the stream.
    return self._buffer.pop() if self._buffer else self.input.readline()
python
{ "resource": "" }
q46075
LineBasedParser.push_line
train
def push_line(self, line):
    """Push line back onto the line buffer.

    :param line: the line with no trailing newline
    """
    # Undo the line-count increment done by readline().
    self.lineno -= 1
    self._buffer.append(b''.join((line, b'\n')))
python
{ "resource": "" }
q46076
LineBasedParser.read_bytes
train
def read_bytes(self, count):
    """Read a given number of bytes from the input stream.

    Throws MissingBytes if the bytes are not found.

    Note: This method does not read from the line buffer.

    :return: a string
    """
    data = self.input.read(count)
    # Newlines inside the data still advance the line counter.
    self.lineno += data.count(b'\n')
    if len(data) != count:
        self.abort(errors.MissingBytes, count, len(data))
    return data
python
{ "resource": "" }
q46077
LineBasedParser.read_until
train
def read_until(self, terminator):
    """Read the input stream until the terminator is found.

    Throws MissingTerminator if the terminator is not found.

    Note: This method does not read from the line buffer.

    :return: the bytes read up to but excluding the terminator.
    """
    lines = []
    term = terminator + b'\n'
    while True:
        line = self.input.readline()
        if line == term:
            break
        elif line == b'':
            # EOF before the terminator: previously this spun forever,
            # because readline() keeps returning b'' at end of stream.
            self.abort(errors.MissingTerminator, terminator)
        else:
            lines.append(line)
    return b''.join(lines)
python
{ "resource": "" }
q46078
ImportParser.iter_commands
train
def iter_commands(self):
    """Iterator returning ImportCommand objects.

    Reads the stream line by line and dispatches on each command
    keyword, yielding the corresponding command object.  Raises
    PrematureEndOfStream when the 'done' feature was declared but the
    stream ends without a 'done' command; aborts on unknown commands.
    """
    while True:
        line = self.next_line()
        if line is None:
            if b'done' in self.features:
                raise errors.PrematureEndOfStream(self.lineno)
            break
        elif len(line) == 0 or line.startswith(b'#'):
            # Blank lines and comments are skipped.
            continue
        # Search for commands in order of likelihood
        elif line.startswith(b'commit '):
            yield self._parse_commit(line[len(b'commit '):])
        elif line.startswith(b'blob'):
            yield self._parse_blob()
        elif line.startswith(b'done'):
            break
        elif line.startswith(b'progress '):
            yield commands.ProgressCommand(line[len(b'progress '):])
        elif line.startswith(b'reset '):
            yield self._parse_reset(line[len(b'reset '):])
        elif line.startswith(b'tag '):
            yield self._parse_tag(line[len(b'tag '):])
        elif line.startswith(b'checkpoint'):
            yield commands.CheckpointCommand()
        elif line.startswith(b'feature'):
            yield self._parse_feature(line[len(b'feature '):])
        else:
            self.abort(errors.InvalidCommand, line)
python
{ "resource": "" }
q46079
ImportParser.iter_file_commands
train
def iter_file_commands(self):
    """Iterator returning FileCommand objects.

    If an invalid file command is found, the line is silently pushed
    back and iteration ends (the line belongs to the enclosing
    command stream, e.g. the next commit).
    """
    while True:
        line = self.next_line()
        if line is None:
            break
        elif len(line) == 0 or line.startswith(b'#'):
            # Blank lines and comments are skipped.
            continue
        # Search for file commands in order of likelihood
        elif line.startswith(b'M '):
            yield self._parse_file_modify(line[2:])
        elif line.startswith(b'D '):
            path = self._path(line[2:])
            yield commands.FileDeleteCommand(path)
        elif line.startswith(b'R '):
            old, new = self._path_pair(line[2:])
            yield commands.FileRenameCommand(old, new)
        elif line.startswith(b'C '):
            src, dest = self._path_pair(line[2:])
            yield commands.FileCopyCommand(src, dest)
        elif line.startswith(b'deleteall'):
            yield commands.FileDeleteAllCommand()
        else:
            # Not a file command: hand the line back and stop.
            self.push_line(line)
            break
python
{ "resource": "" }
q46080
ImportParser._parse_blob
train
def _parse_blob(self):
    """Parse a blob command.

    Grammar: 'blob' [mark] data.  Returns a BlobCommand carrying the
    optional mark, the raw data and the starting line number.
    """
    lineno = self.lineno
    mark = self._get_mark_if_any()
    data = self._get_data(b'blob')
    return commands.BlobCommand(mark, data, lineno)
python
{ "resource": "" }
q46081
ImportParser._parse_commit
train
def _parse_commit(self, ref):
    """Parse a commit command.

    Reads the optional mark, author(s), committer, message, optional
    'from' and 'merge' sections, any 'property' extensions and the
    trailing file commands, and bundles them into a CommitCommand.
    """
    lineno = self.lineno
    mark = self._get_mark_if_any()
    author = self._get_user_info(b'commit', b'author', False)
    more_authors = []
    # Collect any additional author lines (non-standard but tolerated).
    while True:
        another_author = self._get_user_info(b'commit', b'author', False)
        if another_author is not None:
            more_authors.append(another_author)
        else:
            break
    committer = self._get_user_info(b'commit', b'committer')
    message = self._get_data(b'commit', b'message')
    from_ = self._get_from()
    merges = []
    while True:
        merge = self._get_merge()
        if merge is not None:
            # while the spec suggests it's illegal, git-fast-export
            # outputs multiple merges on the one line, e.g.
            # merge :x :y :z
            these_merges = merge.split(b' ')
            merges.extend(these_merges)
        else:
            break
    properties = {}
    while True:
        name_value = self._get_property()
        if name_value is not None:
            name, value = name_value
            properties[name] = value
        else:
            break
    return commands.CommitCommand(ref, mark, author, committer, message,
                                  from_, merges,
                                  list(self.iter_file_commands()),
                                  lineno=lineno,
                                  more_authors=more_authors,
                                  properties=properties)
python
{ "resource": "" }
q46082
ImportParser._parse_feature
train
def _parse_feature(self, info):
    """Parse a feature command.

    Features may be bare names or 'name=value' pairs.  The value is
    recorded in self.features for later lookups (e.g. b'done').
    """
    parts = info.split(b'=', 1)
    name = parts[0]
    if len(parts) > 1:
        value = self._path(parts[1])
    else:
        value = None
    self.features[name] = value
    return commands.FeatureCommand(name, value, lineno=self.lineno)
python
{ "resource": "" }
q46083
ImportParser._parse_file_modify
train
def _parse_file_modify(self, info):
    """Parse a filemodify command within a commit.

    :param info: a string in the format "mode dataref path"
        (where dataref might be the hard-coded literal 'inline',
        in which case an inline data section follows).
    """
    params = info.split(b' ', 2)
    path = self._path(params[2])
    mode = self._mode(params[0])
    if params[1] == b'inline':
        dataref = None
        data = self._get_data(b'filemodify')
    else:
        dataref = params[1]
        data = None
    return commands.FileModifyCommand(path, mode, dataref, data)
python
{ "resource": "" }
q46084
ImportParser._parse_reset
train
def _parse_reset(self, ref):
    """Parse a reset command for *ref*, with its optional from section."""
    from_ = self._get_from()
    return commands.ResetCommand(ref, from_)
python
{ "resource": "" }
q46085
ImportParser._parse_tag
train
def _parse_tag(self, name):
    """Parse a tag command.

    A tag requires a 'from' section; the tagger may be given as a bare
    identity without a timestamp (accept_just_who=True).
    """
    from_ = self._get_from(b'tag')
    tagger = self._get_user_info(b'tag', b'tagger', accept_just_who=True)
    message = self._get_data(b'tag', b'message')
    return commands.TagCommand(name, from_, tagger, message)
python
{ "resource": "" }
q46086
ImportParser._get_mark_if_any
train
def _get_mark_if_any(self): """Parse a mark section.""" line = self.next_line() if line.startswith(b'mark :'): return line[len(b'mark :'):] else: self.push_line(line) return None
python
{ "resource": "" }
q46087
ImportParser._get_from
train
def _get_from(self, required_for=None): """Parse a from section.""" line = self.next_line() if line is None: return None elif line.startswith(b'from '): return line[len(b'from '):] elif required_for: self.abort(errors.MissingSection, required_for, 'from') else: self.push_line(line) return None
python
{ "resource": "" }
q46088
ImportParser._get_merge
train
def _get_merge(self): """Parse a merge section.""" line = self.next_line() if line is None: return None elif line.startswith(b'merge '): return line[len(b'merge '):] else: self.push_line(line) return None
python
{ "resource": "" }
q46089
ImportParser._get_property
train
def _get_property(self): """Parse a property section.""" line = self.next_line() if line is None: return None elif line.startswith(b'property '): return self._name_value(line[len(b'property '):]) else: self.push_line(line) return None
python
{ "resource": "" }
q46090
ImportParser._get_user_info
train
def _get_user_info(self, cmd, section, required=True,
                   accept_just_who=False):
    """Parse a user (author/committer/tagger) section.

    Returns an Authorship tuple via _who_when, or None when the
    section is absent and not required.  Aborts with MissingSection
    when a required section is missing.
    """
    line = self.next_line()
    if line.startswith(section + b' '):
        return self._who_when(line[len(section + b' '):], cmd, section,
                              accept_just_who=accept_just_who)
    elif required:
        self.abort(errors.MissingSection, cmd, section)
    else:
        self.push_line(line)
        return None
python
{ "resource": "" }
q46091
ImportParser._get_data
train
def _get_data(self, required_for, section=b'data'):
    """Parse a data section.

    Supports both the counted form ('data <n>' followed by n raw
    bytes) and the delimited form ('data <<TERM').  Aborts with
    MissingSection when the section is absent.
    """
    line = self.next_line()
    if line.startswith(b'data '):
        rest = line[len(b'data '):]
        if rest.startswith(b'<<'):
            # Delimited format: read until the terminator line.
            return self.read_until(rest[2:])
        else:
            size = int(rest)
            read_bytes = self.read_bytes(size)
            # optional LF after data.
            next_line = self.input.readline()
            self.lineno += 1
            if len(next_line) > 1 or next_line != b'\n':
                # Not the optional blank line: push it back (minus LF).
                self.push_line(next_line[:-1])
            return read_bytes
    else:
        self.abort(errors.MissingSection, required_for, section)
python
{ "resource": "" }
q46092
ImportParser._who_when
train
def _who_when(self, s, cmd, section, accept_just_who=False):
    """Parse who and when information from a string.

    :return: a tuple of (name,email,timestamp,timezone). name may be
        the empty string if only an email address was given.
    """
    match = _WHO_AND_WHEN_RE.search(s)
    if match:
        datestr = match.group(3).lstrip()
        if self.date_parser is None:
            # auto-detect the date format
            if len(datestr.split(b' ')) == 2:
                date_format = 'raw'
            elif datestr == b'now':
                date_format = 'now'
            else:
                date_format = 'rfc2822'
            self.date_parser = dates.DATE_PARSERS_BY_NAME[date_format]
        try:
            when = self.date_parser(datestr, self.lineno)
        except ValueError:
            print("failed to parse datestr '%s'" % (datestr,))
            raise
        name = match.group(1).rstrip()
        email = match.group(2)
    else:
        match = _WHO_RE.search(s)
        if accept_just_who and match:
            # HACK around missing time
            # TODO: output a warning here
            when = dates.DATE_PARSERS_BY_NAME['now']('now')
            name = match.group(1)
            email = match.group(2)
        elif self.strict:
            self.abort(errors.BadFormat, cmd, section, s)
        else:
            # Lenient mode: treat the whole string as the name.
            name = s
            email = None
            when = dates.DATE_PARSERS_BY_NAME['now']('now')
    if len(name) > 0:
        if name.endswith(b' '):
            name = name[:-1]

    # While it shouldn't happen, some datasets have email addresses
    # which contain unicode characters. See bug 338186. We sanitize
    # the data at this level just in case.
    if self.user_mapper:
        name, email = self.user_mapper.map_name_and_email(name, email)

    return Authorship(name, email, when[0], when[1])
python
{ "resource": "" }
q46093
ImportParser._path
train
def _path(self, s): """Parse a path.""" if s.startswith(b'"'): if not s.endswith(b'"'): self.abort(errors.BadFormat, '?', '?', s) else: return _unquote_c_string(s[1:-1]) return s
python
{ "resource": "" }
q46094
ImportParser._path_pair
train
def _path_pair(self, s): """Parse two paths separated by a space.""" # TODO: handle a space in the first path if s.startswith(b'"'): parts = s[1:].split(b'" ', 1) else: parts = s.split(b' ', 1) if len(parts) != 2: self.abort(errors.BadFormat, '?', '?', s) elif parts[1].startswith(b'"') and parts[1].endswith(b'"'): parts[1] = parts[1][1:-1] elif parts[1].startswith(b'"') or parts[1].endswith(b'"'): self.abort(errors.BadFormat, '?', '?', s) return [_unquote_c_string(s) for s in parts]
python
{ "resource": "" }
q46095
ImportParser._mode
train
def _mode(self, s): """Check file mode format and parse into an int. :return: mode as integer """ # Note: Output from git-fast-export slightly different to spec if s in [b'644', b'100644', b'0100644']: return 0o100644 elif s in [b'755', b'100755', b'0100755']: return 0o100755 elif s in [b'040000', b'0040000']: return 0o40000 elif s in [b'120000', b'0120000']: return 0o120000 elif s in [b'160000', b'0160000']: return 0o160000 else: self.abort(errors.BadFormat, 'filemodify', 'mode', s)
python
{ "resource": "" }
q46096
common_directory
train
def common_directory(paths):
    """Find the deepest common directory of a list of paths.

    :return: if no paths are provided, None is returned;
        if there is no common directory, '' is returned;
        otherwise the common directory with a trailing / is returned.
    """
    import posixpath

    def get_dir_with_slash(path):
        # Normalise a path to its directory part with a trailing '/'.
        if path == b'' or path.endswith(b'/'):
            return path
        else:
            dirname, basename = posixpath.split(path)
            if dirname == b'':
                return dirname
            else:
                return dirname + b'/'

    if not paths:
        return None
    elif len(paths) == 1:
        return get_dir_with_slash(paths[0])
    else:
        # Fold the pairwise common-prefix helper over all paths.
        common = common_path(paths[0], paths[1])
        for path in paths[2:]:
            common = common_path(common, path)
        return get_dir_with_slash(common)
python
{ "resource": "" }
q46097
is_inside
train
def is_inside(directory, fname):
    """True if fname is inside directory.

    The parameters should typically be passed to osutils.normpath
    first, so that . and .. and repeated slashes are eliminated, and
    the separators are canonical for the platform.

    The empty string as a dir name is taken as top-of-tree and matches
    everything.
    """
    # XXX: Most callers of this can actually do something smarter by
    # looking at the inventory
    if directory in (fname, b''):
        return True
    prefix = directory if directory.endswith(b'/') else directory + b'/'
    return fname.startswith(prefix)
python
{ "resource": "" }
q46098
is_inside_any
train
def is_inside_any(dir_list, fname):
    """True if fname is inside any of given dirs."""
    return any(is_inside(dirname, fname) for dirname in dir_list)
python
{ "resource": "" }
q46099
binary_stream
train
def binary_stream(stream):
    """Ensure a stream is binary on Windows.

    On POSIX systems this is a no-op.  On Windows the C runtime is
    switched to O_BINARY mode for the stream's file descriptor so no
    newline translation occurs.  Streams without a usable descriptor
    are left untouched.

    :return: the stream
    """
    try:
        import os
        if os.name == 'nt':
            fileno = getattr(stream, 'fileno', None)
            if fileno:
                no = fileno()
                if no >= 0:  # -1 means we're working as subprocess
                    import msvcrt
                    msvcrt.setmode(no, os.O_BINARY)
    except ImportError:
        pass
    return stream
python
{ "resource": "" }