def ask_yesno(msg="Proceed?", dft=None):
    """Prompts the user for a yes or no answer. Returns True for yes, False for no."""
    yes = ["y", "yes", "Y", "YES"]
    no = ["n", "no", "N", "NO"]
    if dft is not None:
        dft = yes[0] if (dft in yes or dft == True) else no[0]
    return ask(msg, dft=dft, vld=yes+no) in yes
Prompts the user for a yes or no answer. Returns True for yes, False for no.
entailment
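A minimal usage sketch for the prompt above; it assumes the function is importable from the qprompt module that provides the underlying ask() machinery:

import qprompt  # assumed import; ask_yesno builds on qprompt's ask() helper

if qprompt.ask_yesno("Overwrite existing file?", dft="n"):
    print("overwriting")
else:
    print("skipped")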
def ask_int(msg="Enter an integer", dft=None, vld=None, hlp=None):
    """Prompts the user for an integer."""
    vld = vld or [int]
    return ask(msg, dft=dft, vld=vld, fmt=partial(cast, typ=int), hlp=hlp)
Prompts the user for an integer.
entailment
def ask_float(msg="Enter a float", dft=None, vld=None, hlp=None):
    """Prompts the user for a float."""
    vld = vld or [float]
    return ask(msg, dft=dft, vld=vld, fmt=partial(cast, typ=float), hlp=hlp)
Prompts the user for a float.
entailment
def ask_str(msg="Enter a string", dft=None, vld=None, shw=True, blk=True, hlp=None):
    """Prompts the user for a string."""
    vld = vld or [str]
    return ask(msg, dft=dft, vld=vld, shw=shw, blk=blk, hlp=hlp)
Prompts the user for a string.
entailment
def ask_captcha(length=4):
    """Prompts the user for a random string."""
    captcha = "".join(random.choice(string.ascii_lowercase) for _ in range(length))
    ask_str('Enter the following letters, "%s"' % (captcha),
            vld=[captcha, captcha.upper()], blk=False)
Prompts the user for a random string.
entailment
def clear():
    """Clears the console."""
    if sys.platform.startswith("win"):
        call("cls", shell=True)
    else:
        call("clear", shell=True)
Clears the console.
entailment
def status(*args, **kwargs):
    """Prints a status message at the start and finish of an associated
    function. Can be used as a function decorator or as a function that
    accepts another function as the first parameter.

    **Params**:
    The following parameters are available when used as a decorator:
      - msg (str) [args] - Message to print at start of `func`.

    The following parameters are available when used as a function:
      - msg (str) [args] - Message to print at start of `func`.
      - func (func) - Function to call. First `args` if using `status()` as a
        function. Automatically provided if using `status()` as a decorator.
      - fargs (list) - List of `args` passed to `func`.
      - fkrgs (dict) - Dictionary of `kwargs` passed to `func`.
      - fin (str) [kwargs] - Message to print when `func` finishes.

    **Examples**: ::

        @qprompt.status("Something is happening...")
        def do_something(a):
            time.sleep(a)
        do_something()
        # [!] Something is happening... DONE.

        qprompt.status("Doing a thing...", myfunc, [arg1], {krgk: krgv})
        # [!] Doing a thing... DONE.
    """
    def decor(func):
        @wraps(func)
        def wrapper(*args, **krgs):
            echo("[!] " + msg, end=" ", flush=True)
            result = func(*args, **krgs)
            echo(fin, flush=True)
            return result
        return wrapper
    fin = kwargs.pop('fin', "DONE.")
    args = list(args)
    if len(args) > 1 and callable(args[1]):
        msg = args.pop(0)
        func = args.pop(0)
        try:
            fargs = args.pop(0)
        except IndexError:
            fargs = []
        try:
            fkrgs = args.pop(0)
        except IndexError:
            fkrgs = {}
        return decor(func)(*fargs, **fkrgs)
    msg = args.pop(0)
    return decor
Prints a status message at the start and finish of an associated function. Can be used as a function decorator or as a function that accepts another function as the first parameter. **Params**: The following parameters are available when used as a decorator: - msg (str) [args] - Message to print at start of `func`. The following parameters are available when used as a function: - msg (str) [args] - Message to print at start of `func`. - func (func) - Function to call. First `args` if using `status()` as a function. Automatically provided if using `status()` as a decorator. - fargs (list) - List of `args` passed to `func`. - fkrgs (dict) - Dictionary of `kwargs` passed to `func`. - fin (str) [kwargs] - Message to print when `func` finishes. **Examples**: :: @qprompt.status("Something is happening...") def do_something(a): time.sleep(a) do_something() # [!] Something is happening... DONE. qprompt.status("Doing a thing...", myfunc, [arg1], {krgk: krgv}) # [!] Doing a thing... DONE.
entailment
def fatal(msg, exitcode=1, **kwargs):
    """Prints a message then exits the program. Optionally pause before exit
    with `pause=True` kwarg."""
    # NOTE: Can't use a normal arg named `pause` since a function of the same
    # name is called below.
    pause_before_exit = kwargs.pop("pause", False)
    echo("[FATAL] " + msg, **kwargs)
    if pause_before_exit:
        pause()
    sys.exit(exitcode)
Prints a message then exits the program. Optionally pause before exit with `pause=True` kwarg.
entailment
def hrule(width=None, char=None):
    """Outputs or returns a horizontal line of the given character and width.
    Returns printed string."""
    width = width or HRWIDTH
    char = char or HRCHAR
    return echo(getline(char, width))
Outputs or returns a horizontal line of the given character and width. Returns printed string.
entailment
def title(msg):
    """Sets the title of the console window."""
    if sys.platform.startswith("win"):
        ctypes.windll.kernel32.SetConsoleTitleW(tounicode(msg))
Sets the title of the console window.
entailment
def wrap(item, args=None, krgs=None, **kwargs):
    """Wraps the given item content between horizontal lines. Item can be a
    string or a function.

    **Examples**: ::

        qprompt.wrap("Hi, this will be wrapped.")  # String item.
        qprompt.wrap(myfunc, [arg1, arg2], {'krgk': krgv})  # Func item.
    """
    with Wrap(**kwargs):
        if callable(item):
            args = args or []
            krgs = krgs or {}
            item(*args, **krgs)
        else:
            echo(item)
Wraps the given item content between horizontal lines. Item can be a string or a function. **Examples**: :: qprompt.wrap("Hi, this will be wrapped.") # String item. qprompt.wrap(myfunc, [arg1, arg2], {'krgk': krgv}) # Func item.
entailment
def _guess_name(desc, taken=None):
    """Attempts to guess a unique menu entry name from the given description."""
    taken = taken or []
    name = ""
    # Try to find the shortest name based on the given description.
    for word in desc.split():
        c = word[0].lower()
        if not c.isalnum():
            continue
        name += c
        if name not in taken:
            break
    # If the name is still taken, add a number postfix.
    count = 2
    while name in taken:
        name = name + str(count)
        count += 1
    return name
Attempts to guess a unique menu entry name from the given description.
entailment
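A short worked trace of the guessing logic above, assuming _guess_name is in scope; names grow one word-initial at a time until unique, then get a numeric suffix:

taken = []
for desc in ["Open file", "Other file", "Open folder"]:
    name = _guess_name(desc, taken)
    taken.append(name)
    print(desc, "->", name)
# Open file   -> o
# Other file  -> of
# Open folder -> of2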
def add(self, name, desc, func=None, args=None, krgs=None):
    """Add a menu entry."""
    self.entries.append(MenuEntry(name, desc, func, args or [], krgs or {}))
Add a menu entry.
entailment
def enum(self, desc, func=None, args=None, krgs=None):
    """Add a menu entry whose name will be an auto-indexed number."""
    name = str(len(self.entries) + 1)
    self.entries.append(MenuEntry(name, desc, func, args or [], krgs or {}))
Add a menu entry whose name will be an auto-indexed number.
entailment
def show(self, **kwargs):
    """Shows the menu. Any `kwargs` supplied will be passed to `show_menu()`."""
    show_kwargs = copy.deepcopy(self._show_kwargs)
    show_kwargs.update(kwargs)
    return show_menu(self.entries, **show_kwargs)
Shows the menu. Any `kwargs` supplied will be passed to `show_menu()`.
entailment
def run(self, name):
    """Runs the function associated with the given entry `name`."""
    for entry in self.entries:
        if entry.name == name:
            run_func(entry)
            break
Runs the function associated with the given entry `name`.
entailment
def main(self, auto=None, loop=False, quit=("q", "Quit"), **kwargs):
    """Runs the standard menu main logic. Any `kwargs` supplied will be
    passed to `Menu.show()`. If `argv` is provided to the script, it will be
    used as the `auto` parameter.

    **Params**:
      - auto ([str]) - If provided, the list of strings will be used as input
        for the menu prompts.
      - loop (bool) - If true, the menu will loop until quit.
      - quit ((str,str)) - If provided, adds a quit option to the menu.
    """
    def _main():
        global _AUTO
        if quit:
            if self.entries[-1][:2] != quit:
                self.add(*quit, func=lambda: quit[0])
        if stdin_auto.auto:
            _AUTO = True
        result = None
        if loop:
            note = "Menu loops until quit."
            try:
                while True:
                    mresult = self.show(note=note, **kwargs)
                    if mresult in quit:
                        break
                    result = mresult
            except EOFError:
                pass
            return result
        else:
            note = "Menu does not loop, single entry."
            result = self.show(note=note, **kwargs)
            return result
    global _AUTO
    if _AUTO:
        return _main()
    else:
        with stdin_auto:
            return _main()
Runs the standard menu main logic. Any `kwargs` supplied will be passed to `Menu.show()`. If `argv` is provided to the script, it will be used as the `auto` parameter. **Params**: - auto ([str]) - If provided, the list of strings will be used as input for the menu prompts. - loop (bool) - If true, the menu will loop until quit. - quit ((str,str)) - If provided, adds a quit option to the menu.
entailment
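A minimal sketch of how the Menu methods above compose, assuming the qprompt Menu class they belong to:

from qprompt import Menu  # assumed import path

menu = Menu()
menu.add("o", "Open", func=lambda: print("opened"))
menu.enum("Save", func=lambda: print("saved"))  # auto-named "2"
menu.main(loop=True)  # re-shows the menu until the "q" (Quit) entry is chosen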
def partition_scripts(scripts, start_type1, start_type2):
    """Return two lists of scripts partitioned from the original `scripts`
    list. Scripts that begin with a `start_type1` or `start_type2` block are
    returned first. All other scripts are returned second.
    """
    match, other = [], []
    for script in scripts:
        if (HairballPlugin.script_start_type(script) == start_type1 or
                HairballPlugin.script_start_type(script) == start_type2):
            match.append(script)
        else:
            other.append(script)
    return match, other
Return two lists of scripts partitioned from the original `scripts` list. Scripts that begin with a `start_type1` or `start_type2` block are returned first. All other scripts are returned second.
entailment
def attribute_result(cls, sprites):
    """Return a mapping of each attribute to whether it was initialized."""
    retval = dict((x, True) for x in cls.ATTRIBUTES)
    for properties in sprites.values():
        for attribute, state in properties.items():
            retval[attribute] &= state != cls.STATE_MODIFIED
    return retval
Return a mapping of each attribute to whether it was initialized.
entailment
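A self-contained trace of the aggregation above, with hypothetical state constants standing in for the plugin's; an attribute maps to True only if no sprite left it in the modified state:

STATE_NOT_MODIFIED, STATE_MODIFIED, STATE_INITIALIZED = 0, 1, 2  # stand-ins
ATTRIBUTES = ('costume', 'position')

sprites = {
    'Sprite1': {'costume': STATE_INITIALIZED, 'position': STATE_MODIFIED},
    'Sprite2': {'costume': STATE_NOT_MODIFIED, 'position': STATE_INITIALIZED},
}
result = dict((x, True) for x in ATTRIBUTES)
for properties in sprites.values():
    for attribute, state in properties.items():
        result[attribute] &= state != STATE_MODIFIED
print(result)  # {'costume': True, 'position': False}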
def attribute_state(cls, scripts, attribute):
    """Return the state of the scripts for the given attribute.

    If there is more than one 'when green flag clicked' script and they both
    modify the attribute, then the attribute is considered to not be
    initialized.
    """
    green_flag, other = partition_scripts(scripts, cls.HAT_GREEN_FLAG,
                                          cls.HAT_CLONE)
    block_set = cls.BLOCKMAPPING[attribute]
    state = cls.STATE_NOT_MODIFIED
    # TODO: Any regular broadcast blocks encountered in the initialization
    # zone should be added to this loop for conflict checking.
    for script in green_flag:
        in_zone = True
        for name, level, _ in cls.iter_blocks(script.blocks):
            if name == 'broadcast %s and wait':
                # TODO: Follow the broadcast and wait scripts that occur in
                # the initialization zone
                in_zone = False
            if (name, 'absolute') in block_set:
                if in_zone and level == 0:  # Success!
                    if state == cls.STATE_NOT_MODIFIED:
                        state = cls.STATE_INITIALIZED
                    else:  # Multiple when green flag clicked conflict
                        state = cls.STATE_MODIFIED
                elif in_zone:
                    continue  # Conservative ignore for nested absolutes
                else:
                    state = cls.STATE_MODIFIED
                break  # The state of the script has been determined
            elif (name, 'relative') in block_set:
                state = cls.STATE_MODIFIED
                break
        if state != cls.STATE_NOT_MODIFIED:
            return state
    # Check the other scripts to see if the attribute was ever modified
    for script in other:
        for name, _, _ in cls.iter_blocks(script.blocks):
            if name in [x[0] for x in block_set]:
                return cls.STATE_MODIFIED
    return cls.STATE_NOT_MODIFIED
Return the state of the scripts for the given attribute. If there is more than one 'when green flag clicked' script and they both modify the attribute, then the attribute is considered to not be initialized.
entailment
def output_results(cls, sprites):
    """Output whether or not each attribute was correctly initialized.

    Attributes that were not modified at all are considered to be properly
    initialized.
    """
    print(' '.join(cls.ATTRIBUTES))
    format_strs = ['{{{}!s:^{}}}'.format(x, len(x)) for x in cls.ATTRIBUTES]
    print(' '.join(format_strs).format(**cls.attribute_result(sprites)))
Output whether or not each attribute was correctly initialized. Attributes that were not modified at all are considered to be properly initialized.
entailment
def sprite_changes(cls, sprite):
    """Return a mapping of attributes to their initialization state."""
    retval = dict((x, cls.attribute_state(sprite.scripts, x))
                  for x in (x for x in cls.ATTRIBUTES if x != 'background'))
    return retval
Return a mapping of attributes to their initialization state.
entailment
def analyze(self, scratch, **kwargs):
    """Run and return the results of the AttributeInitialization plugin."""
    changes = dict((x.name, self.sprite_changes(x)) for x in scratch.sprites)
    changes['stage'] = {
        'background': self.attribute_state(scratch.stage.scripts, 'costume')}
    # self.output_results(changes)
    return {'initialized': changes}
Run and return the results of the AttributeInitialization plugin.
entailment
def variable_state(cls, scripts, variables):
    """Return the initialization state for each variable in variables.

    The state is determined based on the scripts passed in via the scripts
    parameter. If there is more than one 'when green flag clicked' script and
    they both modify the attribute, then the attribute is considered to not
    be initialized.
    """
    def conditionally_set_not_modified():
        """Set the variable to modified if it hasn't been altered."""
        state = variables.get(block.args[0], None)
        if state == cls.STATE_NOT_MODIFIED:
            variables[block.args[0]] = cls.STATE_MODIFIED

    green_flag, other = partition_scripts(scripts, cls.HAT_GREEN_FLAG)
    variables = dict((x, cls.STATE_NOT_MODIFIED) for x in variables)
    for script in green_flag:
        in_zone = True
        for name, level, block in cls.iter_blocks(script.blocks):
            if name == 'broadcast %s and wait':
                in_zone = False
            if name == 'set %s effect to %s':
                state = variables.get(block.args[0], None)
                if state is None:
                    continue  # Not a variable we care about
                if in_zone and level == 0:  # Success!
                    if state == cls.STATE_NOT_MODIFIED:
                        state = cls.STATE_INITIALIZED
                    else:  # Multiple when green flag clicked conflict
                        # TODO: Need to allow multiple sets of a variable
                        # within the same script
                        # print 'CONFLICT', script
                        state = cls.STATE_MODIFIED
                elif in_zone:
                    continue  # Conservative ignore for nested absolutes
                elif state == cls.STATE_NOT_MODIFIED:
                    state = cls.STATE_MODIFIED
                variables[block.args[0]] = state
            elif name == 'change %s effect by %s':
                conditionally_set_not_modified()
    for script in other:
        for name, _, block in cls.iter_blocks(script.blocks):
            if name in ('change %s effect by %s', 'set %s effect to %s'):
                conditionally_set_not_modified()
    return variables
Return the initialization state for each variable in variables. The state is determined based on the scripts passed in via the scripts parameter. If there is more than one 'when green flag clicked' script and they both modify the attribute, then the attribute is considered to not be initialized.
entailment
def analyze(self, scratch, **kwargs):
    """Run and return the results of the VariableInitialization plugin."""
    variables = dict((x, self.variable_state(x.scripts, x.variables))
                     for x in scratch.sprites)
    variables['global'] = self.variable_state(self.iter_scripts(scratch),
                                              scratch.stage.variables)
    # Output for now
    import pprint
    pprint.pprint(variables)
    return {'variables': variables}
Run and return the results of the VariableInitialization plugin.
entailment
def finalize(self):
    """Output the default sprite names found in the project."""
    print('{} default sprite names found:'.format(self.total_default))
    for name in self.list_default:
        print(name)
Output the default sprite names found in the project.
entailment
def analyze(self, scratch, **kwargs):
    """Run and return the results from the SpriteNaming plugin."""
    for sprite in self.iter_sprites(scratch):
        for default in self.default_names:
            if default in sprite.name:
                self.total_default += 1
                self.list_default.append(sprite.name)
Run and return the results from the SpriteNaming plugin.
entailment
def get_tunneling(handler, registry):
    """ Allows all methods to be tunneled via GET for dev/debugging purposes. """
    log.info('get_tunneling enabled')

    def get_tunneling(request):
        if request.method == 'GET':
            method = request.GET.pop('_m', 'GET')
            request.method = method
            if method in ['POST', 'PUT', 'PATCH']:
                get_params = request.GET.mixed()
                valid_params = drop_reserved_params(get_params)
                request.body = six.b(json.dumps(valid_params))
                request.content_type = 'application/json'
                request._tunneled_get = True
        return handler(request)

    return get_tunneling
Allows all methods to be tunneled via GET for dev/debugging purposes.
entailment
def enable_selfalias(config, id_name):
    """ This allows replacing id_name with "self", e.g.
    /users/joe/account == /users/self/account if joe is in the session as an
    authorized user.
    """
    def context_found_subscriber(event):
        request = event.request
        user = getattr(request, 'user', None)
        if (request.matchdict and
                request.matchdict.get(id_name, None) == 'self' and
                user):
            request.matchdict[id_name] = user.username

    config.add_subscriber(context_found_subscriber, ContextFound)
This allows replacing id_name with "self". e.g. /users/joe/account == /users/self/account if joe is in the session as an authorized user
entailment
def convert_dotted(params):
    """ Convert dotted keys in :params: dictset to a nested dictset.

    E.g. {'settings.foo': 'bar'} -> {'settings': {'foo': 'bar'}}
    """
    if not isinstance(params, dictset):
        params = dictset(params)
    dotted_items = {k: v for k, v in params.items() if '.' in k}
    if dotted_items:
        dicts = [str2dict(key, val) for key, val in dotted_items.items()]
        dotted = six.functools.reduce(merge_dicts, dicts)
        params = params.subset(['-' + k for k in dotted_items.keys()])
        params.update(dict(dotted))
    return params
Convert dotted keys in :params: dictset to a nested dictset. E.g. {'settings.foo': 'bar'} -> {'settings': {'foo': 'bar'}}
entailment
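The same transformation sketched with plain dicts; the real code leans on nefertari's dictset, str2dict, and merge_dicts helpers, so the nesting step is re-implemented here for illustration:

def nest_key(key, value):
    # 'a.b.c', 1 -> {'a': {'b': {'c': 1}}}
    for part in reversed(key.split('.')):
        value = {part: value}
    return value

params = {'settings.foo': 'bar', 'name': 'joe'}
nested = {k: v for k, v in params.items() if '.' not in k}
for key, val in params.items():
    if '.' in key:
        nested.update(nest_key(key, val))  # note: does not deep-merge overlapping keys
print(nested)  # {'name': 'joe', 'settings': {'foo': 'bar'}}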
def prepare_request_params(self, _query_params, _json_params):
    """ Prepare query and update params. """
    self._query_params = dictset(
        _query_params or self.request.params.mixed())
    self._json_params = dictset(_json_params)
    ctype = self.request.content_type
    if self.request.method in ['POST', 'PUT', 'PATCH']:
        if ctype == 'application/json':
            try:
                self._json_params.update(self.request.json)
            except simplejson.JSONDecodeError:
                log.error(
                    "Expecting JSON. Received: '{}'. "
                    "Request: {} {}".format(
                        self.request.body, self.request.method,
                        self.request.url))
        self._json_params = BaseView.convert_dotted(self._json_params)
        self._query_params = BaseView.convert_dotted(self._query_params)
    self._params = self._query_params.copy()
    self._params.update(self._json_params)
Prepare query and update params.
entailment
def set_override_rendered(self):
    """ Set self.request.override_renderer if needed. """
    if '' in self.request.accept:
        self.request.override_renderer = self._default_renderer
    elif 'application/json' in self.request.accept:
        self.request.override_renderer = 'nefertari_json'
    elif 'text/plain' in self.request.accept:
        self.request.override_renderer = 'string'
Set self.request.override_renderer if needed.
entailment
def _setup_aggregation(self, aggregator=None):
    """ Wrap the `self.index` method with ESAggregator.

    This makes `self.index` first try to run an aggregation; only if that
    fails is the original method run. The method is wrapped only if it is
    defined and the `elasticsearch.enable_aggregations` setting is true.
    """
    from nefertari.elasticsearch import ES
    if aggregator is None:
        aggregator = ESAggregator
    aggregations_enabled = (
        ES.settings and ES.settings.asbool('enable_aggregations'))
    if not aggregations_enabled:
        log.debug('Elasticsearch aggregations are not enabled')
        return
    index = getattr(self, 'index', None)
    index_defined = index and index != self.not_allowed_action
    if index_defined:
        self.index = aggregator(self).wrap(self.index)
Wrap the `self.index` method with ESAggregator. This makes `self.index` first try to run an aggregation; only if that fails is the original method run. The method is wrapped only if it is defined and the `elasticsearch.enable_aggregations` setting is true.
entailment
def get_collection_es(self):
    """ Query the ES collection and return the results.

    This is the default implementation of querying an ES collection with
    `self._query_params`. It must return the found ES collection results for
    the default response renderers to work properly.
    """
    from nefertari.elasticsearch import ES
    return ES(self.Model.__name__).get_collection(**self._query_params)
Query the ES collection and return the results. This is the default implementation of querying an ES collection with `self._query_params`. It must return the found ES collection results for the default response renderers to work properly.
entailment
def fill_null_values(self):
    """ Fill missing model fields in JSON with {key: null value}.

    Only run for PUT requests.
    """
    if not self.Model:
        log.info("%s has no model defined" % self.__class__.__name__)
        return
    empty_values = self.Model.get_null_values()
    for field, value in empty_values.items():
        if field not in self._json_params:
            self._json_params[field] = value
Fill missing model fields in JSON with {key: null value}. Only run for PUT requests.
entailment
def set_public_limits(self):
    """ Set public limits if auth is enabled and user is not authenticated.

    Also sets the default limit for GET and HEAD requests.
    """
    if self.request.method.upper() in ['GET', 'HEAD']:
        self._query_params.process_int_param('_limit', 20)
    if self._auth_enabled and not getattr(self.request, 'user', None):
        wrappers.set_public_limits(self)
Set public limits if auth is enabled and user is not authenticated. Also sets default limit for GET, HEAD requests.
entailment
def convert_ids2objects(self):
    """ Convert object IDs from `self._json_params` to objects if needed.

    Only IDs that belong to a relationship field of `self.Model` are
    converted.
    """
    if not self.Model:
        log.info("%s has no model defined" % self.__class__.__name__)
        return
    for field in self._json_params.keys():
        if not engine.is_relationship_field(field, self.Model):
            continue
        rel_model_cls = engine.get_relationship_cls(field, self.Model)
        self.id2obj(field, rel_model_cls)
Convert object IDs from `self._json_params` to objects if needed. Only IDs that belong to relationship field of `self.Model` are converted.
entailment
def setup_default_wrappers(self):
    """ Setup default wrappers.

    Wrappers are applied when a view method does not return an instance of
    Response. In this case nefertari renderers call the wrappers and handle
    response generation.
    """
    # Index, show, create, update, and replace share the same defaults.
    for meth in ('index', 'show', 'create', 'update', 'replace'):
        self._after_calls[meth] = [
            wrappers.wrap_in_dict(self.request),
            wrappers.add_meta(self.request),
            wrappers.add_object_url(self.request),
        ]
    # Privacy wrappers
    if self._auth_enabled:
        for meth in ('index', 'show', 'create', 'update', 'replace'):
            self._after_calls[meth] += [
                wrappers.apply_privacy(self.request),
            ]
        for meth in ('update', 'replace', 'update_many'):
            self._before_calls[meth] += [
                wrappers.apply_request_privacy(
                    self.Model, self._json_params),
            ]
Setup default wrappers. Wrappers are applied when a view method does not return an instance of Response. In this case nefertari renderers call the wrappers and handle response generation.
entailment
def migrate_codec(config_old, config_new):
    '''Migrate data from mongodict <= 0.2.1 to 0.3.0.

    `config_old` and `config_new` should be dictionaries with the keys for
    the MongoDB server:
    - `host`
    - `port`
    - `database`
    - `collection`
    '''
    assert mongodict.__version__ in [(0, 3, 0), (0, 3, 1)]
    connection = pymongo.Connection(host=config_old['host'],
                                    port=config_old['port'])
    database = connection[config_old['database']]
    collection = database[config_old['collection']]
    new_dict = mongodict.MongoDict(**config_new)  # uses pickle codec by default
    total_pairs = collection.count()
    start_time = time.time()
    for counter, pair in enumerate(collection.find(), start=1):
        key, value = pair['_id'], pair['value']
        new_dict[key] = value
        if counter % REPORT_INTERVAL == 0:
            print_report(counter, total_pairs, start_time)
    print_report(counter, total_pairs, start_time)
    print('')
Migrate data from mongodict <= 0.2.1 to 0.3.0. `config_old` and `config_new` should be dictionaries with the keys for the MongoDB server: - `host` - `port` - `database` - `collection`
entailment
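An invocation sketch; the connection values are placeholders, and pymongo, mongodict, and the migrate_codec helper above are assumed importable:

config_old = {'host': 'localhost', 'port': 27017,
              'database': 'mydb', 'collection': 'old_dict'}
config_new = {'host': 'localhost', 'port': 27017,
              'database': 'mydb', 'collection': 'new_dict'}
migrate_codec(config_old, config_new)  # copies every key/value pair across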
def from_dict(cls, data, model):
    """ Generate a map of `fieldName: clsInstance` from a dict.

    :param data: Dict where keys are field names and values are the new
        values of the fields.
    :param model: Model class to which the fields from :data: belong.
    """
    model_provided = model is not None
    result = {}
    for name, new_value in data.items():
        kwargs = {
            'name': name,
            'new_value': new_value,
        }
        if model_provided:
            kwargs['params'] = model.get_field_params(name)
        result[name] = cls(**kwargs)
    return result
Generate map of `fieldName: clsInstance` from dict. :param data: Dict where keys are field names and values are new values of field. :param model: Model class to which fields from :data: belong.
entailment
def register(self):
    """ Register a new user by POSTing all required data. """
    user, created = self.Model.create_account(
        self._json_params)
    if not created:
        raise JHTTPConflict('Looks like you already have an account.')
    self.request._user = user
    pk_field = user.pk_field()
    headers = remember(self.request, getattr(user, pk_field))
    return JHTTPOk('Registered', headers=headers)
Register a new user by POSTing all required data.
entailment
def register(self):
    """ Register a new user by POSTing all required data.

    User's `Authorization` header value is returned in `WWW-Authenticate`
    header.
    """
    user, created = self.Model.create_account(self._json_params)
    if user.api_key is None:
        raise JHTTPBadRequest('Failed to generate ApiKey for user')
    if not created:
        raise JHTTPConflict('Looks like you already have an account.')
    self.request._user = user
    headers = remember(self.request, user.username)
    return JHTTPOk('Registered', headers=headers)
Register a new user by POSTing all required data. User's `Authorization` header value is returned in `WWW-Authenticate` header.
entailment
def claim_token(self, **params):
    """Claim current token by POSTing 'login' and 'password'.

    User's `Authorization` header value is returned in `WWW-Authenticate`
    header.
    """
    self._json_params.update(params)
    success, self.user = self.Model.authenticate_by_password(
        self._json_params)
    if success:
        headers = remember(self.request, self.user.username)
        return JHTTPOk('Token claimed', headers=headers)
    if self.user:
        raise JHTTPUnauthorized('Wrong login or password')
    else:
        raise JHTTPNotFound('User not found')
Claim current token by POSTing 'login' and 'password'. User's `Authorization` header value is returned in `WWW-Authenticate` header.
entailment
def reset_token(self, **params):
    """ Reset current token by POSTing 'login' and 'password'.

    User's `Authorization` header value is returned in `WWW-Authenticate`
    header.
    """
    response = self.claim_token(**params)
    if not self.user:
        return response
    self.user.api_key.reset_token()
    headers = remember(self.request, self.user.username)
    return JHTTPOk('Registered', headers=headers)
Reset current token by POSTing 'login' and 'password'. User's `Authorization` header value is returned in `WWW-Authenticate` header.
entailment
def _apply_nested_privacy(self, data):
    """ Apply privacy to nested documents.

    :param data: Dict of data to which privacy is already applied.
    """
    kw = {
        'is_admin': self.is_admin,
        'drop_hidden': self.drop_hidden,
    }
    for key, val in data.items():
        if is_document(val):
            data[key] = apply_privacy(self.request)(result=val, **kw)
        elif isinstance(val, list) and val and is_document(val[0]):
            data[key] = [apply_privacy(self.request)(result=doc, **kw)
                         for doc in val]
    return data
Apply privacy to nested documents. :param data: Dict of data to which privacy is already applied.
entailment
def _set_object_self(self, obj):
    """ Add '_self' key value to :obj: dict. """
    from nefertari.elasticsearch import ES
    location = self.request.path_url
    route_kwargs = {}
    # Check for parents
    if self.request.matchdict:
        route_kwargs.update(self.request.matchdict)
    try:
        type_, obj_pk = obj['_type'], obj['_pk']
    except KeyError:
        return
    resource = (self.model_collections.get(type_) or
                self.model_collections.get(ES.src2type(type_)))
    if resource is not None:
        route_kwargs.update({resource.id_name: obj_pk})
        location = self.request.route_url(
            resource.uid, **route_kwargs)
    obj.setdefault('_self', location)
Add '_self' key value to :obj: dict.
entailment
def get_root_resource(config):
    """Returns the root resource."""
    app_package_name = get_app_package_name(config)
    return config.registry._root_resources.setdefault(
        app_package_name, Resource(config))
Returns the root resource.
entailment
def add_resource_routes(config, view, member_name, collection_name, **kwargs):
    """
    ``view`` is a dotted name of (or direct reference to) a Python view
    class, e.g. ``'my.package.views.MyView'``.

    ``member_name`` should be the appropriate singular version of the
    resource given your locale and used with members of the collection.

    ``collection_name`` will be used to refer to the resource collection
    methods and should be a plural version of the member_name argument.

    All keyword arguments are optional.

    ``path_prefix``
        Prepends the URL path for the Route with the path_prefix given. This
        is most useful for cases where you want to mix resources or relations
        between resources.

    ``name_prefix``
        Prepends the route names that are generated with the name_prefix
        given. Combined with the path_prefix option, it's easy to generate
        route names and paths that represent resources that are in relations.

        Example::

            config.add_resource_routes(
                'myproject.views:CategoryView',
                'message',
                'messages',
                path_prefix='/category/{category_id}',
                name_prefix="category_")

            # GET /category/7/messages/1
            # has named route "category_message"
    """
    view = maybe_dotted(view)
    path_prefix = kwargs.pop('path_prefix', '')
    name_prefix = kwargs.pop('name_prefix', '')
    if config.route_prefix:
        name_prefix = "%s_%s" % (config.route_prefix, name_prefix)
    if collection_name:
        id_name = '/{%s}' % (kwargs.pop('id_name', None) or DEFAULT_ID_NAME)
    else:
        id_name = ''
    path = path_prefix.strip('/') + '/' + (collection_name or member_name)
    _factory = kwargs.pop('factory', None)
    # If factory is not set, then auth should be False
    _auth = kwargs.pop('auth', None) and _factory
    _traverse = (kwargs.pop('traverse', None) or id_name) if _factory else None
    action_route = {}
    added_routes = {}

    def add_route_and_view(config, action, route_name, path, request_method,
                           **route_kwargs):
        if route_name not in added_routes:
            config.add_route(
                route_name, path, factory=_factory,
                request_method=['GET', 'POST', 'PUT', 'PATCH', 'DELETE',
                                'OPTIONS'],
                **route_kwargs)
            added_routes[route_name] = path
        action_route[action] = route_name
        if _auth:
            permission = PERMISSIONS[action]
        else:
            permission = None
        config.add_view(view=view, attr=action, route_name=route_name,
                        request_method=request_method,
                        permission=permission, **kwargs)
        config.commit()

    if collection_name == member_name:
        collection_name = collection_name + '_collection'
    if collection_name:
        add_route_and_view(
            config, 'index', name_prefix + collection_name, path, 'GET')
        add_route_and_view(
            config, 'collection_options', name_prefix + collection_name,
            path, 'OPTIONS')
    add_route_and_view(
        config, 'show', name_prefix + member_name, path + id_name, 'GET',
        traverse=_traverse)
    add_route_and_view(
        config, 'item_options', name_prefix + member_name, path + id_name,
        'OPTIONS', traverse=_traverse)
    add_route_and_view(
        config, 'replace', name_prefix + member_name, path + id_name, 'PUT',
        traverse=_traverse)
    add_route_and_view(
        config, 'update', name_prefix + member_name, path + id_name, 'PATCH',
        traverse=_traverse)
    add_route_and_view(
        config, 'create', name_prefix + (collection_name or member_name),
        path, 'POST')
    add_route_and_view(
        config, 'delete', name_prefix + member_name, path + id_name,
        'DELETE', traverse=_traverse)
    if collection_name:
        add_route_and_view(
            config, 'update_many',
            name_prefix + (collection_name or member_name), path, 'PUT',
            traverse=_traverse)
        add_route_and_view(
            config, 'update_many',
            name_prefix + (collection_name or member_name), path, 'PATCH',
            traverse=_traverse)
        add_route_and_view(
            config, 'delete_many',
            name_prefix + (collection_name or member_name), path, 'DELETE',
            traverse=_traverse)
    return action_route
``view`` is a dotted name of (or direct reference to) a Python view class, e.g. ``'my.package.views.MyView'``. ``member_name`` should be the appropriate singular version of the resource given your locale and used with members of the collection. ``collection_name`` will be used to refer to the resource collection methods and should be a plural version of the member_name argument. All keyword arguments are optional. ``path_prefix`` Prepends the URL path for the Route with the path_prefix given. This is most useful for cases where you want to mix resources or relations between resources. ``name_prefix`` Prepends the route names that are generated with the name_prefix given. Combined with the path_prefix option, it's easy to generate route names and paths that represent resources that are in relations. Example:: config.add_resource_routes( 'myproject.views:CategoryView', 'message', 'messages', path_prefix='/category/{category_id}', name_prefix="category_") # GET /category/7/messages/1 # has named route "category_message"
entailment
def get_default_view_path(resource):
    """Returns the dotted path to the default view class."""
    parts = [a.member_name for a in resource.ancestors] + \
            [resource.collection_name or resource.member_name]
    if resource.prefix:
        parts.insert(-1, resource.prefix)
    view_file = '%s' % '_'.join(parts)
    view = '%s:%sView' % (view_file, snake2camel(view_file))
    app_package_name = get_app_package_name(resource.config)
    return '%s.views.%s' % (app_package_name, view)
Returns the dotted path to the default view class.
entailment
def get_ancestors(self):
    """Returns the list of ancestor resources."""
    if self._ancestors:
        return self._ancestors
    if not self.parent:
        return []
    obj = self.resource_map.get(self.parent.uid)
    while obj and obj.member_name:
        self._ancestors.append(obj)
        obj = obj.parent
    self._ancestors.reverse()
    return self._ancestors
Returns the list of ancestor resources.
entailment
def add(self, member_name, collection_name='', parent=None, uid='',
        **kwargs):
    """
    :param member_name: singular name of the resource. It should be the
        appropriate singular version of the resource given your locale and
        used with members of the collection.
    :param collection_name: plural name of the resource. It will be used to
        refer to the resource collection methods and should be a plural
        version of the ``member_name`` argument. Note: if collection_name is
        empty, it means the resource is singular.
    :param parent: parent resource name or object.
    :param uid: unique name for the resource.
    :param kwargs:
        view: custom view to overwrite the default one.
        The rest of the keyword arguments are passed to the
        add_resource_routes call.
    :return: ResourceMap object
    """
    # self is the parent resource on which this method is called.
    parent = (self.resource_map.get(parent)
              if type(parent) is str else parent or self)
    prefix = kwargs.pop('prefix', '')
    uid = (uid or
           ':'.join(filter(bool, [parent.uid, prefix, member_name])))
    if uid in self.resource_map:
        raise ValueError('%s already exists in resource map' % uid)
    # Use id_name of parent for singular views to make url generation easier
    id_name = kwargs.get('id_name', '')
    if not id_name and parent:
        id_name = parent.id_name
    new_resource = Resource(self.config, member_name=member_name,
                            collection_name=collection_name, parent=parent,
                            uid=uid, id_name=id_name, prefix=prefix)
    view = maybe_dotted(
        kwargs.pop('view', None) or get_default_view_path(new_resource))
    for name, val in kwargs.pop('view_args', {}).items():
        setattr(view, name, val)
    root_resource = self.config.get_root_resource()
    view.root_resource = root_resource
    new_resource.view = view
    path_segs = []
    kwargs['path_prefix'] = ''
    for res in new_resource.ancestors:
        if not res.is_singular:
            if res.id_name:
                id_full = res.id_name
            else:
                id_full = "%s_%s" % (res.member_name, DEFAULT_ID_NAME)
            path_segs.append('%s/{%s}' % (res.collection_name, id_full))
        else:
            path_segs.append(res.member_name)
    if path_segs:
        kwargs['path_prefix'] = '/'.join(path_segs)
    if prefix:
        kwargs['path_prefix'] += '/' + prefix
    name_segs = [a.member_name for a in new_resource.ancestors]
    name_segs.insert(1, prefix)
    name_segs = [seg for seg in name_segs if seg]
    if name_segs:
        kwargs['name_prefix'] = '_'.join(name_segs) + ':'
    new_resource.renderer = kwargs.setdefault(
        'renderer', view._default_renderer)
    kwargs.setdefault('auth', root_resource.auth)
    kwargs.setdefault('factory', root_resource.default_factory)
    _factory = maybe_dotted(kwargs['factory'])
    kwargs['auth'] = kwargs.get('auth', root_resource.auth)
    kwargs['http_cache'] = kwargs.get(
        'http_cache', root_resource.http_cache)
    new_resource.action_route_map = add_resource_routes(
        self.config, view, member_name, collection_name, **kwargs)
    self.resource_map[uid] = new_resource
    # Add all route names for this resource as keys in the dict, so it's
    # easy to find it in the view.
    self.resource_map.update(dict.fromkeys(
        list(new_resource.action_route_map.values()), new_resource))
    # Store resources in a {modelName: resource} map if:
    #   * its view has a Model defined
    #   * it's not singular
    #   * its parent is root or it's not already stored
    model = new_resource.view.Model
    is_collection = model is not None and not new_resource.is_singular
    if is_collection:
        is_needed = (model.__name__ not in self.model_collections or
                     new_resource.parent is root_resource)
        if is_needed:
            self.model_collections[model.__name__] = new_resource
    parent.children.append(new_resource)
    view._resource = new_resource
    view._factory = _factory
    return new_resource
:param member_name: singular name of the resource. It should be the appropriate singular version of the resource given your locale and used with members of the collection. :param collection_name: plural name of the resource. It will be used to refer to the resource collection methods and should be a plural version of the ``member_name`` argument. Note: if collection_name is empty, it means the resource is singular. :param parent: parent resource name or object. :param uid: unique name for the resource. :param kwargs: view: custom view to overwrite the default one; the rest of the keyword arguments are passed to the add_resource_routes call. :return: ResourceMap object
entailment
def add_from_child(self, resource, **kwargs):
    """ Add a resource, along with all of its child resources, to the
    current resource.
    """
    new_resource = self.add(
        resource.member_name, resource.collection_name, **kwargs)
    for child in resource.children:
        new_resource.add_from_child(child, **kwargs)
Add a resource, along with all of its child resources, to the current resource.
entailment
def add(self, path):
    """ Add the path of a data set to the list of available sets.

    NOTE: a data set is assumed to be a pickled and gzip-compressed Pandas
    DataFrame.

    Parameters
    ----------
    path : str
    """
    name_with_ext = os.path.split(path)[1]  # split directory and filename
    name = name_with_ext.split('.')[0]  # remove extension
    self.list.update({name: path})
Add the path of a data set to the list of available sets NOTE: a data set is assumed to be a pickled and gzip compressed Pandas DataFrame Parameters ---------- path : str
entailment
def unpack(self, name):
    """ Unpacks a data set to a Pandas DataFrame.

    Parameters
    ----------
    name : str
        Call `.list` to see all available datasets.

    Returns
    -------
    pd.DataFrame
    """
    path = self.list[name]
    df = pd.read_pickle(path, compression='gzip')
    return df
Unpacks a data set to a Pandas DataFrame. Parameters ---------- name : str call `.list` to see all available datasets Returns ------- pd.DataFrame
entailment
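A usage sketch for the two methods above; the owning class name DataSetRegistry is hypothetical, as is the file path:

registry = DataSetRegistry()          # hypothetical owning class
registry.add('/data/prices.pkl.gz')   # registered under the name 'prices'
df = registry.unpack('prices')        # gzip-pickled file -> pd.DataFrame
print(df.head())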
def six_frame(genome, table, minimum=10):
    """ translate each sequence into six reading frames """
    for seq in parse_fasta(genome):
        dna = Seq(seq[1].upper().replace('U', 'T'), IUPAC.ambiguous_dna)
        counter = 0
        for direction, sequence in (['f', dna],
                                    ['rc', dna.reverse_complement()]):
            for frame in range(0, 3):
                for prot in sequence[frame:].translate(
                        table=table, to_stop=False).split('*'):
                    if len(prot) < minimum:
                        continue
                    counter += 1
                    header = '%s_%s table=%s frame=%s-%s %s' % \
                        (seq[0].split()[0], counter, table, frame + 1,
                         direction, ' '.join(seq[0].split()[1:]))
                    yield [header, prot]
translate each sequence into six reading frames
entailment
def publish_processed_network_packets(
        name="not-set",
        task_queue=None,
        result_queue=None,
        need_response=False,
        shutdown_msg="SHUTDOWN"):
    """
    # Redis/RabbitMQ/SQS messaging endpoints for pub-sub
    routing_key = ev("PUBLISH_EXCHANGE", "reporting.accounts")
    queue_name = ev("PUBLISH_QUEUE", "reporting.accounts")
    auth_url = ev("PUB_BROKER_URL", "redis://localhost:6379/15")
    serializer = "json"
    """
    # these keys need to be cycled to prevent
    # exploiting static keys
    filter_key = ev("IGNORE_KEY", INCLUDED_IGNORE_KEY)
    forward_host = ev("FORWARD_HOST", "127.0.0.1")
    forward_port = int(ev("FORWARD_PORT", "80"))
    include_filter_key = ev("FILTER_KEY", "")
    if not include_filter_key and filter_key:
        include_filter_key = filter_key
    filter_keys = [filter_key]

    log.info(("START consumer={} "
              "forward={}:{} with "
              "key={} filters={}")
             .format(name, forward_host, forward_port,
                     include_filter_key, filter_key))

    forward_skt = None
    not_done = True
    while not_done:
        if not forward_skt:
            forward_skt = connect_forwarder(
                forward_host=forward_host,
                forward_port=forward_port)
        next_task = task_queue.get()
        if next_task:
            if str(next_task) == shutdown_msg:
                # Poison pill for shutting down
                log.info(("{}: DONE CALLBACK "
                          "Exiting msg={}")
                         .format(name, next_task))
                task_queue.task_done()
                break
            # end of handling shutdown case
            try:
                log.debug(("{} parsing").format(name))
                source = next_task.source
                packet = next_task.payload
                if not packet:
                    log.error(("{} invalid task found "
                               "{} missing payload")
                              .format(name, next_task))
                    break
                log.debug(("{} found msg from src={}")
                          .format(name, source))
                network_data = parse_network_data(
                    data_packet=packet,
                    include_filter_key=include_filter_key,
                    filter_keys=filter_keys)
                if network_data["status"] == VALID:
                    if network_data["data_type"] == TCP \
                            or network_data["data_type"] == UDP \
                            or network_data["data_type"] == ARP \
                            or network_data["data_type"] == ICMP:
                        log.info(("{} valid={} packet={} "
                                  "data={}")
                                 .format(name,
                                         network_data["id"],
                                         network_data["data_type"],
                                         network_data["target_data"]))
                        if not forward_skt:
                            forward_skt = connect_forwarder(
                                forward_host=forward_host,
                                forward_port=forward_port)
                        if forward_skt:
                            if network_data["stream"]:
                                sent = False
                                while not sent:
                                    try:
                                        log.info("sending={}".format(
                                            network_data["stream"]))
                                        send_msg(
                                            forward_skt,
                                            network_data["stream"]
                                            .encode("utf-8"))
                                        sent = True
                                    except Exception as e:
                                        sent = False
                                        time.sleep(0.5)
                                        try:
                                            forward_skt.close()
                                            forward_skt = None
                                        except Exception as w:
                                            forward_skt = None
                                        forward_skt = connect_forwarder(
                                            forward_host=forward_host,
                                            forward_port=forward_port)
                                    # end of reconnecting
                                log.info("sent={}".format(
                                    network_data["stream"]))
                                if need_response:
                                    log.info("receiving")
                                    cdr_res = forward_skt.recv(1024)
                                    log.info(("cdr - res{}")
                                             .format(cdr_res))
                            else:
                                log.info(("{} EMPTY stream={} "
                                          "error={} status={}")
                                         .format(name,
                                                 network_data["stream"],
                                                 network_data["err"],
                                                 network_data["status"]))
                    else:
                        log.info(("{} not_supported valid={} "
                                  "packet data_type={} status={}")
                                 .format(name,
                                         network_data["id"],
                                         network_data["data_type"],
                                         network_data["status"]))
                elif network_data["status"] == FILTERED:
                    log.info(("{} filtered={} status={}")
                             .format(name,
                                     network_data["filtered"],
                                     network_data["status"]))
                else:
                    if network_data["status"] == INVALID:
                        log.info(("{} invalid={} packet={} "
                                  "error={} status={}")
                                 .format(name,
                                         network_data["id"],
                                         network_data["data_type"],
                                         network_data["error"],
                                         network_data["status"]))
                    else:
                        log.info(("{} unknown={} packet={} "
                                  "error={} status={}")
                                 .format(name,
                                         network_data["id"],
                                         network_data["data_type"],
                                         network_data["error"],
                                         network_data["status"]))
                # end of if valid or not data
            except KeyboardInterrupt as k:
                log.info(("{} stopping").format(name))
                break
            except Exception as e:
                log.error(("{} failed packaging packet to forward "
                           "with ex={}").format(name, e))
                break
            # end of try/ex during payload processing
        # end of if found a next_task

        log.info(("Consumer: {} {}").format(name, next_task))
        task_queue.task_done()
        if need_response:
            answer = "processed: {}".format(next_task())
            result_queue.put(answer)
    # end of while

    if forward_skt:
        try:
            forward_skt.close()
            log.info("CLOSED connection")
            forward_skt = None
        except Exception:
            log.info("CLOSED connection")
    # end of cleaning up forwarding socket

    log.info("{} Done".format(name))
    return
# Redis/RabbitMQ/SQS messaging endpoints for pub-sub routing_key = ev("PUBLISH_EXCHANGE", "reporting.accounts") queue_name = ev("PUBLISH_QUEUE", "reporting.accounts") auth_url = ev("PUB_BROKER_URL", "redis://localhost:6379/15") serializer = "json"
entailment
def run_main(
        need_response=False,
        callback=None):
    """run_main

    Start the packet consumers and the packet processors.

    :param need_response: should send a response back to the publisher
    :param callback: handler method
    """
    stop_file = ev("STOP_FILE", "/opt/stop_recording")
    num_workers = int(ev("NUM_WORKERS", "1"))
    shutdown_msg = "SHUTDOWN"

    log.info("Start - {}".format(name))
    log.info("Creating multiprocessing queue")
    tasks = multiprocessing.JoinableQueue()
    queue_to_consume = multiprocessing.Queue()
    host = "localhost"

    # Start consumers
    log.info("Starting Consumers to process queued tasks")
    consumers = start_consumers_for_queue(
        num_workers=num_workers,
        tasks=tasks,
        queue_to_consume=queue_to_consume,
        shutdown_msg=shutdown_msg,
        consumer_class=WorkerToProcessPackets,
        callback=callback)

    log.info("creating socket")
    skt = create_layer_2_socket()
    log.info("socket created")

    not_done = True
    while not_done:
        if not skt:
            log.info("Failed to create layer 2 socket")
            log.info("Please make sure to run as root")
            not_done = False
            break
        try:
            if os.path.exists(stop_file):
                log.info(("Detected stop_file={}").format(stop_file))
                not_done = False
                break  # stop if the file exists

            # Only works on linux
            packet = skt.recvfrom(65565)

            if os.path.exists(stop_file):
                log.info(("Detected stop_file={}").format(stop_file))
                not_done = False
                break  # stop if the file was created during a wait loop

            tasks.put(NetworkPacketTask(source=host, payload=packet))
        except KeyboardInterrupt as k:
            log.info("Stopping")
            not_done = False
            break
        except Exception as e:
            log.error(("Failed reading socket with ex={}").format(e))
            not_done = False
            break
        # end of try/ex during socket receiving
    # end of while processing network packets

    log.info(("Shutting down consumers={}").format(len(consumers)))
    shutdown_consumers(num_workers=num_workers, tasks=tasks)

    # Wait for all of the tasks to finish
    if need_response:
        log.info("Waiting for tasks to finish")
        tasks.join()
        log.info("Done waiting for tasks to finish")
run_main starts the packet consumers and the packet processors. :param need_response: should send a response back to the publisher :param callback: handler method
entailment
def best_model(seq2hmm):
    """ determine the best model: archaea, bacteria, eukarya (best score) """
    for seq in seq2hmm:
        best = []
        for model in seq2hmm[seq]:
            best.append([model,
                         sorted([i[-1] for i in seq2hmm[seq][model]],
                                reverse=True)[0]])
        best_model = sorted(best, key=itemgetter(1), reverse=True)[0][0]
        seq2hmm[seq] = [best_model] + [seq2hmm[seq][best_model]]
    return seq2hmm
determine the best model: archaea, bacteria, eukarya (best score)
entailment
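A worked trace of the selection above, assuming best_model is in scope and that each hit list ends with its bit score:

seq2hmm = {'seq1': {'bacteria': [[1, 900, 1, 900, '!', '+', 50.0]],
                    'archaea':  [[1, 900, 1, 900, '!', '+', 42.0]]}}
print(best_model(seq2hmm))
# {'seq1': ['bacteria', [[1, 900, 1, 900, '!', '+', 50.0]]]}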
def check_gaps(matches, gap_threshold=0):
    """ check for large gaps between alignment windows """
    gaps = []
    prev = None
    for match in sorted(matches, key=itemgetter(0)):
        if prev is None:
            prev = match
            continue
        if match[0] - prev[1] >= gap_threshold:
            gaps.append([prev, match])
        prev = match
    return [[i[0][1], i[1][0]] for i in gaps]
check for large gaps between alignment windows
entailment
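A worked example, assuming check_gaps is in scope; only the 30-base gap between the first two windows clears the threshold:

matches = [[1, 120], [150, 400], [405, 500]]
print(check_gaps(matches, gap_threshold=20))
# [[120, 150]]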
def check_overlap(current, hit, overlap=200):
    """ determine if the sequence has already hit the same part of the model,
    indicating that this hit is for another 16S rRNA gene """
    for prev in current:
        p_coords = prev[2:4]
        coords = hit[2:4]
        if get_overlap(coords, p_coords) >= overlap:
            return True
    return False
determine if sequence has already hit the same part of the model, indicating that this hit is for another 16S rRNA gene
entailment
def check_order(current, hit, overlap=200):
    """ determine if hits are sequential on the model and on the same strand
    * if not, they should be split into different groups
    """
    prev_model = current[-1][2:4]
    prev_strand = current[-1][-2]
    hit_model = hit[2:4]
    hit_strand = hit[-2]
    # make sure they are on the same strand
    if prev_strand != hit_strand:
        return False
    # check for sequential hits on + strand
    if prev_strand == '+' and (prev_model[1] - hit_model[0] >= overlap):
        return False
    # check for sequential hits on - strand
    if prev_strand == '-' and (hit_model[1] - prev_model[0] >= overlap):
        return False
    else:
        return True
determine if hits are sequential on model and on the same strand * if not, they should be split into different groups
entailment
def hit_groups(hits):
    """
    * each sequence may have more than one 16S rRNA gene
    * group hits for each gene
    """
    groups = []
    current = False
    for hit in sorted(hits, key=itemgetter(0)):
        if current is False:
            current = [hit]
        elif (check_overlap(current, hit) is True or
                check_order(current, hit) is False):
            groups.append(current)
            current = [hit]
        else:
            current.append(hit)
    groups.append(current)
    return groups
* each sequence may have more than one 16S rRNA gene * group hits for each gene
entailment
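A grouping sketch, assuming the hit layout [seq_start, seq_end, model_start, model_end, inc, strand, bitscore] used elsewhere in this module; get_overlap is not shown above, so a plausible stand-in is defined here:

def get_overlap(a, b):  # hypothetical stand-in for the module's helper
    return min(a[1], b[1]) - max(a[0], b[0]) + 1

hits = [
    [1,    500,  1,   500, '!', '+', 300.0],
    [501,  900,  480, 900, '!', '+', 250.0],  # continues the same gene
    [2000, 2400, 1,   400, '!', '+', 200.0],  # re-hits the model start: a second gene
]
for group in hit_groups(hits):
    print([h[0:2] for h in group])
# [[1, 500], [501, 900]]
# [[2000, 2400]]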
def find_coordinates(hmms, bit_thresh):
    """ find 16S rRNA gene sequence coordinates """
    # get coordinates from cmsearch output
    seq2hmm = parse_hmm(hmms, bit_thresh)
    seq2hmm = best_model(seq2hmm)
    # group2hmm[seq][group] = [model, strand, coordinates, matches, gaps]
    group2hmm = {}
    for seq, info in list(seq2hmm.items()):
        group2hmm[seq] = {}
        # info = [model, [[hit1], [hit2], ...]]
        for group_num, group in enumerate(hit_groups(info[1])):
            # group is a group of hits to a single 16S gene
            # determine matching strand based on best hit
            best = sorted(group, reverse=True, key=itemgetter(-1))[0]
            strand = best[5]
            coordinates = [i[0] for i in group] + [i[1] for i in group]
            coordinates = [min(coordinates), max(coordinates), strand]
            # make sure all hits are to the same strand
            matches = [i for i in group if i[5] == strand]
            # gaps = [[gstart, gend], [gstart2, gend2]]
            gaps = check_gaps(matches)
            group2hmm[seq][group_num] = [info[0], strand, coordinates,
                                         matches, gaps]
    return group2hmm
find 16S rRNA gene sequence coordinates
entailment
def get_info(line, bit_thresh):
    """ get info from either ssu-cmsearch or cmsearch output """
    if len(line) >= 18:
        # output is from cmsearch
        id, model, bit, inc = (line[0].split()[0], line[2],
                               float(line[14]), line[16])
        sstart, send, strand = int(line[7]), int(line[8]), line[9]
        mstart, mend = int(line[5]), int(line[6])
    elif len(line) == 9:
        # output is from ssu-cmsearch
        if bit_thresh == 0:
            print('# ssu-cmsearch does not include a model-specific '
                  'inclusion threshold, ', file=sys.stderr)
            print('# please specify a bit score threshold', file=sys.stderr)
            exit()
        id, model, bit = line[1].split()[0], line[0], float(line[6])
        inc = '!'  # this is not a feature of ssu-cmsearch
        sstart, send = int(line[2]), int(line[3])
        # NOTE: the original read `int(4), int(5)`, hard-coding the model
        # coordinates; `line[4]`/`line[5]` appears to be the intent.
        mstart, mend = int(line[4]), int(line[5])
        if send >= sstart:
            strand = '+'
        else:
            strand = '-'
    else:
        print('# unsupported hmm format:', file=sys.stderr)
        print('# provide tabular output from ssu-cmsearch and cmsearch '
              'supported', file=sys.stderr)
        exit()
    coords = [sstart, send]
    sstart, send = min(coords), max(coords)
    mcoords = [mstart, mend]
    mstart, mend = min(mcoords), max(mcoords)
    return id, model, bit, sstart, send, mstart, mend, strand, inc
get info from either ssu-cmsearch or cmsearch output
entailment
def check_buffer(coords, length, buffer):
    """ check to see how much of the buffer is being used """
    s = min(coords[0], buffer)
    e = min(length - coords[1], buffer)
    return [s, e]
check to see how much of the buffer is being used
entailment
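A worked example, assuming check_buffer is in scope; for a gene at 150-900 on a 1000 nt sequence with a 200 nt buffer, only 150 nt upstream and 100 nt downstream are available:

print(check_buffer([150, 900], length=1000, buffer=200))
# [150, 100]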
def convert_parser_to(parser, parser_or_type, metadata_props=None):
    """
    :return: a parser of type parser_or_type, initialized with the
        properties of parser. If parser_or_type is a type, an instance of it
        must contain an update method. The update method must also process
        the set of properties supported by MetadataParser for the conversion
        to have any effect.
    :param parser: the parser (or content or parser type) to convert to
        new_type
    :param parser_or_type: a parser (or content) or type of parser to return
    :see: get_metadata_parser(metadata_container) for more on how
        parser_or_type is treated
    """
    old_parser = (parser if isinstance(parser, MetadataParser)
                  else get_metadata_parser(parser))
    new_parser = get_metadata_parser(parser_or_type)
    for prop in (metadata_props or _supported_props):
        setattr(new_parser, prop, deepcopy(getattr(old_parser, prop, u'')))
    new_parser.update()
    return new_parser
:return: a parser of type parser_or_type, initialized with the properties of parser. If parser_or_type is a type, an instance of it must contain an update method. The update method must also process the set of properties supported by MetadataParser for the conversion to have any effect. :param parser: the parser (or content or parser type) to convert to new_type :param parser_or_type: a parser (or content) or type of parser to return :see: get_metadata_parser(metadata_container) for more on how parser_or_type is treated
entailment
def get_metadata_parser(metadata_container, **metadata_defaults):
    """
    Takes a metadata_container, which may be a type or instance of a parser,
    a dict, string, or file.
    :return: a new instance of a parser corresponding to the standard
        represented by metadata_container
    :see: get_parsed_content(metadata_content) for more on the types of
        content that can be parsed
    """
    parser_type = None
    if isinstance(metadata_container, MetadataParser):
        parser_type = type(metadata_container)
    elif isinstance(metadata_container, type):
        parser_type = metadata_container
        metadata_container = metadata_container().update(**metadata_defaults)
    xml_root, xml_tree = get_parsed_content(metadata_container)
    # The get_parsed_content method ensures only these roots will be returned
    parser = None
    if parser_type is not None:
        parser = parser_type(xml_tree, **metadata_defaults)
    elif xml_root in ISO_ROOTS:
        parser = IsoParser(xml_tree, **metadata_defaults)
    else:
        has_arcgis_data = any(element_exists(xml_tree, e)
                              for e in ARCGIS_NODES)
        if xml_root == FGDC_ROOT and not has_arcgis_data:
            parser = FgdcParser(xml_tree, **metadata_defaults)
        elif xml_root in ARCGIS_ROOTS:
            parser = ArcGISParser(xml_tree, **metadata_defaults)
    return parser
Takes a metadata_container, which may be a type or instance of a parser, a dict, string, or file. :return: a new instance of a parser corresponding to the standard represented by metadata_container :see: get_parsed_content(metadata_content) for more on the types of content that can be parsed
entailment
def get_parsed_content(metadata_content):
    """
    Parses any of the following types of content:
    1. XML string or file object: parses XML content
    2. MetadataParser instance: deep copies xml_tree
    3. Dictionary with nested objects containing:
        - name (required): the name of the element tag
        - text: the text contained by element
        - tail: text immediately following the element
        - attributes: a Dictionary containing element attributes
        - children: a List of converted child elements

    :raises InvalidContent: if the XML is invalid or does not conform to a
        supported metadata standard
    :raises NoContent: if the content passed in is null or otherwise empty
    :return: the XML root along with an XML Tree parsed by and compatible
        with element_utils
    """
    _import_parsers()  # Prevents circular dependencies between modules

    xml_tree = None
    if metadata_content is None:
        raise NoContent('Metadata has no data')
    else:
        if isinstance(metadata_content, MetadataParser):
            xml_tree = deepcopy(metadata_content._xml_tree)
        elif isinstance(metadata_content, dict):
            xml_tree = get_element_tree(metadata_content)
        else:
            try:
                # Strip name spaces from file or XML content
                xml_tree = get_element_tree(metadata_content)
            except Exception:
                # Several exceptions possible, outcome is the same
                xml_tree = None
    if xml_tree is None:
        raise InvalidContent(
            'Cannot instantiate a {parser_type} parser with invalid content to parse',
            parser_type=type(metadata_content).__name__
        )
    xml_root = get_element_name(xml_tree)
    if xml_root is None:
        raise NoContent('Metadata contains no data')
    elif xml_root not in VALID_ROOTS:
        content = type(metadata_content).__name__
        raise InvalidContent('Invalid root element for {content}: {xml_root}',
                             content=content, xml_root=xml_root)
    return xml_root, xml_tree
Parses any of the following types of content: 1. XML string or file object: parses XML content 2. MetadataParser instance: deep copies xml_tree 3. Dictionary with nested objects containing: - name (required): the name of the element tag - text: the text contained by element - tail: text immediately following the element - attributes: a Dictionary containing element attributes - children: a List of converted child elements :raises InvalidContent: if the XML is invalid or does not conform to a supported metadata standard :raises NoContent: If the content passed in is null or otherwise empty :return: the XML root along with an XML Tree parsed by and compatible with element_utils
entailment
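A sketch of the dictionary form accepted by get_parsed_content; 'metadata' stands in for a
supported root element (typically the FGDC root), and the child element names are illustrative:

# Hypothetical dict input: nested objects mirroring the element structure
element_dict = {
    'name': 'metadata',                  # required: the element tag name
    'attributes': {'version': '1.0'},    # element attributes
    'children': [                        # converted child elements
        {'name': 'idinfo', 'children': [
            {'name': 'title', 'text': 'Example dataset'}
        ]}
    ]
}
xml_root, xml_tree = get_parsed_content(element_dict)
print(xml_root)  # the recognized root name, e.g. 'metadata'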
def _import_parsers(): """ Lazy imports to prevent circular dependencies between this module and utils """ global ARCGIS_NODES global ARCGIS_ROOTS global ArcGISParser global FGDC_ROOT global FgdcParser global ISO_ROOTS global IsoParser global VALID_ROOTS if ARCGIS_NODES is None or ARCGIS_ROOTS is None or ArcGISParser is None: from gis_metadata.arcgis_metadata_parser import ARCGIS_NODES from gis_metadata.arcgis_metadata_parser import ARCGIS_ROOTS from gis_metadata.arcgis_metadata_parser import ArcGISParser if FGDC_ROOT is None or FgdcParser is None: from gis_metadata.fgdc_metadata_parser import FGDC_ROOT from gis_metadata.fgdc_metadata_parser import FgdcParser if ISO_ROOTS is None or IsoParser is None: from gis_metadata.iso_metadata_parser import ISO_ROOTS from gis_metadata.iso_metadata_parser import IsoParser if VALID_ROOTS is None: VALID_ROOTS = {FGDC_ROOT}.union(ARCGIS_ROOTS + ISO_ROOTS)
Lazy imports to prevent circular dependencies between this module and utils
entailment
def _init_metadata(self): """ Dynamically sets attributes from a Dictionary passed in by children. The Dictionary will contain the name of each attribute as keys, and either an XPATH mapping to a text value in _xml_tree, or a function that takes no parameters and returns the intended value. """ if self._data_map is None: self._init_data_map() validate_properties(self._data_map, self._metadata_props) # Parse attribute values and assign them: key = parse(val) for prop in self._data_map: setattr(self, prop, parse_property(self._xml_tree, None, self._data_map, prop)) self.has_data = any(getattr(self, prop) for prop in self._data_map)
Dynamically sets attributes from a Dictionary passed in by children. The Dictionary will contain the name of each attribute as keys, and either an XPATH mapping to a text value in _xml_tree, or a function that takes no parameters and returns the intended value.
entailment
def _init_data_map(self): """ Default data map initialization: MUST be overridden in children """ if self._data_map is None: self._data_map = {'_root': None} self._data_map.update({}.fromkeys(self._metadata_props))
Default data map initialization: MUST be overridden in children
entailment
def _get_template(self, root=None, **metadata_defaults):
    """ Iterates over the metadata_defaults items {prop: val, ...} to populate the template """

    if root is None:
        if self._data_map is None:
            self._init_data_map()

        root = self._xml_root = self._data_map['_root']

    template_tree = self._xml_tree = create_element_tree(root)

    for prop, val in iteritems(metadata_defaults):
        path = self._data_map.get(prop)
        if path and val:
            setattr(self, prop, val)
            update_property(template_tree, None, path, prop, val)

    return template_tree
Iterates over the metadata_defaults items {prop: val, ...} to populate the template
entailment
def _get_xpath_for(self, prop): """ :return: the configured xpath for a given property """ xpath = self._data_map.get(prop) return getattr(xpath, 'xpath', xpath)
:return: the configured xpath for a given property
entailment
def _parse_complex(self, prop): """ Default parsing operation for a complex struct """ xpath_root = None xpath_map = self._data_structures[prop] return parse_complex(self._xml_tree, xpath_root, xpath_map, prop)
Default parsing operation for a complex struct
entailment
def _parse_complex_list(self, prop): """ Default parsing operation for lists of complex structs """ xpath_root = self._get_xroot_for(prop) xpath_map = self._data_structures[prop] return parse_complex_list(self._xml_tree, xpath_root, xpath_map, prop)
Default parsing operation for lists of complex structs
entailment
def _parse_dates(self, prop=DATES): """ Creates and returns a Date Types data structure parsed from the metadata """ return parse_dates(self._xml_tree, self._data_structures[prop])
Creates and returns a Date Types data structure parsed from the metadata
entailment
def _update_complex(self, **update_props): """ Default update operation for a complex struct """ prop = update_props['prop'] xpath_root = self._get_xroot_for(prop) xpath_map = self._data_structures[prop] return update_complex(xpath_root=xpath_root, xpath_map=xpath_map, **update_props)
Default update operation for a complex struct
entailment
def _update_complex_list(self, **update_props): """ Default update operation for lists of complex structs """ prop = update_props['prop'] xpath_root = self._get_xroot_for(prop) xpath_map = self._data_structures[prop] return update_complex_list(xpath_root=xpath_root, xpath_map=xpath_map, **update_props)
Default update operation for lists of complex structs
entailment
def _update_dates(self, xpath_root=None, **update_props): """ Default update operation for Dates metadata :see: gis_metadata.utils._complex_definitions[DATES] """ tree_to_update = update_props['tree_to_update'] prop = update_props['prop'] values = (update_props['values'] or {}).get(DATE_VALUES) or u'' xpaths = self._data_structures[prop] if not self.dates: date_xpaths = xpath_root elif self.dates[DATE_TYPE] != DATE_TYPE_RANGE: date_xpaths = xpaths.get(self.dates[DATE_TYPE], u'') else: date_xpaths = [ xpaths[DATE_TYPE_RANGE_BEGIN], xpaths[DATE_TYPE_RANGE_END] ] if xpath_root: remove_element(tree_to_update, xpath_root) return update_property(tree_to_update, xpath_root, date_xpaths, prop, values)
Default update operation for Dates metadata :see: gis_metadata.utils._complex_definitions[DATES]
entailment
def write(self, use_template=False, out_file_or_path=None, encoding=DEFAULT_ENCODING): """ Validates instance properties, updates an XML tree with them, and writes the content to a file. :param use_template: if True, updates a new template XML tree; otherwise the original XML tree :param out_file_or_path: optionally override self.out_file_or_path with a custom file path :param encoding: optionally use another encoding instead of UTF-8 """ if not out_file_or_path: out_file_or_path = self.out_file_or_path if not out_file_or_path: # FileNotFoundError doesn't exist in Python 2 raise IOError('Output file path has not been provided') write_element(self.update(use_template), out_file_or_path, encoding)
Validates instance properties, updates an XML tree with them, and writes the content to a file. :param use_template: if True, updates a new template XML tree; otherwise the original XML tree :param out_file_or_path: optionally override self.out_file_or_path with a custom file path :param encoding: optionally use another encoding instead of UTF-8
entailment
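A brief write flow under the assumptions above; the 'title' property and the output path are
illustrative, not confirmed by the source:

# Hypothetical round trip: update a property, validate, then write the tree back out
parser.title = 'Updated dataset title'  # 'title' assumed to be a supported property
parser.validate()
parser.write(out_file_or_path='updated_metadata.xml')  # UTF-8 by default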
def validate(self): """ Default validation for updated properties: MAY be overridden in children """ validate_properties(self._data_map, self._metadata_props) for prop in self._data_map: validate_any(prop, getattr(self, prop), self._data_structures.get(prop)) return self
Default validation for updated properties: MAY be overridden in children
entailment
def _search_regex(ops: dict, regex_pat: str): """ Search order: * specified regexps * operators sorted from longer to shorter """ custom_regexps = list(filter(None, [dic['regex'] for op, dic in ops.items() if 'regex' in dic])) op_names = [op for op, dic in ops.items() if 'regex' not in dic] regex = [regex_pat.format(_ops_regex(op_names))] if len(op_names) > 0 else [] return re.compile('|'.join(custom_regexps + regex))
Search order: * specified regexps * operators sorted from longer to shorter
entailment
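A toy sketch of how _search_regex combines patterns; it assumes _ops_regex(names) escapes the
names and joins them longest-first into a single alternation, and that this runs in the module's
scope where both helpers are defined:

from collections import OrderedDict

# Hypothetical ops dict: one operator supplies its own regex, two rely on regex_pat
ops = OrderedDict([
    ('//', {'pat': r'\frac', 'regex': r'(//)(?=\S)'}),  # custom regex, used verbatim
    ('**', {'pat': '^'}),                               # matched via regex_pat
    ('*',  {'pat': r'\cdot'}),
])
# regex_pat receives the escaped alternation of the plain operator names;
# per the docstring, '**' is tried before '*' (longer-to-shorter ordering)
combined = _search_regex(ops, regex_pat=r'({})')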
def spec(self, postf_un_ops: str) -> list: """Return prefix unary operators list""" spec = [(l + op, {'pat': self.pat(pat), 'postf': self.postf(r, postf_un_ops), 'regex': None}) for op, pat in self.styles.items() for l, r in self.brackets] spec[0][1]['regex'] = self.regex_pat.format( _ops_regex(l for l, r in self.brackets), _ops_regex(self.styles.keys()) ) return spec
Return prefix unary operators list
entailment
def spec(self) -> list: """Returns prefix unary operators list. Sets only one regex for all items in the dict.""" spec = [item for op, pat in self.ops.items() for item in [('{' + op, {'pat': pat, 'postf': self.postf, 'regex': None}), ('˱' + op, {'pat': pat, 'postf': self.postf, 'regex': None})] ] spec[0][1]['regex'] = self.regex_pat.format(_ops_regex(self.ops.keys())) return spec
Returns prefix unary operators list. Sets only one regex for all items in the dict.
entailment
def fill(self, postf_un_ops: str): """ Insert: * math styles * other styles * unary prefix operators without brackets * defaults """ for op, dic in self.ops.items(): if 'postf' not in dic: dic['postf'] = self.postf self.ops = OrderedDict( self.styles.spec(postf_un_ops) + self.other_styles.spec(postf_un_ops) + self.pref_un_greedy.spec() + list(self.ops.items()) ) for op, dic in self.ops.items(): dic['postf'] = re.compile(dic['postf']) self.regex = _search_regex(self.ops, self.regex_pat)
Insert: * math styles * other styles * unary prefix operators without brackets * defaults
entailment
def one_symbol_ops_str(self) -> str: """Regex-escaped string with all one-symbol operators""" return re.escape(''.join((key for key in self.ops.keys() if len(key) == 1)))
Regex-escaped string with all one-symbol operators
entailment
def _su_scripts_regex(self): """ :return: [compiled regex, function] """ sups = re.escape(''.join([k for k in self.superscripts.keys()])) subs = re.escape(''.join([k for k in self.subscripts.keys()])) # language=PythonRegExp su_regex = (r'\\([{su_}])|([{sub}]+|‹[{sub}]+›|˹[{sub}]+˺)' + r'|([{sup}]+)(?=√)|([{sup}]+(?!√)|‹[{sup}]+›|˹[{sup}]+˺)').format( su_=subs + sups, sub=subs, sup=sups) su_regex = re.compile(su_regex) def su_replace(m): esc, sub, root_sup, sup = m.groups() if esc is not None: return esc elif sub is not None: return '_{' + ''.join([c if (c in ['‹', '›', '˹', '˺']) else self.subscripts[c] for c in sub]) + '}' elif root_sup is not None: return ''.join([self.superscripts[c] for c in root_sup]) elif sup is not None: return '^{' + ''.join([c if (c in ['‹', '›', '˹', '˺']) else self.superscripts[c] for c in sup]) + '}' else: raise TypeError("Regex bug: this should never be reached") return [su_regex, su_replace]
:return: [compiled regex, function]
entailment
def _local_map(match, loc: str = 'lr') -> list: """ :param match: :param loc: str "l" or "r" or "lr" turns on/off left/right local area calculation :return: list list of the same size as the string + 2 it's the local map that counted { and } list can contain: None or int>=0 from the left of the operator match: in `b}a` if a:0 then }:0 and b:1 in `b{a` if a:0 then {:0 and b:-1(None) from the right of the operator match: in `a{b` if a:0 then {:0 and b:1 in `a}b` if a:0 then }:0 and b:-1(None) Map for +1 (needed for r'$') and -1 (needed for r'^') characters is also stored: +1 -> +1, -1 -> +2 """ s = match.string map_ = [None] * (len(s) + 2) if loc == 'l' or loc == 'lr': balance = 0 for i in reversed(range(0, match.start())): map_[i] = balance c, prev = s[i], (s[i - 1] if i > 0 else '') if (c == '}' or c == '˲') and prev != '\\': balance += 1 elif (c == '{' or c == '˱') and prev != '\\': balance -= 1 if balance < 0: break map_[-1] = balance if loc == 'r' or loc == 'lr': balance = 0 for i in range(match.end(), len(s)): map_[i] = balance c, prev = s[i], s[i - 1] if (c == '{' or c == '˱') and prev != '\\': balance += 1 elif (c == '}' or c == '˲') and prev != '\\': balance -= 1 if balance < 0: break map_[len(s)] = balance return map_
:param match: :param loc: str "l" or "r" or "lr" turns on/off left/right local area calculation :return: list list of the same size as the string + 2 it's the local map that counted { and } list can contain: None or int>=0 from the left of the operator match: in `b}a` if a:0 then }:0 and b:1 in `b{a` if a:0 then {:0 and b:-1(None) from the right of the operator match: in `a{b` if a:0 then {:0 and b:1 in `a}b` if a:0 then }:0 and b:-1(None) Map for +1 (needed for r'$') and -1 (needed for r'^') characters is also stored: +1 -> +1, -1 -> +2
entailment
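An illustrative trace of the balance map that _local_map builds; the class name used to reach the
helper is an assumption, so the call is left commented:

import re

s = 'b}a/x{c'
match = re.search('/', s)
# Walking left from '/': 'a' sits at balance 0, '}' at 0, and 'b' at 1 (it lies
# inside an outer-scope bracket); walking right: 'x' at 0, '{' at 0, and 'c'
# would be at -1, i.e. None in the map.
# loc_map = SugarTeX._local_map(match, 'lr')  # class name is an assumption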
def _operators_replace(self, string: str) -> str:
    """
    Searches for the first unary or binary operator (via self.op_regex,
    which has only one group that contains the operator), then replaces it
    (or escapes it if brackets do not match).

    Everything until:

    * space ' '
    * begin/end of the string
    * bracket from outer scope (like '{a/b}': term1=a term2=b)

    is considered a term (contents of matching brackets '{}' are ignored).

    Attributes
    ----------
    string: str
        string to replace
    """
    # noinspection PyShadowingNames
    def replace(string: str, start: int, end: int, substring: str) -> str:
        return string[0:start] + substring + string[end:len(string)]

    # noinspection PyShadowingNames
    def sub_pat(pat: Callable[[list], str] or str, terms: list) -> str:
        if isinstance(pat, str):
            return pat.format(*terms)
        else:
            return pat(terms)

    count = 0

    def check():
        nonlocal count
        count += 1
        if count > self.max_while:
            raise RuntimeError('Presumably while loop is stuck')

    # noinspection PyShadowingNames
    def null_replace(match) -> str:
        regex_terms = [gr for gr in match.groups() if gr is not None]
        op = regex_terms[0]
        terms = regex_terms[1:]
        return sub_pat(self.null_ops.ops[op]['pat'], terms)

    string = self.null_ops.regex.sub(null_replace, string)

    for ops, loc in [(self.pref_un_ops, 'r'),
                     (self.postf_un_ops, 'l'),
                     (self.bin_centr_ops, 'lr')]:
        count = 0
        match = ops.regex.search(string)
        while match:
            check()
            regex_terms = [gr for gr in match.groups() if gr is not None]
            op = regex_terms[0]
            loc_map = self._local_map(match, loc)
            lmatch, rmatch = None, None

            if loc == 'l' or loc == 'lr':
                for m in ops.ops[op]['pref'].finditer(string):
                    if m.end() <= match.start() and loc_map[m.end() - 1] == 0:
                        lmatch = m
                if lmatch is None:
                    string = replace(string, match.start(), match.end(),
                                     match.group(0).replace(op, '\\' + op))
                    match = ops.regex.search(string)
                    continue
                else:
                    term1 = string[lmatch.end():match.start()]

            if loc == 'r' or loc == 'lr':
                for m in ops.ops[op]['postf'].finditer(string):
                    if m.start() >= match.end() and loc_map[m.start()] == 0:
                        rmatch = m
                        break
                if rmatch is None:
                    string = replace(string, match.start(), match.end(),
                                     match.group(0).replace(op, '\\' + op))
                    match = ops.regex.search(string)
                    continue
                else:
                    term2 = string[match.end():rmatch.start()]

            if loc == 'l':
                # noinspection PyUnboundLocalVariable
                terms = list(lmatch.groups()) + [term1] + regex_terms[1:]
                start, end = lmatch.start(), match.end()
            elif loc == 'r':
                # noinspection PyUnboundLocalVariable
                terms = regex_terms[1:] + [term2] + list(rmatch.groups())
                start, end = match.start(), rmatch.end()
            elif loc == 'lr':
                terms = list(lmatch.groups()) + [term1] + regex_terms[1:] + [term2] + list(rmatch.groups())
                start, end = lmatch.start(), rmatch.end()
            else:  # this should never happen
                terms = regex_terms[1:]
                start, end = match.start(), match.end()

            string = replace(string, start, end, sub_pat(ops.ops[op]['pat'], terms))
            match = ops.regex.search(string)

    return string
Searches for the first unary or binary operator (via self.op_regex,
which has only one group that contains the operator), then replaces it
(or escapes it if brackets do not match).

Everything until:

* space ' '
* begin/end of the string
* bracket from outer scope (like '{a/b}': term1=a term2=b)

is considered a term (contents of matching brackets '{}' are ignored).

Attributes
----------
string: str
    string to replace
entailment
def replace(self, src: str) -> str:
    """
    Extends LaTeX syntax via regex preprocessing

    :param src: str
        LaTeX string
    :return: str
        New LaTeX string
    """
    if not self.readied:
        self.ready()

    # Brackets + simple pre replacements:
    src = self._dict_replace(self.simple_pre, src)

    # Superscripts and subscripts + pre regexps:
    for regex, replace in self.regex_pre:
        src = regex.sub(replace, src)

    # Unary and binary operators:
    src = self._operators_replace(src)

    # Loop regexps:
    src_prev = src
    for i in range(self.max_iter):
        for regex, replace in self.loop_regexps:
            src = regex.sub(replace, src)
        if src_prev == src:
            break
        else:
            src_prev = src

    # Post regexps:
    for regex, replace in self.regex_post:
        src = regex.sub(replace, src)

    # Simple post replacements:
    src = self._dict_replace(self.simple_post, src)

    # Escape characters:
    src = self.escapes_regex.sub(r'\1', src)

    return src
Extends LaTeX syntax via regex preprocessing

:param src: str
    LaTeX string
:return: str
    New LaTeX string
entailment
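End-to-end usage would look roughly like this; the class name SugarTeX is an assumption based on
the methods above, and the exact output depends on the configured superscript table:

# Hypothetical: preprocess extended-syntax LaTeX into standard LaTeX
pre = SugarTeX()               # class name assumed; ready() is invoked lazily by replace()
print(pre.replace('E=mc²'))    # the unicode superscript becomes '^{2}' via _su_scripts_regex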
def plot_gaps(plot, columns): """ plot % of gaps at each position """ from plot_window import window_plot_convolve as plot_window # plot_window([columns], len(columns)*.01, plot) plot_window([[100 - i for i in columns]], len(columns)*.01, plot)
plot % of gaps at each position
entailment
def strip_msa_100(msa, threshold, plot=False):
    """
    strip out columns of an MSA that represent gaps for X percent (threshold) of sequences
    """
    msa = [seq for seq in parse_fasta(msa)]
    columns = [[0, 0] for pos in msa[0][1]]  # [[#bases, #gaps], [#bases, #gaps], ...]
    for seq in msa:
        for position, base in enumerate(seq[1]):
            if base == '-' or base == '.':
                columns[position][1] += 1
            else:
                columns[position][0] += 1
    columns = [float(float(g)/float(g+b)*100) for b, g in columns]  # convert to percent gaps
    for seq in msa:
        stripped = []
        for position, base in enumerate(seq[1]):
            if columns[position] < threshold:
                stripped.append(base)
        yield [seq[0], ''.join(stripped)]
    if plot is not False:
        plot_gaps(plot, columns)
strip out columns of an MSA that represent gaps for X percent (threshold) of sequences
entailment
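An illustrative run; the file names are hypothetical, and the exact header format depends on what
parse_fasta yields:

# Drop alignment columns that are gaps in 50% or more of the sequences
with open('stripped.fa', 'w') as out:
    for header, seq in strip_msa_100('alignment.fa', threshold=50):
        out.write('>%s\n%s\n' % (header, seq))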
def sample_group(sid, groups): """ Iterate through all categories in an OrderedDict and return category name if SampleID present in that category. :type sid: str :param sid: SampleID from dataset. :type groups: OrderedDict :param groups: Returned dict from phylotoast.util.gather_categories() function. :return type: str :return: Category name used to classify `sid`. """ for name in groups: if sid in groups[name].sids: return name
Iterate through all categories in an OrderedDict and return category name if SampleID present in that category. :type sid: str :param sid: SampleID from dataset. :type groups: OrderedDict :param groups: Returned dict from phylotoast.util.gather_categories() function. :return type: str :return: Category name used to classify `sid`.
entailment
def combine_sets(*sets): """ Combine multiple sets to create a single larger set. """ combined = set() for s in sets: combined.update(s) return combined
Combine multiple sets to create a single larger set.
entailment
def unique_otuids(groups):
    """
    Get unique OTUIDs of each category.

    :type groups: Dict
    :param groups: {Category name: OTUIDs in category}

    :return type: dict
    :return: Dict keyed on category name and unique OTUIDs as values.
    """
    uniques = {key: set() for key in groups}
    group_values = list(groups.values())  # list() keeps the slicing Python 2/3 compatible

    for i, group in enumerate(groups):
        to_combine = group_values[:i] + group_values[i+1:]
        combined = combine_sets(*to_combine)
        uniques[group] = groups[group].difference(combined)

    return uniques
Get unique OTUIDs of each category. :type groups: Dict :param groups: {Category name: OTUIDs in category} :return type: dict :return: Dict keyed on category name and unique OTUIDs as values.
entailment
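A toy example of the set arithmetic, with hypothetical OTUIDs (an OrderedDict keeps the
key/value alignment explicit across Python versions):

from collections import OrderedDict

groups = OrderedDict([
    ('A', {'otu1', 'otu2', 'otu3'}),
    ('B', {'otu2', 'otu4'}),
    ('C', {'otu3', 'otu5'}),
])
unique_otuids(groups)
# -> {'A': {'otu1'}, 'B': {'otu4'}, 'C': {'otu5'}}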
def shared_otuids(groups):
    """
    Get shared OTUIDs between all unique combinations of groups.

    :type groups: Dict
    :param groups: {Category name: OTUIDs in category}

    :return type: dict
    :return: Dict keyed on group combination and their shared OTUIDs as values.
    """
    for g in sorted(groups):
        print("Number of OTUs in {0}: {1}".format(g, len(groups[g].results["otuids"])))
    number_of_categories = len(groups)
    shared = defaultdict()

    for i in range(2, number_of_categories+1):
        for j in combinations(sorted(groups), i):
            combo_name = " & ".join(list(j))
            # initialize the combo with a copy of the first group's OTUIDs
            shared[combo_name] = groups[j[0]].results["otuids"].copy()
            # iterate through the remaining groups, keeping only OTUIDs shared by
            # every group via set.intersection_update
            for grp in j[1:]:
                shared[combo_name].intersection_update(groups[grp].results["otuids"])
    return shared
Get shared OTUIDs between all unique combinations of groups. :type groups: Dict :param groups: {Category name: OTUIDs in category} :return type: dict :return: Dict keyed on group combination and their shared OTUIDs as values.
entailment
def write_uniques(path, prefix, uniques): """ Given a path, the method writes out one file for each group name in the uniques dictionary with the file name in the pattern PATH/prefix_group.txt with each file containing the unique OTUIDs found when comparing that group to all the other groups in uniques. :type path: str :param path: Output files will be saved in this PATH. :type prefix: str :param prefix: Prefix name added in front of output filename. :type uniques: dict :param uniques: Output from unique_otus() function. """ for group in uniques: fp = osp.join(path, "{}_{}.txt".format(prefix, group)) with open(fp, "w") as outf: outf.write("\n".join(uniques[group]))
Given a path, the method writes out one file for each group name in the uniques dictionary with the file name in the pattern PATH/prefix_group.txt with each file containing the unique OTUIDs found when comparing that group to all the other groups in uniques. :type path: str :param path: Output files will be saved in this PATH. :type prefix: str :param prefix: Prefix name added in front of output filename. :type uniques: dict :param uniques: Output from unique_otus() function.
entailment
def storeFASTA(fastaFNH):
    """
    Parse the records in a FASTA-format file by first reading the entire file into memory.

    :type fastaFNH: path to FASTA file or open file handle
    :param fastaFNH: The data source from which to parse the FASTA records.
                     Expects the input to resolve to a collection that can be iterated
                     through, such as an open file handle.

    :rtype: list
    :return: FASTA records containing entries for id, description and data.
    """
    fasta = file_handle(fastaFNH).read()
    return [FASTARecord(rec[0].split()[0], rec[0].split(None, 1)[1], "".join(rec[1:]))
            for rec in (x.strip().split("\n") for x in fasta.split(">")[1:])]
Parse the records in a FASTA-format file by first reading the entire file into memory.

:type fastaFNH: path to FASTA file or open file handle
:param fastaFNH: The data source from which to parse the FASTA records.
                 Expects the input to resolve to a collection that can be iterated
                 through, such as an open file handle.

:rtype: list
:return: FASTA records containing entries for id, description and data.
entailment
def parseFASTA(fastaFNH):
    """
    Parse the records in a FASTA-format file keeping the file open, and reading through
    one line at a time.

    :type fastaFNH: path to FASTA file or open file handle
    :param fastaFNH: The data source from which to parse the FASTA records.
                     Expects the input to resolve to a collection that can be iterated
                     through, such as an open file handle.

    :rtype: list
    :return: FASTA records containing entries for id, description and data.
    """
    recs = []
    seq = []
    seqID = ""
    descr = ""

    for line in file_handle(fastaFNH):
        line = line.strip()
        if not line:  # guard against blank lines before indexing line[0]
            continue
        if line[0] == ";":
            continue
        if line[0] == ">":
            # conclude previous record
            if seq:
                recs.append(FASTARecord(seqID, descr, "".join(seq)))
                seq = []
            # start new record
            line = line[1:].split(None, 1)
            seqID = line[0]
            descr = line[1] if len(line) > 1 else ""  # header may lack a description

        else:
            seq.append(line)

    # catch last seq in file
    if seq:
        recs.append(FASTARecord(seqID, descr, "".join(seq)))

    return recs
Parse the records in a FASTA-format file keeping the file open, and reading through
one line at a time.

:type fastaFNH: path to FASTA file or open file handle
:param fastaFNH: The data source from which to parse the FASTA records.
                 Expects the input to resolve to a collection that can be iterated
                 through, such as an open file handle.

:rtype: list
:return: FASTA records containing entries for id, description and data.
entailment
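Since FASTARecord is built positionally above, unpacking the triple avoids guessing its field
names; the file name is illustrative:

# Hypothetical usage: iterate parsed records as (id, description, sequence) triples
for seq_id, descr, data in parseFASTA('seqs.fasta'):
    print(seq_id, len(data))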
def parse_map_file(mapFNH):
    """
    Opens a QIIME mapping file and stores the contents in a dictionary keyed on SampleID
    (default) or a user-supplied one. The only required fields are SampleID,
    BarcodeSequence, LinkerPrimerSequence (in that order), and Description (which must be
    the final field).

    :type mapFNH: str
    :param mapFNH: Either the full path to the map file or an open file handle

    :rtype: tuple, dict
    :return: A tuple of header line for mapping file and a map associating each line of
             the mapping file with the appropriate sample ID (each value of the map also
             contains the sample ID). An OrderedDict is used for mapping so the returned
             map is guaranteed to have the same order as the input file.

    Example data:
    #SampleID BarcodeSequence LinkerPrimerSequence State Description
    11.V13 ACGCTCGACA GTTTGATCCTGGCTCAG Disease Rat_Oral
    """
    m = OrderedDict()
    map_header = None

    with file_handle(mapFNH) as mapF:
        for line in mapF:
            line = line.strip()  # strip the trailing newline so blank-line checks work
            if line.startswith("#SampleID"):
                map_header = line.split("\t")
            if not line or line.startswith("#"):
                continue
            line = line.split("\t")
            m[line[0]] = line

    return map_header, m
Opens a QIIME mapping file and stores the contents in a dictionary keyed on SampleID (default) or a user-supplied one. The only required fields are SampleID, BarcodeSequence, LinkerPrimerSequence (in that order), and Description (which must be the final field). :type mapFNH: str :param mapFNH: Either the full path to the map file or an open file handle :rtype: tuple, dict :return: A tuple of header line for mapping file and a map associating each line of the mapping file with the appropriate sample ID (each value of the map also contains the sample ID). An OrderedDict is used for mapping so the returned map is guaranteed to have the same order as the input file. Example data: #SampleID BarcodeSequence LinkerPrimerSequence State Description 11.V13 ACGCTCGACA GTTTGATCCTGGCTCAG Disease Rat_Oral
entailment
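Usage against the example data shown in the docstring (the file name is illustrative):

map_header, sample_map = parse_map_file('mapping.txt')
print(map_header[0])            # '#SampleID'
print(sample_map['11.V13'][3])  # 'Disease' -- the State column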