sentence1
stringlengths
52
3.87M
sentence2
stringlengths
1
47.2k
label
stringclasses
1 value
def htmlcolor_to_rgb(str_color):
    """Convert an HTML-style color string to RGB values.

    Args:
        str_color: Color in HTML format ('#RRGGBB').

    Returns:
        List of three RGB components, each a float in [0.0, 1.0].

    Raises:
        ValueError: If str_color is not in '#RRGGBB' format.
    """
    if not (str_color.startswith('#') and len(str_color) == 7):
        raise ValueError("Bad html color format. Expected: '#RRGGBB' ")
    # each pair of hex digits is one channel, scaled from 0-255 to 0.0-1.0
    return [int(part, 16) / 255.0
            for part in (str_color[1:3], str_color[3:5], str_color[5:])]
function to convert HTML-style color string to RGB values Args: s: Color in HTML format Returns: list of three RGB color components
entailment
def compile_string(self, data, source_path=None, is_two_file=True, post=None, lang=None):
    """Compile docstrings into HTML strings, with shortcode support.

    ``data`` names the modules to document; after shortcode extraction it is
    split into pdoc command-line arguments.  Returns the HTML produced by
    pdoc with shortcodes re-applied.
    """
    if not is_two_file:
        # metadata is embedded in the content; strip it off first
        _, data = self.split_metadata(data, None, lang)
    new_data, shortcodes = sc.extract_shortcodes(data)
    # The way pdoc generates output is a bit inflexible
    # NOTE(review): "tempaltes" looks like a typo for "templates", but the
    # on-disk plugin directory may use this exact spelling — verify before renaming.
    path_templates = os.path.join(self.plugin_path, "tempaltes")
    LOGGER.info(f"set path tempaltes to {path_templates}")
    with tempfile.TemporaryDirectory() as tmpdir:
        # pdoc writes into a per-module subdirectory of tmpdir
        subprocess.check_call(['pdoc', '--html', '--html-no-source', '--html-dir', tmpdir,
                               "--template-dir", path_templates] + shlex.split(new_data.strip()))
        fname = os.listdir(tmpdir)[0]
        tmd_subdir = os.path.join(tmpdir, fname)
        fname = os.listdir(tmd_subdir)[0]
        LOGGER.info(f"tmpdir = {tmd_subdir}, fname = {fname}")
        # read the generated HTML before the temporary directory is removed
        with open(os.path.join(tmd_subdir, fname), 'r', encoding='utf8') as inf:
            output = inf.read()
    return self.site.apply_shortcodes_uuid(output, shortcodes, filename=source_path, extra_context={'post': post})
Compile docstrings into HTML strings, with shortcode support.
entailment
def compile(self, source, dest, is_two_file=True, post=None, lang=None):
    """Compile the docstring source file into HTML and save it as dest."""
    makedirs(os.path.dirname(dest))
    with io.open(dest, "w+", encoding="utf8") as out_file:
        with io.open(source, "r", encoding="utf8") as in_file:
            raw = in_file.read()
        html, shortcode_deps = self.compile_string(raw, source, is_two_file, post, lang)
        out_file.write(html)
    # record shortcode dependencies on the post; without a post we can only complain
    if post is not None:
        post._depfile[dest] += shortcode_deps
    elif shortcode_deps:
        self.logger.error(
            "Cannot save dependencies for post {0} (post unknown)",
            source)
    return True
Compile the docstring into HTML and save as dest.
entailment
def create_post(self, path, **kw):
    """Create a new post file at *path*.

    Keyword args consumed here:
        content: body text of the post; defaults to empty.
        onefile: if True, write the metadata block into the same file.
        is_page: accepted but currently unused.
    All remaining keyword arguments are treated as post metadata.
    """
    content = kw.pop('content', None)
    onefile = kw.pop('onefile', False)
    # is_page is not used by create_post as of now.
    kw.pop('is_page', False)
    metadata = {}
    metadata.update(self.default_metadata)
    metadata.update(kw)
    makedirs(os.path.dirname(path))
    if content is None:
        # fix: the original crashed with AttributeError when no content was given
        content = ''
    if not content.endswith('\n'):
        content += '\n'
    with io.open(path, "w+", encoding="utf8") as fd:
        if onefile:
            fd.write(write_metadata(metadata, comment_wrap=False, site=self.site, compiler=self))
        fd.write(content)
Create a new post.
entailment
def _append_value(self, value, _file, _name):
    """Write a named top-level section, then dump *value* beneath it.

    Keyword arguments:
     * value - dict, content to be dumped
     * _file - FileIO, output file
     * _name - str, name of current content dict
    """
    # every section after the first is preceded by a blank line
    header = ('' if self._flag else '\n') + _name + '\n'
    _file.seek(self._sptr, os.SEEK_SET)
    _file.write(header)
    # fresh blank-branch counters before walking a new tree
    self._bctr = collections.defaultdict(int)
    self._append_branch(value, _file)
Call this function to write contents. Keyword arguments: * value - dict, content to be dumped * _file - FileIO, output file * _name - str, name of current content dict
entailment
def _append_array(self, value, _file):
    """Write array contents in tree format.

    Keyword arguments:
     * value - sequence, content to be dumped
     * _file - FileIO, output file
    """
    if not value:
        # an empty array renders the same as a None leaf
        self._append_none(None, _file)
        return
    _bptr = ''
    _tabs = ''
    _tlen = len(value) - 1
    if _tlen:
        # more than one item: each gets a branch pointer plus indentation
        _bptr = ' |-->'
        for _ in range(self._tctr + 1):
            _tabs += _TEMP_SPACES if self._bctr[_] else _TEMP_BRANCH
    else:
        _tabs = ''
    for (_nctr, _item) in enumerate(value):
        _text = '{tabs}{bptr}'.format(tabs=_tabs, bptr=_bptr)
        _file.write(_text)
        _item = self.object_hook(_item)
        _type = type(_item).__name__
        # dispatch on the hooked item's type name
        _MAGIC_TYPES[_type](self, _item, _file)
        # newline between items, nothing after the last
        _suff = '\n' if _nctr < _tlen else ''
        _file.write(_suff)
Call this function to write array contents. Keyword arguments: * value - dict, content to be dumped * _file - FileIO, output file
entailment
def _append_branch(self, value, _file):
    """Write branch (dict) contents in tree format.

    Keyword arguments:
     * value - dict, content to be dumped
     * _file - FileIO, output file
    """
    if not value:
        return
        # return self._append_none(None, _file)
    self._tctr += 1
    _vlen = len(value)
    for (_vctr, (_item, _text)) in enumerate(value.items()):
        _text = self.object_hook(_text)
        _type = type(_text).__name__
        # containers (and long byte strings) start on a new line;
        # simple leaves are written inline after ' ->'
        flag_dict = (_type == 'dict')
        flag_list = (_type == 'list' and (len(_text) > 1 or (len(_text) == 1 and type(_text[0]).__name__ == 'dict')))  # noqa pylint: disable=line-too-long
        flag_tuple = (_type == 'tuple' and (len(_text) > 1 or (len(_text) == 1 and type(_text[0]).__name__ == 'dict')))  # noqa pylint: disable=line-too-long
        flag_bytes = (_type == 'bytes' and len(_text) > 16)
        if any((flag_dict, flag_list, flag_tuple, flag_bytes)):
            _pref = '\n'
        else:
            _pref = ' ->'
        _labs = ''
        for _ in range(self._tctr):
            _labs += _TEMP_SPACES if self._bctr[_] else _TEMP_BRANCH
        _keys = '{labs} |-- {item}{pref}'.format(labs=_labs, item=_item, pref=_pref)
        _file.write(_keys)
        if _vctr == _vlen - 1:
            # last sibling at this depth: draw spaces instead of the branch bar below it
            self._bctr[self._tctr] = 1
        _MAGIC_TYPES[_type](self, _text, _file)
        _suff = '' if _type == 'dict' else '\n'
        _file.write(_suff)
    # restore counters for the parent level
    self._bctr[self._tctr] = 0
    self._tctr -= 1
Call this function to write branch contents. Keyword arguments: * value - dict, content to be dumped * _file - FileIO, output file
entailment
def _append_bytes(self, value, _file): """Call this function to write bytes contents. Keyword arguments: * value - dict, content to be dumped * _file - FileIO, output file """ # binascii.b2a_base64(value) -> plistlib.Data # binascii.a2b_base64(Data) -> value(bytes) if not value: self._append_none(None, _file) return if len(value) > 16: _tabs = '' for _ in range(self._tctr + 1): _tabs += _TEMP_SPACES if self._bctr[_] else _TEMP_BRANCH _list = [] for (_ictr, _item) in enumerate(textwrap.wrap(value.hex(), 32)): _bptr = ' ' if _ictr else ' |--> ' _text = ' '.join(textwrap.wrap(_item, 2)) _item = '{tabs}{bptr}{text}'.format(tabs=_tabs, bptr=_bptr, text=_text) _list.append(_item) _labs = '\n'.join(_list) else: _text = ' '.join(textwrap.wrap(value.hex(), 2)) _labs = ' {text}'.format(text=_text) _file.write(_labs)
Call this function to write bytes contents. Keyword arguments: * value - dict, content to be dumped * _file - FileIO, output file
entailment
def _append_number(self, value, _file):  # pylint: disable=no-self-use
    """Write a numeric leaf as ' <number>'.

    Keyword arguments:
     * value - number, content to be dumped
     * _file - FileIO, output file
    """
    _file.write(' {0}'.format(value))
Call this function to write number contents. Keyword arguments: * value - dict, content to be dumped * _file - FileIO, output file
entailment
def _append_value(self, value, _file, _name):
    """Write a named top-level JSON key, then serialise *value* under it.

    Keyword arguments:
     * value - dict, content to be dumped
     * _file - FileIO, output file
     * _name - str, name of current content dict
    """
    indent = '\t' * self._tctr
    # every key after the first at this level needs a separating comma
    comma = ',\n' if self._vctr[self._tctr] else ''
    _file.seek(self._sptr, os.SEEK_SET)
    _file.write('{0}{1}"{2}" :'.format(comma, indent, _name))
    self._vctr[self._tctr] += 1
    self._append_object(value, _file)
Call this function to write contents. Keyword arguments: * value - dict, content to be dumped * _file - FileIO, output file * _name - str, name of current content dict
entailment
def _append_array(self, value, _file):
    """Serialise a sequence as a JSON array.

    Keyword arguments:
     * value - sequence, content to be dumped
     * _file - FileIO, output file
    """
    _file.write(' [')
    self._tctr += 1
    for element in value:
        # elements after the first are comma-separated
        if self._vctr[self._tctr]:
            _file.write(',')
        self._vctr[self._tctr] += 1
        element = self.object_hook(element)
        _MAGIC_TYPES[type(element).__name__](self, element, _file)
    # leaving this nesting level: reset its element counter
    self._vctr[self._tctr] = 0
    self._tctr -= 1
    _file.write(' ]')
Call this function to write array contents. Keyword arguments: * value - dict, content to be dumped * _file - FileIO, output file
entailment
def _append_object(self, value, _file):
    """Serialise a mapping as a JSON object.

    Keyword arguments:
     * value - dict, content to be dumped
     * _file - FileIO, output file
    """
    _file.write(' {')
    self._tctr += 1
    for key, item in value.items():
        indent = '\t' * self._tctr
        # keys after the first at this level are comma-separated
        comma = ',' if self._vctr[self._tctr] else ''
        _file.write('{0}\n{1}"{2}" :'.format(comma, indent, key))
        self._vctr[self._tctr] += 1
        item = self.object_hook(item)
        _MAGIC_TYPES[type(item).__name__](self, item, _file)
    # leaving this nesting level: reset its member counter
    self._vctr[self._tctr] = 0
    self._tctr -= 1
    _file.write('\n{0}{1}'.format('\t' * self._tctr, '}'))
Call this function to write object contents. Keyword arguments: * value - dict, content to be dumped * _file - FileIO, output file
entailment
def _append_string(self, value, _file):  # pylint: disable=no-self-use
    """Write a JSON string leaf.

    Keyword arguments:
     * value - str, content to be dumped
     * _file - FileIO, output file
    """
    # Escape backslashes before quotes: the original only escaped '"', so a
    # value containing a backslash produced invalid JSON output.
    _text = str(value).replace('\\', '\\\\').replace('"', '\\"')
    _labs = ' "{text}"'.format(text=_text)
    _file.write(_labs)
Call this function to write string contents. Keyword arguments: * value - dict, content to be dumped * _file - FileIO, output file
entailment
def _append_bytes(self, value, _file):  # pylint: disable=no-self-use
    """Write a bytes leaf as a quoted, space-separated hex string.

    Keyword arguments:
     * value - bytes, content to be dumped
     * _file - FileIO, output file
    """
    # binascii.b2a_base64(value) -> plistlib.Data
    # binascii.a2b_base64(Data) -> value(bytes)
    hex_pairs = textwrap.wrap(value.hex(), 2)
    _file.write(' "{0}"'.format(' '.join(hex_pairs)))
Call this function to write bytes contents. Keyword arguments: * value - dict, content to be dumped * _file - FileIO, output file
entailment
def _append_date(self, value, _file):  # pylint: disable=no-self-use
    """Write a datetime leaf as a quoted ISO-8601 Zulu string.

    Keyword arguments:
     * value - datetime, content to be dumped
     * _file - FileIO, output file
    """
    stamp = value.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
    _file.write(' "{0}"'.format(stamp))
Call this function to write date contents. Keyword arguments: * value - dict, content to be dumped * _file - FileIO, output file
entailment
def start(self, verbose=None, end_in_new_line=None):
    """Start the stopwatch if it is paused.

    Starting an already-running stopwatch is a no-op.

    Parameters
    ----------
    verbose : Optional[bool]
        Whether to log. If `None`, use `verbose_start` set during
        initialization.
    end_in_new_line : Optional[bool]
        If `False`, prevent logging the trailing new line. If `None`, use
        `end_in_new_line` set during initialization.
    """
    currently_running = self._start_time is not None and self._end_time is None
    if currently_running:
        return self
    verbose = self.verbose_start if verbose is None else verbose
    if verbose:
        if end_in_new_line is None:
            end_in_new_line = self.end_in_new_line
        if end_in_new_line:
            self.log(self.description)
        else:
            self.log(self.description, end="", flush=True)
    self._end_time = None
    self._start_time = datetime.datetime.now()
    return self
Start the stopwatch if it is paused. If the stopwatch is already started, then nothing will happen. Parameters ---------- verbose : Optional[bool] Whether to log. If `None`, use `verbose_start` set during initialization. end_in_new_line : Optional[bool] If `False`, prevent logging the trailing new line. If `None`, use `end_in_new_line` set during initialization.
entailment
def pause(self):
    """Pause the stopwatch.

    Pausing an already-paused stopwatch is a no-op.
    """
    already_paused = self._end_time is not None
    if already_paused:
        return
    self._end_time = datetime.datetime.now()
    # accumulate the running split into the elapsed-time counter
    self._elapsed_time += self._end_time - self._start_time
Pause the stopwatch. If the stopwatch is already paused, nothing will happen.
entailment
def get_elapsed_time(self):
    """Return the elapsed time of the current split."""
    paused = self._start_time is None or self._end_time is not None
    if paused:
        return self._elapsed_time
    # still running: add the time since the last start
    return self._elapsed_time + (datetime.datetime.now() - self._start_time)
Get the elapsed time of the current split.
entailment
def split(self, verbose=None, end_in_new_line=None):
    """Record the elapsed time of the current split and restart.

    The current elapsed time is appended to :attr:`split_elapsed_time`.
    A paused stopwatch stays paused; a running one keeps running.

    Parameters
    ----------
    verbose : Optional[bool]
        Whether to log. If `None`, use `verbose_end` set during
        initialization.
    end_in_new_line : Optional[bool]
        Whether to log the `description`. If `None`, use `end_in_new_line`
        set during initialization.
    """
    elapsed = self.get_elapsed_time()
    self.split_elapsed_time.append(elapsed)
    self._cumulative_elapsed_time += elapsed
    self._elapsed_time = datetime.timedelta()
    verbose = self.verbose_end if verbose is None else verbose
    if verbose:
        if end_in_new_line is None:
            end_in_new_line = self.end_in_new_line
        if end_in_new_line:
            self.log("{} done in {}".format(self.description, elapsed))
        else:
            self.log(" done in {}".format(elapsed))
    self._start_time = datetime.datetime.now()
Save the elapsed time of the current split and restart the stopwatch. The current elapsed time will be appended to :attr:`split_elapsed_time`. If the stopwatch is paused, then it will remain paused. Otherwise, it will continue running. Parameters ---------- verbose : Optional[bool] Whether to log. If `None`, use `verbose_end` set during initialization. end_in_new_line : Optional[bool] Whether to log the `description`. If `None`, use `end_in_new_line` set during initialization.
entailment
def reset(self):
    """Reset the stopwatch.

    Clears the start/end marks, both elapsed-time accumulators and the
    list of recorded splits.
    """
    self._start_time = self._end_time = None
    zero = datetime.timedelta()
    self._elapsed_time = zero
    self._cumulative_elapsed_time = zero
    self.split_elapsed_time = []
Reset the stopwatch.
entailment
def bbduk_trim(forward_in, forward_out, reverse_in='NA', reverse_out='NA', returncmd=False, **kwargs):
    """
    Wrapper for using bbduk to quality trim reads. Contains arguments used in OLC Assembly Pipeline,
    but these can be overwritten by using keyword parameters.
    :param forward_in: Forward reads you want to quality trim.
    :param forward_out: Output forward reads.
    :param reverse_in: Reverse input reads. Don't need to be specified if _R1/_R2 naming convention is used.
    :param reverse_out: Reverse output reads. Don't need to be specified if _R1/_R2 convention is used.
    :param returncmd: If set to true, function will return the cmd string passed to subprocess as a third value.
    :param kwargs: Other arguments to give to bbduk in parameter=argument format. See bbduk documentation for full list.
    :return: out and err: stdout string and stderr string from running bbduk.
    :raises FileNotFoundError: if bbduk.sh is not found on the $PATH.
    :raises ValueError: if paired output names cannot be derived from the inputs.
    """
    options = kwargs_to_string(kwargs)
    # bail out early with a clear message if bbduk is not installed
    try:
        subprocess.check_output('which bbduk.sh'.split()).decode('utf-8')
    except subprocess.CalledProcessError:
        # fix: "Plase" -> "Please" in the user-facing error message
        print('ERROR: Could not find bbduk. \nPlease check that the bbtools package is installed and on your $PATH.\n\n')
        raise FileNotFoundError
    # both paired branches run the exact same command; share one template
    paired_template = ('bbduk.sh in1={f_in} in2={r_in} out1={f_out} out2={r_out} qtrim=w trimq=20 k=25 minlength=50 '
                       'forcetrimleft=15 ref=adapters overwrite hdist=1 tpe tbo{optn}')
    if os.path.isfile(forward_in.replace('_R1', '_R2')) and reverse_in == 'NA' and '_R1' in forward_in:
        # auto-detect reverse reads via the _R1/_R2 naming convention
        reverse_in = forward_in.replace('_R1', '_R2')
        if reverse_out == 'NA':
            if '_R1' in forward_out:
                reverse_out = forward_out.replace('_R1', '_R2')
            else:
                raise ValueError('If you do not specify reverse_out, forward_out must contain R1.\n\n')
        cmd = paired_template.format(f_in=forward_in, r_in=reverse_in, f_out=forward_out,
                                     r_out=reverse_out, optn=options)
    elif reverse_in == 'NA':
        # single-end mode
        cmd = ('bbduk.sh in={f_in} out={f_out} qtrim=w trimq=20 k=25 minlength=50 forcetrimleft=15'
               ' ref=adapters overwrite hdist=1 tpe tbo{optn}').format(f_in=forward_in,
                                                                       f_out=forward_out, optn=options)
    else:
        if reverse_out == 'NA':
            raise ValueError('Reverse output reads must be specified.')
        cmd = paired_template.format(f_in=forward_in, r_in=reverse_in, f_out=forward_out,
                                     r_out=reverse_out, optn=options)
    out, err = run_subprocess(cmd)
    if returncmd:
        return out, err, cmd
    return out, err
Wrapper for using bbduk to quality trim reads. Contains arguments used in OLC Assembly Pipeline, but these can be overwritten by using keyword parameters. :param forward_in: Forward reads you want to quality trim. :param returncmd: If set to true, function will return the cmd string passed to subprocess as a third value. :param forward_out: Output forward reads. :param reverse_in: Reverse input reads. Don't need to be specified if _R1/_R2 naming convention is used. :param reverse_out: Reverse output reads. Don't need to be specified if _R1/_R2 convention is used. :param kwargs: Other arguments to give to bbduk in parameter=argument format. See bbduk documentation for full list. :return: out and err: stdout string and stderr string from running bbduk.
entailment
def genome_size(peaks_file, haploid=True):
    """
    Finds the genome size of an organism, based on the peaks file created by kmercountexact.sh
    :param peaks_file: Path to peaks file created by kmercountexact.
    :param haploid: Set to True if organism of interest is haploid, False if not. Default True.
    :return: size of genome, as an int. If size could not be found, return will be 0.
    """
    marker = '#haploid_genome_size' if haploid else '#genome_size'
    size = 0
    with open(peaks_file) as peaks:
        # scan the whole file; the last matching line wins, as before
        for line in peaks:
            if marker in line:
                size = int(line.split()[1])
    return size
Finds the genome size of an organism, based on the peaks file created by kmercountexact.sh :param peaks_file: Path to peaks file created by kmercountexact. :param haploid: Set to True if organism of interest is haploid, False if not. Default True. :return: size of genome, as an int. If size could not be found, return will be 0.
entailment
def DownloadAccount(self, next_page_token=None, max_results=None):
    """Downloads multiple accounts from Gitkit server.

    Args:
      next_page_token: string, pagination token.
      max_results: pagination size.

    Returns:
      A tuple of (next page token, array of accounts).
    """
    # only include pagination parameters that were actually supplied
    request = {key: val
               for key, val in (('nextPageToken', next_page_token),
                                ('maxResults', max_results))
               if val}
    response = self._InvokeGitkitApi('downloadAccount', request)
    # pylint does not recognize the return type of simplejson.loads
    # pylint: disable=maybe-no-member
    return response.get('nextPageToken', None), response.get('users', {})
Downloads multiple accounts from Gitkit server. Args: next_page_token: string, pagination token. max_results: pagination size. Returns: An array of accounts.
entailment
def UploadAccount(self, hash_algorithm, hash_key, accounts):
    """Uploads multiple accounts to Gitkit server.

    Args:
      hash_algorithm: string, algorithm to hash password.
      hash_key: string, base64-encoded key of the algorithm.
      accounts: array of accounts to be uploaded.

    Returns:
      Response of the API.
    """
    # pylint does not recognize the return type of simplejson.loads
    # pylint: disable=maybe-no-member
    return self._InvokeGitkitApi('uploadAccount', {
        'hashAlgorithm': hash_algorithm,
        'signerKey': hash_key,
        'users': accounts,
    })
Uploads multiple accounts to Gitkit server. Args: hash_algorithm: string, algorithm to hash password. hash_key: string, base64-encoded key of the algorithm. accounts: array of accounts to be uploaded. Returns: Response of the API.
entailment
def GetPublicCert(self):
    """Download Gitkit public cert.

    Returns:
      dict of public certs.

    Raises:
      GitkitServerError: if the cert endpoint does not answer with HTTP 200.
    """
    cert_url = self.google_api_url + 'publicKeys'
    resp, content = self.http.request(cert_url)
    if resp.status != 200:
        raise errors.GitkitServerError('Error response for cert url: %s' % content)
    return simplejson.loads(content)
Download Gitkit public cert. Returns: dict of public certs.
entailment
def _InvokeGitkitApi(self, method, params=None, need_service_account=True):
    """Invokes Gitkit API, with optional access token for service account.

    Args:
      method: string, the api method name.
      params: dict of optional parameters for the API.
      need_service_account: false if service account is not needed.

    Raises:
      GitkitClientError: if the request is bad.
      GitkitServerError: if Gitkit can not handle the request.

    Returns:
      API response as dict.
    """
    body = simplejson.dumps(params) if params else None
    req = urllib_request.Request(self.google_api_url + method)
    req.add_header('Content-type', 'application/json')
    if need_service_account:
        # prefer oauth2 credentials; fall back to a locally-signed assertion
        if self.credentials:
            access_token = self.credentials.get_access_token().access_token
        elif self.service_account_email and self.service_account_key:
            access_token = self._GetAccessToken()
        else:
            raise errors.GitkitClientError('Missing service account credentials')
        req.add_header('Authorization', 'Bearer ' + access_token)
    payload = body.encode('utf-8') if body else None
    try:
        raw_response = urllib_request.urlopen(req, payload).read()
    except urllib_request.HTTPError as err:
        if err.code != 400:
            raise
        # HTTP 400 responses still carry a Gitkit error body worth parsing
        raw_response = err.read()
    return self._CheckGitkitError(raw_response)
Invokes Gitkit API, with optional access token for service account. Args: method: string, the api method name. params: dict of optional parameters for the API. need_service_account: false if service account is not needed. Raises: GitkitClientError: if the request is bad. GitkitServerError: if Gitkit can not handle the request. Returns: API response as dict.
entailment
def _GetAccessToken(self):
    """Gets oauth2 access token for Gitkit API using service account.

    Returns:
      string, oauth2 access token.
    """
    params = {
        'assertion': self._GenerateAssertion(),
        'grant_type': 'urn:ietf:params:oauth:grant-type:jwt-bearer',
    }
    try:
        body = parse.urlencode(params)
    except AttributeError:
        # Python 2 fallback: urllib.parse is not available there
        body = urllib.urlencode(params)
    req = urllib_request.Request(RpcHelper.TOKEN_ENDPOINT)
    req.add_header('Content-type', 'application/x-www-form-urlencoded')
    raw_response = urllib_request.urlopen(req, body.encode('utf-8'))
    return simplejson.loads(raw_response.read())['access_token']
Gets oauth2 access token for Gitkit API using service account. Returns: string, oauth2 access token.
entailment
def _GenerateAssertion(self):
    """Generates the signed assertion that will be used in the request.

    Returns:
      string, signed Json Web Token (JWT) assertion.
    """
    issued_at = int(time.time())
    claims = {
        'aud': RpcHelper.TOKEN_ENDPOINT,
        'scope': 'https://www.googleapis.com/auth/identitytoolkit',
        'iat': issued_at,
        'exp': issued_at + RpcHelper.MAX_TOKEN_LIFETIME_SECS,
        'iss': self.service_account_email,
    }
    signer = crypt.Signer.from_string(self.service_account_key)
    return crypt.make_signed_jwt(signer, claims)
Generates the signed assertion that will be used in the request. Returns: string, signed Json Web Token (JWT) assertion.
entailment
def _CheckGitkitError(self, raw_response):
    """Raises error if API invocation failed.

    Args:
      raw_response: string, the http response.

    Raises:
      GitkitClientError: if the error code is 4xx.
      GitkitServerError: if the response is malformed.

    Returns:
      Successful response as dict.
    """
    try:
        response = simplejson.loads(raw_response)
        if 'error' not in response:
            return response
        error = response['error']
        if 'code' in error:
            # 4xx means the caller's request was bad; anything else is server-side
            if str(error['code']).startswith('4'):
                raise errors.GitkitClientError(error['message'])
            raise errors.GitkitServerError(error['message'])
    except simplejson.JSONDecodeError:
        pass
    # unparseable body, or an 'error' object without a 'code' field
    raise errors.GitkitServerError('null error code from Gitkit server')
Raises error if API invocation failed. Args: raw_response: string, the http response. Raises: GitkitClientError: if the error code is 4xx. GitkitServerError: if the response if malformed. Returns: Successful response as dict.
entailment
def FromDictionary(cls, dictionary):
    """Initializes from user specified dictionary.

    Args:
      dictionary: dict of user specified attributes

    Returns:
      GitkitUser object
    """
    if 'user_id' in dictionary:
        raise errors.GitkitClientError('use localId instead')
    # localId and email are mandatory attributes
    for required in ('localId', 'email'):
        if required not in dictionary:
            raise errors.GitkitClientError('must specify %s' % required)
    return cls(decode=False, **dictionary)
Initializes from user specified dictionary. Args: dictionary: dict of user specified attributes Returns: GitkitUser object
entailment
def ToRequest(self):
    """Converts to gitkit api request parameter dict.

    Returns:
      Dict, containing non-empty user attributes.
    """
    param = {}
    # plain attributes are copied only when truthy
    for key, attr in (('email', self.email),
                      ('localId', self.user_id),
                      ('displayName', self.name),
                      ('photoUrl', self.photo_url)):
        if attr:
            param[key] = attr
    # email_verified is a tri-state flag: False must still be sent
    if self.email_verified is not None:
        param['emailVerified'] = self.email_verified
    if self.password_hash:
        param['passwordHash'] = base64.urlsafe_b64encode(self.password_hash)
    if self.salt:
        param['salt'] = base64.urlsafe_b64encode(self.salt)
    if self.provider_info:
        param['providerUserInfo'] = self.provider_info
    return param
Converts to gitkit api request parameter dict. Returns: Dict, containing non-empty user attributes.
entailment
def VerifyGitkitToken(self, jwt):
    """Verifies a Gitkit token string.

    Args:
      jwt: string, the token to be checked

    Returns:
      GitkitUser, if the token is valid. None otherwise.
    """
    certs = self.rpc_helper.GetPublicCert()
    crypt.MAX_TOKEN_LIFETIME_SECS = 30 * 86400  # 30 days
    parsed = None
    # the token may be addressed to either the project id or the client id
    for aud in filter(lambda x: x is not None, [self.project_id, self.client_id]):
        try:
            parsed = crypt.verify_signed_jwt_with_certs(jwt, certs, aud)
        except crypt.AppIdentityError as e:
            # fix: str(e) instead of e.message — BaseException.message was
            # removed in Python 3, so the original raised AttributeError here.
            if "Wrong recipient" not in str(e):
                return None
    if parsed:
        return GitkitUser.FromToken(parsed)
    return None
Verifies a Gitkit token string. Args: jwt: string, the token to be checked Returns: GitkitUser, if the token is valid. None otherwise.
entailment
def GetUserByEmail(self, email):
    """Gets user info by email.

    Args:
      email: string, the user email.

    Returns:
      GitkitUser, containing the user info.
    """
    return GitkitUser.FromApiResponse(self.rpc_helper.GetAccountInfoByEmail(email))
Gets user info by email. Args: email: string, the user email. Returns: GitkitUser, containing the user info.
entailment
def GetUserById(self, local_id):
    """Gets user info by id.

    Args:
      local_id: string, the user id at Gitkit server.

    Returns:
      GitkitUser, containing the user info.
    """
    return GitkitUser.FromApiResponse(self.rpc_helper.GetAccountInfoById(local_id))
Gets user info by id. Args: local_id: string, the user id at Gitkit server. Returns: GitkitUser, containing the user info.
entailment
def UploadUsers(self, hash_algorithm, hash_key, accounts):
    """Uploads multiple users to Gitkit server.

    Args:
      hash_algorithm: string, the hash algorithm.
      hash_key: array, raw key of the hash algorithm.
      accounts: list of GitkitUser.

    Returns:
      A dict of failed accounts. The key is the index of the 'accounts' list,
      starting from 0.
    """
    encoded_key = base64.urlsafe_b64encode(hash_key)
    payload = [GitkitUser.ToRequest(account) for account in accounts]
    return self.rpc_helper.UploadAccount(hash_algorithm, encoded_key, payload)
Uploads multiple users to Gitkit server. Args: hash_algorithm: string, the hash algorithm. hash_key: array, raw key of the hash algorithm. accounts: list of GitkitUser. Returns: A dict of failed accounts. The key is the index of the 'accounts' list, starting from 0.
entailment
def GetAllUsers(self, pagination_size=10):
    """Gets all user info from Gitkit server.

    Args:
      pagination_size: int, how many users should be returned per request.
          The account info are retrieved in pagination.

    Yields:
      A generator to iterate all users.
    """
    next_page_token = None
    while True:
        next_page_token, accounts = self.rpc_helper.DownloadAccount(
            next_page_token, pagination_size)
        # an empty page means we have exhausted all accounts
        if not accounts:
            break
        for account in accounts:
            yield GitkitUser.FromApiResponse(account)
Gets all user info from Gitkit server. Args: pagination_size: int, how many users should be returned per request. The account info are retrieved in pagination. Yields: A generator to iterate all users.
entailment
def GetOobResult(self, param, user_ip, gitkit_token=None):
    """Gets out-of-band code for ResetPassword/ChangeEmail request.

    Args:
      param: dict of HTTP POST params
      user_ip: string, end user's IP address
      gitkit_token: string, the gitkit token if user logged in

    Returns:
      A dict of {
        email: user email who initializes the request
        new_email: the requested new email, for ChangeEmail action only
        oob_link: the generated link to be send to user's email
        oob_code: the one time out-of-band code
        action: OobAction
        response_body: the http body to be returned to Gitkit widget
      }
    """
    if 'action' in param:
        try:
            if param['action'] == GitkitClient.RESET_PASSWORD_ACTION:
                request = self._PasswordResetRequest(param, user_ip)
                oob_code, oob_link = self._BuildOobLink(request, param['action'])
                return {
                    'action': GitkitClient.RESET_PASSWORD_ACTION,
                    'email': param['email'],
                    'oob_link': oob_link,
                    'oob_code': oob_code,
                    'response_body': simplejson.dumps({'success': True})
                }
            elif param['action'] == GitkitClient.CHANGE_EMAIL_ACTION:
                # changing email requires an authenticated user
                if not gitkit_token:
                    return self._FailureOobResponse('login is required')
                request = self._ChangeEmailRequest(param, user_ip, gitkit_token)
                oob_code, oob_link = self._BuildOobLink(request, param['action'])
                return {
                    'action': GitkitClient.CHANGE_EMAIL_ACTION,
                    'email': param['oldEmail'],
                    'new_email': param['newEmail'],
                    'oob_link': oob_link,
                    'oob_code': oob_code,
                    'response_body': simplejson.dumps({'success': True})
                }
        except errors.GitkitClientError as error:
            return self._FailureOobResponse(error.value)
    # no 'action' key, or an action we do not recognize
    return self._FailureOobResponse('unknown request type')
Gets out-of-band code for ResetPassword/ChangeEmail request. Args: param: dict of HTTP POST params user_ip: string, end user's IP address gitkit_token: string, the gitkit token if user logged in Returns: A dict of { email: user email who initializes the request new_email: the requested new email, for ChangeEmail action only oob_link: the generated link to be send to user's email oob_code: the one time out-of-band code action: OobAction response_body: the http body to be returned to Gitkit widget }
entailment
def _BuildOobLink(self, param, mode):
    """Builds out-of-band URL.

    Gitkit API GetOobCode() is called and the returning code is combined
    with Gitkit widget URL to building the out-of-band url.

    Args:
      param: dict of request.
      mode: string, Gitkit widget mode to handle the oob action after user
          clicks the oob url in the email.

    Raises:
      GitkitClientError: if oob code is not returned.

    Returns:
      A tuple of (oob code, oob url).
    """
    code = self.rpc_helper.GetOobCode(param)
    if code:
        parsed = list(parse.urlparse(self.widget_url))
        # merge the oob mode/code into the widget URL's existing query string
        query = dict(parse.parse_qsl(parsed[4]))
        query.update({'mode': mode, 'oobCode': code})
        try:
            parsed[4] = parse.urlencode(query)
        except AttributeError:
            # Python 2 fallback where urllib.parse is unavailable
            parsed[4] = urllib.urlencode(query)
        return code, parse.urlunparse(parsed)
    raise errors.GitkitClientError('invalid request')
Builds out-of-band URL. Gitkit API GetOobCode() is called and the returning code is combined with Gitkit widget URL to building the out-of-band url. Args: param: dict of request. mode: string, Gitkit widget mode to handle the oob action after user clicks the oob url in the email. Raises: GitkitClientError: if oob code is not returned. Returns: A string of oob url.
entailment
def run_subprocess(command): """ command is the command to run, as a string. runs a subprocess, returns stdout and stderr from the subprocess as strings. """ x = Popen(command, shell=True, stdout=PIPE, stderr=PIPE) out, err = x.communicate() out = out.decode('utf-8') err = err.decode('utf-8') return out, err
command is the command to run, as a string. runs a subprocess, returns stdout and stderr from the subprocess as strings.
entailment
def kwargs_to_string(kwargs): """ Given a set of kwargs, turns them into a string which can then be passed to a command. :param kwargs: kwargs from a function call. :return: outstr: A string, which is '' if no kwargs were given, and the kwargs in string format otherwise. """ outstr = '' for arg in kwargs: outstr += ' -{} {}'.format(arg, kwargs[arg]) return outstr
Given a set of kwargs, turns them into a string which can then be passed to a command. :param kwargs: kwargs from a function call. :return: outstr: A string, which is '' if no kwargs were given, and the kwargs in string format otherwise.
entailment
def read_mash_output(result_file): """ :param result_file: Tab-delimited result file generated by mash dist. :return: mash_results: A list with each entry in the result file as an entry, with attributes reference, query, distance, pvalue, and matching_hash """ with open(result_file) as handle: lines = handle.readlines() mash_results = list() for line in lines: result = MashResult(line) mash_results.append(result) return mash_results
:param result_file: Tab-delimited result file generated by mash dist. :return: mash_results: A list with each entry in the result file as an entry, with attributes reference, query, distance, pvalue, and matching_hash
entailment
def read_mash_screen(screen_result): """ :param screen_result: Tab-delimited result file generated by mash screen. :return: results: A list with each line in the result file as an entry, with attributes identity, shared_hashes, median_multiplicity, pvalue, and query_id """ with open(screen_result) as handle: lines = handle.readlines() results = list() for line in lines: result = ScreenResult(line) results.append(result) return results
:param screen_result: Tab-delimited result file generated by mash screen. :return: results: A list with each line in the result file as an entry, with attributes identity, shared_hashes, median_multiplicity, pvalue, and query_id
entailment
def run_cmd(cmd): """ Runs a command using subprocess, and returns both the stdout and stderr from that command If exit code from command is non-zero, raises subproess.CalledProcessError :param cmd: command to run as a string, as it would be called on the command line :return: out, err: Strings that are the stdout and stderr from the command called. """ p = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE) out, err = p.communicate() out = out.decode('utf-8') err = err.decode('utf-8') if p.returncode != 0: raise subprocess.CalledProcessError(p.returncode, cmd=cmd) return out, err
Runs a command using subprocess, and returns both the stdout and stderr from that command If exit code from command is non-zero, raises subproess.CalledProcessError :param cmd: command to run as a string, as it would be called on the command line :return: out, err: Strings that are the stdout and stderr from the command called.
entailment
def write_to_logfile(logfile, out, err, cmd): """ Writes stdout, stderr, and a command to a logfile :param logfile: Path to file to write output to. :param out: Stdout of program called, as a string :param err: Stderr of program called, as a string :param cmd: command that was used """ with open(logfile, 'a+') as outfile: outfile.write('Command used: {}\n\n'.format(cmd)) outfile.write('STDOUT: {}\n\n'.format(out)) outfile.write('STDERR: {}\n\n'.format(err))
Writes stdout, stderr, and a command to a logfile :param logfile: Path to file to write output to. :param out: Stdout of program called, as a string :param err: Stderr of program called, as a string :param cmd: command that was used
entailment
def find_paired_reads(fastq_directory, forward_id='_R1', reverse_id='_R2'): """ Looks at a directory to try to find paired fastq files. Should be able to find anything fastq. :param fastq_directory: Complete path to directory containing fastq files. :param forward_id: Identifier for forward reads. Default R1. :param reverse_id: Identifier for reverse reads. Default R2. :return: List containing pairs of fastq files, in format [[forward_1, reverse_1], [forward_2, reverse_2]], etc. """ pair_list = list() fastq_files = glob.glob(os.path.join(fastq_directory, '*.f*q*')) for name in sorted(fastq_files): if forward_id in name and os.path.isfile(name.replace(forward_id, reverse_id)): pair_list.append([name, name.replace(forward_id, reverse_id)]) return pair_list
Looks at a directory to try to find paired fastq files. Should be able to find anything fastq. :param fastq_directory: Complete path to directory containing fastq files. :param forward_id: Identifier for forward reads. Default R1. :param reverse_id: Identifier for reverse reads. Default R2. :return: List containing pairs of fastq files, in format [[forward_1, reverse_1], [forward_2, reverse_2]], etc.
entailment
def find_unpaired_reads(fastq_directory, forward_id='_R1', reverse_id='_R2', find_fasta=False): """ Looks at a directory to find unpaired fastq files. :param fastq_directory: Complete path to directory containing fastq files. :param forward_id: Identifier for forward reads. Default _R1. :param reverse_id: Identifier for forward reads. Default _R2. :param find_fasta: If False, will look for fastq files. Otherwise, looks for Fasta files. :return: List of files that appear to be unpaired reads. """ read_list = list() if find_fasta is False: fastq_files = glob.glob(os.path.join(fastq_directory, '*.f*q*')) else: # Very misnamed! fastq_files = glob.glob(os.path.join(fastq_directory, '*.f*a*')) for name in sorted(fastq_files): # Iterate through files, adding them to our list of unpaired reads if: # 1) They don't have the forward identifier or the reverse identifier in their name. # 2) They have forward but the reverse isn't there. # 3) They have reverse but the forward isn't there. if forward_id not in name and reverse_id not in name: read_list.append([name]) elif forward_id in name and not os.path.isfile(name.replace(forward_id, reverse_id)): read_list.append([name]) elif reverse_id in name and not os.path.isfile(name.replace(reverse_id, forward_id)): read_list.append([name]) return read_list
Looks at a directory to find unpaired fastq files. :param fastq_directory: Complete path to directory containing fastq files. :param forward_id: Identifier for forward reads. Default _R1. :param reverse_id: Identifier for forward reads. Default _R2. :param find_fasta: If False, will look for fastq files. Otherwise, looks for Fasta files. :return: List of files that appear to be unpaired reads.
entailment
def find_genusspecific_allele_list(profiles_file, target_genus): """ A new way of making our specific databases: Make our profiles file have lists of every gene/allele present for each genus instead of just excluding a few genes for each. This way, should have much smaller databases while managing to make ConFindr a decent bit faster (maybe) :param profiles_file: Path to profiles file. :param target_genus: :return: List of gene/allele combinations that should be part of species-specific database. """ alleles = list() with open(profiles_file) as f: lines = f.readlines() for line in lines: line = line.rstrip() genus = line.split(':')[0] if genus == target_genus: alleles = line.split(':')[1].split(',')[:-1] return alleles
A new way of making our specific databases: Make our profiles file have lists of every gene/allele present for each genus instead of just excluding a few genes for each. This way, should have much smaller databases while managing to make ConFindr a decent bit faster (maybe) :param profiles_file: Path to profiles file. :param target_genus: :return: List of gene/allele combinations that should be part of species-specific database.
entailment
def setup_allelespecific_database(fasta_file, database_folder, allele_list): """ Since some genera have some rMLST genes missing, or two copies of some genes, genus-specific databases are needed. This will take only the alleles known to be part of each genus and write them to a genus-specific file. :param database_folder: Path to folder where rMLST_combined is stored. :param fasta_file: Path to fasta file to write allelespecific database to. :param allele_list: allele list generated by find_genusspecific_allele_list """ index = SeqIO.index(os.path.join(database_folder, 'rMLST_combined.fasta'), 'fasta') seqs = list() for s in allele_list: try: seqs.append(index[s]) except KeyError: logging.warning('Tried to add {} to allele-specific database, but could not find it.'.format(s)) SeqIO.write(seqs, fasta_file, 'fasta')
Since some genera have some rMLST genes missing, or two copies of some genes, genus-specific databases are needed. This will take only the alleles known to be part of each genus and write them to a genus-specific file. :param database_folder: Path to folder where rMLST_combined is stored. :param fasta_file: Path to fasta file to write allelespecific database to. :param allele_list: allele list generated by find_genusspecific_allele_list
entailment
def extract_rmlst_genes(pair, database, forward_out, reverse_out, threads=12, logfile=None): """ Given a pair of reads and an rMLST database, will extract reads that contain sequence from the database. :param pair: List containing path to forward reads at index 0 and path to reverse reads at index 1. :param database: Path to rMLST database, in FASTA format. :param forward_out: :param reverse_out: :param threads: """ out, err, cmd = bbtools.bbduk_bait(database, pair[0], forward_out, reverse_in=pair[1], reverse_out=reverse_out, threads=str(threads), returncmd=True) if logfile: write_to_logfile(logfile, out, err, cmd)
Given a pair of reads and an rMLST database, will extract reads that contain sequence from the database. :param pair: List containing path to forward reads at index 0 and path to reverse reads at index 1. :param database: Path to rMLST database, in FASTA format. :param forward_out: :param reverse_out: :param threads:
entailment
def find_cross_contamination(databases, pair, tmpdir='tmp', log='log.txt', threads=1): """ Usese mash to find out whether or not a sample has more than one genus present, indicating cross-contamination. :param databases: A databases folder, which must contain refseq.msh, a mash sketch that has one representative per genus from refseq. :param tmpdir: Temporary directory to store mash result files in. :param pair: Array with path to forward reads at index 0 and path to reverse reads at index o :param log: Logfile to write to. :param threads: Number of threads to run mash wit. :return: cross_contam: a bool that is True if more than one genus is found, and False otherwise. :return: genera_present: A string. If only one genus is found, string is just genus. If more than one genus is found, the string is a list of genera present, separated by colons (i.e. for Escherichia and Salmonella found, string would be 'Escherichia:Salmonella'. If no genus found, return 'NA' """ genera_present = list() out, err, cmd = mash.screen('{}/refseq.msh'.format(databases), pair[0], pair[1], threads=threads, w='', i='0.95', output_file=os.path.join(tmpdir, 'screen.tab'), returncmd=True) write_to_logfile(log, out, err, cmd) screen_output = mash.read_mash_screen(os.path.join(tmpdir, 'screen.tab')) for item in screen_output: mash_genus = item.query_id.split('/')[-3] if mash_genus == 'Shigella': mash_genus = 'Escherichia' if mash_genus not in genera_present: genera_present.append(mash_genus) if len(genera_present) == 1: genera_present = genera_present[0] elif len(genera_present) == 0: genera_present = 'NA' else: tmpstr = '' for mash_genus in genera_present: tmpstr += mash_genus + ':' genera_present = tmpstr[:-1] return genera_present
Usese mash to find out whether or not a sample has more than one genus present, indicating cross-contamination. :param databases: A databases folder, which must contain refseq.msh, a mash sketch that has one representative per genus from refseq. :param tmpdir: Temporary directory to store mash result files in. :param pair: Array with path to forward reads at index 0 and path to reverse reads at index o :param log: Logfile to write to. :param threads: Number of threads to run mash wit. :return: cross_contam: a bool that is True if more than one genus is found, and False otherwise. :return: genera_present: A string. If only one genus is found, string is just genus. If more than one genus is found, the string is a list of genera present, separated by colons (i.e. for Escherichia and Salmonella found, string would be 'Escherichia:Salmonella'. If no genus found, return 'NA'
entailment
def number_of_bases_above_threshold(high_quality_base_count, base_count_cutoff=2, base_fraction_cutoff=None): """ Finds if a site has at least two bases of high quality, enough that it can be considered fairly safe to say that base is actually there. :param high_quality_base_count: Dictionary of count of HQ bases at a position where key is base and values is the count of that base. :param base_count_cutoff: Number of bases needed to support multiple allele presence. :param base_fraction_cutoff: Fraction of bases needed to support multiple allele presence. :return: True if site has at least base_count_cutoff/base_fraction_cutoff bases, False otherwise (changeable by user) """ # make a dict by dictionary comprehension where values are True or False for each base depending on whether the count meets the threshold. # Method differs depending on whether absolute or fraction cutoff is specified if base_fraction_cutoff: total_hq_base_count = sum(high_quality_base_count.values()) bases_above_threshold = {base: float(count)/total_hq_base_count >= base_fraction_cutoff and count >= base_count_cutoff for (base,count) in high_quality_base_count.items()} else: bases_above_threshold = {base: count >= base_count_cutoff for (base, count) in high_quality_base_count.items()} # True is equal to 1 so sum of the number of Trues in the bases_above_threshold dict is the number of bases passing threhold return sum(bases_above_threshold.values())
Finds if a site has at least two bases of high quality, enough that it can be considered fairly safe to say that base is actually there. :param high_quality_base_count: Dictionary of count of HQ bases at a position where key is base and values is the count of that base. :param base_count_cutoff: Number of bases needed to support multiple allele presence. :param base_fraction_cutoff: Fraction of bases needed to support multiple allele presence. :return: True if site has at least base_count_cutoff/base_fraction_cutoff bases, False otherwise (changeable by user)
entailment
def find_if_multibase(column, quality_cutoff, base_cutoff, base_fraction_cutoff): """ Finds if a position in a pileup has more than one base present. :param column: A pileupColumn generated by pysam :param quality_cutoff: Desired minimum phred quality for a base in order to be counted towards a multi-allelic column :param base_cutoff: Minimum number of bases needed to support presence of a base. :param base_fraction_cutoff: Minimum fraction of bases needed to support presence of a base. If specified, noth the base_cutoff and base_fraction_cutoff will have to be met :return: If position has more than one base, a dictionary with counts for the bases. Otherwise, returns empty dictionary """ # Sometimes the qualities come out to ridiculously high (>70) values. Looks to be because sometimes reads # are overlapping and the qualities get summed for overlapping bases. Issue opened on pysam. unfiltered_base_qualities = dict() for read in column.pileups: if read.query_position is not None: # Not entirely sure why this is sometimes None, but it causes bad stuff reference_sequence = read.alignment.get_reference_sequence() previous_position = read.query_position - 1 if read.query_position > 1 else 0 next_position = read.query_position + 1 # This causes index errors. Fix at some point soon. # Another stringency check - to make sure that we're actually looking at a point mutation, check that the # base before and after the one we're looking at match the reference. With Nanopore data, lots of indels and # the like cause false positives, so this filters those out. try: # Need to actually handle this at some point. 
For now, be lazy previous_reference_base = reference_sequence[previous_position] next_reference_base = reference_sequence[next_position] previous_base = read.alignment.query_sequence[previous_position] next_base = read.alignment.query_sequence[next_position] base = read.alignment.query_sequence[read.query_position] quality = read.alignment.query_qualities[read.query_position] if previous_reference_base == previous_base and next_reference_base == next_base: if base not in unfiltered_base_qualities: unfiltered_base_qualities[base] = [quality] else: unfiltered_base_qualities[base].append(quality) except IndexError: pass # Now check that at least two bases for each of the bases present high quality. # first remove all low quality bases # Use dictionary comprehension to make a new dictionary where only scores above threshold are kept. # Internally list comprehension is used to filter the list filtered_base_qualities = {base:[score for score in scores if score >= quality_cutoff] for (base,scores) in unfiltered_base_qualities.items()} # Now remove bases that have no high quality scores # Use dictionary comprehension to make a new dictionary where bases that have a non-empty scores list are kept filtered_base_qualities = {base:scores for (base,scores) in filtered_base_qualities.items() if scores} # If we less than two bases with high quality scores, ignore things. 
if len(filtered_base_qualities) < 2: return dict() # Now that filtered_base_qualities only contains bases with more than one HQ base, make just a dict with base counts with dict comprehension high_quality_base_count = {base: len(scores) for (base, scores) in filtered_base_qualities.items()} if number_of_bases_above_threshold(high_quality_base_count, base_count_cutoff=base_cutoff, base_fraction_cutoff=base_fraction_cutoff) > 1: logging.debug('base qualities before filtering: {0}'.format(unfiltered_base_qualities)) logging.debug('base qualities after filtering: {0}'.format(filtered_base_qualities)) logging.debug('SNVs found at position {0}: {1}\n'.format(column.pos, high_quality_base_count)) return high_quality_base_count else: # logging.debug('No SNVs\n') return dict()
Finds if a position in a pileup has more than one base present. :param column: A pileupColumn generated by pysam :param quality_cutoff: Desired minimum phred quality for a base in order to be counted towards a multi-allelic column :param base_cutoff: Minimum number of bases needed to support presence of a base. :param base_fraction_cutoff: Minimum fraction of bases needed to support presence of a base. If specified, noth the base_cutoff and base_fraction_cutoff will have to be met :return: If position has more than one base, a dictionary with counts for the bases. Otherwise, returns empty dictionary
entailment
def get_contig_names(fasta_file): """ Gets contig names from a fasta file using SeqIO. :param fasta_file: Full path to uncompressed, fasta-formatted file :return: List of contig names. """ contig_names = list() for contig in SeqIO.parse(fasta_file, 'fasta'): contig_names.append(contig.id) return contig_names
Gets contig names from a fasta file using SeqIO. :param fasta_file: Full path to uncompressed, fasta-formatted file :return: List of contig names.
entailment
def read_contig(contig_name, bamfile_name, reference_fasta, quality_cutoff=20, base_cutoff=2, base_fraction_cutoff=None): """ Examines a contig to find if there are positions where more than one base is present. :param contig_name: Name of contig as a string. :param bamfile_name: Full path to bamfile. Must be sorted/indexed :param reference_fasta: Full path to fasta file that was used to generate the bamfile. :param report_file: File where information about each position found to have multiple bases will be written. :return: Dictionary of positions where more than one base is present. Keys are contig name, values are positions """ bamfile = pysam.AlignmentFile(bamfile_name, 'rb') multibase_position_dict = dict() to_write = list() # These parameters seem to be fairly undocumented with pysam, but I think that they should make the output # that I'm getting to match up with what I'm seeing in Tablet. for column in bamfile.pileup(contig_name, stepper='samtools', ignore_orphans=False, fastafile=pysam.FastaFile(reference_fasta), min_base_quality=0): base_dict = find_if_multibase(column, quality_cutoff=quality_cutoff, base_cutoff=base_cutoff, base_fraction_cutoff=base_fraction_cutoff) if base_dict: # Pysam starts counting at 0, whereas we actually want to start counting at 1. actual_position = column.pos + 1 if column.reference_name in multibase_position_dict: multibase_position_dict[column.reference_name].append(actual_position) else: multibase_position_dict[column.reference_name] = [actual_position] to_write.append('{reference},{position},{bases},{coverage}\n'.format(reference=column.reference_name, position=actual_position, bases=base_dict_to_string(base_dict), coverage=sum(base_dict.values()))) bamfile.close() return multibase_position_dict, to_write
Examines a contig to find if there are positions where more than one base is present. :param contig_name: Name of contig as a string. :param bamfile_name: Full path to bamfile. Must be sorted/indexed :param reference_fasta: Full path to fasta file that was used to generate the bamfile. :param report_file: File where information about each position found to have multiple bases will be written. :return: Dictionary of positions where more than one base is present. Keys are contig name, values are positions
entailment
def find_rmlst_type(kma_report, rmlst_report): """ Uses a report generated by KMA to determine what allele is present for each rMLST gene. :param kma_report: The .res report generated by KMA. :param rmlst_report: rMLST report file to write information to. :return: a sorted list of loci present, in format gene_allele """ genes_to_use = dict() score_dict = dict() gene_alleles = list() with open(kma_report) as tsvfile: reader = csv.DictReader(tsvfile, delimiter='\t') for row in reader: gene_allele = row['#Template'] score = int(row['Score']) gene = gene_allele.split('_')[0] allele = gene_allele.split('_')[1] if gene not in score_dict: score_dict[gene] = score genes_to_use[gene] = allele else: if score > score_dict[gene]: score_dict[gene] = score genes_to_use[gene] = allele for gene in genes_to_use: gene_alleles.append(gene + '_' + genes_to_use[gene].replace(' ', '')) gene_alleles = sorted(gene_alleles) with open(rmlst_report, 'w') as f: f.write('Gene,Allele\n') for gene_allele in gene_alleles: gene = gene_allele.split('_')[0] allele = gene_allele.split('_')[1] f.write('{},{}\n'.format(gene, allele)) return gene_alleles
Uses a report generated by KMA to determine what allele is present for each rMLST gene. :param kma_report: The .res report generated by KMA. :param rmlst_report: rMLST report file to write information to. :return: a sorted list of loci present, in format gene_allele
entailment
def base_dict_to_string(base_dict): """ Converts a dictionary to a string. {'C': 12, 'A':4} gets converted to C:12;A:4 :param base_dict: Dictionary of bases and counts created by find_if_multibase :return: String representing that dictionary. """ outstr = '' # First, sort base_dict so that major allele always comes first - makes output report nicer to look at. base_list = sorted(base_dict.items(), key=lambda kv: kv[1], reverse=True) for base in base_list: outstr += '{}:{};'.format(base[0], base[1]) return outstr[:-1]
Converts a dictionary to a string. {'C': 12, 'A':4} gets converted to C:12;A:4 :param base_dict: Dictionary of bases and counts created by find_if_multibase :return: String representing that dictionary.
entailment
def find_total_sequence_length(fasta_file): """ Totals up number of bases in a fasta file. :param fasta_file: Path to an uncompressed, fasta-formatted file. :return: Number of total bases in file, as an int. """ total_length = 0 for sequence in SeqIO.parse(fasta_file, 'fasta'): total_length += len(sequence.seq) return total_length
Totals up number of bases in a fasta file. :param fasta_file: Path to an uncompressed, fasta-formatted file. :return: Number of total bases in file, as an int.
entailment
def estimate_percent_contamination(contamination_report_file): """ Estimates the percent contamination of a sample (and standard deviation). :param contamination_report_file: File created by read_contig, :return: Estimated percent contamination and standard deviation. """ contam_levels = list() with open(contamination_report_file) as csvfile: reader = csv.DictReader(csvfile) for row in reader: lowest_count = 99999 base_counts = row['Bases'].split(';') for count in base_counts: num_bases = int(count.split(':')[1]) if num_bases < lowest_count: lowest_count = num_bases total_coverage = int(row['Coverage']) contam_levels.append(lowest_count*100/total_coverage) return '%.2f' % (np.mean(contam_levels)), '%.2f' % np.std(contam_levels)
Estimates the percent contamination of a sample (and standard deviation). :param contamination_report_file: File created by read_contig, :return: Estimated percent contamination and standard deviation.
entailment
def find_contamination(pair, output_folder, databases_folder, forward_id='_R1', threads=1, keep_files=False,
                       quality_cutoff=20, base_cutoff=2, base_fraction_cutoff=0.05, cgmlst_db=None, Xmx=None,
                       tmpdir=None, data_type='Illumina', use_rmlst=False):
    """
    This needs some documentation fairly badly, so here we go.

    :param pair: This has become a misnomer. If the input reads are actually paired, needs to be a list
    with the full filepath to forward reads at index 0 and full path to reverse reads at index 1.
    If reads are unpaired, should be a list of length 1 with the only entry being the full filepath to read set.
    :param output_folder: Folder where outputs (confindr log and report, and other stuff) will be stored.
    This will be created if it does not exist. (I think - should write a test that double checks this).
    :param databases_folder: Full path to folder where ConFindr's databases live. These files can be
    downloaded from figshare in .tar.gz format (https://ndownloader.figshare.com/files/11864267),
    and will be automatically downloaded if the script is run from the command line.
    :param forward_id: Identifier that marks reads as being in the forward direction for paired reads.
    Defaults to _R1
    :param threads: Number of threads to run analyses with. All parts of this pipeline scale pretty well,
    so more is better.
    :param keep_files: Boolean that says whether or not to keep temporary files.
    :param quality_cutoff: Integer of the phred score required to have a base count towards a multiallelic site.
    :param base_cutoff: Integer of number of bases needed to have a base be part of a multiallelic site.
    :param base_fraction_cutoff: Float of fraction of bases needed to have a base be part of a multiallelic site.
    If specified will be used in parallel with base_cutoff
    :param cgmlst_db: if None, we're using rMLST, if True, using some sort of custom cgMLST database.
    This requires some custom parameters.
    :param Xmx: if None, BBTools will use auto memory detection. If string, BBTools will use what's
    specified as their memory request.
    :param tmpdir: if None, any genus-specific databases that need to be created will be written to ConFindr DB
    location. Otherwise, genus-specific databases will be written here.
    :param data_type: 'Illumina' or 'Nanopore' - selects read trimming/mapping strategy.
    :param use_rmlst: If True, always use the rMLST database rather than a cgMLST-derived one.
    """
    # Record when the databases were downloaded so the report can say which DB version was used.
    if os.path.isfile(os.path.join(databases_folder, 'download_date.txt')):
        with open(os.path.join(databases_folder, 'download_date.txt')) as f:
            database_download_date = f.readline().rstrip()
    else:
        database_download_date = 'NA'
    log = os.path.join(output_folder, 'confindr_log.txt')
    # Derive the sample name and paired/unpaired mode from the read file name(s).
    if len(pair) == 2:
        sample_name = os.path.split(pair[0])[-1].split(forward_id)[0]
        paired = True
        logging.debug('Sample is paired. Sample name is {}'.format(sample_name))
    else:
        sample_name = os.path.split(pair[0])[-1].split('.')[0]
        paired = False
        logging.debug('Sample is unpaired. Sample name is {}'.format(sample_name))
    # Per-sample scratch directory; removed at the end unless keep_files is set.
    sample_tmp_dir = os.path.join(output_folder, sample_name)
    if not os.path.isdir(sample_tmp_dir):
        os.makedirs(sample_tmp_dir)
    logging.info('Checking for cross-species contamination...')
    if paired:
        genus = find_cross_contamination(databases_folder, pair, tmpdir=sample_tmp_dir, log=log,
                                         threads=threads)
    else:
        genus = find_cross_contamination_unpaired(databases_folder, reads=pair[0], tmpdir=sample_tmp_dir,
                                                  log=log, threads=threads)
    # More than one genus separated by ':' means cross-species contamination - report and bail out early.
    if len(genus.split(':')) > 1:
        write_output(output_report=os.path.join(output_folder, 'confindr_report.csv'),
                     sample_name=sample_name, multi_positions=0, genus=genus, percent_contam='NA',
                     contam_stddev='NA', total_gene_length=0,
                     database_download_date=database_download_date)
        logging.info('Found cross-contamination! Skipping rest of analysis...\n')
        if keep_files is False:
            shutil.rmtree(sample_tmp_dir)
        return
    # Setup genus-specific databases, if necessary.
    if cgmlst_db is not None:
        # Sanity check that the DB specified is actually a file, otherwise, quit with appropriate error message.
        if not os.path.isfile(cgmlst_db):
            logging.error('ERROR: Specified cgMLST file ({}) does not exist. Please check the '
                          'path and try again.'.format(cgmlst_db))
            quit(code=1)
        sample_database = cgmlst_db
    else:
        db_folder = databases_folder if tmpdir is None else tmpdir
        if not os.path.isdir(db_folder):
            os.makedirs(db_folder)
        if genus != 'NA':
            # Logic here is as follows: users can either have both rMLST databases, which cover all of bacteria,
            # cgmlst-derived databases, which cover only Escherichia, Salmonella, and Listeria (may add more at some
            # point), or they can have both. They can also set priority to either always use rMLST, or to use my
            # core-genome derived stuff and fall back on rMLST if they're trying to look at a genus I haven't created
            # a scheme for.
            # In the event rmlst databases have priority, always use them.
            if use_rmlst is True:
                sample_database = os.path.join(db_folder, '{}_db.fasta'.format(genus))
                if not os.path.isfile(sample_database):
                    if os.path.isfile(os.path.join(db_folder, 'gene_allele.txt')) and \
                            os.path.isfile(os.path.join(db_folder, 'rMLST_combined.fasta')):
                        logging.info('Setting up genus-specific database for genus {}...'.format(genus))
                        allele_list = find_genusspecific_allele_list(os.path.join(db_folder, 'gene_allele.txt'),
                                                                     genus)
                        setup_allelespecific_database(fasta_file=sample_database,
                                                      database_folder=db_folder,
                                                      allele_list=allele_list)
            else:
                # Check if a cgderived database is available. If not, try to use rMLST database.
                sample_database = os.path.join(db_folder, '{}_db_cgderived.fasta'.format(genus))
                if not os.path.isfile(sample_database):
                    sample_database = os.path.join(db_folder, '{}_db.fasta'.format(genus))
                    # Create genus specific database if it doesn't already exist and we have the necessary
                    # rMLST files.
                    if os.path.isfile(os.path.join(db_folder, 'rMLST_combined.fasta')) and \
                            os.path.isfile(os.path.join(db_folder, 'gene_allele.txt')) and \
                            not os.path.isfile(sample_database):
                        logging.info('Setting up genus-specific database for genus {}...'.format(genus))
                        allele_list = find_genusspecific_allele_list(os.path.join(db_folder, 'gene_allele.txt'),
                                                                     genus)
                        setup_allelespecific_database(fasta_file=sample_database,
                                                      database_folder=db_folder,
                                                      allele_list=allele_list)
        else:
            # Genus unknown: fall back to the all-genera rMLST database.
            sample_database = os.path.join(db_folder, 'rMLST_combined.fasta')
    # If a user has gotten to this point and they don't have any database available to do analysis because
    # they don't have rMLST downloaded and we don't have a cg-derived database available, boot them with a helpful
    # message.
    if not os.path.isfile(sample_database):
        write_output(output_report=os.path.join(output_folder, 'confindr_report.csv'),
                     sample_name=sample_name, multi_positions=0, genus=genus, percent_contam='NA',
                     contam_stddev='NA', total_gene_length=0,
                     database_download_date=database_download_date)
        logging.info('Did not find databases for genus {genus}. You can download the rMLST database to get access to all '
                     'genera (see https://olc-bioinformatics.github.io/ConFindr/install/). Alternatively, if you have a '
                     'high-quality core-genome derived database for your genome of interest, we would be happy to '
                     'add it - open an issue at https://github.com/OLC-Bioinformatics/ConFindr/issues with the '
                     'title "Add genus-specific database: {genus}"\n'.format(genus=genus))
        if keep_files is False:
            shutil.rmtree(sample_tmp_dir)
        return
    # Extract rMLST reads and quality trim.
    logging.info('Extracting conserved core genes...')
    if paired:
        if Xmx is None:
            out, err, cmd = bbtools.bbduk_bait(reference=sample_database, forward_in=pair[0],
                                               reverse_in=pair[1],
                                               forward_out=os.path.join(sample_tmp_dir, 'rmlst_R1.fastq.gz'),
                                               reverse_out=os.path.join(sample_tmp_dir, 'rmlst_R2.fastq.gz'),
                                               threads=threads, returncmd=True)
        else:
            out, err, cmd = bbtools.bbduk_bait(reference=sample_database, forward_in=pair[0],
                                               reverse_in=pair[1],
                                               forward_out=os.path.join(sample_tmp_dir, 'rmlst_R1.fastq.gz'),
                                               reverse_out=os.path.join(sample_tmp_dir, 'rmlst_R2.fastq.gz'),
                                               threads=threads, Xmx=Xmx, returncmd=True)
    else:
        # Nanopore reads skip the separate trim step, so the bait output is named 'trimmed' directly.
        if data_type == 'Nanopore':
            forward_out = os.path.join(sample_tmp_dir, 'trimmed.fastq.gz')
        else:
            forward_out = os.path.join(sample_tmp_dir, 'rmlst.fastq.gz')
        if Xmx is None:
            out, err, cmd = bbtools.bbduk_bait(reference=sample_database, forward_in=pair[0],
                                               forward_out=forward_out, returncmd=True, threads=threads)
        else:
            out, err, cmd = bbtools.bbduk_bait(reference=sample_database, forward_in=pair[0],
                                               forward_out=forward_out, Xmx=Xmx, returncmd=True,
                                               threads=threads)
    write_to_logfile(log, out, err, cmd)
    logging.info('Quality trimming...')
    if data_type == 'Illumina':
        if paired:
            if Xmx is None:
                out, err, cmd = bbtools.bbduk_trim(forward_in=os.path.join(sample_tmp_dir, 'rmlst_R1.fastq.gz'),
                                                   reverse_in=os.path.join(sample_tmp_dir, 'rmlst_R2.fastq.gz'),
                                                   forward_out=os.path.join(sample_tmp_dir, 'trimmed_R1.fastq.gz'),
                                                   reverse_out=os.path.join(sample_tmp_dir, 'trimmed_R2.fastq.gz'),
                                                   threads=str(threads), returncmd=True)
            else:
                out, err, cmd = bbtools.bbduk_trim(forward_in=os.path.join(sample_tmp_dir, 'rmlst_R1.fastq.gz'),
                                                   reverse_in=os.path.join(sample_tmp_dir, 'rmlst_R2.fastq.gz'),
                                                   forward_out=os.path.join(sample_tmp_dir, 'trimmed_R1.fastq.gz'),
                                                   reverse_out=os.path.join(sample_tmp_dir, 'trimmed_R2.fastq.gz'),
                                                   Xmx=Xmx, threads=str(threads), returncmd=True)
        else:
            if Xmx is None:
                out, err, cmd = bbtools.bbduk_trim(forward_in=os.path.join(sample_tmp_dir, 'rmlst.fastq.gz'),
                                                   forward_out=os.path.join(sample_tmp_dir, 'trimmed.fastq.gz'),
                                                   returncmd=True, threads=threads)
            else:
                out, err, cmd = bbtools.bbduk_trim(forward_in=os.path.join(sample_tmp_dir, 'rmlst.fastq.gz'),
                                                   forward_out=os.path.join(sample_tmp_dir, 'trimmed.fastq.gz'),
                                                   returncmd=True, threads=threads, Xmx=Xmx)
        write_to_logfile(log, out, err, cmd)
    logging.info('Detecting contamination...')
    # Now do mapping in two steps - first, map reads back to database with ambiguous reads matching all - this
    # will be used to get a count of number of reads aligned to each gene/allele so we can create a custom rmlst file
    # with only the most likely allele for each gene.
    if not os.path.isfile(sample_database + '.fai'):
        # Don't bother re-indexing, this only needs to happen once.
        pysam.faidx(sample_database)
    kma_database = sample_database.replace('.fasta', '') + '_kma'
    kma_report = os.path.join(sample_tmp_dir, 'kma_rmlst')
    if not os.path.isfile(kma_database + '.name'):
        # The .name is one of the files KMA creates when making a database.
        cmd = 'kma index -i {} -o {}'.format(sample_database, kma_database)
        # NOTE: Need KMA >=1.2.0 for this to work.
        out, err = run_cmd(cmd)
        write_to_logfile(log, out, err, cmd)
    # Run KMA.
    if paired:
        cmd = 'kma -ipe {forward_in} {reverse_in} -t_db {kma_database} -o {kma_report} ' \
              '-t {threads}'.format(forward_in=os.path.join(sample_tmp_dir, 'trimmed_R1.fastq.gz'),
                                    reverse_in=os.path.join(sample_tmp_dir, 'trimmed_R2.fastq.gz'),
                                    kma_database=kma_database,
                                    kma_report=kma_report,
                                    threads=threads)
        out, err = run_cmd(cmd)
        write_to_logfile(log, out, err, cmd)
    else:
        if data_type == 'Illumina':
            cmd = 'kma -i {input_reads} -t_db {kma_database} -o {kma_report} ' \
                  '-t {threads}'.format(input_reads=os.path.join(sample_tmp_dir, 'trimmed.fastq.gz'),
                                        kma_database=kma_database,
                                        kma_report=kma_report,
                                        threads=threads)
        else:
            # Recommended Nanopore settings from KMA repo: https://bitbucket.org/genomicepidemiology/kma
            cmd = 'kma -i {input_reads} -t_db {kma_database} -o {kma_report} -mem_mode -mp 20 -mrs 0.0 -bcNano ' \
                  '-t {threads}'.format(input_reads=os.path.join(sample_tmp_dir, 'trimmed.fastq.gz'),
                                        kma_database=kma_database,
                                        kma_report=kma_report,
                                        threads=threads)
        out, err = run_cmd(cmd)
        write_to_logfile(log, out, err, cmd)
    rmlst_report = os.path.join(output_folder, sample_name + '_rmlst.csv')
    gene_alleles = find_rmlst_type(kma_report=kma_report + '.res', rmlst_report=rmlst_report)
    # Write out a FASTA containing only the most likely allele per gene.
    with open(os.path.join(sample_tmp_dir, 'rmlst.fasta'), 'w') as f:
        for contig in SeqIO.parse(sample_database, 'fasta'):
            if contig.id in gene_alleles:
                f.write('>{}\n'.format(contig.id))
                f.write(str(contig.seq) + '\n')
    rmlst_gene_length = find_total_sequence_length(os.path.join(sample_tmp_dir, 'rmlst.fasta'))
    logging.debug('Total gene length is {}'.format(rmlst_gene_length))
    # Second step of mapping - Do a mapping of our baited reads against a fasta file that has only one allele per
    # rMLST gene.
    pysam.faidx(os.path.join(sample_tmp_dir, 'rmlst.fasta'))
    if paired:
        cmd = 'bbmap.sh ref={ref} in={forward_in} in2={reverse_in} out={outbam} threads={threads} mdtag ' \
              'nodisk'.format(ref=os.path.join(sample_tmp_dir, 'rmlst.fasta'),
                              forward_in=os.path.join(sample_tmp_dir, 'trimmed_R1.fastq.gz'),
                              reverse_in=os.path.join(sample_tmp_dir, 'trimmed_R2.fastq.gz'),
                              outbam=os.path.join(sample_tmp_dir, 'out_2.bam'),
                              threads=threads)
        if cgmlst_db is not None:
            # Lots of core genes seem to have relatives within a genome that are at ~70 percent identity - this means
            # that reads that shouldn't really map do, and cause false positives. Adding in this subfilter means that
            # reads can only have one mismatch, so they actually have to be from the right gene for this to work.
            cmd += ' subfilter=1'
        if Xmx:
            cmd += ' -Xmx{}'.format(Xmx)
        out, err = run_cmd(cmd)
        write_to_logfile(log, out, err, cmd)
    else:
        if data_type == 'Illumina':
            cmd = 'bbmap.sh ref={ref} in={forward_in} out={outbam} threads={threads} mdtag ' \
                  'nodisk'.format(ref=os.path.join(sample_tmp_dir, 'rmlst.fasta'),
                                  forward_in=os.path.join(sample_tmp_dir, 'trimmed.fastq.gz'),
                                  outbam=os.path.join(sample_tmp_dir, 'out_2.bam'),
                                  threads=threads)
            if cgmlst_db is not None:
                # Lots of core genes seem to have relatives within a genome that are at ~70 percent identity - this means
                # that reads that shouldn't really map do, and cause false positives. Adding in this subfilter means that
                # reads can only have one mismatch, so they actually have to be from the right gene for this to work.
                cmd += ' subfilter=1'
            if Xmx:
                cmd += ' -Xmx{}'.format(Xmx)
            out, err = run_cmd(cmd)
            write_to_logfile(log, out, err, cmd)
        else:
            # Nanopore: map with minimap2 and convert SAM to BAM ourselves.
            cmd = 'minimap2 --MD -t {threads} -ax map-ont {ref} {reads} ' \
                  '> {outsam}'.format(ref=os.path.join(sample_tmp_dir, 'rmlst.fasta'),
                                      reads=os.path.join(sample_tmp_dir, 'trimmed.fastq.gz'),
                                      outsam=os.path.join(sample_tmp_dir, 'out_2.sam'),
                                      threads=threads)
            out, err = run_cmd(cmd)
            write_to_logfile(log, out, err, cmd)
            outbam = os.path.join(sample_tmp_dir, 'out_2.bam')
            # Apparently have to perform equivalent of a touch on this file for this to work.
            fh = open(outbam, 'w')
            fh.close()
            pysam.view('-b', '-o', outbam, os.path.join(sample_tmp_dir, 'out_2.sam'), save_stdout=outbam)
    pysam.sort('-o', os.path.join(sample_tmp_dir, 'contamination.bam'), os.path.join(sample_tmp_dir, 'out_2.bam'))
    pysam.index(os.path.join(sample_tmp_dir, 'contamination.bam'))
    # Now find number of multi-positions for each rMLST gene/allele combination
    multi_positions = 0
    # Run the BAM parsing in parallel! Some refactoring of the code would likely be a good idea so this
    # isn't quite so ugly, but it works.
    p = multiprocessing.Pool(processes=threads)
    bamfile_list = [os.path.join(sample_tmp_dir, 'contamination.bam')] * len(gene_alleles)
    # bamfile_list = [os.path.join(sample_tmp_dir, 'rmlst.bam')] * len(gene_alleles)
    reference_fasta_list = [os.path.join(sample_tmp_dir, 'rmlst.fasta')] * len(gene_alleles)
    quality_cutoff_list = [quality_cutoff] * len(gene_alleles)
    base_cutoff_list = [base_cutoff] * len(gene_alleles)
    base_fraction_list = [base_fraction_cutoff] * len(gene_alleles)
    multibase_dict_list = list()
    report_write_list = list()
    for multibase_dict, report_write in p.starmap(read_contig,
                                                  zip(gene_alleles, bamfile_list, reference_fasta_list,
                                                      quality_cutoff_list, base_cutoff_list,
                                                      base_fraction_list),
                                                  chunksize=1):
        multibase_dict_list.append(multibase_dict)
        report_write_list.append(report_write)
    p.close()
    p.join()
    # Write out report info.
    report_file = os.path.join(output_folder, sample_name + '_contamination.csv')
    with open(report_file, 'w') as r:
        r.write('{reference},{position},{bases},{coverage}\n'.format(reference='Gene',
                                                                     position='Position',
                                                                     bases='Bases',
                                                                     coverage='Coverage'))
        for item in report_write_list:
            for contamination_info in item:
                r.write(contamination_info)
    # Total up the number of multibase positions.
    for multibase_position_dict in multibase_dict_list:
        multi_positions += sum([len(snp_positions) for snp_positions in multibase_position_dict.values()])
    # rMLST cutoff scales with total gene length; cgMLST uses a fixed cutoff.
    if cgmlst_db is None:
        snp_cutoff = int(rmlst_gene_length/10000) + 1
    else:
        snp_cutoff = 10
    if multi_positions >= snp_cutoff:
        percent_contam, contam_stddev = estimate_percent_contamination(contamination_report_file=report_file)
    else:
        percent_contam = 0
        contam_stddev = 0
    logging.info('Done! Number of contaminating SNVs found: {}\n'.format(multi_positions))
    write_output(output_report=os.path.join(output_folder, 'confindr_report.csv'),
                 sample_name=sample_name,
                 multi_positions=multi_positions,
                 genus=genus,
                 percent_contam=percent_contam,
                 contam_stddev=contam_stddev,
                 total_gene_length=rmlst_gene_length,
                 snp_cutoff=snp_cutoff,
                 cgmlst=cgmlst_db,
                 database_download_date=database_download_date)
    if keep_files is False:
        shutil.rmtree(sample_tmp_dir)
This needs some documentation fairly badly, so here we go. :param pair: This has become a misnomer. If the input reads are actually paired, needs to be a list with the full filepath to forward reads at index 0 and full path to reverse reads at index 1. If reads are unpaired, should be a list of length 1 with the only entry being the full filepath to read set. :param output_folder: Folder where outputs (confindr log and report, and other stuff) will be stored. This will be created if it does not exist. (I think - should write a test that double checks this). :param databases_folder: Full path to folder where ConFindr's databases live. These files can be downloaded from figshare in .tar.gz format (https://ndownloader.figshare.com/files/11864267), and will be automatically downloaded if the script is run from the command line. :param forward_id: Identifier that marks reads as being in the forward direction for paired reads. Defaults to _R1 :param threads: Number of threads to run analyses with. All parts of this pipeline scale pretty well, so more is better. :param keep_files: Boolean that says whether or not to keep temporary files. :param quality_cutoff: Integer of the phred score required to have a base count towards a multiallelic site. :param base_cutoff: Integer of number of bases needed to have a base be part of a multiallelic site. :param base_fraction_cutoff: Float of fraction of bases needed to have a base be part of a multiallelic site. If specified will be used in parallel with base_cutoff :param cgmlst_db: if None, we're using rMLST, if True, using some sort of custom cgMLST database. This requires some custom parameters. :param Xmx: if None, BBTools will use auto memory detection. If string, BBTools will use what's specified as their memory request. :param tmpdir: if None, any genus-specific databases that need to be created will be written to ConFindr DB location. Otherwise, genus-specific databases will be written here.
entailment
def write_output(output_report, sample_name, multi_positions, genus, percent_contam, contam_stddev,
                 total_gene_length, database_download_date, snp_cutoff=3, cgmlst=None):
    """
    Appends one row describing a sample's contamination status to the ConFindr report,
    creating the report (with its header line) first if it does not exist yet.
    :param output_report: Path to CSV output report file.
    :param sample_name: string - name of sample.
    :param multi_positions: integer - number of positions that were found to have more than one base present.
    :param genus: string - the genus of the sample; multiple genera separated by ':' mean cross-contamination.
    :param percent_contam: float - estimated percentage contamination.
    :param contam_stddev: float - standard deviation of percentage contamination.
    :param total_gene_length: integer - number of bases examined to make a contamination call.
    :param database_download_date: string - date the databases were downloaded, or 'NA'.
    :param snp_cutoff: integer - minimum number of multi-allelic positions needed to call a sample contaminated.
    :param cgmlst: If None, means that rMLST database was used, so use rMLST snp cutoff. Otherwise, some sort of
    cgMLST database was used, so use a different cutoff.
    """
    # Create the report with its header row the first time we're called.
    if not os.path.isfile(output_report):
        with open(output_report, 'w') as report:
            report.write('Sample,Genus,NumContamSNVs,ContamStatus,PercentContam,PercentContamStandardDeviation,BasesExamined,DatabaseDownloadDate\n')
    # Contaminated if enough multi-allelic sites were found, or if more than one genus was detected.
    contaminated = multi_positions >= snp_cutoff or len(genus.split(':')) > 1
    with open(output_report, 'a+') as report:
        report.write('{samplename},{genus},{numcontamsnvs},'
                     '{contamstatus},{percent_contam},{contam_stddev},'
                     '{gene_length},{database_download_date}\n'.format(samplename=sample_name,
                                                                       genus=genus,
                                                                       numcontamsnvs=multi_positions,
                                                                       contamstatus=contaminated,
                                                                       percent_contam=percent_contam,
                                                                       contam_stddev=contam_stddev,
                                                                       gene_length=total_gene_length,
                                                                       database_download_date=database_download_date))
Function that writes the output generated by ConFindr to a report file. Appends to a file that already exists, or creates the file if it doesn't already exist. :param output_report: Path to CSV output report file. Should have headers SampleName,Genus,NumContamSNVs, ContamStatus,PercentContam, and PercentContamStandardDeviation, in that order. :param sample_name: string - name of sample :param multi_positions: integer - number of positions that were found to have more than one base present. :param genus: string - The genus of your sample :param percent_contam: float - Estimated percentage contamination :param contam_stddev: float - Standard deviation of percentage contamination :param total_gene_length: integer - number of bases examined to make a contamination call. :param cgmlst: If None, means that rMLST database was used, so use rMLST snp cutoff. Otherwise, some sort of cgMLST database was used, so use a different cutoff.
entailment
def check_acceptable_xmx(xmx_string):
    """
    BBTools can have their memory set manually. This will check that the memory setting is actually valid.
    :param xmx_string: The user's requested Xmx, as a string (e.g. '20G').
    :return: True if the Xmx string will be accepted by BBTools, otherwise False.
    """
    # Guard against an empty string, which previously crashed with IndexError on xmx_string[-1].
    if not xmx_string:
        logging.error('ERROR: No memory amount was specified.')
        return False
    acceptable_xmx = True
    acceptable_suffixes = ['K', 'M', 'G']
    if xmx_string[-1].upper() not in acceptable_suffixes:
        acceptable_xmx = False
        logging.error('ERROR: Memory must be specified as K (kilobytes), M (megabytes), or G (gigabytes). Your specified '
                      'suffix was {}.'.format(xmx_string[-1]))
    if '.' in xmx_string:
        acceptable_xmx = False
        logging.error('ERROR: Xmx strings must be integers, floating point numbers are not accepted.')
    if not str.isdigit(xmx_string[:-1]):
        acceptable_xmx = False
        logging.error('ERROR: The amount of memory requested was not an integer.')
    return acceptable_xmx
BBTools can have their memory set manually. This will check that the memory setting is actually valid :param xmx_string: The users requested XMX, as a string. :return: True if the Xmx string will be accepted by BBTools, otherwise false.
entailment
def char_code(columns, name=None):
    """
    Character set code field.

    :param columns: total width of the field, in columns
    :param name: name for the field
    :return: an instance of the Character set code field rules
    :raises ValueError: if columns is not a positive integer
    """
    if name is None:
        name = 'Char Code Field (' + str(columns) + ' columns)'

    if columns <= 0:
        # Raise a specific, catchable exception instead of the original bare BaseException.
        raise ValueError('columns must be a positive integer, got %s' % columns)

    # Build an alternation of all known character-set codes, each left-padded to 15 columns.
    char_sets = None
    for char_set in _tables.get_data('character_set'):
        regex = '[ ]{' + str(15 - len(char_set)) + '}' + char_set
        if char_sets is None:
            char_sets = regex
        else:
            char_sets += '|' + regex

    # Accepted sets
    _character_sets = pp.Regex(char_sets)
    # NOTE(review): the class [0-8,A-F] matches a literal ',' and excludes the
    # digit 9 - presumably [0-9A-F] was intended; confirm before changing.
    _unicode_1_16b = pp.Regex(r'U\+0[0-8,A-F]{3}[ ]{' + str(columns - 6) + '}')
    _unicode_2_21b = pp.Regex(r'U\+0[0-8,A-F]{4}[ ]{' + str(columns - 7) + '}')

    # Basic field
    char_code_field = (_character_sets | _unicode_1_16b | _unicode_2_21b)

    # Parse action: strip surrounding padding from the matched token.
    char_code_field = char_code_field.setParseAction(lambda s: s[0].strip())

    # Name
    char_code_field.setName(name)

    return char_code_field
Character set code field. :param name: name for the field :return: an instance of the Character set code field rules
entailment
def make_choice_validator(
        choices, default_key=None, normalizer=None):
    """
    Returns a callable that accepts the choices provided.

    Choices should be provided as a list of 2-tuples, where the first
    element is a string that should match user input (the key); the
    second being the value associated with the key.

    The callable by default will match, upon complete match the first
    value associated with the result will be returned.  Partial matches
    are supported.

    If a default is provided, that value will be returned if the user
    provided input is empty, i.e. the value that is mapped to the empty
    string.

    Finally, a normalizer function can be passed.  This normalizes all
    keys and validation value.
    """

    def normalize_all(_choices):
        # Normalize all the keys for easier comparison.
        # Fixed: iterate the _choices argument, not the enclosing
        # 'choices' - the old closure bug made the ambiguous-choice
        # error message list every choice instead of the matches.
        if normalizer:
            _choices = [(normalizer(key), value) for key, value in _choices]
        return _choices

    choices = normalize_all(choices)

    def choice_validator(value):
        if normalizer:
            value = normalizer(value)
        # Compare against None so a default_key of 0 (the first choice)
        # works; truthiness testing silently disabled that case.
        if not value and default_key is not None:
            value = choices[default_key][0]
        results = []
        for choice, mapped in choices:
            if value == choice:
                return mapped
            if choice.startswith(value):
                results.append((choice, mapped))
        if len(results) == 1:
            return results[0][1]
        elif not results:
            raise ValueError('Invalid choice.')
        else:
            raise ValueError(
                'Choice ambiguous between (%s)' % ', '.join(
                    k for k, v in normalize_all(results))
            )

    return choice_validator
Returns a callable that accepts the choices provided. Choices should be provided as a list of 2-tuples, where the first element is a string that should match user input (the key); the second being the value associated with the key. The callable by default will match, upon complete match the first value associated with the result will be returned. Partial matches are supported. If a default is provided, that value will be returned if the user provided input is empty, i.e. the value that is mapped to the empty string. Finally, a normalizer function can be passed. This normalizes all keys and validation value.
entailment
def prompt(question, validator=None,
           choices=None, default_key=NotImplemented,
           normalizer=str.lower,
           _stdin=None, _stdout=None):
    """
    Prompt user for question, maybe choices, and get answer.

    Arguments:

    question
        The question to prompt.  It will only be prompted once.
    validator
        Defaults to None.  Must be a callable that takes in a value.
        The callable should raise ValueError when the value leads to an
        error, otherwise return a converted value.
    choices
        If choices are provided instead, a validator will be
        constructed using make_choice_validator along with the next
        default_key argument.  Please refer to documentation for
        that function.
    default_key
        See above.  Index into choices of the option selected when the
        user provides empty input (or in non-interactive mode).
    normalizer
        Defaults to str.lower.  See above.
    _stdin / _stdout
        Test hooks; default to sys.stdin / sys.stdout.
    """

    def write_choices(choice_keys, default_key):
        # Render "(a/b/c) [default] " after the question.
        _stdout.write('(')
        _stdout.write('/'.join(choice_keys))
        _stdout.write(') ')
        if default_key is not NotImplemented:
            _stdout.write('[')
            _stdout.write(choice_keys[default_key])
            _stdout.write('] ')

    if _stdin is None:
        _stdin = sys.stdin

    if _stdout is None:
        _stdout = sys.stdout

    _stdout.write(question)
    _stdout.write(' ')

    if not check_interactive():
        # Non-interactive session: auto-answer with the default when one
        # exists, otherwise abort rather than block on stdin.
        if choices and default_key is not NotImplemented:
            choice_keys = [choice for choice, mapped in choices]
            write_choices(choice_keys, default_key)
            display, answer = choices[default_key]
            _stdout.write(display)
            _stdout.write('\n')
            logger.warning(
                'non-interactive mode; auto-selected default option [%s]',
                display)
            return answer
        logger.warning(
            'interactive code triggered within non-interactive session')
        _stdout.write('Aborted.\n')
        return None

    choice_keys = []
    if validator is None:
        if choices:
            validator = make_choice_validator(
                choices, default_key, normalizer)
            choice_keys = [choice for choice, mapped in choices]
        else:
            validator = null_validator
    answer = NotImplemented
    # Re-prompt until the validator accepts the input (or the user interrupts).
    while answer is NotImplemented:
        if choice_keys:
            write_choices(choice_keys, default_key)
        _stdout.flush()
        try:
            # NOTE(review): 'locale' here appears to be a module-level
            # encoding name used to round-trip the raw input - confirm
            # where it is defined before changing.
            answer = validator(
                _stdin.readline().strip().encode(locale).decode(locale))
        except ValueError as e:
            _stdout.write('%s\n' % e)
            # Re-show only the last line of a multi-line question.
            _stdout.write(question.splitlines()[-1])
            _stdout.write(' ')
        except KeyboardInterrupt:
            _stdout.write('Aborted.\n')
            answer = None

    return answer
Prompt user for question, maybe choices, and get answer. Arguments: question The question to prompt. It will only be prompted once. validator Defaults to None. Must be a callable that takes in a value. The callable should raise ValueError when the value leads to an error, otherwise return a converted value. choices If choices are provided instead, a validator will be constructed using make_choice_validator along with the next default_value argument. Please refer to documentation for that function. default_value See above. normalizer Defaults to str.lower. See above.
entailment
def prompt_overwrite_json(original, new, target_path, dumps=json_dumps):
    """
    Prompt end user with a diff of original and new json that may
    overwrite the file at the target_path.  This function only displays
    a confirmation prompt and it is up to the caller to implement the
    actual functionality.  Optionally, a custom json.dumps method can
    also be passed in for output generation.
    """
    # generate compacted ndiff output.
    # Fixed: use the 'dumps' parameter - it was accepted but ignored in
    # favour of the hard-coded json_dumps, so custom serializers never
    # took effect.
    diff = '\n'.join(l for l in (
        line.rstrip() for line in difflib.ndiff(
            dumps(original).splitlines(),
            dumps(new).splitlines(),
        ))
        if l[:1] in '?+-' or l[-1:] in '{}' or l[-2:] == '},')
    basename_target = basename(target_path)
    return prompt(
        "Generated '%(basename_target)s' differs with '%(target_path)s'.\n\n"
        "The following is a compacted list of changes required:\n"
        "%(diff)s\n\n"
        "Overwrite '%(target_path)s'?" % locals(),
        choices=(
            ('Yes', True),
            ('No', False),
        ),
        default_key=1,
    )
Prompt end user with a diff of original and new json that may overwrite the file at the target_path. This function only displays a confirmation prompt and it is up to the caller to implement the actual functionality. Optionally, a custom json.dumps method can also be passed in for output generation.
entailment
def locate_package_entry_file(working_dir, package_name):
    """
    Locate a single npm package to return its browser or main entry.
    """

    pkg_root = join(working_dir, 'node_modules', package_name)
    pkg_json = join(pkg_root, 'package.json')
    if not exists(pkg_json):
        logger.debug(
            "could not locate package.json for the npm package '%s' in the "
            "current working directory '%s'; the package may have been "
            "not installed, the build process may fail",
            package_name, working_dir,
        )
        return

    with open(pkg_json) as stream:
        pkg_meta = json.load(stream)

    if 'browser' in pkg_meta or 'main' in pkg_meta:
        # assume the target file exists because configuration files
        # never lie /s
        entry = pkg_meta.get('browser') or pkg_meta['main']
        return join(pkg_root, *entry.split('/'))

    # No declared entry point - fall back to the npm default of index.js.
    fallback = join(pkg_root, 'index.js')
    if exists(fallback):
        return fallback

    logger.debug(
        "package.json for the npm package '%s' does not contain a main "
        "entry point", package_name,
    )
Locate a single npm package to return its browser or main entry.
entailment
def render_pictures(context, selection='recent', amount=3):
    """Template tag to render a list of pictures."""
    # Only pictures that live in folders attached to a published gallery.
    published_folders = Gallery.objects.filter(is_published=True).values_list(
        'folder__pk', flat=True)
    pictures = Image.objects.filter(folder__id__in=published_folders)
    # Map each supported selection mode to its queryset ordering.
    ordering = {'recent': '-uploaded_at', 'random': '?'}
    if selection not in ordering:
        return None
    context.update({
        'pictures': pictures.order_by(ordering[selection])[:amount]
    })
    return context
Template tag to render a list of pictures.
entailment
def add_header(self, entry):
    """Parses the VCF Header field and returns the number of samples in the VCF file"""
    columns = entry.split('\t')
    # The first nine columns are fixed VCF fields; everything after is a sample.
    sample_names = columns[9:]
    self.n_individuals = len(columns) - 9
    for index, sample in enumerate(sample_names):
        self.individuals[sample] = index
    return self.n_individuals > 0
Parses the VCF Header field and returns the number of samples in the VCF file
entailment
def parse_entry(self, row):
    """Parse an individual VCF entry and return a VCFEntry which contains
    information about the call (such as alternative allele, zygosity, etc.)
    """
    # Build the entry against this file's sample map, then populate it
    # from the raw tab-delimited row.
    entry = VCFEntry(self.individuals)
    entry.parse_entry(row)
    return entry
Parse an individual VCF entry and return a VCFEntry which contains information about the call (such as alternative allele, zygosity, etc.)
entailment
def add_entry(self, row):
    """This will parse the VCF entry and also store it within the VCFFile.
    It will also return the VCFEntry as well.
    """
    entry = VCFEntry(self.individuals)
    entry.parse_entry(row)
    # Index the parsed call by its genomic coordinate for later lookup.
    self.entries[(entry.chrom, entry.pos)] = entry
    return entry
This will parse the VCF entry and also store it within the VCFFile. It will also return the VCFEntry as well.
entailment
def get_header(self, individual=-1):
    """Returns the vcf header

    Reconstructs the ``##`` meta lines (extra, INFO, FILTER, FORMAT and
    ALT definitions) followed by the ``#CHROM`` column header line.

    Arguments:
        individual: -1 (default) appends every sample column; a sample
            name (str) or column index (int) appends that single sample;
            None omits sample columns entirely.

    Returns:
        The header as a single newline-joined string.
    """
    # meta.type_map maps VCF type names to python types; invert it so a
    # stored python type can be rendered back to its VCF name.
    # dict.items() replaces the Python 2-only iteritems() calls that made
    # this method raise AttributeError on Python 3 (the rest of the file
    # already uses six for compatibility).
    type_map = {val: key for key, val in self.meta.type_map.items()}
    extra = '\n'.join(['##{0}'.format(i) for i in self.meta.extra])
    info = '\n'.join([
        '##INFO=<ID={0},Number={1},Type={2},Description={3}>'.format(
            key, val.get('num_entries', '.'),
            type_map.get(val.get('type', '')), val.get('description'))
        for key, val in self.meta.info.items()])
    filter = '\n'.join([
        '##FILTER=<ID={0},Description={1}>'.format(
            key, val.get('description', '.'))
        for key, val in self.meta.filter.items()])
    format = '\n'.join([
        '##FORMAT=<ID={0},Number={1},Type={2},Description={3}>'.format(
            key, val.get('num_entries', '.'),
            type_map.get(val.get('type', '')), val.get('description'))
        for key, val in self.meta.format.items()])
    alt = '\n'.join([
        '##ALT=<ID={0},Description={1}>'.format(
            key, val.get('description', '.'))
        for key, val in self.meta.alt.items()])
    header = '\t'.join(['#CHROM', 'POS', 'ID', 'REF', 'ALT', 'QUAL',
                        'FILTER', 'INFO', 'FORMAT'])
    if individual is not None:
        if individual == -1:
            # All samples, in their stored column order.
            individual = '\t'.join(self.individuals.keys())
        elif isinstance(individual, int):
            # Translate a column index back into its sample name.
            for name, index in self.individuals.items():
                if index == individual:
                    individual = name
                    break
        header += '\t' + individual
    return '\n'.join([extra, info, filter, format, alt, header])
Returns the vcf header
entailment
def add_info(self, entry):
    """Parse and store the info field

    Parses a ``##INFO=<ID=...,Number=...,Type=...,Description=...>`` meta
    line into ``self.info``.

    Arguments:
        entry: the raw ``##INFO=<...>`` header line.

    Returns:
        True when the line carried the four mandatory keys, else False.
    """
    # Strip the leading '##INFO=<' and the trailing '>'.
    entry = entry[8:-1]
    info = entry.split(',')
    if len(info) < 4:
        # ID, Number, Type and Description are all mandatory.
        return False
    for v in info:
        key, value = v.split('=', 1)
        if key == 'ID':
            self.info[value] = {}
            id_ = value
        elif key == 'Number':
            # 'A' (one value per alt allele) and 'G' (one per genotype)
            # are variable counts; both are normalized to -1.
            if value == 'A' or value == 'G':
                value = -1
            self.info[id_]['num_entries'] = value
        elif key == 'Type':
            self.info[id_]['type'] = self.type_map[value]
        elif key == 'Description':
            self.info[id_]['description'] = value
            if len(info) > 4:
                # The description itself contained commas, so the split
                # above broke it apart; stitch the remainder back with the
                # original comma separators (previously this was joined
                # with '; ' and without the leading comma, corrupting the
                # stored text).
                self.info[id_]['description'] += ',' + ','.join(info[4:])
            # Description is the last mandatory key; any remaining
            # fragments were part of it, so stop scanning.
            break
    return True
Parse and store the info field
entailment
def add_filter(self, entry):
    """Parse and store the filter field

    Parses a ``##FILTER=<ID=...,Description=...>`` meta line into
    ``self.filter``.

    Arguments:
        entry: the raw ``##FILTER=<...>`` header line.

    Returns:
        True when both mandatory keys were present, else False.
    """
    # Strip the leading '##FILTER=<' and the trailing '>'.
    entry = entry[10:-1]
    info = entry.split(',')
    if len(info) < 2:
        return False
    for v in info:
        key, value = v.split('=', 1)
        if key == 'ID':
            self.filter[value] = {}
            id_ = value
        elif key == 'Description':
            self.filter[id_]['description'] = value
            if len(info) > 2:
                # Rejoin a description that was broken apart by the comma
                # split.  This previously appended to self.info instead of
                # self.filter (polluting the INFO table) and used a '; '
                # joiner without the leading comma, corrupting the text.
                self.filter[id_]['description'] += ',' + ','.join(info[2:])
            # Description is the last field; any remaining fragments
            # belonged to it, so stop before they fail the '=' split.
            break
    return True
Parse and store the filter field
entailment
def add_alt(self, entry):
    """Parse and store the alternative allele field

    Parses a ``##ALT=<ID=...,Description=...>`` meta line into
    ``self.alt``.

    Arguments:
        entry: the raw ``##ALT=<...>`` header line.

    Returns:
        True when both mandatory keys were present, else False.
    """
    # Strip the leading '##ALT=<' and the trailing '>'.
    entry = entry[7:-1]
    info = entry.split(',')
    if len(info) < 2:
        return False
    for v in info:
        key, value = v.split('=', 1)
        if key == 'ID':
            self.alt[value] = {}
            id_ = value
        elif key == 'Description':
            self.alt[id_]['description'] = value
            if len(info) > 2:
                # ALT lines only carry ID and Description, so everything
                # past index 1 is description text broken by the comma
                # split.  The previous index-4 bounds were copied from the
                # INFO parser and silently dropped fragments; rejoin with
                # the original comma separators instead.
                self.alt[id_]['description'] += ',' + ','.join(info[2:])
            break
    return True
Parse and store the alternative allele field
entailment
def sample_string(self, individual=-1):
    """Returns the VCF entry as it appears in the vcf file"""
    # Fixed columns first, then one colon-joined block per sample.
    fixed = str(self)
    per_format = self.get_sample_info(individual=individual)
    # zip(*values) regroups the per-format value lists into per-sample
    # tuples, preserving the FORMAT field order.
    sample_blocks = []
    for sample_values in zip(*per_format.values()):
        sample_blocks.append(':'.join(str(value) for value in sample_values))
    return '\t'.join([fixed, '\t'.join(sample_blocks)])
Returns the VCF entry as it appears in the vcf file
entailment
def get_sample_info(self, individual=-1):
    """Returns the sample info of a given sample or all by default

    Arguments:
        individual: sample name (str), sample column index (int), or -1
            (default) for every sample.

    Returns:
        OrderedDict mapping each declared FORMAT field (GT/GQ/DP) to a
        list of per-sample values.
    """
    if isinstance(individual, str):
        # Translate a sample name into its column index.
        individual = self.individuals[individual]
    extra = OrderedDict()
    for format_ in self.format:
        # Each known FORMAT field has a same-named index attribute on
        # self (set by parse_entry); -1 means the field is absent.
        index = getattr(self, format_)
        if index != -1:
            if format_ == 'GT':
                d = self.genotype
            elif format_ == 'GQ':
                d = self.genome_quality
            elif format_ == 'DP':
                d = self.depth
            # NOTE(review): a FORMAT field other than GT/GQ/DP (e.g. FT)
            # reuses the previous iteration's mapping ``d`` -- or raises
            # NameError if it comes first.  Confirm whether other fields
            # should be supported or skipped here.
            if individual == -1:
                if len(d) != len(self.samples):
                    # Lazily parse any samples not yet decoded before
                    # collecting values for all of them.
                    [self.parse_sample(i) for i in six.moves.range(len(self.samples))]
                extra[format_] = [d[i] for i in six.moves.range(len(d))]
            else:
                if individual not in d:
                    # Lazily parse just the requested sample.
                    self.parse_sample(individual)
                extra[format_] = [d[individual]]
    return extra
Returns the sample info of a given sample or all by default
entailment
def is_homozygous(self, individual=None):
    """This will give a boolean list corresponding to whether each individual
    is homozygous for the alternative allele.

    Arguments:
        individual: sample name (str), sample index (int), or None
            (default) to report every sample.

    Returns:
        list of bool, one entry per requested sample.
    """
    if individual is not None:
        if isinstance(individual, str):
            # Translate a sample name into its column index.
            individual = self.individuals[individual]
        alts = self.genotype[individual]
        # Homozygous-alt means every allele call is non-reference.
        return [sum(alts) == len(alts)] if sum(alts) > 0 else [False]
    else:
        # dict.items() replaces the Python 2-only iteritems() call that
        # raised AttributeError under Python 3.
        return [sum(alts) == len(alts) if sum(alts) > 0 else False
                for i, alts in self.genotype.items()]
This will give a boolean list corresponding to whether each individual is homozygous for the alternative allele.
entailment
def get_alt(self, individual=0, nucleotides_only=True):
    """Returns the alternative alleles of the individual as a list"""
    if isinstance(individual, str):
        individual = self.individuals[individual]
    # Allele index 0 is the reference; a positive index i points at
    # self.alt[i-1].  Symbolic alleles such as <DEL:ME:ALU> carry no
    # nucleotide sequence, hence the optional '<' filter below.
    alleles = []
    for allele_index in self.genotype[individual]:
        if allele_index <= 0:
            continue
        allele = self.alt[allele_index - 1]
        if nucleotides_only and allele.startswith('<'):
            continue
        alleles.append(allele.replace('.', ''))
    return alleles
Returns the alternative alleles of the individual as a list
entailment
def get_alt_length(self, individual=0):
    """Returns the number of basepairs of each alternative allele"""
    if isinstance(individual, str):
        individual = self.individuals[individual]
    # Lengths are measured after stripping '.' placeholders; symbolic
    # alleles ('<...>') have no sequence and are skipped.
    lengths = []
    for allele_index in self.genotype[individual]:
        if allele_index > 0 and not self.alt[allele_index - 1].startswith('<'):
            lengths.append(len(self.alt[allele_index - 1].replace('.', '')))
    return lengths
Returns the number of basepairs of each alternative allele
entailment
def get_alt_lengths(self):
    """Returns the longest length of the variant. For deletions, return
    is negative, SNPs return 0, and insertions are +. None return
    corresponds to no variant in interval for specified individual
    """
    # this is a hack: len(self.genotype) doubles as the sample count so
    # the count does not have to be stored separately.
    ref_length = len(self.ref)
    results = []
    for sample_index in six.moves.range(len(self.genotype)):
        lengths = self.get_alt_length(individual=sample_index)
        # An empty list means no sequence-bearing alt allele was called
        # for this sample.
        results.append(max(lengths) - ref_length if lengths else None)
    return results
Returns the longest length of the variant. For deletions, return is negative, SNPs return 0, and insertions are +. None return corresponds to no variant in interval for specified individual
entailment
def has_snp(self, individual=0):
    """Returns a boolean list of SNP status, ordered by samples"""
    if isinstance(individual, str):
        individual = self.individuals[individual]
    called_alts = self.get_alt(individual=individual)
    if not called_alts:
        # No alt alleles called for this sample.
        return [False]
    # A SNP differs from the reference while keeping its length.
    return [allele != self.ref and len(allele) == len(self.ref)
            for allele in called_alts]
Returns a boolean list of SNP status, ordered by samples
entailment
def parse_entry(self, entry):
    """This parses a VCF row and stores the relevant information"""
    fields = entry.split('\t')
    (self.chrom, self.pos, self.id, self.ref, raw_alt, self.qual,
     raw_filter, raw_info, raw_format) = fields[:9]
    self.samples = fields[9:]
    self.alt = raw_alt.split(',')
    # FILTER is either a pass marker ('PASS' or '.') or a semicolon
    # delimited list of the filters that failed.
    if raw_filter in ('PASS', '.'):
        self.passed = True
    else:
        self.passed = raw_filter.split(';')
    # INFO is kept verbatim; it is not decomposed further at present.
    self.info = raw_info
    self.format = raw_format.split(':')
    # Remember the position of each recognized per-sample field when it
    # is declared in the FORMAT column.
    for field_name in ('GT', 'GQ', 'DP', 'FT'):
        if field_name in self.format:
            setattr(self, field_name, self.format.index(field_name))
This parses a VCF row and stores the relevant information
entailment
def add_child(self, child):
    """Children are GFFFeatures and are defined when added. This is done
    to avoid memory overheads that may be incurred by GFF files that have
    millions of rows.
    """
    identifier = getattr(child, 'id', None)
    if not identifier:
        # Features without an id are not tracked.
        return
    if not hasattr(self, 'children'):
        # The children mapping is created lazily, only for parents that
        # actually acquire children.
        self.children = {}
    # First registration wins; later duplicates are ignored.
    self.children.setdefault(identifier, child)
Children are GFFFeatures and are defined when added. This is done to avoid memory overheads that may be incurred by GFF files that have millions of rows.
entailment
def _iso_handler(obj): """ Transforms an object into it's ISO format, if possible. If the object can't be transformed, then an error is raised for the JSON parser. This is meant to be used on datetime instances, but will work with any object having a method called isoformat. :param obj: object to transform into it's ISO format :return: the ISO format of the object """ if hasattr(obj, 'isoformat'): result = obj.isoformat() else: raise TypeError("Unserializable object {} of type {}".format(obj, type(obj))) return result
Transforms an object into it's ISO format, if possible. If the object can't be transformed, then an error is raised for the JSON parser. This is meant to be used on datetime instances, but will work with any object having a method called isoformat. :param obj: object to transform into it's ISO format :return: the ISO format of the object
entailment
def try_encode(field_encoders, entity_dict):
    """
    Inner encoding and try return string from entity dictionary

    Runs every encoder against the entity; if any encoder needs a field
    that the entity lacks, the whole combination is rejected.

    :param field_encoders: iterable of encoders, each exposing an
        ``encode(entity_dict)`` method that may raise KeyError
    :param entity_dict: mapping of entity field values
    :return: the concatenated encoded string, or False when a required
        field was missing (an empty encoder list yields '')
    """
    parts = []
    for field_encoder in field_encoders:
        try:
            parts.append(field_encoder.encode(entity_dict))
        except KeyError:
            # A missing field invalidates this encoder combination.
            # (The exception object was previously bound but unused.)
            return False
    # join avoids the quadratic cost of repeated string +=.
    return ''.join(parts)
Inner encoding and try return string from entity dictionary :param field_encoders: :param entity_dict: :return:
entailment
def encode(self, entity):
    """
    Generate string of cwr format for all possible combinations of
    fields, accumulate and then elect the best. The best string it is
    who used most of all fields.

    :param entity: the entity to serialize into a CWR record line
    :return: the CWR record string, terminated with CRLF
    """
    possible_results = []
    entity_dict = self.get_entity_dict(entity)
    record_field_encoders = self.get_record_fields_encoders()
    # Try every candidate combination of field encoders; combinations
    # that hit a missing field return a falsy value and are skipped.
    for field_encoders in record_field_encoders:
        result = self.try_encode(field_encoders, entity_dict)
        if result:
            possible_results.append({'result': result, 'len': len(field_encoders)})
    # Prefix the record head, then append the winning body -- the one
    # that consumed the most fields -- and the CRLF record terminator.
    cwr = self.head(entity) + self._get_best_result(possible_results) + "\r\n"
    return cwr
Generate string of cwr format for all possible combinations of fields, accumulate and then elect the best. The best string it is who used most of all fields :param entity: :return:
entailment
def argparser(self):
    """
    For setting up the argparser for this instance.
    """
    if self.__argparser is not None:
        return self.__argparser
    # Build lazily on first access; cache before init_argparser runs so
    # repeated access during initialization sees the same instance.
    self.__argparser = self.argparser_factory()
    self.init_argparser(self.__argparser)
    return self.__argparser
For setting up the argparser for this instance.
entailment
def argparser_factory(self):
    """
    Produces argparser for this type of Runtime.
    """
    # -h/--help is deliberately not auto-added (add_help=False); the
    # instance docstring doubles as the parser description.
    return ArgumentParser(
        description=self.__doc__,
        prog=self.prog,
        add_help=False,
    )
Produces argparser for this type of Runtime.
entailment
def init_argparser(self, argparser):
    """
    This should not be called with an external argparser as it will
    corrupt tracking data if forced.

    Discovers subcommand runtimes via entry points and registers each as
    an argparse subparser, handling duplicate names, conflicting
    registrations and runtimes whose own init_argparser misbehaves.
    """

    def prepare_argparser():
        # Guard against double initialization of the same parser; a
        # fresh ArgumentParserDetails triple is created otherwise.
        if argparser in self.argparser_details:
            return False
        result = self.argparser_details[argparser] = ArgumentParserDetails(
            {}, {}, {})
        return result

    def to_module_attr(ep):
        # Stable fallback command name derived from the entry point's
        # module path and attribute chain, e.g. 'pkg.mod:Cls.attr'.
        return '%s:%s' % (ep.module_name, '.'.join(ep.attrs))

    def register(name, runtime, entry_point):
        # NOTE(review): uses the loop variable ``inst`` for the help and
        # description rather than the ``runtime`` parameter -- works only
        # because register is called while ``inst`` is current; confirm.
        subparser = commands.add_parser(
            name,
            help=inst.description,
        )
        # Have to specify this separately because otherwise the
        # subparser will not have a proper description when it is
        # invoked as the root.
        subparser.description = inst.description
        # Assign values for version reporting system
        setattr(subparser, ATTR_ROOT_PKG, getattr(
            argparser, ATTR_ROOT_PKG, self.package_name))
        subp_info = []
        subp_info.extend(getattr(argparser, ATTR_INFO, []))
        subp_info.append((subparser.prog, entry_point.dist))
        setattr(subparser, ATTR_INFO, subp_info)
        try:
            try:
                runtime.init_argparser(subparser)
            except RuntimeError as e:
                # first attempt to filter out recursion errors; also if
                # the stack frame isn't available the complaint about
                # bad validation doesn't apply anyway.
                frame = currentframe()
                if (not frame or 'maximum recursion depth' not in str(
                        e.args)):
                    raise
                if (not isinstance(runtime, Runtime) or (type(
                        runtime).entry_point_load_validated.__code__ is
                        Runtime.entry_point_load_validated.__code__)):
                    # welp, guess some other thing blew up then, or
                    # that the problem is definitely not caused by
                    # this runtime implementation.
                    # TODO figure out how to log this nicer via the
                    # self.log_debug_error without exploding the
                    # console like Megumin would have done.
                    raise
                # assume the overridden method didn't do everything
                # correctly then; would be great if there is a way
                # to ensure that our thing would have been called.
                cls = type(runtime)
                logger.critical(
                    "Runtime subclass at entry_point '%s' has override "
                    "'entry_point_load_validated' without filtering out "
                    "its parent classes; this can be addressed by calling "
                    "super(%s.%s, self).entry_point_load_validated("
                    "entry_point) in its implementation, or simply don't "
                    "override that method to avoid infinite recursion.",
                    entry_point, cls.__module__, cls.__name__,
                )
                exc = RuntimeError(
                    "%r has an invalid 'entry_point_load_validated' "
                    "implementation: insufficient protection against "
                    "infinite recursion into self not provided" % runtime
                )
                # for Python 3 to not blow it up.
                exc.__suppress_context__ = True
                raise exc
        except Exception as e:
            self.log_debug_error(
                "cannot register entry_point '%s' from '%s' as a "
                "subcommand to '%s': %s: %s",
                entry_point, entry_point.dist, argparser.prog,
                e.__class__.__name__, e
            )
            # this is where naughty things happen: will be poking at
            # the parser internals to undo the damage that was done
            # first, pop the choices_actions as a help was provided
            commands._choices_actions.pop()
            # then pop the name that was mapped.
            commands._name_parser_map.pop(name)
        else:
            # finally record the completely initialized subparser
            # into the structure here if successful.
            subparsers[name] = subparser
            runtimes[name] = runtime
            entry_points[name] = entry_point

    details = prepare_argparser()
    if not details:
        logger.debug(
            'argparser %r has already been initialized against runner %r',
            argparser, self,
        )
        return
    subparsers, runtimes, entry_points = details
    super(Runtime, self).init_argparser(argparser)
    commands = argparser.add_subparsers(
        dest=self.action_key, metavar='<command>')
    # Python 3.7 has required set to True, which is correct in most
    # cases but this disables the manual handling for cases where a
    # command was not provided; also this generates a useless error
    # message that simply states "<command> is required" and forces
    # the program to exit.  As the goal of this suite of classes is
    # to act as a helpful CLI front end, force required to be False
    # to keep our manual handling and management of subcommands.
    # Setting this as a property for compatibility with Python<3.7,
    # as only in Python>=3.7 the add_subparsers can accept required
    # as an argument.
    commands.required = False
    for entry_point in self.iter_entry_points():
        inst = self.entry_point_load_validated(entry_point)
        if not inst:
            continue
        if entry_point.name in runtimes:
            reg_ep = entry_points[entry_point.name]
            reg_rt = runtimes[entry_point.name]
            if reg_rt is inst:
                # this is fine, multiple packages declared the same
                # thing with the same name.
                logger.debug(
                    "duplicated registration of command '%s' via entry "
                    "point '%s' ignored; registered '%s', confict '%s'",
                    entry_point.name, entry_point, reg_ep.dist,
                    entry_point.dist,
                )
                continue
            logger.error(
                "a calmjs runtime command named '%s' already registered.",
                entry_point.name
            )
            logger.info("conflicting entry points are:")
            logger.info(
                "'%s' from '%s' (registered)", reg_ep, reg_ep.dist)
            logger.info(
                "'%s' from '%s' (conflict)", entry_point, entry_point.dist)
            # Fall back name should work if the class/instances are
            # stable.
            name = to_module_attr(entry_point)
            if name in runtimes:
                # Maybe this is the third time this module is
                # registered. Test for its identity.
                if runtimes[name] is not inst:
                    # Okay someone is having a fun time here mucking
                    # with data structures internal to here, likely
                    # (read hopefully) due to testing or random
                    # monkey patching (or module level reload).
                    logger.critical(
                        "'%s' is already registered but points to a "
                        "completely different instance; please try again "
                        "with verbose logging and note which packages are "
                        "reported as conflicted; alternatively this is a "
                        "forced situation where this Runtime instance has "
                        "been used or initialized improperly.", name
                    )
                else:
                    logger.debug(
                        "fallback command '%s' is already registered.",
                        name
                    )
                continue
            logger.error(
                "falling back to using full instance path '%s' as command "
                "name, also registering alias for registered command",
                name
            )
            # NOTE(review): only the alias for the already-registered
            # runtime is created here; the fallback ``name`` computed for
            # the conflicting entry point is never itself registered --
            # confirm whether that is intentional.
            register(to_module_attr(reg_ep), reg_rt, reg_ep)
        else:
            name = entry_point.name
            register(name, inst, entry_point)
This should not be called with an external argparser as it will corrupt tracking data if forced.
entailment
def unrecognized_arguments_error(self, args, parsed, extras): """ This exists because argparser is dumb and naive and doesn't fail unrecognized arguments early. """ # loop variants kwargs = vars(parsed) failed = list(extras) # initial values runtime, subparser, idx = (self, self.argparser, 0) # recursion not actually needed when it can be flattened. while isinstance(runtime, Runtime): cmd = kwargs.pop(runtime.action_key) # can happen if it wasn't set, or is set but from a default # value (thus not provided by args) action_idx = None if cmd not in args else args.index(cmd) if cmd not in args and cmd is not None: # this normally shouldn't happen, and the test case # showed that the parsing will not flip down to the # forced default subparser - this can remain a debug # message until otherwise. logger.debug( "command for prog=%r is set to %r without being specified " "as part of the input arguments - the following error " "message may contain misleading references", subparser.prog, cmd ) subargs = args[idx:action_idx] subparsed, subextras = subparser.parse_known_args(subargs) if subextras: subparser.unrecognized_arguments_error(subextras) # since the failed arguments are in order failed = failed[len(subextras):] if not failed: # have taken everything, quit now. # also note that if cmd was really None it would # cause KeyError below, but fortunately it also # forced action_idx to be None which took all # remaining tokens from failed, so definitely get # out of here. break # advance the values # note that any internal consistency will almost certainly # result in KeyError being raised. details = runtime.get_argparser_details(subparser) runtime = details.runtimes[cmd] subparser = details.subparsers[cmd] idx = action_idx + 1 if failed: subparser.unrecognized_arguments_error(failed) sys.exit(2)
This exists because argparser is dumb and naive and doesn't fail unrecognized arguments early.
entailment
def error(self, argparser, target, message):
    """
    This was used as part of the original non-recursive lookup for the
    target parser.
    """
    warnings.warn(
        'Runtime.error is deprecated and will be removed by calmjs-4.0.0',
        DeprecationWarning)
    details = self.get_argparser_details(argparser)
    # Dispatch to the tracked target subparser when details exist,
    # otherwise fall back to this runtime's root parser.
    parser = details.subparsers[target] if details else self.argparser
    parser.error(message)
This was used as part of the original non-recursive lookup for the target parser.
entailment
def init_argparser_export_target(
        self, argparser,
        default=None,
        help='the export target',
        ):
    """
    Subclass could override this by providing alternative keyword
    arguments and call this as its super.  It should not reimplement
    this completely.

    Example:

        def init_argparser_export_target(self, argparser):
            super(MyToolchainRuntime, self).init_argparser_export_target(
                argparser,
                default='my_default.js',
                help="the export target, default is 'my_default.js'",
            )

    Note that the above example will prevent its subclasses from
    directly using the definition of that class, but they _can_ simply
    call the exact same super, or invoke ToolchainRuntime's
    init_argparser_* method directly.

    Arguments

    default
        The default export target.
    help
        The help text.
    """
    # Flag to skip the overwrite confirmation prompt entirely.
    argparser.add_argument(
        '-w', '--overwrite',
        action='store_true',
        dest=EXPORT_TARGET_OVERWRITE,
        help='overwrite the export target without any confirmation',
    )
    argparser.add_argument(
        '--export-target',
        dest=EXPORT_TARGET,
        default=default,
        metavar=metavar(EXPORT_TARGET),
        help=help,
    )
Subclass could override this by providing alternative keyword arguments and call this as its super. It should not reimplement this completely. Example: def init_argparser_export_target(self, argparser): super(MyToolchainRuntime, self).init_argparser_export_target( argparser, default='my_default.js', help="the export target, default is 'my_default.js'", ) Note that the above example will prevent its subclasses from directly using the definition of that class, but they _can_ simply call the exact same super, or invoke ToolchainRuntime's init_argparser_* method directly. Arguments default The default export target. help The help text.
entailment
def init_argparser_working_dir(
        self, argparser,
        explanation='',
        help_template=(
            'the working directory; %(explanation)s'
            'default is current working directory (%(cwd)s)'),
        ):
    """
    Subclass could add an extra explanation on how this is used.

    Arguments

    explanation
        Explanation text for the default help template
    help_template
        A standard help message for this option.
    """
    # Resolve the toolchain's working directory once so the rendered
    # help text and the argparse default agree.
    cwd = self.toolchain.join_cwd()
    rendered_help = help_template % {'explanation': explanation, 'cwd': cwd}
    argparser.add_argument(
        '--working-dir',
        metavar=metavar(WORKING_DIR),
        dest=WORKING_DIR,
        default=cwd,
        help=rendered_help,
    )
Subclass could add an extra explanation on how this is used. Arguments explanation Explanation text for the default help template help_template A standard help message for this option.
entailment
def init_argparser_build_dir(
        self, argparser, help=(
            'the build directory, where all sources will be copied to '
            'as part of the build process; if left unspecified, the '
            'default behavior is to create a new temporary directory '
            'that will be removed upon conclusion of the build; if '
            'specified, it must be an existing directory and all files '
            'for the build will be copied there instead, overwriting any '
            'existing file, with no cleanup done after.'
        )):
    """
    For setting up build directory
    """
    # No default value: a missing build dir signals "use a fresh
    # temporary directory" downstream.
    argparser.add_argument(
        '--build-dir',
        dest=BUILD_DIR,
        metavar=metavar(BUILD_DIR),
        default=None,
        help=help,
    )
For setting up build directory
entailment
def init_argparser_optional_advice(
        self, argparser, default=None, help=(
            'a comma separated list of packages to retrieve optional '
            'advice from; the provided packages should have registered '
            'the appropriate entry points for setting up the advices for '
            'the toolchain; refer to documentation for the specified '
            'packages for details'
        )):
    """
    For setting up optional advice.

    Arguments

    default
        The default list of advice packages; an empty list when omitted.
    help
        The help text.
    """
    # The previous literal [] default was a mutable default argument,
    # shared between every call (and thus every parser it configured);
    # build a fresh list per invocation instead.
    if default is None:
        default = []
    argparser.add_argument(
        '--optional-advice',
        default=default,
        required=False,
        dest=ADVICE_PACKAGES,
        action=StoreRequirementList,
        metavar='<advice>[,<advice>[...]]',
        help=help
    )
For setting up optional advice.
entailment
def init_argparser(self, argparser):
    """
    Other runtimes (or users of ArgumentParser) can pass their
    subparser into here to collect the arguments here for a
    subcommand.
    """
    super(ToolchainRuntime, self).init_argparser(argparser)
    # Subclasses may fully override this, but when this runtime drives
    # the toolchain they should keep this layout; omitting any of these
    # merely leaves the matching spec arguments unset.
    for setup in (
            self.init_argparser_export_target,
            self.init_argparser_working_dir,
            self.init_argparser_build_dir,
            self.init_argparser_optional_advice,
    ):
        setup(argparser)
Other runtimes (or users of ArgumentParser) can pass their subparser into here to collect the arguments here for a subcommand.
entailment
def prepare_spec(self, spec, **kwargs):
    """
    Prepare a spec for usage with the generic ToolchainRuntime.

    Subclasses should avoid overriding this; override create_spec
    instead.

    :param spec: the toolchain spec to prepare in place
    :param kwargs: the parsed runtime arguments, forwarded to each
        preparation step
    """
    self.prepare_spec_debug_flag(spec, **kwargs)
    self.prepare_spec_export_target_checks(spec, **kwargs)
    # defer the setup till the actual toolchain invocation
    spec.advise(SETUP, self.prepare_spec_advice_packages, spec, **kwargs)
Prepare a spec for usage with the generic ToolchainRuntime. Subclasses should avoid overriding this; override create_spec instead.
entailment
def kwargs_to_spec(self, **kwargs):
    """
    Turn the provided kwargs into arguments ready for toolchain.
    """
    # Build the spec first, then run the standard runtime preparation
    # over it before handing it back.
    prepared = self.create_spec(**kwargs)
    self.prepare_spec(prepared, **kwargs)
    return prepared
Turn the provided kwargs into arguments ready for toolchain.
entailment
def init_argparser_package_names(self, argparser, help=(
        'names of the python package to generate artifacts for; '
        'note that the metadata directory for the specified '
        'packages must be writable')):
    """
    Default helper for setting up the package_names option.

    This is separate so that subclasses are not assumed for the
    purposes of artifact creation; they should consider modifying the
    default help message to reflect the fact.
    """
    # One or more positional package names are required.
    argparser.add_argument(
        'package_names',
        nargs='+',
        metavar=metavar('package'),
        help=help,
    )
Default helper for setting up the package_names option. This is separate so that subclasses are not assumed for the purposes of artifact creation; they should consider modifying the default help message to reflect the fact.
entailment
def init_argparser_source_registry(
        self, argparser, default=None, help=(
            'comma separated list of registries to use for gathering '
            'JavaScript sources from the given Python packages'
        )):
    """
    For setting up the source registry flag.
    """
    # Both spellings share the same destination, default and list
    # parsing action.
    common = dict(
        default=default,
        dest=CALMJS_MODULE_REGISTRY_NAMES,
        action=StoreDelimitedList,
    )
    argparser.add_argument(
        '--source-registry',
        metavar='<registry>[,<registry>[...]]',
        help=help,
        **common
    )
    # Suppressed plural spelling writing to the same destination.
    argparser.add_argument(
        '--source-registries',
        help=SUPPRESS,
        **common
    )
For setting up the source registry flag.
entailment