_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q39200
SQLAlchemyProxy.deptree
train
def deptree(self, field, oids, date=None, level=None, table=None):
    '''Dependency tree builder.

    Recursively fetches objects that are children of the initial set of
    parent object ids provided.

    :param field: Field that contains the 'parent of' data
    :param oids: Object oids to build dependency tree for
    :param date: date (metrique date range) that should be queried.
                 If date==None then the most recent versions of the
                 objects will be queried.
    :param level: limit depth of recursion
    '''
    table = self.get_table(table)
    fringe = str2list(oids)
    checked = set(fringe)
    depth = 0
    while fringe:
        # Stop once the requested recursion depth has been reached.
        if level and depth == abs(level):
            break
        docs = self.find(table=table, query='_oid in %s' % list(fringe),
                         fields=[field], date=date, raw=True)
        next_fringe = set()
        for doc in docs:
            for oid in (doc[field] or []):
                if oid not in checked:
                    next_fringe.add(oid)
        fringe = next_fringe
        checked |= fringe
        depth += 1
    return sorted(checked)
python
{ "resource": "" }
q39201
SQLAlchemyProxy.get_last_field
train
def get_last_field(self, field, table=None):
    '''Shortcut for querying to get the last field value for
    a given owner, cube.

    :param field: field name to query
    '''
    if not is_array(field):
        field = [field]
    table = self.get_table(table, except_=False)
    if table is None:
        last = None
    else:
        is_defined(field, 'field must be defined!')
        # Sort descending on the field itself and take only the top row.
        last = self.find(table=table, fields=field, scalar=True,
                         sort=field, limit=1, descending=True, date='~',
                         default_fields=False)
    logger.debug("last %s.%s: %s" % (table, list2str(field), last))
    return last
python
{ "resource": "" }
q39202
SQLAlchemyProxy.index_list
train
def index_list(self):
    '''List all cube indexes.

    Returns a dict mapping each table name to the list of its indexes
    (tables without indexes map to an empty list).
    '''
    logger.info('Listing indexes')
    inspector = self.inspector
    # One entry per table, even when the table has no indexes.
    return {tbl: [ix for ix in inspector.get_indexes(tbl)]
            for tbl in inspector.get_table_names()}
python
{ "resource": "" }
q39203
SQLAlchemyProxy.ls
train
def ls(self, startswith=None):
    '''List all cubes available to the calling client.

    :param startswith: string to use in a simple "startswith" query filter
    :returns list: sorted list of cube names
    '''
    logger.info('Listing cubes starting with "%s")' % startswith)
    prefix = unicode(startswith or '')
    matching = [name for name in self.db_tables if name.startswith(prefix)]
    return sorted(matching)
python
{ "resource": "" }
q39204
muscle_seqs
train
def muscle_seqs(seqs, add_seq_names=False, out_filename=None,
                input_handler=None, params=None,
                WorkingDir=tempfile.gettempdir(), SuppressStderr=None,
                SuppressStdout=None):
    """Muscle align list of sequences.

    seqs: a list of sequences as strings or objects (you must set
    add_seq_names=True), or sequences in a multiline string (as read()
    from a fasta file), or sequences in a list of lines (as readlines()
    from a fasta file), or a fasta seq filename.

    Decision rules for data are as follows. If it's a list, treat as
    lines, unless add_seq_names is true (in which case treat as list of
    seqs). If it's a string, test whether it has newlines. If it doesn't
    have newlines, assume it's a filename. If it does have newlines, it
    can't be a filename, so assume it's a multiline string containing
    sequences. If you want to skip the detection and force a specific
    type of input handler, use input_handler='your_favorite_handler'.

    add_seq_names: boolean. if True, sequence names are inserted in the
    list of sequences. If False, it assumes seqs is a list of lines of
    some proper format that the program can handle.
    """
    # Fix: the original used the mutable default params={} and mutated it
    # below, so "-out" (and caller-supplied flags) leaked between calls.
    params = {} if params is None else params
    if out_filename:
        params["-out"] = out_filename
    ih = input_handler or guess_input_handler(seqs, add_seq_names)
    muscle_app = Muscle(params=params,
                        InputHandler=ih,
                        WorkingDir=WorkingDir,
                        SuppressStderr=SuppressStderr,
                        SuppressStdout=SuppressStdout)
    return muscle_app(seqs)
python
{ "resource": "" }
q39205
cluster_seqs
train
def cluster_seqs(seqs, neighbor_join=False, params=None, add_seq_names=True,
                 WorkingDir=tempfile.gettempdir(), SuppressStderr=None,
                 SuppressStdout=None, max_chars=1000000, max_hours=1.0,
                 constructor=PhyloNode, clean_up=True):
    """Muscle cluster list of sequences.

    seqs: either file name or list of sequence objects or list of strings
    or single multiline string containing sequences.

    Returns the guide tree produced by muscle's first clustering
    iteration, parsed with `constructor`.
    """
    # Fix: original used mutable default params={} and mutated it, so
    # "-maxiters"/"-tree1" etc. leaked between calls.
    params = {} if params is None else params
    num_seqs = len(seqs)
    if num_seqs < 2:
        # py2/py3-compatible raise (was `raise ValueError, "..."`).
        raise ValueError("Muscle requres 2 or more sequences to cluster.")
    num_chars = sum(map(len, seqs))
    if num_chars > max_chars:
        # Large input: trade accuracy for speed.
        params["-maxiters"] = 2
        params["-diags1"] = True
        params["-sv"] = True
        print("lots of chars, using fast align %s" % num_chars)
    params["-maxhours"] = max_hours
    # NOTE(review): neighbor_join is currently unused (the original
    # cluster-type selection was commented out); kept for compatibility.
    params["-clusteronly"] = True
    params["-tree1"] = get_tmp_filename(WorkingDir)
    muscle_res = muscle_seqs(seqs,
                             params=params,
                             add_seq_names=add_seq_names,
                             WorkingDir=WorkingDir,
                             SuppressStderr=SuppressStderr,
                             SuppressStdout=SuppressStdout)
    tree = DndParser(muscle_res["Tree1Out"], constructor=constructor)
    if clean_up:
        muscle_res.cleanUp()
    return tree
python
{ "resource": "" }
q39206
aln_tree_seqs
train
def aln_tree_seqs(seqs, input_handler=None, tree_type='neighborjoining',
                  params=None, add_seq_names=True,
                  WorkingDir=tempfile.gettempdir(), SuppressStderr=None,
                  SuppressStdout=None, max_hours=5.0, constructor=PhyloNode,
                  clean_up=True):
    """Muscle align sequences and report tree from iteration2.

    Unlike cluster_seqs, returns tree2 which is the tree made during the
    second muscle iteration (it should be more accurate than the cluster
    from the first iteration which is made fast based on k-mer words)

    seqs: either file name or list of sequence objects or list of strings
    or single multiline string containing sequences.
    tree_type: can be either neighborjoining (default) or upgmb for UPGMA
    clean_up: When true, will clean up output files

    Returns (tree, aln) where aln is the raw list of alignment lines.
    """
    # Fix: original used mutable default params={} and mutated it, so the
    # temp -tree2/-out paths leaked between calls.
    params = {} if params is None else params
    params["-maxhours"] = max_hours
    if tree_type:
        params["-cluster2"] = tree_type
    params["-tree2"] = get_tmp_filename(WorkingDir)
    params["-out"] = get_tmp_filename(WorkingDir)
    muscle_res = muscle_seqs(seqs,
                             input_handler=input_handler,
                             params=params,
                             add_seq_names=add_seq_names,
                             WorkingDir=WorkingDir,
                             SuppressStderr=SuppressStderr,
                             SuppressStdout=SuppressStdout)
    tree = DndParser(muscle_res["Tree2Out"], constructor=constructor)
    aln = [line for line in muscle_res["MuscleOut"]]
    if clean_up:
        muscle_res.cleanUp()
    return tree, aln
python
{ "resource": "" }
q39207
align_and_build_tree
train
def align_and_build_tree(seqs, moltype, best_tree=False, params=None):
    """Returns an alignment and a tree from Sequences object seqs.

    seqs: a cogent.core.alignment.SequenceCollection object, or data that
    can be used to build one.

    moltype: cogent.core.moltype.MolType object

    best_tree: if True (default:False), uses a slower but more accurate
    algorithm to build the tree.

    params: dict of parameters to pass in to the Muscle app controller.

    The result is a dict with keys 'Align' and 'Tree', holding a
    cogent.core.alignment.Alignment and a cogent.core.tree.PhyloNode
    object respectively (or None for the alignment and/or tree if either
    fails).  NOTE: the original docstring claimed a tuple was returned;
    the code has always returned a dict.
    """
    aln = align_unaligned_seqs(seqs, moltype=moltype, params=params)
    tree = build_tree_from_alignment(aln, moltype, best_tree, params)
    return {'Align': aln, 'Tree': tree}
python
{ "resource": "" }
q39208
Muscle._input_as_multifile
train
def _input_as_multifile(self, data):
    """For use with the -profile option

    This input handler expects data to be a tuple containing two
    filenames. Index 0 will be set to -in1 and index 1 to -in2

    :raises ValueError: if data is truthy but not a two-item sequence.
    """
    if data:
        try:
            filename1, filename2 = data
        except (TypeError, ValueError):
            # Narrowed from a bare `except:` (which also swallowed
            # KeyboardInterrupt/SystemExit); py3-compatible raise form.
            raise ValueError("Expected two filenames")
        self.Parameters['-in'].off()
        self.Parameters['-in1'].on(filename1)
        self.Parameters['-in2'].on(filename2)
    return ''
python
{ "resource": "" }
q39209
static_url
train
def static_url(redis, path):
    """Gets the static path for a file"""
    version = get_cache_buster(redis, path)
    host = oz.settings["static_host"]
    return "%s/%s?v=%s" % (host, path, version)
python
{ "resource": "" }
q39210
get_cache_buster
train
def get_cache_buster(redis, path):
    """Gets the cache buster value for a given file path"""
    # Hash key is namespaced per S3 bucket.
    hash_key = "cache-buster:{}:v3".format(oz.settings["s3_bucket"])
    return escape.to_unicode(redis.hget(hash_key, path))
python
{ "resource": "" }
q39211
get_bucket
train
def get_bucket(s3_bucket=None, validate=False):
    """Gets a bucket from specified settings

    :param s3_bucket: bucket name; defaults to settings["s3_bucket"]
    :param validate: passed through to boto's get_bucket
    :raises Exception: if boto is not installed (S3Connection is None)
    """
    # Guard clause first; `is None` instead of `!= None` (PEP 8).  The
    # original also declared `global S3Connection`, which was unnecessary
    # since the name is only read here, never rebound.
    if S3Connection is None:
        raise Exception("S3 not supported in this environment as boto is not installed")
    settings = oz.settings
    s3_bucket = s3_bucket or settings["s3_bucket"]
    opts = {}
    if settings["s3_host"]:
        opts["host"] = settings["s3_host"]
    if settings["aws_access_key"] and settings["aws_secret_key"]:
        opts["aws_access_key_id"] = settings["aws_access_key"]
        opts["aws_secret_access_key"] = settings["aws_secret_key"]
    return S3Connection(**opts).get_bucket(s3_bucket, validate=validate)
python
{ "resource": "" }
q39212
get_file
train
def get_file(path, s3_bucket=None):
    """Gets a file"""
    bucket_name = s3_bucket or oz.settings["s3_bucket"]
    if not bucket_name:
        # No bucket configured: serve from the local static path.
        return LocalFile(oz.settings["static_path"], path)
    bucket = get_bucket(bucket_name)
    key = bucket.get_key(path) or bucket.new_key(path)
    return S3File(key)
python
{ "resource": "" }
q39213
LocalFile.copy
train
def copy(self, new_path, replace=False):
    """Uses shutil to copy a file over"""
    destination = os.path.join(self.static_path, new_path)
    # Refuse to overwrite unless explicitly asked to.
    if not replace and os.path.exists(destination):
        return False
    shutil.copy2(self.full_path, destination)
    return True
python
{ "resource": "" }
q39214
S3File.copy
train
def copy(self, new_path, replace=False):
    """Uses boto to copy the file to the new path instead of uploading
    another file to the new key"""
    # Skip the copy when the target already exists and replace is False.
    if not replace and get_file(new_path).exists():
        return False
    self.key.copy(self.key.bucket, new_path)
    return True
python
{ "resource": "" }
q39215
create_connection
train
def create_connection():
    """Sets up a redis configuration

    Returns a StrictRedis connection.  When the "redis_cache_connections"
    setting is enabled, the connection is memoized in the module-level
    `_cached_connection` and reused across calls.
    """
    global _cached_connection
    settings = oz.settings
    cache_enabled = settings["redis_cache_connections"]
    # `is not None` instead of `!= None` (PEP 8); early return for the
    # cached case.
    if cache_enabled and _cached_connection is not None:
        return _cached_connection
    conn = redis.StrictRedis(
        host=settings["redis_host"],
        port=settings["redis_port"],
        db=settings["redis_db"],
        password=settings["redis_password"],
        decode_responses=settings["redis_decode_responses"],
        ssl=settings["redis_use_ssl"],
        ssl_keyfile=settings["redis_ssl_keyfile"],
        ssl_certfile=settings["redis_ssl_certfile"],
        ssl_cert_reqs=settings["redis_ssl_cert_reqs"],
        ssl_ca_certs=settings["redis_ssl_ca_certs"]
    )
    if cache_enabled:
        _cached_connection = conn
    return conn
python
{ "resource": "" }
q39216
usearch_sort_by_abundance
train
def usearch_sort_by_abundance(fasta_filepath, output_filepath=None,
                              sizein=True, sizeout=True, minsize=0,
                              log_name="abundance_sort.log", usersort=False,
                              HALT_EXEC=False, save_intermediate_files=False,
                              remove_usearch_logs=False, working_dir=None):
    """ Sorts fasta file by abundance

    fasta_filepath = input fasta file, generally a dereplicated fasta
    output_filepath = output abundance sorted fasta filepath
    sizein = not defined in usearch helpstring
    sizeout = not defined in usearch helpstring
    minsize = minimum size of cluster to retain.
    log_name = string to specify log filename
    usersort = Use if not sorting by abundance or usearch will raise an error
    HALT_EXEC: Used for debugging app controller
    save_intermediate_files: Preserve all intermediate files created.
    """
    if not output_filepath:
        _, output_filepath = mkstemp(prefix='usearch_abundance_sorted',
                                     suffix='.fasta')
    log_filepath = join(working_dir,
                        "minsize_" + str(minsize) + "_" + log_name)
    app = Usearch({}, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)
    # Turn on the requested boolean/valued flags.
    if usersort:
        app.Parameters['--usersort'].on()
    if minsize:
        app.Parameters['--minsize'].on(minsize)
    if sizein:
        app.Parameters['--sizein'].on()
    if sizeout:
        app.Parameters['--sizeout'].on()
    run_data = {'--sortsize': fasta_filepath,
                '--output': output_filepath}
    if not remove_usearch_logs:
        run_data['--log'] = log_filepath
    # Can have no data following this filter step, which will raise an
    # application error; catch it here to raise a meaningful message.
    try:
        app_result = app(run_data)
    except ApplicationError:
        raise ValueError('No data following filter steps, please check ' +
                         'parameter settings for usearch_qf.')
    return app_result, output_filepath
python
{ "resource": "" }
q39217
usearch_cluster_error_correction
train
def usearch_cluster_error_correction(fasta_filepath, output_filepath=None,
                                     output_uc_filepath=None,
                                     percent_id_err=0.97, sizein=True,
                                     sizeout=True, w=64, slots=16769023,
                                     maxrejects=64,
                                     log_name="usearch_cluster_err_corrected.log",
                                     usersort=False, HALT_EXEC=False,
                                     save_intermediate_files=False,
                                     remove_usearch_logs=False,
                                     working_dir=None):
    """ Cluster for err. correction at percent_id_err, output consensus fasta

    fasta_filepath = input fasta file, generally a dereplicated fasta
    output_filepath = output error corrected fasta filepath
    percent_id_err = minimum identity percent.
    sizein = not defined in usearch helpstring
    sizeout = not defined in usearch helpstring
    w = Word length for U-sorting
    slots = Size of compressed index table. Should be prime, e.g. 40000003.
     Should also specify --w, typical is --w 16 or --w 32.
    maxrejects = Max rejected targets, 0=ignore, default 32.
    log_name = string specifying output log name
    usersort = Enable if input fasta not sorted by length purposefully, lest
     usearch will raise an error.
    HALT_EXEC: Used for debugging app controller
    save_intermediate_files: Preserve all intermediate files created.
    """
    if not output_filepath:
        _, output_filepath = mkstemp(prefix='usearch_cluster_err_corrected',
                                     suffix='.fasta')
    log_filepath = join(working_dir, log_name)
    cluster_params = {'--sizein': sizein,
                      '--sizeout': sizeout,
                      '--id': percent_id_err,
                      '--w': w,
                      '--slots': slots,
                      '--maxrejects': maxrejects}
    app = Usearch(cluster_params, WorkingDir=working_dir,
                  HALT_EXEC=HALT_EXEC)
    if usersort:
        app.Parameters['--usersort'].on()
    run_data = {'--cluster': fasta_filepath,
                '--consout': output_filepath}
    if not remove_usearch_logs:
        run_data['--log'] = log_filepath
    if output_uc_filepath:
        run_data['--uc'] = output_uc_filepath
    app_result = app(run_data)
    return app_result, output_filepath
python
{ "resource": "" }
q39218
usearch_chimera_filter_de_novo
train
def usearch_chimera_filter_de_novo(fasta_filepath,
                                   output_chimera_filepath=None,
                                   output_non_chimera_filepath=None,
                                   abundance_skew=2.0,
                                   log_name="uchime_de_novo_chimera_filtering.log",
                                   usersort=False, HALT_EXEC=False,
                                   save_intermediate_files=False,
                                   remove_usearch_logs=False,
                                   working_dir=None):
    """ Chimera filter de novo, output chimeras and non-chimeras to fastas

    fasta_filepath = input fasta file, generally a dereplicated fasta
    output_chimera_filepath = output chimera filepath
    output_non_chimera_filepath = output non chimera filepath
    abundance_skew = abundance skew setting for de novo filtering.
    usersort = Enable if input fasta not sorted by length purposefully, lest
     usearch will raise an error.
    HALT_EXEC: Used for debugging app controller
    save_intermediate_files: Preserve all intermediate files created.
    """
    if not output_chimera_filepath:
        _, output_chimera_filepath = mkstemp(prefix='uchime_chimeras_',
                                             suffix='.fasta')
    if not output_non_chimera_filepath:
        _, output_non_chimera_filepath = mkstemp(prefix='uchime_non_chimeras_',
                                                 suffix='.fasta')
    log_filepath = join(working_dir, log_name)
    app = Usearch({'--abskew': abundance_skew}, WorkingDir=working_dir,
                  HALT_EXEC=HALT_EXEC)
    if usersort:
        app.Parameters['--usersort'].on()
    run_data = {'--uchime': fasta_filepath,
                '--chimeras': output_chimera_filepath,
                '--nonchimeras': output_non_chimera_filepath}
    if not remove_usearch_logs:
        run_data['--log'] = log_filepath
    app_result = app(run_data)
    # The chimera fasta is only an intermediate unless explicitly kept.
    if not save_intermediate_files:
        remove_files([output_chimera_filepath])
    return app_result, output_non_chimera_filepath
python
{ "resource": "" }
q39219
usearch_cluster_seqs_ref
train
def usearch_cluster_seqs_ref(fasta_filepath, output_filepath=None,
                             percent_id=0.97, sizein=True, sizeout=True,
                             w=64, slots=16769023, maxrejects=64,
                             log_name="usearch_cluster_seqs.log",
                             usersort=True, HALT_EXEC=False,
                             save_intermediate_files=False,
                             remove_usearch_logs=False,
                             suppress_new_clusters=False, refseqs_fp=None,
                             output_dir=None, working_dir=None, rev=False):
    """ Cluster seqs at percent_id, output consensus fasta

    Also appends de novo clustered seqs if suppress_new_clusters is False.

    Forced to handle reference + de novo in hackish fashion as usearch does
    not work as listed in the helpstrings.  Any failures are clustered de
    novo, and given unique cluster IDs.

    fasta_filepath = input fasta file, generally a dereplicated fasta
    output_filepath = output reference clustered uc filepath
    percent_id = minimum identity percent.
    sizein = not defined in usearch helpstring
    sizeout = not defined in usearch helpstring
    w = Word length for U-sorting
    slots = Size of compressed index table. Should be prime, e.g. 40000003.
     Should also specify --w, typical is --w 16 or --w 32.
    maxrejects = Max rejected targets, 0=ignore, default 32.
    log_name = string specifying output log name
    usersort = Enable if input fasta not sorted by length purposefully, lest
     usearch will raise an error.  In post chimera checked sequences, the
     seqs are sorted by abundance, so this should be set to True.
    HALT_EXEC: Used for debugging app controller
    save_intermediate_files: Preserve all intermediate files created.
    suppress_new_clusters: Disables de novo OTUs when ref based OTU picking
     enabled.
    refseqs_fp: Filepath for ref based OTU picking
    output_dir: output directory
    rev = search plus and minus strands of sequences
    """
    if not output_filepath:
        _, output_filepath = mkstemp(prefix='usearch_cluster_ref_based',
                                     suffix='.uc')
    log_filepath = join(working_dir, log_name)
    uc_filepath = join(working_dir, "clustered_seqs_post_chimera.uc")
    params = {'--sizein': sizein,
              '--sizeout': sizeout,
              '--id': percent_id,
              '--w': w,
              '--slots': slots,
              '--maxrejects': maxrejects}
    app = Usearch(params, WorkingDir=working_dir, HALT_EXEC=HALT_EXEC)
    if usersort:
        app.Parameters['--usersort'].on()
    if rev:
        app.Parameters['--rev'].on()
    # Reference-based pass: query input against the reference database.
    data = {'--query': fasta_filepath,
            '--uc': uc_filepath,
            '--db': refseqs_fp
            }
    if not remove_usearch_logs:
        data['--log'] = log_filepath
    app_result = app(data)
    files_to_remove = []
    # Need to create fasta file of all hits (with reference IDs),
    # recluster failures if new clusters allowed, and create complete fasta
    # file, with unique fasta label IDs.
    if suppress_new_clusters:
        # Closed-reference only: keep the hits ("H" records), drop the rest.
        output_fna_filepath = join(output_dir, 'ref_clustered_seqs.fasta')
        output_filepath, labels_hits = get_fasta_from_uc_file(
            fasta_filepath, uc_filepath, hit_type="H", output_dir=output_dir,
            output_fna_filepath=output_fna_filepath)
        files_to_remove.append(uc_filepath)
    else:
        # Get fasta of successful ref based clusters
        output_fna_clustered = join(output_dir, 'ref_clustered_seqs.fasta')
        output_filepath_ref_clusters, labels_hits =\
            get_fasta_from_uc_file(fasta_filepath, uc_filepath, hit_type="H",
                                   output_dir=output_dir,
                                   output_fna_filepath=output_fna_clustered)
        # get failures ("N" records) and recluster
        output_fna_failures =\
            join(output_dir, 'ref_clustered_seqs_failures.fasta')
        output_filepath_failures, labels_hits =\
            get_fasta_from_uc_file(fasta_filepath, uc_filepath, hit_type="N",
                                   output_dir=output_dir,
                                   output_fna_filepath=output_fna_failures)
        # de novo cluster the failures
        app_result, output_filepath_clustered_failures =\
            usearch_cluster_seqs(output_fna_failures, output_filepath=join(
                output_dir, 'clustered_seqs_reference_failures.fasta'),
                percent_id=percent_id, sizein=sizein, sizeout=sizeout, w=w,
                slots=slots, maxrejects=maxrejects,
                save_intermediate_files=save_intermediate_files,
                remove_usearch_logs=remove_usearch_logs,
                working_dir=working_dir)
        # Final output = reference hits + de novo clustered failures.
        output_filepath = concatenate_fastas(output_fna_clustered,
                                             output_fna_failures,
                                             output_concat_filepath=join(
                                                 output_dir,
                                                 'concatenated_reference_denovo_clusters.fasta'))
        files_to_remove.append(output_fna_clustered)
        files_to_remove.append(output_fna_failures)
        files_to_remove.append(output_filepath_clustered_failures)
    if not save_intermediate_files:
        remove_files(files_to_remove)
    return app_result, output_filepath
python
{ "resource": "" }
q39220
concatenate_fastas
train
def concatenate_fastas(output_fna_clustered, output_fna_failures,
                       output_concat_filepath):
    """ Concatenates two input fastas, writes to output_concat_filepath

    output_fna_clustered: fasta of successful ref clusters
    output_fna_failures: de novo fasta of cluster failures
    output_concat_filepath: path to write combined fastas to
    """
    # Fix: the original never closed any of its three file handles, so the
    # combined file could still be buffered when the caller read the
    # returned path.  Context managers guarantee flush/close.
    with open(output_concat_filepath, "w") as output_fp:
        for source_path in (output_fna_clustered, output_fna_failures):
            with open(source_path, "U") as source_f:
                for label, seq in parse_fasta(source_f):
                    output_fp.write(">%s\n%s\n" % (label, seq))
    return output_concat_filepath
python
{ "resource": "" }
q39221
enumerate_otus
train
def enumerate_otus(fasta_filepath, output_filepath=None, label_prefix="",
                   label_suffix="", retain_label_as_comment=False,
                   count_start=0):
    """ Writes unique, sequential count to OTUs

    fasta_filepath = input fasta filepath
    output_filepath = output fasta filepath
    label_prefix = string to place before enumeration
    label_suffix = string to place after enumeration
    retain_label_as_comment = if True, will place existing label in
     sequence comment, after a tab
    count_start = number to start enumerating OTUs with
    """
    if not output_filepath:
        _, output_filepath = mkstemp(prefix='enumerated_seqs_',
                                     suffix='.fasta')
    # Fix: the original leaked both file handles; the output could remain
    # partially buffered when the path was returned.
    with open(fasta_filepath, "U") as fasta_i, \
            open(output_filepath, "w") as fasta_o:
        for label, seq in parse_fasta(fasta_i):
            curr_label = ">" + label_prefix + str(count_start) + label_suffix
            if retain_label_as_comment:
                curr_label += '\t' + label
            fasta_o.write(curr_label.strip() + '\n')
            fasta_o.write(seq.strip() + '\n')
            count_start += 1
    return output_filepath
python
{ "resource": "" }
q39222
get_fasta_from_uc_file
train
def get_fasta_from_uc_file(fasta_filepath, uc_filepath, hit_type="H",
                           output_fna_filepath=None, label_prefix="",
                           output_dir=None):
    """ writes fasta of sequences from uc file of type hit_type

    fasta_filepath:  Filepath of original query fasta file
    uc_filepath:  Filepath of .uc file created by usearch post error
     filtering
    hit_type: type to read from first field of .uc file, "H" for hits,
     "N" for no hits.
    output_fna_filepath = fasta output filepath
    label_prefix = Added before each fasta label, important when doing ref
     based OTU picking plus de novo clustering to preserve label matching.
    output_dir: output directory
    """
    # .uc field positions.
    hit_type_index = 0
    seq_label_index = 8
    target_label_index = 9
    labels_hits = {}      # query label -> target (reference) label
    labels_to_keep = []
    # Fix: the original leaked the uc/fasta/output handles; all files are
    # now closed deterministically.  Also uses the declared
    # hit_type_index constant instead of a bare 0.
    with open(uc_filepath, "U") as uc_f:
        for line in uc_f:
            if line.startswith("#") or len(line.strip()) == 0:
                continue
            curr_line = line.split('\t')
            if curr_line[hit_type_index] == hit_type:
                labels_hits[curr_line[seq_label_index]] =\
                    curr_line[target_label_index].strip()
                labels_to_keep.append(curr_line[seq_label_index])
    labels_to_keep = set(labels_to_keep)
    with open(output_fna_filepath, "w") as out_fna, \
            open(fasta_filepath, "U") as fasta_f:
        for label, seq in parse_fasta(fasta_f):
            if label in labels_to_keep:
                if hit_type == "H":
                    # Hits are renamed to their reference target label.
                    out_fna.write(">" + labels_hits[label] + "\n%s\n" % seq)
                if hit_type == "N":
                    out_fna.write(">" + label + "\n%s\n" % seq)
    return output_fna_filepath, labels_hits
python
{ "resource": "" }
q39223
get_retained_chimeras
train
def get_retained_chimeras(output_fp_de_novo_nonchimeras,
                          output_fp_ref_nonchimeras,
                          output_combined_fp,
                          chimeras_retention='union'):
    """ Gets union or intersection of two supplied fasta files

    output_fp_de_novo_nonchimeras: filepath of nonchimeras from de novo
     usearch detection.
    output_fp_ref_nonchimeras: filepath of nonchimeras from reference based
     usearch detection.
    output_combined_fp: filepath to write retained sequences to.
    chimeras_retention: accepts either 'intersection' or 'union'. Will test
     for chimeras against the full input error clustered sequence set, and
     retain sequences flagged as non-chimeras by either (union) or only
     those flagged as non-chimeras by both (intersection).

    :raises ValueError: if chimeras_retention is neither 'union' nor
     'intersection' (the original fell through to a NameError).
    """
    # Collect labels into sets directly (the original built lists and
    # converted afterwards); handles are closed via context managers.
    de_novo_non_chimeras = set()
    reference_non_chimeras = set()
    with open(output_fp_de_novo_nonchimeras, "U") as f:
        for label, seq in parse_fasta(f):
            de_novo_non_chimeras.add(label)
    with open(output_fp_ref_nonchimeras, "U") as f:
        for label, seq in parse_fasta(f):
            reference_non_chimeras.add(label)
    if chimeras_retention == 'union':
        all_non_chimeras = de_novo_non_chimeras | reference_non_chimeras
    elif chimeras_retention == 'intersection':
        all_non_chimeras = de_novo_non_chimeras & reference_non_chimeras
    else:
        raise ValueError("chimeras_retention must be 'union' or "
                         "'intersection', got %r" % (chimeras_retention,))
    # Fix: labels_written was a list, giving O(n^2) membership tests on
    # large inputs; a set keeps each lookup O(1).
    labels_written = set()
    with open(output_combined_fp, "w") as output_combined_f:
        for fp in (output_fp_de_novo_nonchimeras,
                   output_fp_ref_nonchimeras):
            with open(fp, "U") as f:
                for label, seq in parse_fasta(f):
                    if label in all_non_chimeras and \
                            label not in labels_written:
                        output_combined_f.write('>%s\n%s\n' % (label, seq))
                        labels_written.add(label)
    return output_combined_fp
python
{ "resource": "" }
q39224
assign_reads_to_otus
train
def assign_reads_to_otus(original_fasta, filtered_fasta, output_filepath=None,
                         log_name="assign_reads_to_otus.log",
                         perc_id_blast=0.97, global_alignment=True,
                         HALT_EXEC=False, save_intermediate_files=False,
                         remove_usearch_logs=False, working_dir=None):
    """ Uses original fasta file, blasts to assign reads to filtered fasta

    original_fasta = filepath to original query fasta
    filtered_fasta = filepath to enumerated, filtered fasta
    output_filepath = output path to clusters (uc) file
    log_name = string specifying output log name
    perc_id_blast = percent ID for blasting original seqs against filtered
     set
    HALT_EXEC: Used for debugging app controller
    save_intermediate_files: Preserve all intermediate files created.
    """
    # Not sure if I feel confortable using blast as a way to recapitulate
    # original read ids....
    if not output_filepath:
        _, output_filepath = mkstemp(prefix='assign_reads_to_otus',
                                     suffix='.uc')
    log_filepath = join(working_dir, log_name)
    search_params = {'--id': perc_id_blast,
                     '--global': global_alignment}
    app = Usearch(search_params, WorkingDir=working_dir,
                  HALT_EXEC=HALT_EXEC)
    run_data = {'--query': original_fasta,
                '--db': filtered_fasta,
                '--uc': output_filepath}
    if not remove_usearch_logs:
        run_data['--log'] = log_filepath
    app_result = app(run_data)
    return app_result, output_filepath
python
{ "resource": "" }
q39225
sort_by_abundance_usearch61
train
def sort_by_abundance_usearch61(seq_path, output_dir='.', rev=False,
                                minlen=64, remove_usearch_logs=False,
                                HALT_EXEC=False, output_fna_filepath=None,
                                output_uc_filepath=None,
                                log_name="abundance_sorted.log",
                                threads=1.0):
    """ usearch61 application call to sort fasta file by abundance.

    seq_path:  fasta filepath to be clustered with usearch61
    output_dir:  directory to output log, OTU mapping, and intermediate
     files
    rev: enable reverse strand matching for clustering/sorting
    minlen: minimum sequence length
    remove_usearch_logs: Saves usearch log files
    HALT_EXEC: application controller option to halt execution
    output_fna_filepath: path to write sorted fasta filepath
    output_uc_filepath: path to write usearch61 generated .uc file
    log_name: filepath to write usearch61 generated log file
    threads: Specify number of threads used per core per CPU
    """
    if not output_fna_filepath:
        _, output_fna_filepath = mkstemp(prefix='abundance_sorted',
                                         suffix='.fna')
    if not output_uc_filepath:
        _, output_uc_filepath = mkstemp(prefix='abundance_sorted',
                                        suffix='.uc')
    log_filepath = join(output_dir, log_name)
    app_params = {'--minseqlength': minlen,
                  '--sizeout': True,
                  '--derep_fulllength': seq_path,
                  '--output': output_fna_filepath,
                  '--uc': output_uc_filepath,
                  '--threads': threads}
    if rev:
        # Match both strands when reverse matching is requested.
        app_params['--strand'] = 'both'
    if not remove_usearch_logs:
        app_params['--log'] = log_filepath
    app = Usearch61(app_params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
    app_result = app()
    return output_fna_filepath, output_uc_filepath, app_result
python
{ "resource": "" }
q39226
sort_by_length_usearch61
train
def sort_by_length_usearch61(seq_path, output_dir=".", minlen=64,
                             remove_usearch_logs=False, HALT_EXEC=False,
                             output_fna_filepath=None,
                             log_name="length_sorted.log"):
    """ usearch61 application call to sort fasta file by length.

    seq_path:  fasta filepath to be clustered with usearch61
    output_dir:  directory to output log, OTU mapping, and intermediate
     files
    minlen: minimum sequence length
    remove_usearch_logs: Saves usearch log files
    HALT_EXEC: application controller option to halt execution
    output_fna_filepath: path to write sorted fasta filepath
    log_name: filepath to write usearch61 generated log file
    """
    if not output_fna_filepath:
        _, output_fna_filepath = mkstemp(prefix='length_sorted',
                                         suffix='.fna')
    log_filepath = join(output_dir, log_name)
    app_params = {'--minseqlength': minlen,
                  '--sortbylength': seq_path,
                  '--output': output_fna_filepath}
    if not remove_usearch_logs:
        app_params['--log'] = log_filepath
    app = Usearch61(app_params, WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
    app_result = app()
    return output_fna_filepath, app_result
python
{ "resource": "" }
q39227
usearch61_cluster_ref
train
def usearch61_cluster_ref(intermediate_fasta, refseqs_fp, percent_id=0.97, rev=False, minlen=64, output_dir=".", remove_usearch_logs=False, wordlength=8, usearch61_maxrejects=32, usearch61_maxaccepts=1, HALT_EXEC=False, output_uc_filepath=None, log_filepath="ref_clustered.log", threads=1.0):
    """Cluster input fasta seqs against a reference database with usearch61.

    intermediate_fasta: fasta filepath of seqs to cluster
    refseqs_fp: reference fasta filepath to cluster against
    percent_id: percentage identity to cluster at
    rev: enable reverse strand matching
    minlen: minimum sequence length
    output_dir: directory for the log and intermediate files
    remove_usearch_logs: if True, do not write a usearch log file
    wordlength: word length used for clustering
    usearch61_maxrejects: number of rejects allowed by usearch61
    usearch61_maxaccepts: number of accepts allowed by usearch61
    output_uc_filepath: path to write the usearch61 generated .uc file
    threads: number of threads used per core per CPU
    HALT_EXEC: application controller option to halt execution

    Returns (clusters_fp, app_result).
    """
    cluster_params = {
        '--usearch_global': intermediate_fasta,
        '--db': refseqs_fp,
        '--minseqlength': minlen,
        '--id': percent_id,
        '--uc': output_uc_filepath,
        '--wordlength': wordlength,
        '--maxrejects': usearch61_maxrejects,
        '--maxaccepts': usearch61_maxaccepts,
        '--threads': threads,
        # reverse matching is opt-in; default is plus strand only
        '--strand': 'both' if rev else 'plus',
    }
    if not remove_usearch_logs:
        cluster_params['--log'] = join(output_dir, log_filepath)
    app = Usearch61(cluster_params, WorkingDir=output_dir,
                    HALT_EXEC=HALT_EXEC)
    return output_uc_filepath, app()
python
{ "resource": "" }
q39228
usearch61_fast_cluster
train
def usearch61_fast_cluster(intermediate_fasta, percent_id=0.97, minlen=64, output_dir=".", remove_usearch_logs=False, wordlength=8, usearch61_maxrejects=8, usearch61_maxaccepts=1, HALT_EXEC=False, output_uc_filepath=None, log_name="fast_clustered.log", threads=1.0):
    """De novo clustering via usearch61's --cluster_fast option.

    Intended for length-sorted input only (cluster_fast sorts by length
    itself) and does not support reverse strand matching.

    intermediate_fasta: fasta filepath of seqs to cluster
    percent_id: percentage identity to cluster at
    minlen: minimum sequence length
    output_dir: directory for the log and intermediate files
    remove_usearch_logs: if True, do not write a usearch log file
    wordlength: word length for initial high-probability matches
    usearch61_maxrejects: number of rejects allowed by usearch61
    usearch61_maxaccepts: number of accepts allowed by usearch61
    HALT_EXEC: application controller option to halt execution
    output_uc_filepath: path to write the clusters (.uc) file
    log_name: name of the usearch61 generated log file
    threads: number of threads used per core per CPU

    Returns (clusters_fp, app_result).
    """
    cluster_params = {
        '--minseqlength': minlen,
        '--cluster_fast': intermediate_fasta,
        '--id': percent_id,
        '--uc': output_uc_filepath,
        '--wordlength': wordlength,
        '--maxrejects': usearch61_maxrejects,
        '--maxaccepts': usearch61_maxaccepts,
        '--usersort': True,
        '--threads': threads,
    }
    if not remove_usearch_logs:
        cluster_params['--log'] = join(output_dir, log_name)
    app = Usearch61(cluster_params, WorkingDir=output_dir,
                    HALT_EXEC=HALT_EXEC)
    return output_uc_filepath, app()
python
{ "resource": "" }
q39229
usearch61_smallmem_cluster
train
def usearch61_smallmem_cluster(intermediate_fasta, percent_id=0.97, minlen=64, rev=False, output_dir=".", remove_usearch_logs=False, wordlength=8, usearch61_maxrejects=32, usearch61_maxaccepts=1, sizeorder=False, HALT_EXEC=False, output_uc_filepath=None, log_name="smallmem_clustered.log", sizeout=False, consout_filepath=None):
    """De novo clustering via usearch61's --cluster_smallmem option.

    Intended for length-sorted input (--usersort is passed).

    intermediate_fasta: fasta filepath of seqs to cluster
    percent_id: percentage identity to cluster at
    minlen: minimum sequence length
    rev: enable reverse strand matching
    output_dir: directory for the log and intermediate files
    remove_usearch_logs: if True, do not write a usearch log file
    wordlength: word length for initial high-probability matches
    usearch61_maxrejects: number of rejects allowed by usearch61
    usearch61_maxaccepts: number of accepts allowed by usearch61
    sizeorder: prefer larger clusters when breaking accept ties
    HALT_EXEC: application controller option to halt execution
    output_uc_filepath: path to write the clusters (.uc) file
    log_name: name of the usearch61 generated log file
    sizeout: if True, record abundance data in output fasta labels
    consout_filepath: if set, write a clustered consensus fasta file
        (used for downstream chimera checking)

    Returns (clusters_fp, app_result).
    """
    cluster_params = {
        '--minseqlength': minlen,
        '--cluster_smallmem': intermediate_fasta,
        '--id': percent_id,
        '--uc': output_uc_filepath,
        '--wordlength': wordlength,
        '--maxrejects': usearch61_maxrejects,
        '--maxaccepts': usearch61_maxaccepts,
        '--usersort': True,
        '--strand': 'both' if rev else 'plus',
    }
    # Optional flags only appear on the command line when requested.
    if sizeorder:
        cluster_params['--sizeorder'] = True
    if sizeout:
        cluster_params['--sizeout'] = True
    if consout_filepath:
        cluster_params['--consout'] = consout_filepath
    if not remove_usearch_logs:
        cluster_params['--log'] = join(output_dir, log_name)
    app = Usearch61(cluster_params, WorkingDir=output_dir,
                    HALT_EXEC=HALT_EXEC)
    return output_uc_filepath, app()
python
{ "resource": "" }
q39230
usearch61_chimera_check_denovo
train
def usearch61_chimera_check_denovo(abundance_fp, uchime_denovo_fp, minlen=64, output_dir=".", remove_usearch_logs=False, uchime_denovo_log_fp="uchime_denovo.log", usearch61_minh=0.28, usearch61_xn=8.0, usearch61_dn=1.4, usearch61_mindiffs=3, usearch61_mindiv=0.8, usearch61_abundance_skew=2.0, HALT_EXEC=False):
    """De novo, abundance-based chimera checking with usearch61.

    abundance_fp: input consensus fasta file carrying per-cluster
        abundance information
    uchime_denovo_fp: output uchime filepath for chimera results
    minlen: minimum sequence length for input fasta seqs
    output_dir: output directory
    remove_usearch_logs: if True, do not write a usearch log file
    uchime_denovo_log_fp: output filepath for the log file
    usearch61_minh: minimum score (h) to be classified as chimera
    usearch61_xn: weight of a "no" vote
    usearch61_dn: pseudo-count prior for "no" votes (n)
    usearch61_mindiffs: minimum number of diffs in a segment
    usearch61_mindiv: minimum divergence (%) from the closest reference
    usearch61_abundance_skew: abundance skew for de novo comparisons
    HALT_EXEC: application controller option to halt execution

    Returns (uchime_denovo_fp, app_result).
    """
    chimera_params = {
        '--minseqlength': minlen,
        '--uchime_denovo': abundance_fp,
        '--uchimeout': uchime_denovo_fp,
        '--minh': usearch61_minh,
        '--xn': usearch61_xn,
        '--dn': usearch61_dn,
        '--mindiffs': usearch61_mindiffs,
        '--mindiv': usearch61_mindiv,
        '--abskew': usearch61_abundance_skew,
    }
    if not remove_usearch_logs:
        chimera_params['--log'] = uchime_denovo_log_fp
    app = Usearch61(chimera_params, WorkingDir=output_dir,
                    HALT_EXEC=HALT_EXEC)
    return uchime_denovo_fp, app()
python
{ "resource": "" }
q39231
usearch61_chimera_check_ref
train
def usearch61_chimera_check_ref(abundance_fp, uchime_ref_fp, reference_seqs_fp, minlen=64, output_dir=".", remove_usearch_logs=False, uchime_ref_log_fp="uchime_ref.log", usearch61_minh=0.28, usearch61_xn=8.0, usearch61_dn=1.4, usearch61_mindiffs=3, usearch61_mindiv=0.8, threads=1.0, HALT_EXEC=False):
    """Reference-based chimera checking with usearch61.

    abundance_fp: input consensus fasta file carrying per-cluster
        abundance information
    uchime_ref_fp: output uchime filepath for reference results
    reference_seqs_fp: reference fasta database for chimera checking
    minlen: minimum sequence length for input fasta seqs
    output_dir: output directory
    remove_usearch_logs: if True, do not write a usearch log file
    uchime_ref_log_fp: output filepath for the log file
    usearch61_minh: minimum score (h) to be classified as chimera
    usearch61_xn: weight of a "no" vote
    usearch61_dn: pseudo-count prior for "no" votes (n)
    usearch61_mindiffs: minimum number of diffs in a segment
    usearch61_mindiv: minimum divergence (%) from the closest reference
    threads: number of threads used per core per CPU
    HALT_EXEC: application controller option to halt execution

    Returns (uchime_ref_fp, app_result).
    """
    chimera_params = {
        '--minseqlength': minlen,
        '--uchime_ref': abundance_fp,
        '--uchimeout': uchime_ref_fp,
        '--db': reference_seqs_fp,
        '--minh': usearch61_minh,
        '--xn': usearch61_xn,
        '--dn': usearch61_dn,
        '--mindiffs': usearch61_mindiffs,
        '--mindiv': usearch61_mindiv,
        # uchime_ref only works on the plus strand per the usearch docs
        '--strand': 'plus',
        '--threads': threads,
    }
    if not remove_usearch_logs:
        chimera_params['--log'] = uchime_ref_log_fp
    app = Usearch61(chimera_params, WorkingDir=output_dir,
                    HALT_EXEC=HALT_EXEC)
    return uchime_ref_fp, app()
python
{ "resource": "" }
q39232
merge_failures_dereplicated_seqs
train
def merge_failures_dereplicated_seqs(failures, dereplicated_clusters):
    """Append dereplicated hits for each failed seq ID onto the failures list.

    failures: list of failure seq IDs (mutated in place and returned)
    dereplicated_clusters: dict mapping seq IDs to lists of dereplicated
        seq IDs

    Returns the (extended) failures list.
    """
    # Intersect once so each matching ID is expanded exactly one time.
    matching_ids = set(failures) & set(dereplicated_clusters)
    for seq_id in matching_ids:
        failures.extend(dereplicated_clusters[seq_id])
    return failures
python
{ "resource": "" }
q39233
parse_usearch61_failures
train
def parse_usearch61_failures(seq_path, failures, output_fasta_fp):
    """Write the sequences whose IDs are in `failures` to output_fasta_fp.

    seq_path: filepath of the original input fasta file
    failures: iterable of failure seq IDs (only the first whitespace-
        delimited token of each fasta label is compared against it)
    output_fasta_fp: path to write the matching sequences to

    Returns output_fasta_fp.
    """
    # Fixes: the input file handle was never closed (leak) and
    # membership tests against a list were O(n) per sequence.
    failure_ids = set(failures)
    with open(output_fasta_fp, "w") as parsed_out:
        with open(seq_path) as seq_file:
            for label, seq in parse_fasta(seq_file, "U"):
                if label.split()[0] in failure_ids:
                    parsed_out.write(">%s\n%s\n" % (label, seq))
    return output_fasta_fp
python
{ "resource": "" }
q39234
ErrorPageMiddleware._on_error_page_write_error
train
def _on_error_page_write_error(self, status_code, **kwargs):
    """Replaces the default Tornado error page with a Django-styled one.

    Only active when the 'debug' setting is on; otherwise this hook
    does nothing and the normal error handling continues.
    """
    if oz.settings.get('debug'):
        # Capture the exception currently being handled.
        exception_type, exception_value, tback = sys.exc_info()
        is_breakpoint = isinstance(exception_value, oz.error_pages.DebugBreakException)
        frames = oz.error_pages.get_frames(tback, is_breakpoint)
        # Present the innermost frame first on the error page.
        frames.reverse()
        if is_breakpoint:
            # A deliberate debug break rather than a real error, so
            # replace the exception text with a friendlier label.
            exception_type = 'Debug breakpoint'
            exception_value = ''
        self.render(oz.settings["error_pages_template"],
            exception_type=exception_type,
            exception_value=exception_value,
            frames=frames,
            request_input=self.request.body,
            request_cookies=self.cookies,
            request_headers=self.request.headers,
            request_path=self.request.uri,
            request_method=self.request.method,
            # What has been buffered for the response so far.
            response_output="".join(self._write_buffer),
            response_headers=self._headers,
            prettify_object=oz.error_pages.prettify_object,
        )
        # Stop further middleware from also handling this error.
        return oz.break_trigger
python
{ "resource": "" }
q39235
BlinkMiddleware.get_blink_cookie
train
def get_blink_cookie(self, name):
    """Return and consume a one-time ("blink") cookie value.

    If the cookie is set, it is cleared and its URL-unescaped value is
    returned; otherwise returns None implicitly.
    """
    value = self.get_cookie(name)
    # Idiom fix: compare to None with `is not`, not `!=`; an
    # empty-string cookie is still consumed and returned.
    if value is not None:
        self.clear_cookie(name)
        return escape.url_unescape(value)
python
{ "resource": "" }
q39236
BlinkMiddleware.set_blink
train
def set_blink(self, message, type="info"):
    """Set the blink: a one-time transactional message shown on the
    next page load.

    message: the text to display
    type: the message category (e.g. "info")
    """
    for cookie_name, cookie_value in (("blink_message", message),
                                      ("blink_type", type)):
        self.set_cookie(cookie_name, escape.url_escape(cookie_value),
                        httponly=True)
python
{ "resource": "" }
q39237
cdhit_clusters_from_seqs
train
def cdhit_clusters_from_seqs(seqs, moltype=DNA, params=None): """Returns the CD-HIT clusters given seqs seqs : dict like collection of sequences moltype : cogent.core.moltype object params : cd-hit parameters NOTE: This method will call CD_HIT if moltype is PROTIEN, CD_HIT_EST if moltype is RNA/DNA, and raise if any other moltype is passed. """ # keys are not remapped. Tested against seq_ids of 100char length seqs = SequenceCollection(seqs, MolType=moltype) #Create mapping between abbreviated IDs and full IDs int_map, int_keys = seqs.getIntMap() #Create SequenceCollection from int_map. int_map = SequenceCollection(int_map,MolType=moltype) # setup params and make sure the output argument is set if params is None: params = {} if '-o' not in params: _, params['-o'] = mkstemp() # call the correct version of cd-hit base on moltype working_dir = mkdtemp() if moltype is PROTEIN: app = CD_HIT(WorkingDir=working_dir, params=params) elif moltype is RNA: app = CD_HIT_EST(WorkingDir=working_dir, params=params) elif moltype is DNA: app = CD_HIT_EST(WorkingDir=working_dir, params=params) else: raise ValueError, "Moltype must be either PROTEIN, RNA, or DNA" # grab result res = app(int_map.toFasta()) clusters = parse_cdhit_clstr_file(res['CLSTR']) remapped_clusters = [] for c in clusters: curr = [int_keys[i] for i in c] remapped_clusters.append(curr) # perform cleanup res.cleanUp() shutil.rmtree(working_dir) remove(params['-o'] + '.bak.clstr') return remapped_clusters
python
{ "resource": "" }
q39238
cdhit_from_seqs
train
def cdhit_from_seqs(seqs, moltype, params=None): """Returns the CD-HIT results given seqs seqs : dict like collection of sequences moltype : cogent.core.moltype object params : cd-hit parameters NOTE: This method will call CD_HIT if moltype is PROTIEN, CD_HIT_EST if moltype is RNA/DNA, and raise if any other moltype is passed. """ # keys are not remapped. Tested against seq_ids of 100char length seqs = SequenceCollection(seqs, MolType=moltype) # setup params and make sure the output argument is set if params is None: params = {} if '-o' not in params: _, params['-o'] = mkstemp() # call the correct version of cd-hit base on moltype working_dir = mkdtemp() if moltype is PROTEIN: app = CD_HIT(WorkingDir=working_dir, params=params) elif moltype is RNA: app = CD_HIT_EST(WorkingDir=working_dir, params=params) elif moltype is DNA: app = CD_HIT_EST(WorkingDir=working_dir, params=params) else: raise ValueError, "Moltype must be either PROTEIN, RNA, or DNA" # grab result res = app(seqs.toFasta()) new_seqs = dict(parse_fasta(res['FASTA'])) # perform cleanup res.cleanUp() shutil.rmtree(working_dir) remove(params['-o'] + '.bak.clstr') return SequenceCollection(new_seqs, MolType=moltype)
python
{ "resource": "" }
q39239
parse_cdhit_clstr_file
train
def parse_cdhit_clstr_file(lines):
    """Returns a list of lists of sequence ids, one inner list per cluster.

    lines: iterable of lines from a CD-HIT .clstr file, where each
    cluster starts with a '>Cluster' header line.
    """
    clusters = []
    current = []
    for line in lines:
        if not line.startswith('>Cluster'):
            # Member line: the third whitespace-delimited field holds the id.
            current.append(clean_cluster_seq_id(line.split()[2]))
        elif current:
            # New cluster header: flush the previous cluster, if any.
            clusters.append(current)
            current = []
    if current:
        clusters.append(current)
    return clusters
python
{ "resource": "" }
q39240
CD_HIT._get_clstr_outfile
train
def _get_clstr_outfile(self):
    """Returns the absolute path to the clstr outfile.

    CD-HIT writes cluster membership to '<output>.clstr' next to the
    fasta output named by the '-o' parameter, so the path is derived
    from that parameter's value.
    """
    if self.Parameters['-o'].isOn():
        return ''.join([self.Parameters['-o'].Value, '.clstr'])
    else:
        # Without '-o' there is no output basename to derive from.
        raise ValueError, "No output file specified"
python
{ "resource": "" }
q39241
ParagraphsSentencesAndWhitespace.segment
train
def segment(self, tokens):
    """
    Segments a sequence of tokens into a sequence of segments.

    Top-level segments alternate between paragraphs (runs of
    non-whitespace tokens) and whitespace. Inside a paragraph,
    sub-segments are tables (balanced tab_open/tab_close spans),
    sentences, or whitespace.

    :Parameters:
        tokens : `list` ( :class:`~deltas.Token` )
    """
    look_ahead = LookAhead(tokens)
    segments = Segment()
    while not look_ahead.empty():
        if look_ahead.peek().type not in self.whitespace:
            # Paragraph!
            paragraph = MatchableSegment(look_ahead.i)
            # Consume tokens until a paragraph-ending token appears.
            while not look_ahead.empty() and \
                    look_ahead.peek().type not in self.paragraph_end:
                if look_ahead.peek().type == "tab_open":
                    # Table: consume until tab_open/tab_close balance out.
                    tab_depth = 1
                    sentence = MatchableSegment(
                        look_ahead.i, [next(look_ahead)])
                    while not look_ahead.empty() and tab_depth > 0:
                        # Booleans add as 0/1 to track nesting depth.
                        tab_depth += look_ahead.peek().type == "tab_open"
                        tab_depth -= look_ahead.peek().type == "tab_close"
                        sentence.append(next(look_ahead))
                    paragraph.append(sentence)
                elif look_ahead.peek().type not in self.whitespace:
                    # Sentence!
                    sentence = MatchableSegment(
                        look_ahead.i, [next(look_ahead)])
                    # Track bracket/sub-expression nesting; a sentence
                    # may only end when nesting is closed.
                    sub_depth = int(sentence[0].type in SUB_OPEN)
                    while not look_ahead.empty():
                        sub_depth += look_ahead.peek().type in SUB_OPEN
                        sub_depth -= look_ahead.peek().type in SUB_CLOSE
                        sentence.append(next(look_ahead))
                        # End the sentence only on a terminator outside
                        # any nesting, and only if it is long enough.
                        if sentence[-1].type in self.sentence_end and sub_depth <= 0:
                            non_whitespace = sum(s.type not in self.whitespace
                                                 for s in sentence)
                            if non_whitespace >= self.min_sentence:
                                break
                    paragraph.append(sentence)
                else:  # look_ahead.peek().type in self.whitespace
                    whitespace = Segment(look_ahead.i, [next(look_ahead)])
                    paragraph.append(whitespace)
            segments.append(paragraph)
        else:  # look_ahead.peek().type in self.whitespace
            whitespace = Segment(look_ahead.i, [next(look_ahead)])
            segments.append(whitespace)
    return segments
python
{ "resource": "" }
q39242
PushBaby.get_all_feedback
train
def get_all_feedback(self):
    """
    Connect to the APNS feedback service and return any feedback as a
    list of FeedbackItem objects.

    Blocks the current greenlet until all feedback is returned. If a
    network error occurs before any feedback is received, it is
    propagated to the caller; otherwise it is ignored and whatever
    feedback arrived is returned.
    """
    if not self.fbaddress:
        raise Exception("Attempted to fetch feedback but no feedback_address supplied")
    connection = FeedbackConnection(
        self, self.fbaddress, self.certfile, self.keyfile)
    return connection.get_all()
python
{ "resource": "" }
q39243
Client._handle_api_error
train
def _handle_api_error(self, error):
    """
    Translate New Relic API error status codes into typed exceptions.

    Raises the specific exception for known status codes (403, 404,
    422) and the generic NewRelicApiException otherwise, always
    carrying the original error message.
    """
    exception_for_status = {
        403: NewRelicInvalidApiKeyException,
        404: NewRelicUnknownApplicationException,
        422: NewRelicInvalidParameterException,
    }
    exc_class = exception_for_status.get(
        error.response.status_code, NewRelicApiException)
    raise exc_class(error.message)
python
{ "resource": "" }
q39244
Client._api_rate_limit_exceeded
train
def _api_rate_limit_exceeded(self, api_call, window=60):
    """
    We want to keep track of the last time we sent a request to the
    NewRelic API, but only for certain operations. This method will
    dynamically add an attribute to the Client class with a unix
    timestamp with the name of the API api_call we make so that we can
    check it later. Raises NewRelicApiRateLimitException (carrying the
    remaining wait time in seconds) when called again within `window`
    seconds.
    """
    current = datetime.datetime.now()
    try:
        previous = getattr(self, api_call.__name__ + "_window")
        # Force the calling of our property so we can
        # handle not having set it yet.
        previous.__str__
    except AttributeError:
        # First call for this api_call: fabricate a timestamp just
        # outside the window so the call below is allowed through.
        now = datetime.datetime.now()
        outside_window = datetime.timedelta(seconds=window+1)
        previous = now - outside_window
    if current - previous > datetime.timedelta(seconds=window):
        # Allowed: record this call as the start of a new window.
        setattr(self, api_call.__name__ + "_window", current)
    else:
        # Too soon: report how long the caller must wait.
        timeout = window - (current - previous).seconds
        raise NewRelicApiRateLimitException(str(timeout))
python
{ "resource": "" }
q39245
join_paired_end_reads_fastqjoin
train
def join_paired_end_reads_fastqjoin(
        reads1_infile_path,
        reads2_infile_path,
        perc_max_diff=None,   # typical default is 8
        min_overlap=None,     # typical default is 6
        outfile_label='fastqjoin',
        params=None,
        working_dir=tempfile.gettempdir(),
        SuppressStderr=True,
        SuppressStdout=True,
        HALT_EXEC=False):
    """Run fastq-join to assemble paired-end reads.

    -reads1_infile_path : reads1.fastq infile path
    -reads2_infile_path : reads2.fastq infile path
    -perc_max_diff : maximum % diff of overlap differences allowed
    -min_overlap : minimum allowed overlap required to assemble reads
    -outfile_label : base name for output files
    -params : dictionary of application controller parameters

    Returns a dict mapping 'Assembled', 'UnassembledReads1' and
    'UnassembledReads2' to the output fastq filepaths.
    """
    # Bug fix: `params={}` was a mutable default argument shared across
    # calls; use None and create a fresh dict per call instead.
    if params is None:
        params = {}
    abs_r1_path = os.path.abspath(reads1_infile_path)
    abs_r2_path = os.path.abspath(reads2_infile_path)
    infile_paths = [abs_r1_path, abs_r2_path]
    # check / make absolute infile paths
    for p in infile_paths:
        if not os.path.exists(p):
            raise IOError('File not found at: %s' % p)
    fastq_join_app = FastqJoin(params=params,
                               WorkingDir=working_dir,
                               SuppressStderr=SuppressStderr,
                               SuppressStdout=SuppressStdout,
                               HALT_EXEC=HALT_EXEC)
    # Parameters default to None so QIIME integration does not have to
    # track the wrapped application's own defaults.
    if perc_max_diff is not None:
        if isinstance(perc_max_diff, int) and 0 <= perc_max_diff <= 100:
            fastq_join_app.Parameters['-p'].on(perc_max_diff)
        else:
            raise ValueError("perc_max_diff must be int between 0-100!")
    if min_overlap is not None:
        if isinstance(min_overlap, int) and 0 < min_overlap:
            fastq_join_app.Parameters['-m'].on(min_overlap)
        else:
            # Bug fix: message said ">= 0" but the check requires > 0.
            raise ValueError("min_overlap must be an int > 0!")
    if outfile_label is not None:
        if isinstance(outfile_label, str):
            fastq_join_app.Parameters['-o'].on(outfile_label + '.')
        else:
            raise ValueError("outfile_label must be a string!")
    # run assembler
    result = fastq_join_app(infile_paths)
    # Store output file path data to dict
    path_dict = {}
    path_dict['Assembled'] = result['Assembled'].name
    path_dict['UnassembledReads1'] = result['UnassembledReads1'].name
    path_dict['UnassembledReads2'] = result['UnassembledReads2'].name
    # sanity check that files actually exist in path locations
    for path in path_dict.values():
        if not os.path.exists(path):
            raise IOError('Output file not found at: %s' % path)
    # fastq-join automatically appends 'join', 'un1', or 'un2' to the
    # output names; rename them so they end in '.fastq'. Iterate over a
    # snapshot since we rewrite the dict's values as we go.
    for key, file_path in list(path_dict.items()):
        new_file_path = file_path + '.fastq'
        shutil.move(file_path, new_file_path)
        path_dict[key] = new_file_path
    return path_dict
python
{ "resource": "" }
q39246
FastqJoin._get_result_paths
train
def _get_result_paths(self, data):
    """Capture fastq-join output files.

    Always produced:
        Assembled          : assembled paired reads (output base + 'join')
        UnassembledReads1  : unassembled reads_1 (output base + 'un1')
        UnassembledReads2  : unassembled reads_2 (output base + 'un2')

    Optionally:
        Report             : verbose stitch-length report, when requested
        Mate / MateUnassembled : extra outputs present only when a
            barcode / mate-pairs file was supplied to fastq-join
    """
    output_path = self._get_output_path()
    result = {
        'Assembled': ResultPath(Path=output_path + 'join', IsWritten=True),
        'UnassembledReads1': ResultPath(Path=output_path + 'un1',
                                        IsWritten=True),
        'UnassembledReads2': ResultPath(Path=output_path + 'un2',
                                        IsWritten=True),
    }
    stitch_path = self._get_stitch_report_path()
    if stitch_path:
        result['Report'] = ResultPath(Path=stitch_path, IsWritten=True)
    # There is no infile parameter for the mate/barcode file, so the only
    # way to know whether these outputs exist is to probe the filesystem.
    mate_path = output_path + 'join2'
    mate_unassembled_path = output_path + 'un3'
    if os.path.exists(mate_path) and os.path.exists(mate_unassembled_path):
        result['Mate'] = ResultPath(Path=mate_path, IsWritten=True)
        result['MateUnassembled'] = ResultPath(Path=mate_unassembled_path,
                                               IsWritten=True)
    return result
python
{ "resource": "" }
q39247
date_range
train
def date_range(date, func='date'):
    '''
    Return back start and end dates given date string

    :param date: metrique date (range) to apply to pql query

    The tilde '~' symbol is used as a date range separator.

    A tilde by itself means 'all date ranges possible' and will
    therefore search all objects irrelevant of their _end date
    timestamp.

    A date on the left with a tilde but no date on the right queries
    from the given date until now. A date on the right with a tilde
    but no date on the left queries from the beginning of known time
    until the given date. Dates on both sides query between them.
    '''
    if isinstance(date, basestring):
        date = date.strip()
    if not date:
        # No date given: match only currently-true objects.
        return '_end == None'
    if date == '~':
        # Match everything, regardless of date.
        return ''
    # don't include objects which have start EXACTLY on the
    # date in question, since we're looking for objects
    # which were true BEFORE the given date, not before or on.
    before = lambda d: '_start < %s("%s")' % (func, ts2dt(d) if d else None)
    after = lambda d: '(_end >= %s("%s") or _end == None)' % \
        (func, ts2dt(d) if d else None)
    split = date.split('~')
    # replace all occurances of 'T' with ' '
    # this is used for when datetime is passed in
    # like YYYY-MM-DDTHH:MM:SS instead of
    # YYYY-MM-DD HH:MM:SS as expected
    # and drop all occurances of 'timezone' like substring
    # FIXME: need to adjust (to UTC) for the timezone info we're dropping!
    split = [re.sub('\+\d\d:\d\d', '', d.replace('T', ' ')) for d in split]
    if len(split) == 1:  # 'dt'
        return '%s and %s' % (before(split[0]), after(split[0]))
    elif split[0] in ['', None]:  # '~dt'
        return before(split[1])
    elif split[1] in ['', None]:  # 'dt~'
        return after(split[0])
    else:  # 'dt~dt'
        return '%s and %s' % (before(split[1]), after(split[0]))
python
{ "resource": "" }
q39248
WorkerManager.enqueue_convert
train
def enqueue_convert(self, converter, from_resource, to_resource):
    '''
    Enqueue use of the given converter to convert between the given
    resources.

    Deprecated: Use async version instead
    '''
    worker = self.pick_sticky(from_resource.url_string)
    task_args = (converter, from_resource, to_resource)
    asyncio.ensure_future(worker.enqueue(enums.Task.CONVERT, task_args))
python
{ "resource": "" }
q39249
WorkerManager.async_enqueue_convert
train
async def async_enqueue_convert(self, converter, from_, to):
    '''
    Enqueue use of the given converter to convert between the given
    from and to resources.
    '''
    # Sticky pick keeps work for the same source on the same worker.
    worker = self.pick_sticky(from_.url_string)
    await worker.enqueue(enums.Task.CONVERT, (converter, from_, to))
python
{ "resource": "" }
q39250
IIIVZincBlendeStrained001.strain_in_plane
train
def strain_in_plane(self, **kwargs):
    '''
    Returns the in-plane strain assuming no lattice relaxation, which
    is positive for tensile strain and negative for compressive strain.
    '''
    if self._strain_out_of_plane is None:
        # Derive from the lattice mismatch against the substrate.
        return 1 - self.unstrained.a(**kwargs) / self.substrate.a(**kwargs)
    # Derive from the stored out-of-plane strain via the elastic constants.
    c11 = self.unstrained.c11(**kwargs)
    c12 = self.unstrained.c12(**kwargs)
    return (self._strain_out_of_plane / -2.) * (c11 / c12)
python
{ "resource": "" }
q39251
IIIVZincBlendeStrained001.substrate_a
train
def substrate_a(self, **kwargs):
    '''
    Returns the substrate's lattice parameter. When no substrate object
    is set, it is inferred from the in-plane strain.
    '''
    if self.substrate is None:
        return (self.unstrained.a(**kwargs)
                / (1. - self.strain_in_plane(**kwargs)))
    return self.substrate.a(**kwargs)
python
{ "resource": "" }
q39252
IIIVZincBlendeStrained001.Eg
train
def Eg(self, **kwargs):
    '''
    Returns the strain-shifted bandgap, ``Eg``.
    '''
    unshifted = self.unstrained.Eg(**kwargs)
    shift = self.Eg_strain_shift(**kwargs)
    return unshifted + shift
python
{ "resource": "" }
q39253
ApiMiddleware._api_on_write_error
train
def _api_on_write_error(self, status_code, **kwargs):
    """
    Catches errors and renders them as a JSON message, including the
    traceback when debug mode is enabled.
    """
    status = self.get_status()
    exc_info = kwargs.get("exc_info")
    if exc_info and isinstance(exc_info[1], oz.json_api.ApiError):
        # Deliberate API errors carry their own message.
        message = exc_info[1].message
    else:
        message = API_ERROR_CODE_MAP.get(status, "Unknown error")
    payload = {"code": status, "error": message}
    if oz.settings.get("debug"):
        payload["trace"] = "".join(traceback.format_exception(*exc_info))
    self.finish(payload)
    return oz.break_trigger
python
{ "resource": "" }
q39254
ApiMiddleware.body
train
def body(self):
    """Gets and caches the decoded JSON body of the request.

    Raises oz.json_api.ApiError when the content-type is not
    application/json or when the body is not valid JSON.
    """
    # Idiom fix: identity comparison with None, not `== None`.
    if self._decoded_body is None:
        raw_content_type = self.request.headers.get("content-type") or ""
        content_type = raw_content_type.split(";")[0].strip().lower()
        if content_type != "application/json":
            raise oz.json_api.ApiError("JSON body expected")
        try:
            self._decoded_body = escape.json_decode(self.request.body)
        except Exception:
            # Fix: was a bare `except:` which also swallowed
            # KeyboardInterrupt/SystemExit.
            raise oz.json_api.ApiError("Bad JSON body")
    return self._decoded_body
python
{ "resource": "" }
q39255
Parameter._get_id
train
def _get_id(self):
    """Construct and return the identifier: Prefix + Name, with any
    None components skipped."""
    parts = [self.Prefix, self.Name]
    return ''.join(str(part) for part in parts if is_not_None(part))
python
{ "resource": "" }
q39256
MixedParameter.on
train
def on(self, val=None):
    """Turns the MixedParameter ON by setting its Value to val.

    An attempt to turn the parameter on with value 'False' is an
    error, since that is the same as turning it off. Turning the
    MixedParameter ON without a value (or with None) makes it behave
    as a flag.
    """
    if val is False:
        raise ParameterError("Turning the ValuedParameter on with value "
                             "False is the same as turning it off. Use "
                             "another value.")
    # Path-valued parameters wrap the value so it is quoted correctly.
    self.Value = FilePath(val) if self.IsPath else val
python
{ "resource": "" }
q39257
ViewerManager.get_assets
train
def get_assets(self):
    '''
    Return a flat list of absolute paths to all assets required by
    this viewer
    '''
    return [
        self.prefix_asset(viewer, relpath)
        for viewer in self.viewers
        for relpath in viewer.assets
    ]
python
{ "resource": "" }
q39258
ViewerManager.get_resource
train
def get_resource(self):
    '''
    Returns a BytesResource to build the viewers JavaScript
    '''
    node_packages = self.get_node_packages()
    # sort_keys keeps the serialized JSON deterministic (and thus the
    # resulting bytes hashable/stable across runs).
    serialized = json.dumps(node_packages, sort_keys=True)
    return ForeignBytesResource(
        serialized.encode('utf8'),
        extension=VIEWER_EXT,
    )
python
{ "resource": "" }
q39259
callback
train
def callback(request):
    """Callback URL for Nexmo delivery receipts.

    Logs the delivery status and error message for the reported SMS and
    always answers 200, which is what Nexmo expects.
    """
    message_id = request.GET.get('messageId')
    status_id = request.GET.get('status')
    status_msg = NEXMO_STATUSES.get(status_id, UNKNOWN_STATUS)
    # err-code may be absent from the query string; fall back to the
    # unknown-message text instead of crashing on int(None).
    raw_error = request.GET.get('err-code')
    error_id = int(raw_error) if raw_error is not None else None
    error_msg = NEXMO_MESSAGES.get(error_id, UNKNOWN_MESSAGE)
    # Lazy %-style args: only formatted if INFO logging is enabled.
    logger.info(u'Nexmo callback: Sms = %s, Status = %s, message = %s',
                message_id, status_msg, error_msg)
    # Nexmo expects a 200 response code
    return HttpResponse('')
python
{ "resource": "" }
q39260
setup
train
def setup(provider=None):
    """
    Creates the provider config files needed to deploy your project
    """
    site = init(provider)
    if not site:
        # init() produced nothing: fall back to the existing deploy config.
        site = yaml.safe_load(_read_file(DEPLOY_YAML))
    PROVIDERS[site['provider']].init(site)
python
{ "resource": "" }
q39261
deploy
train
def deploy(provider=None):
    """
    Deploys your project
    """
    # Nothing to do when the project has not been set up yet.
    if not os.path.exists(DEPLOY_YAML):
        return
    site = yaml.safe_load(_read_file(DEPLOY_YAML))
    PROVIDERS[site['provider']].deploy()
python
{ "resource": "" }
q39262
ida_connect
train
def ida_connect(host='localhost', port=18861, retry=10):
    """
    Connect to an instance of IDA running our server.py.

    :param host: The host to connect to
    :param port: The port to connect to
    :param retry: How many times to try after errors before giving up
    :returns: The established rpyc link
    :raises IDALinkError: when no connection succeeds within ``retry`` tries
    """
    for attempt in range(retry):
        try:
            # Fixed typo in log message ("Connectint" -> "Connecting").
            LOG.debug('Connecting to %s:%d, try %d...', host, port, attempt + 1)
            link = rpyc_classic.connect(host, port)
            # Round-trip a trivial eval to confirm the link is actually live.
            link.eval('2 + 2')
        except socket.error:
            time.sleep(1)
            continue
        else:
            LOG.debug('Connected to %s:%d', host, port)
            return link
    raise IDALinkError("Could not connect to %s:%d after %d tries" %
                       (host, port, retry))
python
{ "resource": "" }
q39263
ida_spawn
train
def ida_spawn(ida_binary, filename, port=18861, mode='oneshot',
              processor_type=None, logfile=None):
    """
    Open IDA on the file we want to analyse and launch the RPC server.

    :param ida_binary:     The binary name or path to ida
    :param filename:       The filename to open in IDA
    :param port:           The port on which to serve rpc from ida
    :param mode:           The server mode. "oneshot" to close ida when the
                           connection is closed, or "threaded" to run IDA
                           visible to the user and allow multiple connections
    :param processor_type: Which processor IDA should analyze this binary as,
                           e.g. "metapc". If not provided, IDA will guess.
    :param logfile:        The file to log IDA's output to.
                           Default /tmp/idalink-{port}.log
    :returns:              The subprocess.Popen handle of the launched IDA.
    """
    ida_progname = _which(ida_binary)
    if ida_progname is None:
        raise IDALinkError('Could not find executable %s' % ida_binary)

    if mode not in ('oneshot', 'threaded'):
        raise ValueError("Bad mode %s" % mode)

    if logfile is None:
        logfile = LOGFILE.format(port=port)

    ida_realpath = os.path.expanduser(ida_progname)
    file_realpath = os.path.realpath(os.path.expanduser(filename))
    server_script = os.path.join(MODULE_DIR, 'server.py')

    LOG.info('Launching IDA (%s) on %s, listening on port %d, logging to %s',
             ida_realpath, file_realpath, port, logfile)

    env = dict(os.environ)
    if mode == 'oneshot':
        # NOTE(review): presumably keeps text-mode IDA from needing a
        # display in headless runs -- confirm against IDA docs.
        env['TVHEADLESS'] = '1'

    if sys.platform == "darwin":
        # If we are running in a virtual environment, which we should, we need
        # to insert the python lib into the launched process in order for IDA
        # to not default back to the Apple-installed python because of the use
        # of paths in library identifiers on macOS.
        if "VIRTUAL_ENV" in os.environ:
            env['DYLD_INSERT_LIBRARIES'] = os.environ['VIRTUAL_ENV'] + '/.Python'

    # The parameters are:
    # -A   Automatic mode
    # -S   Run a script (our server script)
    # -L   Log all output to our logfile
    # -p   Set the processor type
    command = [
        ida_realpath,
        '-A',
        '-S%s %d %s' % (server_script, port, mode),
        '-L%s' % logfile,
    ]
    if processor_type is not None:
        command.append('-p%s' % processor_type)
    command.append(file_realpath)

    LOG.debug('IDA command is %s', ' '.join("%s" % s for s in command))

    return subprocess.Popen(command, env=env)
python
{ "resource": "" }
q39264
_clear_cache
train
def _clear_cache(url, ts=None):
    '''
    Helper function used by precache and clearcache that clears the cache
    of a given URL and type
    '''
    if ts is None:
        # No type given: wipe the entire ForeignResource cache for this URL.
        res = ForeignResource(url)
        if not os.path.exists(res.cache_path_base):
            cli.printerr('%s is not cached (looked at %s)' %
                         (url, res.cache_path_base))
            return
        cli.print('%s: clearing ALL at %s' % (url, res.cache_path_base))
        res.cache_remove_all()
        return

    # Clear only the cache entry for this URL + type combination.
    res = TypedResource(url, ts)
    if not res.cache_exists():
        cli.printerr('%s is not cached for type %s (looked at %s)' %
                     (url, str(ts), res.cache_path))
        return
    cli.print('%s: clearing "%s" at %s' % (url, str(ts), res.cache_path))
    if os.path.isdir(res.cache_path):
        res.cache_remove_as_dir()
    else:
        res.cache_remove()
python
{ "resource": "" }
q39265
_precache
train
async def _precache(url, to_type, force=False):
    '''
    Helper function used by precache and precache-named which does the
    actual precaching
    '''
    if force:
        # Drop any existing cache entries for this URL first.
        cli.print('%s: force clearing' % url)
        _clear_cache(url)
    cli.print('%s: precaching "%s"' % (url, to_type))
    with autodrain_worker():
        await singletons.workers.async_enqueue_multiconvert(url, to_type)
    cached = TypedResource(url, TypeString(to_type))
    cli.print('%s: %s precached at: %s' % (url, to_type, cached.cache_path))
python
{ "resource": "" }
q39266
ActiveVWProcess.expect_exact
train
def expect_exact(self, *args, **kwargs):
    """This does not attempt to duplicate the expect_exact API, but just
    sets self.before to the latest response line."""
    self.before = self._recvline().strip()
python
{ "resource": "" }
q39267
create_bwa_index_from_fasta_file
train
def create_bwa_index_from_fasta_file(fasta_in, params=None):
    """Create a BWA index from an input fasta file.

    fasta_in: the input fasta file from which to create the index
    params: dict of bwa index specific parameters

    This method returns a dictionary where the keys are the various
    output suffixes (.amb, .ann, .bwt, .pac, .sa) and the values are
    open file objects.

    The index prefix will be the same as fasta_in, unless the -p
    parameter is passed in params.
    """
    # Instantiate the app controller and invoke it on the fasta file.
    index = BWA_index(params if params is not None else {})
    return index({'fasta_in': fasta_in})
python
{ "resource": "" }
q39268
assign_dna_reads_to_dna_database
train
def assign_dna_reads_to_dna_database(query_fasta_fp, database_fasta_fp, out_fp,
                                     params=None):
    """Wraps assign_reads_to_database, setting various parameters.

    The default settings are below, but may be overwritten and/or added
    to using the params dict:

    algorithm:        bwasw
    """
    # params=None instead of the original mutable {} default: a shared
    # dict default would be mutated across calls.
    my_params = {'algorithm': 'bwasw'}
    my_params.update(params or {})
    return assign_reads_to_database(query_fasta_fp, database_fasta_fp,
                                    out_fp, my_params)
python
{ "resource": "" }
q39269
BWA.check_arguments
train
def check_arguments(self):
    """Sanity check the arguments passed in.

    Uses the boolean functions specified in the subclasses in the
    _valid_arguments dictionary to determine if an argument is valid
    or invalid.

    Raises InvalidArgumentApplicationError on the first invalid value.
    """
    # items() works on both Python 2 and 3; iteritems() is Python-2 only.
    for name, param in self.Parameters.items():
        if param.isOn() and name in self._valid_arguments:
            if not self._valid_arguments[name](param.Value):
                message = ('Invalid argument (%s) '
                           'for parameter %s\n' % (param.Value, name))
                raise InvalidArgumentApplicationError(message)
python
{ "resource": "" }
q39270
BWA._input_as_dict
train
def _input_as_dict(self, data): """Takes dictionary that sets input and output files. Valid keys for the dictionary are specified in the subclasses. File paths must be absolute. """ # clear self._input; ready to receive new input and output files self._input = {} # Check that the arguments to the # subcommand-specific parameters are valid self.check_arguments() # Ensure that we have all required input (file I/O) for k in self._input_order: # N.B.: optional positional arguments begin with underscore (_)! # (e.g., see _mate_in for bwa bwasw) if k[0] != '_' and k not in data: raise MissingRequiredArgumentApplicationError("Missing " "required " "input %s" % k) # Set values for input and output files for k in data: # check for unexpected keys in the dict if k not in self._input_order: error_message = "Invalid input arguments (%s)\n" % k error_message += "Valid keys are: %s" % repr(self._input_order) raise InvalidArgumentApplicationError(error_message + '\n') # check for absolute paths if not isabs(data[k][0]): raise InvalidArgumentApplicationError("Only absolute paths " "allowed.\n%s" % repr(data)) self._input[k] = data[k] # if there is a -f option to specify an output file, force the user to # use it (otherwise things to to stdout) if '-f' in self.Parameters and not self.Parameters['-f'].isOn(): raise InvalidArgumentApplicationError("Please specify an output " "file with -f") return ''
python
{ "resource": "" }
q39271
BWA_index._get_result_paths
train
def _get_result_paths(self, data):
    """Gets the results for a run of bwa index.

    bwa index outputs 5 files when the index is created. The filename
    prefix will be the same as the input fasta, unless overridden with
    the -p option, and the 5 extensions are: .amb .ann .bwt .pac .sa

    These extensions (including the period) are the keys of the returned
    dictionary; the values are ResultPath objects.
    """
    # The prefix is the input fasta path unless overridden with -p.
    if self.Parameters['-p'].isOn():
        prefix = self.Parameters['-p'].Value
    else:
        prefix = data['fasta_in']

    return {
        suffix: ResultPath(prefix + suffix, IsWritten=True)
        for suffix in ('.amb', '.ann', '.bwt', '.pac', '.sa')
    }
python
{ "resource": "" }
q39272
DirectedGraph.get_all_paths_from
train
def get_all_paths_from(self, start, seen=None):
    '''
    Return all weighted paths from ``start`` as a tuple of
    (total_weight, path_tuple) pairs, including the trivial zero-weight
    path ``(start,)``.

    :param start: node to start from
    :param seen: frozenset of already-visited nodes (cycle guard)

    Fix: the original returned a list from the early-exit branch but a
    tuple otherwise; the return type is now consistently a tuple.
    '''
    if seen is None:
        seen = frozenset()
    results = [(0, (start, ))]
    # Stop recursing at already-visited nodes and at nodes with no edges.
    if start in seen or start not in self.edges:
        return tuple(results)
    seen = seen | frozenset((start,))
    for node, edge_weight in self.edges[start].items():
        for subpath_weight, subpath in self.get_all_paths_from(node, seen):
            results.append((edge_weight + subpath_weight, (start, ) + subpath))
    return tuple(results)
python
{ "resource": "" }
q39273
cmbuild_from_file
train
def cmbuild_from_file(stockholm_file_path, refine=False, return_alignment=False,
                      params=None):
    """Uses cmbuild to build a CM file given a stockholm file.

    - stockholm_file_path: a path to a stockholm file containing a
      multiple sequence alignment in Stockholm format. It must contain a
      sequence structure line:  #=GC SS_cons <structure string>
    - refine: refine the alignment and realign before building the cm.
      (Default=False)
    - return_alignment: Return the alignment and structure string used to
      construct the CM file. This is either the original alignment and
      structure string passed in, or the refined alignment if --refine
      was used. (Default=False)
    """
    # Parse the first record: (info, alignment, consensus structure string).
    records = MinimalRfamParser(open(stockholm_file_path, 'U'),
                                seq_constructor=ChangedSequence)
    info, aln, structure_string = list(records)[0]
    # Delegate to cmbuild_from_alignment for the actual CM construction.
    return cmbuild_from_alignment(aln, structure_string, refine=refine,
                                  return_alignment=return_alignment,
                                  params=params)
python
{ "resource": "" }
q39274
cmalign_from_alignment
train
def cmalign_from_alignment(aln, structure_string, seqs, moltype=DNA,
                           include_aln=True, refine=False,
                           return_stdout=False, params=None,
                           cmbuild_params=None):
    """Uses cmbuild to build a CM file, then cmalign to build an alignment.

    - aln: an Alignment object or something that can be used to construct
        one. All sequences must be the same length.
    - structure_string: vienna structure string representing the consensus
        structure for the sequences in aln. Must be the same length as the
        alignment.
    - seqs: SequenceCollection object or something that can be used to
        construct one, containing unaligned sequences that are to be
        aligned to the aligned sequences in aln.
    - moltype: Cogent moltype object. Must be RNA or DNA.
    - include_aln: Boolean to include sequences in aln in final alignment.
        (Default=True)
    - refine: refine the alignment and realign before building the cm.
        (Default=False)
    - return_stdout: Boolean to return standard output from infernal. This
        includes alignment and structure bit scores and average
        probabilities for each sequence. (Default=False)
    """
    # NOTE: Must degap seqs or Infernal will seg fault!
    seqs = SequenceCollection(seqs, MolType=moltype).degap()

    # Create mapping between abbreviated IDs and full IDs
    int_map, int_keys = seqs.getIntMap()
    # Create SequenceCollection from int_map.
    int_map = SequenceCollection(int_map, MolType=moltype)

    # Build the CM, keeping the (possibly refined) alignment text so it
    # can be fed to cmalign via --withali.
    cm_file, aln_file_string = cmbuild_from_alignment(
        aln, structure_string,
        refine=refine, return_alignment=True, params=cmbuild_params)

    if params is None:
        params = {}
    # Select the molecule-type flag for cmalign from the moltype.
    params.update({MOLTYPE_MAP[moltype]: True})

    app = Cmalign(InputHandler='_input_as_paths', WorkingDir='/tmp',
                  params=params)
    app.Parameters['--informat'].on('FASTA')

    # files to remove that aren't cleaned up by ResultPath object
    to_remove = []

    # turn on --withali flag if True.
    if include_aln:
        app.Parameters['--withali'].on(
            app._tempfile_as_multiline_string(aln_file_string))
        # remove this file at end
        to_remove.append(app.Parameters['--withali'].Value)

    seqs_path = app._input_as_multiline_string(int_map.toFasta())
    cm_path = app._tempfile_as_multiline_string(cm_file)

    # add cm_path to to_remove
    to_remove.append(cm_path)
    paths = [cm_path, seqs_path]
    _, tmp_file = mkstemp(dir=app.WorkingDir)
    app.Parameters['-o'].on(tmp_file)

    res = app(paths)

    info, aligned, struct_string = \
        list(MinimalRfamParser(res['Alignment'].readlines(),
                               seq_constructor=SEQ_CONSTRUCTOR_MAP[moltype]))[0]

    # Make new dict mapping original IDs
    new_alignment = {}
    for k, v in aligned.NamedSeqs.items():
        new_alignment[int_keys.get(k, k)] = v
    # Create an Alignment object from alignment dict
    new_alignment = Alignment(new_alignment, MolType=moltype)

    std_out = res['StdOut'].read()
    # clean up files
    res.cleanUp()
    for f in to_remove:
        remove(f)

    if return_stdout:
        return new_alignment, struct_string, std_out
    else:
        return new_alignment, struct_string
python
{ "resource": "" }
q39275
cmalign_from_file
train
def cmalign_from_file(cm_file_path, seqs, moltype=DNA,
                      alignment_file_path=None, include_aln=False,
                      return_stdout=False, params=None):
    """Uses cmalign to align seqs to the alignment in cm_file_path.

    - cm_file_path: path to the file created by cmbuild, containing
        aligned sequences. This will be used to align sequences in seqs.
    - seqs: unaligned sequences that are to be aligned to the sequences
        in cm_file.
    - moltype: cogent.core.moltype object.  Must be DNA or RNA
    - alignment_file_path: path to stockholm alignment file used to
        create cm_file.
        __IMPORTANT__: This MUST be the same file used by cmbuild
        originally. Only need to pass in this file if include_aln=True.
        This helper function will NOT check if the alignment file is
        correct so you must use it correctly.
    - include_aln: Boolean to include sequences in aln_file in final
        alignment. (Default=False)
    - return_stdout: Boolean to return standard output from infernal.
        This includes alignment and structure bit scores and average
        probabilities for each sequence. (Default=False)
    """
    # NOTE: Must degap seqs or Infernal will seg fault!
    seqs = SequenceCollection(seqs, MolType=moltype).degap()

    # Create mapping between abbreviated IDs and full IDs
    int_map, int_keys = seqs.getIntMap()
    # Create SequenceCollection from int_map.
    int_map = SequenceCollection(int_map, MolType=moltype)

    if params is None:
        params = {}
    # Select the molecule-type flag for cmalign from the moltype.
    params.update({MOLTYPE_MAP[moltype]: True})

    app = Cmalign(InputHandler='_input_as_paths', WorkingDir='/tmp',
                  params=params)
    app.Parameters['--informat'].on('FASTA')

    # turn on --withali flag if True.
    if include_aln:
        if alignment_file_path is None:
            raise DataError, """Must have path to alignment file used to build CM if include_aln=True."""
        else:
            app.Parameters['--withali'].on(alignment_file_path)

    seqs_path = app._input_as_multiline_string(int_map.toFasta())
    paths = [cm_file_path, seqs_path]
    _, tmp_file = mkstemp(dir=app.WorkingDir)
    app.Parameters['-o'].on(tmp_file)

    res = app(paths)

    info, aligned, struct_string = \
        list(MinimalRfamParser(res['Alignment'].readlines(),
                               seq_constructor=SEQ_CONSTRUCTOR_MAP[moltype]))[0]

    # Make new dict mapping original IDs
    new_alignment = {}
    for k, v in aligned.items():
        new_alignment[int_keys.get(k, k)] = v
    # Create an Alignment object from alignment dict
    new_alignment = Alignment(new_alignment, MolType=moltype)

    std_out = res['StdOut'].read()
    res.cleanUp()

    if return_stdout:
        return new_alignment, struct_string, std_out
    else:
        return new_alignment, struct_string
python
{ "resource": "" }
q39276
DetectorManager.detect
train
def detect(self, path):
    '''
    Guesses a TypeString from the given path
    '''
    result = UNKNOWN
    for detector in self.detectors:
        # Once we have a guess, only consult detectors that can refine it.
        relevant = (result == UNKNOWN) or detector.can_improve(result)
        if relevant and detector.can_detect(path):
            guess = detector.detect(path)
            if guess:
                result = guess
    return result
python
{ "resource": "" }
q39277
CDNMiddleware.upload_file
train
def upload_file(self, path, contents, replace=False):
    """
    Uploads the file at ``path`` with the given ``contents``, adding the
    appropriate parent directories when needed. If the path already
    exists and ``replace`` is False, the file will not be uploaded.
    """
    remote = self.get_file(path)
    remote.upload(contents, replace=replace)
    # Record the content hash so cached URLs are busted on change.
    self.set_cache_buster(path, remote.hash())
python
{ "resource": "" }
q39278
CDNMiddleware.copy_file
train
def copy_file(self, from_path, to_path, replace=False):
    """
    Copies a file from a source path to a destination path, adding
    appropriate parent directories when needed. If the destination path
    already exists and ``replace`` is False, the file is not uploaded.
    """
    source = self.get_file(from_path)
    # Only bust the destination's cache when the copy actually happened.
    if source.copy(to_path, replace):
        self.set_cache_buster(to_path, source.hash())
python
{ "resource": "" }
q39279
CDNMiddleware.remove_file
train
def remove_file(self, path):
    """Removes the given file"""
    target = self.get_file(path)
    target.remove()
    # Drop the cache-buster entry along with the file itself.
    self.remove_cache_buster(path)
python
{ "resource": "" }
q39280
alignTwoAlignments
train
def alignTwoAlignments(aln1, aln2, outfile, WorkingDir=None,
                       SuppressStderr=None, SuppressStdout=None):
    """Aligns two alignments. Individual sequences are not realigned

    aln1: string, name of file containing the first alignment
    aln2: string, name of file containing the second alignment
    outfile: you're forced to specify an outfile name, because if you
    don't aln1 will be overwritten. So, if you want aln1 to be
    overwritten, you should specify the same filename.

    WARNING: a .dnd file is created with the same prefix as aln1. So an
    existing dendrogram might get overwritten.
    """
    params = {'-profile': None,
              '-profile1': aln1,
              '-profile2': aln2,
              '-outfile': outfile}
    app = Clustalw(params, SuppressStderr=SuppressStderr,
                   WorkingDir=WorkingDir, SuppressStdout=SuppressStdout)
    # Profile alignment mode: disable the default -align behaviour.
    app.Parameters['-align'].off()
    return app()
python
{ "resource": "" }
q39281
addSeqsToAlignment
train
def addSeqsToAlignment(aln1, seqs, outfile, WorkingDir=None,
                       SuppressStderr=None, SuppressStdout=None):
    """Aligns sequences from the second profile against the first profile

    aln1: string, name of file containing the alignment
    seqs: string, name of file containing the sequences that should be
        added to the alignment.
    outfile: string, name of the output file (the new alignment)
    """
    params = {'-sequences': None,
              '-profile1': aln1,
              '-profile2': seqs,
              '-outfile': outfile}
    app = Clustalw(params, SuppressStderr=SuppressStderr,
                   WorkingDir=WorkingDir, SuppressStdout=SuppressStdout)
    # Sequence-to-profile mode: disable the default -align behaviour.
    app.Parameters['-align'].off()
    return app()
python
{ "resource": "" }
q39282
buildTreeFromAlignment
train
def buildTreeFromAlignment(filename, WorkingDir=None, SuppressStderr=None):
    """Builds a new tree from an existing alignment

    filename: string, name of file containing the seqs or alignment
    """
    app = Clustalw({'-tree': None, '-infile': filename},
                   SuppressStderr=SuppressStderr, WorkingDir=WorkingDir)
    # Tree-only mode: disable the default -align behaviour.
    app.Parameters['-align'].off()
    return app()
python
{ "resource": "" }
q39283
bootstrap_tree_from_alignment
train
def bootstrap_tree_from_alignment(aln, seed=None, num_trees=None, params=None): """Returns a tree from Alignment object aln with bootstrap support values. aln: an cogent.core.alignment.Alignment object, or data that can be used to build one. seed: an interger, seed value to use num_trees: an integer, number of trees to bootstrap against params: dict of parameters to pass in to the Clustal app controller. The result will be an cogent.core.tree.PhyloNode object, or None if tree fails. If seed is not specifed in params, a random integer between 0-1000 is used. """ # Create instance of controllor, enable bootstrap, disable alignment,tree app = Clustalw(InputHandler='_input_as_multiline_string', params=params, \ WorkingDir='/tmp') app.Parameters['-align'].off() app.Parameters['-tree'].off() if app.Parameters['-bootstrap'].isOff(): if num_trees is None: num_trees = 1000 app.Parameters['-bootstrap'].on(num_trees) if app.Parameters['-seed'].isOff(): if seed is None: seed = randint(0,1000) app.Parameters['-seed'].on(seed) if app.Parameters['-bootlabels'].isOff(): app.Parameters['-bootlabels'].on("node") # Setup mapping. Clustalw clips identifiers. We will need to remap them. seq_collection = SequenceCollection(aln) int_map, int_keys = seq_collection.getIntMap() int_map = SequenceCollection(int_map) # Collect result result = app(int_map.toFasta()) # Build tree tree = DndParser(result['Tree'].read(), constructor=PhyloNode) for node in tree.tips(): node.Name = int_keys[node.Name] # Clean up result.cleanUp() del(seq_collection, app, result, int_map, int_keys) return tree
python
{ "resource": "" }
q39284
Clustalw._suffix
train
def _suffix(self): """Return appropriate suffix for alignment file""" _output_formats={'GCG':'.msf', 'GDE':'.gde', 'PHYLIP':'.phy', 'PIR':'.pir', 'NEXUS':'.nxs'} if self.Parameters['-output'].isOn(): return _output_formats[self.Parameters['-output'].Value] else: return '.aln'
python
{ "resource": "" }
q39285
directory_walk
train
def directory_walk(source_d, destination_d):
    '''
    Walk a directory structure and yield full parallel source and
    destination file paths, munging filenames as necessary
    '''
    for dirpath, dirnames, filenames in os.walk(source_d):
        rel = os.path.relpath(dirpath, source_d)
        if rel == '.':
            rel = ''  # strip the implied '.' for the top-level directory
        for filename in filenames:
            suffix = os.path.join(rel, filename) if rel else filename
            yield (os.path.join(source_d, suffix),
                   os.path.join(destination_d, suffix))
python
{ "resource": "" }
q39286
recursive_hardlink_dirs
train
def recursive_hardlink_dirs(source_d, destination_d):
    '''
    Recursively mirror source_d into destination_d, hard-linking every
    file instead of copying (plain copies on Windows).
    '''
    # os.link creates hard links; on Windows fall back to real copies.
    func = os.link
    if os.name == 'nt':
        func = shutil.copy
    # shutil.copytree requires the destination not to exist, so remove it
    # first. NOTE(review): os.rmdir only removes an *empty* directory --
    # presumably callers guarantee the destination is empty or absent.
    if os.path.exists(destination_d):
        os.rmdir(destination_d)
    shutil.copytree(source_d, destination_d, copy_function=func)
python
{ "resource": "" }
q39287
SettingsManager.load_all
train
def load_all(self, key, default=None):
    '''
    Import settings ``key`` as a dict or list whose values are importable
    paths, returning the imported objects in the same container shape.

    If a default constructor is specified and a path is not importable,
    it falls back to running the given constructor.
    '''
    value = getattr(self, key)

    if default is None:
        loader = self.load_path
    else:
        def loader(path):
            return self.load_path_with_default(path, default)

    if isinstance(value, dict):
        return dict((name, loader(path)) for name, path in value.items())
    if isinstance(value, list):
        return [loader(path) for path in value]
    raise ValueError('load_all must be list or dict')
python
{ "resource": "" }
q39288
SettingsManager.load_path
train
def load_path(self, path):
    '''
    Load and return a given import path to a module or class
    '''
    containing_module, _, last_item = path.rpartition('.')
    # A CamelCase final segment means "import the class from its module".
    wants_class = last_item[0].isupper()
    module_path = containing_module if wants_class else path
    imported = importlib.import_module(module_path)
    if not wants_class:
        return imported
    try:
        return getattr(imported, last_item)
    except AttributeError:
        msg = 'Cannot import "%s". ' \
              '(Hint: CamelCase is only for classes)' % last_item
        raise ConfigurationError(msg)
python
{ "resource": "" }
q39289
SettingsManager.set
train
def set(self, **kwargs):
    '''
    Override existing settings, taking precedence over both user settings
    object and default settings. Useful for specific runtime
    requirements, such as overriding PORT or HOST.
    '''
    for name, value in kwargs.items():
        if name != name.lower():
            raise ValueError('Requires lowercase: %s' % name)
        upper_name = name.upper()
        # Only settings that already resolve may be overridden.
        try:
            getattr(self, upper_name)
        except (AttributeError, ConfigurationError):
            raise AttributeError('Cannot override %s' % upper_name)
        self.overridden_settings[upper_name] = value
python
{ "resource": "" }
q39290
SettingsManager.use_settings
train
def use_settings(self, settings_module):
    '''
    Swap in a different settings module (useful in tests), remembering
    the previous one, then reconfigure.
    '''
    self._previous_settings, self.settings_module = (
        self.settings_module, settings_module)
    self.reconfigure()
python
{ "resource": "" }
q39291
SettingsManager.use_settings_dict
train
def use_settings_dict(self, settings_dict):
    '''
    Slightly cleaner interface to override settings that autogenerates a
    settings module based on a given dict.
    '''
    # Pre-uppercase the keys once; they become both the slots and attrs.
    upper_items = [(key.upper(), value)
                   for key, value in settings_dict.items()]

    class SettingsDictModule:
        __slots__ = tuple(name for name, _ in upper_items)

    module = SettingsDictModule()
    for name, value in upper_items:
        setattr(module, name, value)
    self.use_settings(module)
python
{ "resource": "" }
q39292
process_sequence
train
def process_sequence(sequence, rules=None, skip_non_vietnamese=True):
    """\
    Convert a key sequence into a Vietnamese string with diacritical
    marks.

    Args:
        rules (optional): see docstring for process_key().
        skip_non_vietnamese (optional): see docstring for process_key().

    It even supports continuous key sequences connected by separators.
    i.e. process_sequence('con meof.ddieen') should work.
    """
    if rules is None:
        rules = get_telex_definition()
    accepted_chars = _accepted_chars(rules)

    parts = []
    current = ""
    raw = ""
    for key in sequence:
        if key not in accepted_chars:
            # Separator key: flush the word built so far, emit the
            # separator verbatim, and start a fresh word.
            parts.append(current)
            parts.append(key)
            current = ""
            raw = ""
        else:
            current, raw = process_key(
                string=current,
                key=key,
                fallback_sequence=raw,
                rules=rules,
                skip_non_vietnamese=skip_non_vietnamese)
    parts.append(current)
    return ''.join(parts)
python
{ "resource": "" }
q39293
_get_action
train
def _get_action(trans):
    """
    Return the action inferred from the transformation `trans`, together
    with the parameter going with that action.

    An _Action.ADD_MARK goes with a Mark while an _Action.ADD_ACCENT goes
    with an Accent.
    """
    # TODO: VIQR-like convention
    # Character additions and undo are signalled by the first character.
    if trans[0] in ('<', '+'):
        return _Action.ADD_CHAR, trans[1]
    if trans[0] == "_":
        return _Action.UNDO, trans[1:]

    mark_action = {
        '^': (_Action.ADD_MARK, Mark.HAT),
        '+': (_Action.ADD_MARK, Mark.BREVE),
        '*': (_Action.ADD_MARK, Mark.HORN),
        '-': (_Action.ADD_MARK, Mark.BAR),
    }
    accent_action = {
        '\\': (_Action.ADD_ACCENT, Accent.GRAVE),
        '/': (_Action.ADD_ACCENT, Accent.ACUTE),
        '?': (_Action.ADD_ACCENT, Accent.HOOK),
        '~': (_Action.ADD_ACCENT, Accent.TIDLE),
        '.': (_Action.ADD_ACCENT, Accent.DOT),
    }
    # Two-char transformations carry a mark in the second position;
    # otherwise the first character selects an accent.
    if len(trans) == 2:
        return mark_action[trans[1]]
    return accent_action[trans[0]]
python
{ "resource": "" }
q39294
_reverse
train
def _reverse(components, trans):
    """
    Reverse the effect of transformation `trans` on `components`.

    If the transformation does not affect the components, the components
    are returned unchanged.
    """
    action, parameter = _get_action(trans)
    comps = list(components)
    string = utils.join(comps)

    if action == _Action.ADD_CHAR and string[-1].lower() == parameter.lower():
        # Drop the last character from the rightmost non-empty component.
        if comps[2]:
            i = 2
        elif comps[1]:
            i = 1
        else:
            i = 0
        comps[i] = comps[i][:-1]
    elif action == _Action.ADD_ACCENT:
        # Undo an accent by resetting the accent to NONE.
        comps = accent.add_accent(comps, Accent.NONE)
    elif action == _Action.ADD_MARK:
        if parameter == Mark.BAR:
            # BAR: reset the mark on the last char of the first component.
            comps[0] = comps[0][:-1] + \
                mark.add_mark_char(comps[0][-1:], Mark.NONE)
        else:
            # Only strip the mark when it was validly applied to the
            # vowel component.
            if mark.is_valid_mark(comps, trans):
                comps[1] = "".join([mark.add_mark_char(c, Mark.NONE)
                                    for c in comps[1]])
    return comps
python
{ "resource": "" }
q39295
_can_undo
train
def _can_undo(comps, trans_list):
    """
    Return whether a components list can be undone by one of the
    transformations in trans_list.
    """
    comps = list(comps)
    accent_list = list(map(accent.get_accent_char, comps[1]))
    mark_list = list(map(mark.get_mark_char, utils.join(comps)))
    # map(_get_action, ...) directly; the lambda wrapper was redundant.
    action_list = list(map(_get_action, trans_list))

    def atomic_check(action):
        """
        Check if the `action` created one of the marks, accents, or
        characters in `comps`.
        """
        return (action[0] == _Action.ADD_ACCENT and action[1] in accent_list) \
            or (action[0] == _Action.ADD_MARK and action[1] in mark_list) \
            or (action[0] == _Action.ADD_CHAR and action[1] ==
                accent.remove_accent_char(comps[1][-1]))  # ơ, ư

    return any(map(atomic_check, action_list))
python
{ "resource": "" }
q39296
add_mark_char
train
def add_mark_char(char, mark):
    """
    Add mark to a single char, preserving its case and accent.
    """
    if char == "":
        return ""
    case = char.isupper()
    ac = accent.get_accent_char(char)
    # Work on the lowercase, accent-stripped base character.
    bare = accent.add_accent_char(char.lower(), Accent.NONE)

    # For each mark: (character family, replacement) pairs, tried in order.
    replacements = {
        Mark.HAT: ((FAMILY_A, "â"), (FAMILY_O, "ô"), (FAMILY_E, "ê")),
        Mark.HORN: ((FAMILY_O, "ơ"), (FAMILY_U, "ư")),
        Mark.BREVE: ((FAMILY_A, "ă"),),
        Mark.BAR: ((FAMILY_D, "đ"),),
        Mark.NONE: ((FAMILY_A, "a"), (FAMILY_E, "e"), (FAMILY_O, "o"),
                    (FAMILY_U, "u"), (FAMILY_D, "d")),
    }

    new_char = bare
    for family, replacement in replacements.get(mark, ()):
        if bare in family:
            new_char = replacement
            break

    # Re-apply the original accent and restore the original case.
    new_char = accent.add_accent_char(new_char, ac)
    return utils.change_case(new_char, case)
python
{ "resource": "" }
q39297
Session.delete
train
def delete(self, path):
    """Issue a DELETE request to the Infoblox device for the given path.

    :param str path: The request path
    :rtype: requests.Response
    """
    url = self._request_url(path)
    # NOTE(review): verify=False disables TLS certificate validation.
    return self.session.delete(url, auth=self.auth, verify=False)
python
{ "resource": "" }
q39298
Session.get
train
def get(self, path, data=None, return_fields=None):
    """Issue a GET request to the Infoblox device for the given path.

    :param str path: The request path
    :param dict data: Optional payload, JSON-encoded into the body
    :param return_fields: Optional fields to request in the response
    :rtype: requests.Response
    """
    url = self._request_url(path, return_fields)
    payload = json.dumps(data)
    # NOTE(review): verify=False disables TLS certificate validation.
    return self.session.get(url, data=payload, auth=self.auth, verify=False)
python
{ "resource": "" }
q39299
Session.post
train
def post(self, path, data):
    """Issue a POST request to the Infoblox device with the given payload.

    :param str path: The request path
    :param dict data: The data for the post
    :rtype: requests.Response
    """
    LOGGER.debug('Posting data: %r', data)
    payload = json.dumps(data or {})
    # NOTE(review): verify=False disables TLS certificate validation.
    return self.session.post(self._request_url(path), data=payload,
                             headers=self.HEADERS, auth=self.auth,
                             verify=False)
python
{ "resource": "" }