_id
stringlengths
2
7
title
stringlengths
1
88
partition
stringclasses
3 values
text
stringlengths
75
19.8k
language
stringclasses
1 value
meta_information
dict
q39000
SeqPrep._get_result_paths
train
def _get_result_paths(self, data):
    """Build the map of SeqPrep output files.

    The two unassembled-reads files are always produced; the remaining
    outputs are only registered when the corresponding command-line
    flag has been switched on.
    """
    paths = {
        'UnassembledReads1': ResultPath(
            Path=self._unassembled_reads1_out_file_name(), IsWritten=True),
        'UnassembledReads2': ResultPath(
            Path=self._unassembled_reads2_out_file_name(), IsWritten=True),
    }
    # Optional outputs: flag -> (result key, filename helper)
    optional = [
        ('-s', 'Assembled', self._assembled_out_file_name),
        ('-3', 'Reads1Discarded', self._discarded_reads1_out_file_name),
        ('-4', 'Reads2Discarded', self._discarded_reads2_out_file_name),
        ('-E', 'PrettyAlignments', self._pretty_alignment_out_file_name),
    ]
    for flag, key, fname in optional:
        if self.Parameters[flag].isOn():
            paths[key] = ResultPath(Path=fname(), IsWritten=True)
    return paths
python
{ "resource": "" }
q39001
get_files_for_document
train
def get_files_for_document(document):
    """Collect the filer files attached to any translation of *document*.

    A file that already appeared for an earlier translation is not added
    again; each returned file is tagged with the language code of the
    translation it came from.
    """
    collected = []
    for translation in document.translations.all():
        filer_file = translation.filer_file
        if filer_file is None or filer_file in collected:
            continue
        filer_file.language = translation.language_code
        collected.append(filer_file)
    return collected
python
{ "resource": "" }
q39002
get_frontpage_documents
train
def get_frontpage_documents(context):
    """Return the published documents flagged for the front page."""
    request = context.get('request')
    return Document.objects.published(request).filter(is_on_front_page=True)
python
{ "resource": "" }
q39003
authenticate_connection
train
def authenticate_connection(username, password, db=None):
    """Authenticate the current database connection.

    If the connection uses all default parameters this can be called
    without ``connect_to_database``; otherwise it should follow a
    ``connect_to_database`` call.

    @param username: name of a user registered in the database
    @param password: that user's password
    @param db: database the user is authenticated to access. ``None``
        (the default) authenticates against the admin database, which
        grants the connection access to all databases.

    Example; connecting to all databases locally:
        connect_to_database()
        authenticate_connection("username", "password")

    Example; connecting to a particular database of a remote server:
        connect_to_database(host="example.com", port="12345")
        authenticate_connection("username", "password", db="somedb")
    """
    return CONNECTION.authenticate(username, password, db=db)
python
{ "resource": "" }
q39004
add_user
train
def add_user(name, password=None, read_only=None, db=None, **kwargs):
    """Add a user that can be used for authentication.

    @param name: the name of the user to create
    @param password: the password of the user to create. Cannot be
        used with the userSource argument.
    @param read_only: if True the user will be read only
    @param db: the database the user is authenticated to access.
        Passing None (the default) adds the user to the admin
        database, which gives the user access to all databases.
    @param **kwargs: forwarded to pymongo.database.add_user

    Example; adding a user with full database access:
        add_user("username", "password")

    Example; adding a user with read-only privilege on a particular
    database:
        add_user("username", "password", read_only=True, db="somedb")

    NOTE: This function only works if mongo is running unauthenticated,
    or if you have already authenticated as a user with sufficient
    privileges to add users to the specified database.
    """
    # Docstring typos fixed ("passowrd", "privilage", "partiucalr");
    # behavior unchanged: delegate to the shared connection object.
    return CONNECTION.add_user(name, password=password,
                               read_only=read_only, db=db, **kwargs)
python
{ "resource": "" }
q39005
add_superuser
train
def add_superuser(name, password, **kwargs):
    """Add a user with userAdminAnyDatabase (and related admin) roles.

    @param name: the name of the user to create
    @param password: the password of the user to create. Cannot be
        used with the userSource argument.
    @param **kwargs: forwarded to pymongo.database.add_user
    """
    # Docstring typo fixed ("passowrd"); behavior unchanged.
    return CONNECTION.add_user(
        name,
        password=password,
        roles=["userAdminAnyDatabase", "readWriteAnyDatabase",
               "root", "backup", "restore"],
        **kwargs
    )
python
{ "resource": "" }
q39006
list_database
train
def list_database(db=None):
    """List database names, or the collection names of one database.

    @param db: database whose collection names should be listed; when
        None, the names of all databases are returned instead.
    """
    connection = CONNECTION.get_connection()
    if db is None:
        return connection.database_names()
    return connection[db].collection_names()
python
{ "resource": "" }
q39007
MongoConnection.authenticate
train
def authenticate(self, username, password, db=None):
    """Authenticate the MongoClient against *db* (admin when None)."""
    connection = self.get_connection()
    if db is not None:
        return connection[db].authenticate(username, password)
    return connection.admin.authenticate(username, password)
python
{ "resource": "" }
q39008
MongoConnection.add_user
train
def add_user(self, name, password=None, read_only=None, db=None, **kwargs):
    """Add an authentication user to *db* (the admin database when None)."""
    if db is not None:
        database = self.get_connection()[db]
    else:
        database = self.get_connection().admin
    return database.add_user(name, password=password,
                             read_only=read_only, **kwargs)
python
{ "resource": "" }
q39009
MetriqueContainer._add_variants
train
def _add_variants(self, key, value, schema):
    '''Derive extra fields by applying the schema's "variants" functions.

    Each variant maps a new key to a function of (value, store); the
    derived key/value pairs are returned as a dict (empty when the
    schema defines no variants).
    '''
    derived = {}
    variants = schema.get('variants')
    if variants:
        # NOTE: py2-style iteritems, consistent with the rest of the file
        for new_key, make_value in variants.iteritems():
            derived[new_key] = make_value(value, self.store)
    return derived
python
{ "resource": "" }
q39010
MetriqueContainer._type_container
train
def _type_container(self, value, _type): ' apply type to all values in the list ' if value is None: # normalize null containers to empty list return [] elif not isinstance(value, list): raise ValueError("expected list type, got: %s" % type(value)) else: return sorted(self._type_single(item, _type) for item in value)
python
{ "resource": "" }
q39011
MetriqueContainer._type_single
train
def _type_single(self, value, _type):
    '''Cast a single value to ``_type``, normalizing dates to epochs.

    Null values (or a null ``_type``) pass through untouched;
    datetimes/dates become epoch timestamps; strings are coerced to
    unicode; anything else is cast with ``_type(value)``, and a failed
    cast is logged and re-raised.
    '''
    if value is None or _type in (None, NoneType):
        # don't convert nulls; default type is the original type
        return value
    if isinstance(value, _type):
        # already the right type; only dates need epoch normalization
        if _type in (datetime, date):
            return dt2ts(value)
        return value
    if _type in (datetime, date):
        # normalize all dates to epochs
        return dt2ts(value)
    if _type in (unicode, str):
        # make sure all string types are properly unicoded
        return to_encoding(value)
    try:
        return _type(value)
    except Exception:
        value = to_encoding(value)
        logger.error("typecast failed: %s(value=%s)" % (
            _type.__name__, value))
        raise
python
{ "resource": "" }
q39012
MetriqueContainer.flush
train
def flush(self, objects=None, batch_size=None, **kwargs):
    '''Upsert objects in batches, grouped by _oid.

    Objects are either passed in or taken from self.store; in the
    latter case the flushed _ids are popped from the store afterwards.
    Batches never split a run of objects sharing an _oid, because
    upsert deletes all rows for an _oid when autosnap=False.
    '''
    batch_size = batch_size or self.config.get('batch_size')
    if objects:
        from_store = False
    else:
        # flushing the container itself; pop the flushed ids later
        from_store = True
        objects = self.itervalues()
    # sort so groupby below sees each _oid contiguously
    objects = sorted(objects, key=lambda obj: obj['_oid'])
    batch, flushed_ids = [], []
    for _oid, group in groupby(objects, lambda obj: obj['_oid']):
        grouped = list(group)
        if len(batch) + len(grouped) > batch_size:
            logger.debug("Upserting %s objects" % len(batch))
            result = self.upsert(objects=batch, **kwargs)
            logger.debug("... done upserting %s objects" % len(batch))
            flushed_ids.extend(result)
            batch = grouped  # start a fresh batch
        else:
            # still under batch_size: keep accumulating
            batch.extend(grouped)
    if batch:
        # flush the final partial batch too
        logger.debug("Upserting last batch of %s objects" % len(batch))
        flushed_ids.extend(self.upsert(objects=batch, **kwargs))
    logger.debug("... Finished upserting all objects!")
    if from_store:
        for _id in flushed_ids:
            # warn / ignore if an id is unexpectedly absent
            try:
                self.store.pop(_id)
            except KeyError:
                logger.warn(
                    "failed to pop {} from self.store!".format(_id))
    return sorted(flushed_ids)
python
{ "resource": "" }
q39013
build_database_sortmerna
train
def build_database_sortmerna(fasta_path, max_pos=None, output_dir=None,
                             temp_dir=tempfile.gettempdir(),
                             HALT_EXEC=False):
    """Index *fasta_path* with indexdb_rna; return db name and files created.

    Parameters
    ----------
    fasta_path : string
        fasta file of sequences to index.
    max_pos : integer, optional
        maximum positions to store per seed in the index
        [default: 10000].
    output_dir : string, optional
        directory for the index [default: the fasta file's directory].
    temp_dir : string, optional
        scratch directory used by indexdb_rna.
    HALT_EXEC : boolean, optional
        print the indexdb_rna command and halt instead of running it.

    Return
    ------
    db_name : string
        filepath to the indexed database.
    db_filepaths : list
        output files written by indexdb_rna.
    """
    if fasta_path is None:
        raise ValueError("Error: path to fasta reference "
                         "sequences must exist.")
    fasta_dir, fasta_filename = split(fasta_path)
    if not output_dir:
        output_dir = fasta_dir or '.'
    # The app cds into output_dir, so hand it a bare filename to avoid
    # confusing it with relative paths
    fasta_path = fasta_filename
    db_name = join(output_dir, splitext(fasta_filename)[0])
    indexer = IndexDB(WorkingDir=output_dir, HALT_EXEC=HALT_EXEC)
    # --ref takes "/path/to/ref.fasta,/path/to/ref.idx"
    indexer.Parameters['--ref'].on("%s,%s" % (fasta_path, db_name))
    indexer.Parameters['--tmpdir'].on(temp_dir)
    if max_pos is not None:
        indexer.Parameters['--max_pos'].on(max_pos)
    app_result = indexer()
    # Drop StdErr/StdOut handles: they are destroyed when this function
    # returns (IndexDB is a local instance)
    db_filepaths = [v.name for k, v in app_result.items()
                    if k not in {'StdErr', 'StdOut'} and hasattr(v, 'name')]
    return db_name, db_filepaths
python
{ "resource": "" }
q39014
sortmerna_ref_cluster
train
def sortmerna_ref_cluster(seq_path=None, sortmerna_db=None, refseqs_fp=None,
                          result_path=None, tabular=False, max_e_value=1,
                          similarity=0.97, coverage=0.97, threads=1, best=1,
                          HALT_EXEC=False):
    """Launch the sortmerna reference-based OTU picker.

    Parameters
    ----------
    seq_path : str
        filepath to query sequences.
    sortmerna_db : str
        indexed reference database.
    refseqs_fp : str
        filepath of reference sequences.
    result_path : str
        filepath to output OTU map.
    tabular : bool, optional
        output BLAST tabular alignments [default: False].
    max_e_value : float, optional
        E-value threshold [default: 1].
    similarity : float, optional
        similarity %id threshold [default: 0.97].
    coverage : float, optional
        query coverage % threshold [default: 0.97].
    threads : int, optional
        number of threads to use (OpenMP) [default: 1].
    best : int, optional
        number of best alignments to output per read [default: 1].

    Returns
    -------
    clusters : dict of lists
        OTU ids and the reads mapping to them.
    failures : list
        reads which did not align.
    smr_files_to_remove : list
        intermediate files the caller should delete.
    """
    smr = Sortmerna(HALT_EXEC=HALT_EXEC)
    if seq_path is not None:
        smr.Parameters['--reads'].on(seq_path)
    else:
        raise ValueError("Error: a read file is mandatory input.")
    if sortmerna_db is not None:
        smr.Parameters['--ref'].on("%s,%s" % (refseqs_fp, sortmerna_db))
    else:
        raise ValueError("Error: an indexed database for reference set %s must"
                         " already exist.\nUse indexdb_rna to index the"
                         " database." % refseqs_fp)
    if result_path is None:
        raise ValueError("Error: the result path must be set.")
    # Blast alignments, clusters and failures land next to result_path
    output_dir = dirname(result_path)
    if output_dir is not None:
        smr.Parameters['--aligned'].on(join(output_dir, "sortmerna_otus"))
    if max_e_value is not None:
        smr.Parameters['-e'].on(max_e_value)
    if similarity is not None:
        smr.Parameters['--id'].on(similarity)
    if coverage is not None:
        smr.Parameters['--coverage'].on(coverage)
    if best is not None:
        smr.Parameters['--best'].on(best)
    # --blast 3 = m8 tabular + two extra columns (CIGAR, query coverage)
    if tabular:
        smr.Parameters['--blast'].on("3")
    if threads is not None:
        smr.Parameters['-a'].on(threads)
    app_result = smr()
    # OTU map: first column is the OTU id, the rest are its reads
    otu_rows = (line.strip().split('\t') for line in app_result['OtuMap'])
    clusters = {fields[0]: fields[1:] for fields in otu_rows}
    # failures = reads that did not align (FASTA destined for de novo)
    failures = [re.split('>| ', label)[0]
                for label, seq in parse_fasta(app_result['FastaForDenovo'])]
    # these files are re-constructed later in the pipeline
    # (pick_rep_set.py), so flag them for removal
    smr_files_to_remove = [app_result['FastaForDenovo'].name,
                           app_result['FastaMatches'].name,
                           app_result['OtuMap'].name]
    return clusters, failures, smr_files_to_remove
python
{ "resource": "" }
q39015
sortmerna_map
train
def sortmerna_map(seq_path, output_dir, refseqs_fp, sortmerna_db,
                  e_value=1, threads=1, best=None, num_alignments=None,
                  HALT_EXEC=False, output_sam=False, sam_SQ_tags=False,
                  blast_format=3, print_all_reads=True):
    """Launch the sortmerna mapper.

    Parameters
    ----------
    seq_path : str
        filepath to reads.
    output_dir : str
        dirpath for sortmerna output.
    refseqs_fp : str
        filepath of reference sequences.
    sortmerna_db : str
        indexed reference database.
    e_value : float, optional
        E-value threshold [default: 1].
    threads : int, optional
        number of threads to use (OpenMP) [default: 1].
    best : int, optional
        number of best alignments to output per read [default: None].
    num_alignments : int, optional
        number of first alignments passing the E-value threshold to
        output per read [default: None].
    HALT_EXEC : bool, debugging parameter
        print the sortmerna command and halt instead of running it.
    output_sam : bool, optional
        output in SAM format [default: False].
    sam_SQ_tags : bool, optional
        add the SQ field to SAM output [default: False].
    blast_format : int, optional
        Blast m8 tabular + 2 extra columns (CIGAR, query coverage)
        [default: 3].
    print_all_reads : bool, optional
        output NULL alignments for non-aligned reads [default: True].

    Returns
    -------
    dict of result paths set in _get_result_paths().
    """
    if not (blast_format or output_sam):
        raise ValueError("Either Blast or SAM output alignment "
                         "format must be chosen.")
    if (best and num_alignments):
        raise ValueError("Only one of --best or --num_alignments "
                         "options must be chosen.")
    smr = Sortmerna(HALT_EXEC=HALT_EXEC)
    smr.Parameters['--ref'].on("%s,%s" % (refseqs_fp, sortmerna_db))
    smr.Parameters['--reads'].on(seq_path)
    if blast_format:
        smr.Parameters['--blast'].on(blast_format)
    if output_sam:
        smr.Parameters['--sam'].on()
        if sam_SQ_tags:
            smr.Parameters['--SQ'].on()
    if print_all_reads:
        # emit NULL alignment lines for non-aligned reads
        smr.Parameters['--print_all_reads'].on()
    # Blast alignments and the log file go under output_dir
    smr.Parameters['--aligned'].on(join(output_dir, "sortmerna_map"))
    if e_value is not None:
        smr.Parameters['-e'].on(e_value)
    if best is not None:
        smr.Parameters['--best'].on(best)
    if num_alignments is not None:
        smr.Parameters['--num_alignments'].on(num_alignments)
    if threads is not None:
        smr.Parameters['-a'].on(threads)
    # OTU-picking parameters are irrelevant for plain mapping
    for flag in ('--fastx', '--otu_map', '--de_novo_otu',
                 '--id', '--coverage'):
        smr.Parameters[flag].off()
    return smr()
python
{ "resource": "" }
q39016
Plotter.get_color
train
def get_color(self, color):
    '''Resolve *color* to an index into COLORS.

    :param integer/string color: index into COLORS, or a key (string)
        from CNAMES; ``None`` picks the next color in sequence.
    '''
    if color is None:
        color = self.counter
    if isinstance(color, str):
        color = CNAMES[color]
    # remember where the automatic color cycle should continue
    self.counter = color + 1
    return color % len(COLORS)
python
{ "resource": "" }
q39017
Plotter.plot
train
def plot(self, series, label='', color=None, style=None):
    '''Plot *series*, stacking it on earlier series when enabled.

    :param pandas.Series series: values to plot; when stacked is True
        all values must be positive.
    :param string label: legend label for the series.
    :param integer/string color: index into COLORS or a key (string)
        from CNAMES.
    :param string style: style forwarded to plt.plot.
    '''
    color = self.get_color(color)
    if self.stacked:
        # shift the series up by everything already plotted
        series += self.running_sum
        plt.fill_between(series.index, self.running_sum, series,
                         facecolor=ALPHAS[color])
        self.running_sum = series
        plt.gca().set_ylim(bottom=0, top=int(series.max() * 1.05))
    series.plot(label=label, c=COLORS[color], linewidth=2, style=style)
python
{ "resource": "" }
q39018
Plotter.plots
train
def plots(self, series_list, label_list, colors=None):
    '''Plot every series from *series_list* (shared index assumed).

    :param list series_list: series to plot.
    :param list label_list: labels matching the series.
    :param list colors: colors to use; defaults to sequential indices.
    '''
    colors = colors or range(len(series_list))
    for one_series, one_label, one_color in zip(series_list,
                                                label_list, colors):
        self.plot(series=one_series, label=one_label, color=one_color)
python
{ "resource": "" }
q39019
Plotter.lines
train
def lines(self, lines_dict, y='bottom', color='grey', **kwargs):
    '''Draw a labelled vertical line for every entry of *lines_dict*.

    :param lines_dict: mapping of label -> x-coordinate.
    :param y: 'top', 'bottom' or an int; y position of the text labels.
    :param color color: color of the lines.
    '''
    for line_label, x_coord in lines_dict.items():
        self.line(x_coord, line_label, y, color, **kwargs)
python
{ "resource": "" }
q39020
DocumentManager.published
train
def published(self, request=None):
    """Return documents published in the current language.

    :param request: a Request instance (read for its LANGUAGE_CODE).
    """
    language = getattr(request, 'LANGUAGE_CODE', get_language())
    if not language:
        return self.model.objects.none()
    qs = self.get_queryset().filter(
        translations__is_published=True,
        translations__language_code=language,
    )
    # keep documents whose category is published, or that have none
    return qs.filter(
        models.Q(category__isnull=True) |
        models.Q(category__is_published=True))
python
{ "resource": "" }
q39021
Module._format
train
def _format(self): """Format search queries to perform in bulk. Build up the URLs to call for the search engine. These will be ran through a bulk processor and returned to a detailer. """ self.log.debug("Formatting URLs to request") items = list() for i in range(0, self.limit, 10): query = '"%s" %s' % (self.domain, self.modifier) url = self.host + "/search?q=" + query + "&first=" + str(i) items.append(url) self.log.debug("URLs were generated") return items
python
{ "resource": "" }
q39022
Module._process
train
def _process(self, responses):
    """Extract result URLs from search engine result pages (SERPs).

    Each response is parsed and the organic-result anchors
    (``li.b_algo`` — Bing's result listing class) are collected for
    subsequent crawling.
    """
    self.log.debug("Processing search results")
    items = list()
    for response in responses:
        # FIX: narrowed from a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit
        try:
            soup = BeautifulSoup(response.content, 'html.parser',
                                 from_encoding="iso-8859-1")
        except Exception:
            continue
        listings = soup.findAll('li', {'class': 'b_algo'})
        items.extend([l.find('a')['href'] for l in listings])
    self.log.debug("Search result URLs were extracted")
    return items
python
{ "resource": "" }
q39023
Module._fetch
train
def _fetch(self, urls):
    """Bulk-fetch *urls* and stash each page's text in ``self.data``.

    Responses are gathered via the base class (futures-backed). Text
    is extracted with BeautifulSoup when possible, falling back to
    the raw response text, and kept for later extraction.
    """
    responses = self._request_bulk(urls)
    for response in responses:
        try:
            parsed = BeautifulSoup(response.content, 'html.parser',
                                   from_encoding="iso-8859-1")
            text = parsed.get_text()
        except Exception:
            text = response.text
        self.data.append(text)  # opportunistic findings
    return responses
python
{ "resource": "" }
q39024
Module._extract
train
def _extract(self):
    """Pull email addresses out of all crawled page text.

    Every stored page is run through the email extractor; results are
    accumulated and returned de-duplicated.
    """
    self.log.debug("Extracting emails from text content")
    for page_text in self.data:
        self.results.extend(
            extract_emails(page_text, self.domain, self.fuzzy))
    self.log.debug("Email extraction completed")
    return list(set(self.results))
python
{ "resource": "" }
q39025
Module.search
train
def search(self):
    """Run the complete search pipeline and return its findings.

    Formats the engine queries, fetches and processes the SERPs,
    crawls the resulting URLs, then extracts emails from the
    collected text.
    """
    serp_urls = self._format()
    serps = self._fetch(serp_urls)
    hit_urls = self._process(serps)
    self._fetch(hit_urls)
    emails = self._extract()
    return {'emails': emails, 'processed': len(self.data)}
python
{ "resource": "" }
q39026
cmdline_generator
train
def cmdline_generator(param_iter, PathToBin=None, PathToCmd=None,
                      PathsToInputs=None, PathToOutput=None,
                      PathToStderr='/dev/null', PathToStdout='/dev/null',
                      UniqueOutputs=False, InputParam=None,
                      OutputParam=None):
    """Yield command lines usable in a cluster environment.

    param_iter : ParameterIterBase subclass instance
    PathToBin : absolute location of the primary command (i.e. Python)
    PathToCmd : absolute location of the command
    PathsToInputs : absolute location(s) of input file(s)
    PathToOutput : absolute location of the output file
    PathToStderr : path to stderr
    PathToStdout : path to stdout
    UniqueOutputs : generate unique tags for output files
    InputParam : application input parameter (stdin assumed if None)
    OutputParam : application output parameter (stdout assumed if None)
    """
    if not PathsToInputs:
        raise ValueError("No input file(s) specified.")
    if not PathToOutput:
        raise ValueError("No output file specified.")
    if not isinstance(PathsToInputs, list):
        PathsToInputs = [PathsToInputs]
    # bin and cmd may legitimately be absent
    if PathToBin is None:
        PathToBin = ''
    if PathToCmd is None:
        PathToCmd = ''
    # stdout/stderr redirection is optional
    stdout_ = '' if PathToStdout is None else '> "%s"' % PathToStdout
    stderr_ = '' if PathToStderr is None else '2> "%s"' % PathToStderr
    if OutputParam is None:
        # output goes to stdout, which we redirect into the output file
        output = '> "%s"' % PathToOutput
        stdout_ = ''
    else:
        out_param = param_iter.AppParams[OutputParam]
        out_param.on('"%s"' % PathToOutput)
        output = str(out_param)
        out_param.off()
    base_command = ' '.join([PathToBin, PathToCmd])
    output_count = 0
    for params in param_iter:
        # support multiple input files per parameter combination
        for inputfile in PathsToInputs:
            parts = [base_command]
            parts.extend(sorted(filter(None, map(str, params.values()))))
            # input comes from stdin or a dedicated input argument
            if InputParam is None:
                input = '< "%s"' % inputfile
            else:
                in_param = params[InputParam]
                in_param.on('"%s"' % inputfile)
                input = str(in_param)
                in_param.off()
            parts.append(input)
            if UniqueOutputs:
                parts.append(''.join([output, str(output_count)]))
                output_count += 1
            else:
                parts.append(output)
            parts.append(stdout_)
            parts.append(stderr_)
            yield ' '.join(parts)
python
{ "resource": "" }
q39027
get_tmp_filename
train
def get_tmp_filename(tmp_dir=gettempdir(), prefix="tmp", suffix=".txt",
                     result_constructor=FilePath):
    """ Generate a temporary filename and return as a FilePath object

    tmp_dir: the directory to house the tmp_filename
    prefix: string to append to beginning of filename
        Note: It is very useful to have prefix be descriptive of the
        process which is creating the temporary file. For example, if
        your temp file will be used to build a temporary blast
        database, you might pass prefix=TempBlastDB
    suffix: the suffix to be appended to the temp filename
    result_constructor: the constructor used to build the result
        filename (default: cogent.app.parameters.FilePath). Note that
        joining FilePath objects with one another or with strings, you
        must use the + operator. If this causes trouble, you can pass
        str as the result_constructor.
    """
    # check not none
    if not tmp_dir:
        tmp_dir = ""
    # if not current directory, append "/" if not already on path
    elif not tmp_dir.endswith("/"):
        tmp_dir += "/"
    # FIX: the alphabets previously read "abcdefghigklmnopqrstuvwxyz"
    # (missing 'j', 'g' doubled) and "0123456790" (missing '8', '0'
    # doubled); use the complete character sets
    chars = "abcdefghijklmnopqrstuvwxyz"
    picks = chars + chars.upper() + "0123456789"
    return result_constructor(tmp_dir) + result_constructor(prefix) +\
        result_constructor("%s%s" % (
            ''.join([choice(picks) for i in range(20)]), suffix))
python
{ "resource": "" }
q39028
guess_input_handler
train
def guess_input_handler(seqs, add_seq_names=False):
    """Return the name of the appropriate input handler for *seqs*.

    Multiline strings are treated as raw sequence data, other strings
    as filenames; a list of (id, seq) tuples and plain sequence lists
    each get their dedicated handler.
    """
    if isinstance(seqs, str):
        # a newline means it cannot be a filename
        return ('_input_as_multiline_string' if '\n' in seqs
                else '_input_as_string')
    if isinstance(seqs, list) and seqs and isinstance(seqs[0], tuple):
        return '_input_as_seq_id_seq_pairs'
    if add_seq_names:
        return '_input_as_seqs'
    return '_input_as_lines'
python
{ "resource": "" }
q39029
CommandLineAppResult.cleanUp
train
def cleanUp(self):
    """ Delete files written to disk by CommandLineApplication

    WARNING: after cleanUp() you may still have access to part of
    your result data, but if the file size exceeds the buffer size
    you will only have part of the file. To be safe, do not use
    cleanUp() until you are done with the file or have copied it to
    a different location.
    """
    for key in self.file_keys:
        handle = self[key]
        if handle is not None:
            handle.close()
            remove(handle.name)
    # also drop any temp file created by an input handler
    if hasattr(self, "_input_filename"):
        remove(self._input_filename)
python
{ "resource": "" }
q39030
CommandLineApplication._input_as_lines
train
def _input_as_lines(self, data):
    """ Write a sequence of lines to a temp file; return the filename

    data: a sequence to be written to a file; each element becomes
        one line of the file

    * Note: the result is the filename as a FilePath object (which is
      a string subclass).
    * Note: trailing '\n' is stripped from each element before writing
      so no accidental blank lines end up in the file.
    """
    filename = self._input_filename = \
        FilePath(self.getTmpFilename(self.TmpDir))
    filename = FilePath(filename)
    with open(filename, 'w') as handle:
        handle.write('\n'.join(str(item).strip('\n') for item in data))
    return filename
python
{ "resource": "" }
q39031
CommandLineApplication._input_as_paths
train
def _input_as_paths(self, data): """ Return data as a space delimited string with each path quoted data: paths or filenames, most likely as a list of strings """ return self._command_delimiter.join( map(str, map(self._input_as_path, data)))
python
{ "resource": "" }
q39032
CommandLineApplication._absolute
train
def _absolute(self, path):
    """ Convert a filename to an absolute path """
    path = FilePath(path)
    if isabs(path):
        return path
    # WorkingDir and path are both FilePath objects, so '+' joins them
    return self.WorkingDir + path
python
{ "resource": "" }
q39033
CommandLineApplication.getTmpFilename
train
def getTmpFilename(self, tmp_dir=None, prefix='tmp', suffix='.txt',
                   include_class_id=False, result_constructor=FilePath):
    """ Return a temp filename

    tmp_dir: directory where temporary files will be stored
        (default: self.TmpDir)
    prefix: text to append to the start of the file name
    suffix: text to append to the end of the file name
    include_class_id: if True, append an identifier derived from the
        class name to the prefix. Off by default because of the
        string-processing overhead; mostly useful to track down which
        tests leave temp files behind.
    result_constructor: the constructor used to build the result
        (default: cogent.app.parameters.FilePath). Joining FilePath
        objects with one another or with strings requires the +
        operator; pass str instead if that causes trouble.
    """
    if not tmp_dir:
        tmp_dir = self.TmpDir
    # if not the current directory, make sure it ends with "/"
    elif not tmp_dir.endswith("/"):
        tmp_dir += "/"
    if include_class_id:
        # derive an identifier from the instance repr so problematic
        # temp files can be traced back to the class that created them
        class_id = str(self.__class__())
        prefix = ''.join([prefix,
                          class_id[class_id.rindex('.') + 1:
                                   class_id.index(' ')]])
    try:
        mkdir(tmp_dir)
    except OSError:
        # directory already exists
        pass
    random_part = ''.join([choice(_all_chars)
                           for i in range(self.TmpNameLen)])
    # note: it is OK to join FilePath objects with +
    return (result_constructor(tmp_dir) + result_constructor(prefix) +
            result_constructor(random_part) + result_constructor(suffix))
python
{ "resource": "" }
q39034
get_accent_char
train
def get_accent_char(char):
    """ Get the accent of a single char, if any. """
    position = utils.VOWELS.find(char.lower())
    if position == -1:
        return Accent.NONE
    # vowels are laid out in groups of 6, one slot per accent
    return 5 - position % 6
python
{ "resource": "" }
q39035
get_accent_string
train
def get_accent_string(string):
    """ Get the first accent from the right of a string. """
    # scan from the right; the first accented char found is the answer
    for char in reversed(string):
        accent = get_accent_char(char)
        if accent != Accent.NONE:
            return accent
    return Accent.NONE
python
{ "resource": "" }
q39036
add_accent_char
train
def add_accent_char(char, accent):
    """Return *char* with *accent* applied (accent is an Accent member)."""
    if char == "":
        return ""
    was_upper = char.isupper()
    lowered = char.lower()
    position = utils.VOWELS.find(lowered)
    if position != -1:
        # Jump to the accent-free end of this vowel's 6-slot family,
        # then step back by the requested accent.
        family_end = position - position % 6 + 5
        lowered = utils.VOWELS[family_end - accent]
    return utils.change_case(lowered, was_upper)
python
{ "resource": "" }
q39037
remove_accent_string
train
def remove_accent_string(string):
    """Return *string* with every accent stripped."""
    stripped = [add_accent_char(ch, Accent.NONE) for ch in string]
    return utils.join(stripped)
python
{ "resource": "" }
q39038
assign_taxonomy
train
def assign_taxonomy(
        data, min_confidence=0.80, output_fp=None, training_data_fp=None,
        fixrank=True, max_memory=None, tmp_dir=tempfile.gettempdir()):
    """Assign taxonomy to each sequence in data with the RDP classifier

    data: open fasta file object or list of fasta lines
    min_confidence: minimum support threshold to assign taxonomy to a
        sequence
    output_fp: path to write output; if not provided, result will be
        returned in a dict of {seq_id:(taxonomy_assignment,confidence)}
    training_data_fp: path to a properties file for a custom-trained
        classifier (passed to the -t option)
    fixrank: if True, request assignments at a fixed set of ranks
    max_memory: JVM heap size forwarded to the classifier's -Xmx option
    tmp_dir: directory used for scratch files
    """
    # Going to iterate through this twice in succession, best to force
    # evaluation now
    data = list(data)

    # RDP classifier doesn't preserve identifiers with spaces
    # Use lookup table
    seq_id_lookup = {}
    for seq_id, seq in parse_fasta(data):
        seq_id_lookup[seq_id.split()[0]] = seq_id

    app_kwargs = {}
    if tmp_dir is not None:
        app_kwargs['TmpDir'] = tmp_dir
    app = RdpClassifier(**app_kwargs)

    if max_memory is not None:
        app.Parameters['-Xmx'].on(max_memory)

    temp_output_file = tempfile.NamedTemporaryFile(
        prefix='RdpAssignments_', suffix='.txt', dir=tmp_dir)
    app.Parameters['-o'].on(temp_output_file.name)
    if training_data_fp is not None:
        app.Parameters['-t'].on(training_data_fp)

    if fixrank:
        app.Parameters['-f'].on('fixrank')
    else:
        app.Parameters['-f'].on('allrank')

    app_result = app(data)

    assignments = {}

    # ShortSequenceException messages are written to stdout
    # Tag these ID's as unassignable
    for line in app_result['StdOut']:
        excep = parse_rdp_exception(line)
        if excep is not None:
            _, rdp_id = excep
            orig_id = seq_id_lookup[rdp_id]
            assignments[orig_id] = ('Unassignable', 1.0)

    for line in app_result['Assignments']:
        rdp_id, direction, taxa = parse_rdp_assignment(line)
        if taxa[0][0] == "Root":
            taxa = taxa[1:]
        orig_id = seq_id_lookup[rdp_id]
        lineage, confidence = get_rdp_lineage(taxa, min_confidence)
        if lineage:
            assignments[orig_id] = (';'.join(lineage), confidence)
        else:
            assignments[orig_id] = ('Unclassified', 1.0)

    if output_fp:
        try:
            output_file = open(output_fp, 'w')
        except OSError:
            # Fixed: the original message contained a stray embedded
            # newline from a broken string literal.
            raise OSError(
                "Can't open output file for writing: %s" % output_fp)
        # Context manager ensures the handle is closed even if a write
        # fails partway through (original leaked it on error).
        with output_file:
            for seq_id, assignment in assignments.items():
                lineage, confidence = assignment
                output_file.write(
                    '%s\t%s\t%1.3f\n' % (seq_id, lineage, confidence))
        return None
    else:
        return assignments
python
{ "resource": "" }
q39039
train_rdp_classifier
train
def train_rdp_classifier(
        training_seqs_file, taxonomy_file, model_output_dir,
        max_memory=None, tmp_dir=tempfile.gettempdir()):
    """ Train RDP Classifier, saving to model_output_dir

    training_seqs_file, taxonomy_file: file-like objects used to
        train the RDP Classifier (see RdpTrainer documentation for
        format of training data)

    model_output_dir: directory in which to save the files
        necessary to classify sequences according to the training
        data

    max_memory: JVM heap size forwarded to the trainer's -Xmx option

    Once the model data has been generated, the RDP Classifier may
    be used to classify sequences against the custom training set.
    """
    app_kwargs = {}
    if tmp_dir is not None:
        app_kwargs['TmpDir'] = tmp_dir
    app = RdpTrainer(**app_kwargs)

    if max_memory is not None:
        app.Parameters['-Xmx'].on(max_memory)

    # Copy the taxonomy input to a named temp file so the trainer can
    # be handed a real filesystem path.
    temp_taxonomy_file = tempfile.NamedTemporaryFile(
        prefix='RdpTaxonomy_', suffix='.txt', dir=tmp_dir)
    temp_taxonomy_file.write(taxonomy_file.read())
    temp_taxonomy_file.seek(0)

    app.Parameters['taxonomy_file'].on(temp_taxonomy_file.name)
    app.Parameters['model_output_dir'].on(model_output_dir)
    return app(training_seqs_file)
python
{ "resource": "" }
q39040
train_rdp_classifier_and_assign_taxonomy
train
def train_rdp_classifier_and_assign_taxonomy(
        training_seqs_file, taxonomy_file, seqs_to_classify,
        min_confidence=0.80, model_output_dir=None,
        classification_output_fp=None, max_memory=None,
        tmp_dir=tempfile.gettempdir()):
    """ Train RDP Classifier and assign taxonomy in one fell swoop

    The file objects training_seqs_file and taxonomy_file are used to
    train the RDP Classifier (see RdpTrainer documentation for
    details).  Model data is stored in model_output_dir.  If
    model_output_dir is not provided, a temporary directory is created
    and removed after classification.

    The sequences in seqs_to_classify are classified according to the
    model and filtered at the desired confidence level (default:
    0.80).  The results are saved to classification_output_fp if
    provided, otherwise a dict of
    {seq_id:(taxonomy_assignment,confidence)} is returned.
    """
    if model_output_dir is None:
        training_dir = tempfile.mkdtemp(prefix='RdpTrainer_', dir=tmp_dir)
    else:
        training_dir = model_output_dir

    training_results = train_rdp_classifier(
        training_seqs_file, taxonomy_file, training_dir,
        max_memory=max_memory, tmp_dir=tmp_dir)
    training_data_fp = training_results['properties'].name

    assignment_results = assign_taxonomy(
        seqs_to_classify, min_confidence=min_confidence,
        output_fp=classification_output_fp,
        training_data_fp=training_data_fp, max_memory=max_memory,
        fixrank=False, tmp_dir=tmp_dir)

    if model_output_dir is None:
        # Forum user reported an error on the call to os.rmtree:
        # https://groups.google.com/d/topic/qiime-forum/MkNe7-JtSBw/discussion
        # We were not able to replicate the problem and fix it
        # properly. However, even if an error occurs, we would like
        # to return results, along with a warning.
        try:
            rmtree(training_dir)
        except OSError:
            msg = (
                "Temporary training directory %s not removed" %
                training_dir)
            if os.path.isdir(training_dir):
                training_dir_files = os.listdir(training_dir)
                msg += "\nDetected files %s" % training_dir_files
            warnings.warn(msg, RuntimeWarning)

    return assignment_results
python
{ "resource": "" }
q39041
parse_rdp_assignment
train
def parse_rdp_assignment(line):
    """Split one RDP classification line into (seq_id, direction, taxa).

    The line is tab-separated: id, direction, then repeating
    (taxon, rank, confidence) triples.  Empty taxon names are skipped;
    surrounding double quotes are stripped from taxon names.
    """
    fields = line.strip().split('\t')
    seq_id = fields[0]
    direction = fields[1]
    remainder = fields[2:]
    if len(remainder) % 3 != 0:
        raise ValueError(
            "Expected assignments in a repeating series of (rank, name, "
            "confidence), received %s" % remainder)
    assignments = []
    # Walk the flat token list three at a time.
    for start in range(0, len(remainder), 3):
        taxon, rank, confidence_str = remainder[start:start + 3]
        if not taxon:
            continue
        assignments.append((taxon.strip('"'), rank, float(confidence_str)))
    return seq_id, direction, assignments
python
{ "resource": "" }
q39042
RdpClassifier._get_jar_fp
train
def _get_jar_fp(self): """Returns the full path to the JAR file. If the JAR file cannot be found in the current directory and the environment variable RDP_JAR_PATH is not set, returns None. """ # handles case where the jar file is in the current working directory if os.path.exists(self._command): return self._command # handles the case where the user has specified the location via # an environment variable elif 'RDP_JAR_PATH' in environ: return getenv('RDP_JAR_PATH') else: return None
python
{ "resource": "" }
q39043
RdpClassifier._commandline_join
train
def _commandline_join(self, tokens):
    """Formats a list of tokens as a shell command

    Falsy tokens (None, empty strings) are dropped before joining with
    the instance's command delimiter.

    This seems to be a repeated pattern; may be useful in
    superclass.
    """
    commands = filter(None, map(str, tokens))
    return self._command_delimiter.join(commands).strip()
python
{ "resource": "" }
q39044
RdpClassifier._get_result_paths
train
def _get_result_paths(self, data):
    """ Return a dict of ResultPath objects representing all possible output
    """
    # The -o value may be quoted; strip the quotes to get a bare path.
    assignment_fp = str(self.Parameters['-o'].Value).strip('"')
    if not os.path.isabs(assignment_fp):
        # NOTE(review): relpath against WorkingDir is unusual here --
        # os.path.join would be the typical way to anchor a relative
        # path; confirm this is intentional.
        assignment_fp = os.path.relpath(assignment_fp, self.WorkingDir)
    return {'Assignments': ResultPath(assignment_fp, IsWritten=True)}
python
{ "resource": "" }
q39045
RdpTrainer.ModelDir
train
def ModelDir(self):
    """Absolute FilePath to the training output directory.

    Resolves the 'model_output_dir' parameter value against the
    current working directory.
    """
    model_dir = self.Parameters['model_output_dir'].Value
    absolute_model_dir = os.path.abspath(model_dir)
    return FilePath(absolute_model_dir)
python
{ "resource": "" }
q39046
RdpTrainer._input_handler_decorator
train
def _input_handler_decorator(self, data):
    """Adds positional parameters to selected input_handler's results.

    Delegates `data` to the handler named by the (name-mangled)
    __InputHandler attribute, then joins the result with the trainer's
    other positional arguments into a single command string.
    """
    input_handler = getattr(self, self.__InputHandler)
    input_parts = [
        self.Parameters['taxonomy_file'],
        input_handler(data),
        self.Parameters['training_set_id'],
        self.Parameters['taxonomy_version'],
        self.Parameters['modification_info'],
        self.ModelDir,
    ]
    return self._commandline_join(input_parts)
python
{ "resource": "" }
q39047
RdpTrainer._get_result_paths
train
def _get_result_paths(self, output_dir): """Return a dict of output files. """ # Only include the properties file here. Add the other result # paths in the __call__ method, so we can catch errors if an # output file is not written. self._write_properties_file() properties_fp = os.path.join(self.ModelDir, self.PropertiesFile) result_paths = { 'properties': ResultPath(properties_fp, IsWritten=True,) } return result_paths
python
{ "resource": "" }
q39048
RdpTrainer._write_properties_file
train
def _write_properties_file(self):
    """Write an RDP training properties file manually.

    The properties file specifies the names of the files in the
    training directory. We use the example properties file
    directly from the rdp_classifier distribution, which lists
    the default set of files created by the application. We
    must write this file manually after generating the
    training data.
    """
    properties_fp = os.path.join(self.ModelDir, self.PropertiesFile)
    # Context manager replaces the manual open()/close() pair so the
    # handle is released even if the write raises.
    with open(properties_fp, 'w') as properties_file:
        properties_file.write(
            "# Sample ResourceBundle properties file\n"
            "bergeyTree=bergeyTrainingTree.xml\n"
            "probabilityList=genus_wordConditionalProbList.txt\n"
            "probabilityIndex=wordConditionalProbIndexArr.txt\n"
            "wordPrior=logWordPrior.txt\n"
            "classifierVersion=Naive Bayesian rRNA Classifier Version 1.0, "
            "November 2003\n"
        )
python
{ "resource": "" }
q39049
InfobloxHost.delete_old_host
train
def delete_old_host(self, hostname):
    """Remove all records for the host.

    :param str hostname: Hostname to remove
    :rtype: bool
    """
    # Host.delete() removes the record set associated with the name.
    host = Host(self.session, name=hostname)
    return host.delete()
python
{ "resource": "" }
q39050
InfobloxHost.add_new_host
train
def add_new_host(self, hostname, ipv4addr, comment=None):
    """Add or update a host in the infoblox, overwriting any IP
    address entries.

    :param str hostname: Hostname to add/set
    :param str ipv4addr: IP Address to add/set
    :param str comment: The comment for the record
    """
    host = Host(self.session, name=hostname)
    # Drop any existing addresses so the record ends up holding
    # exactly the one supplied address.
    if host.ipv4addrs:
        host.ipv4addrs = []
    host.add_ipv4addr(ipv4addr)
    host.comment = comment
    return host.save()
python
{ "resource": "" }
q39051
Mafft._input_as_seqs
train
def _input_as_seqs(self, data):
    """Format a list of seq as input.

    Parameters
    ----------
    data: list of strings
        Each string is a sequence to be aligned.

    Returns
    -------
    A temp file name that contains the sequences.

    See Also
    --------
    burrito.util.CommandLineApplication
    """
    lines = []
    for i, s in enumerate(data):
        # will number the sequences 1,2,3,etc.
        lines.append(''.join(['>', str(i+1)]))
        lines.append(s)
    return self._input_as_lines(lines)
python
{ "resource": "" }
q39052
Segmenter.from_config
train
def from_config(cls, config, name, section_key="segmenters"):
    """ Constructs a segmenter from a configuration doc.

    The named section's 'class' entry is a dotted import path; the
    referenced class's own from_config is delegated to for actual
    construction.
    """
    section = config[section_key][name]
    segmenter_class_path = section['class']
    Segmenter = yamlconf.import_module(segmenter_class_path)
    return Segmenter.from_config(config, name, section_key=section_key)
python
{ "resource": "" }
q39053
random_hex
train
def random_hex(length):
    """Generates a random hex string of ``length`` characters."""
    # hexlify of `length` random bytes yields 2*length hex chars; the
    # [length:] slice keeps the trailing `length` characters.
    return escape.to_unicode(binascii.hexlify(os.urandom(length))[length:])
python
{ "resource": "" }
q39054
password_hash
train
def password_hash(password, password_salt=None):
    """Hashes a specified password, using the configured session salt
    when no explicit salt is given."""
    password_salt = password_salt or oz.settings["session_salt"]
    salted_password = password_salt + password
    # NOTE(review): a single SHA-256 round with a shared salt is weak
    # for password storage; consider a KDF (bcrypt/scrypt/PBKDF2).
    return "sha256!%s" % hashlib.sha256(salted_password.encode("utf-8")).hexdigest()
python
{ "resource": "" }
q39055
bisect
train
def bisect(func, a, b, xtol=1e-12, maxiter=100):
    """
    Finds the root of `func` using the bisection method.

    Requirements
    ------------
    - func must be continuous function that accepts a single number
      input and returns a single number
    - `func(a)` and `func(b)` must have opposite sign

    Parameters
    ----------
    func : function
        the function that we want to find the root of
    a : number
        one of the bounds on the input
    b : number
        the other bound on the input
    xtol : number, optional
        the solution tolerance of the input value. The algorithm is
        considered converged if `abs(b-a)/2. < xtol`
    maxiter : number, optional
        the maximum number of iterations allowed for convergence

    Raises
    ------
    RuntimeError
        if the method fails to converge within `maxiter` iterations
    """
    fa = func(a)
    if fa == 0.:
        return a
    fb = func(b)
    if fb == 0.:
        return b
    # Endpoints must bracket a root: opposite (nonzero) signs.
    assert (fa > 0) != (fb > 0)
    for i in range(maxiter):  # range, not py2-only xrange
        c = (a + b) / 2.
        fc = func(c)
        if fc == 0. or abs(b - a) / 2. < xtol:
            return c
        # Keep the half-interval whose endpoints still bracket the
        # root; track fa so func(a) is not re-evaluated each pass.
        if (fc > 0) == (fa > 0):
            a, fa = c, fc
        else:
            b = c
    else:
        raise RuntimeError('Failed to converge after %d iterations.'
                           % maxiter)
python
{ "resource": "" }
q39056
ResourceURL.parse_string
train
def parse_string(s): ''' Parses a foreign resource URL into the URL string itself and any relevant args and kwargs ''' matched_obj = SPLIT_URL_RE.match(s) if not matched_obj: raise URLParseException('Invalid Resource URL: "%s"' % s) url_string, arguments_string = matched_obj.groups() args_as_strings = URL_ARGUMENTS_RE.findall(arguments_string) # Determine args and kwargs args = [] kwargs = {} for arg_string in args_as_strings: kwarg_match = ARG_RE.match(arg_string) if kwarg_match: key, value = kwarg_match.groups() kwargs[key.strip()] = value.strip() else: args.append(arg_string.strip()) # Default to HTTP if url_string has no URL if not SCHEME_RE.match(url_string): url_string = '%s://%s' % (DEFAULT_SCHEME, url_string) return url_string.strip(), args, kwargs
python
{ "resource": "" }
q39057
add_experiment_choice
train
def add_experiment_choice(experiment, choice):
    """Adds an experiment choice

    :param experiment: name of the experiment to modify
    :param choice: name of the choice to add
    """
    redis = oz.redis.create_connection()
    oz.bandit.Experiment(redis, experiment).add_choice(choice)
python
{ "resource": "" }
q39058
remove_experiment_choice
train
def remove_experiment_choice(experiment, choice):
    """Removes an experiment choice

    :param experiment: name of the experiment to modify
    :param choice: name of the choice to remove
    """
    redis = oz.redis.create_connection()
    oz.bandit.Experiment(redis, experiment).remove_choice(choice)
python
{ "resource": "" }
q39059
get_experiment_results
train
def get_experiment_results():
    """
    Computes the results of all experiments, stores it in redis, and
    prints it out
    """
    redis = oz.redis.create_connection()

    for experiment in oz.bandit.get_experiments(redis):
        # Persist the winning choice before reporting on it.
        experiment.compute_default_choice()
        csq, confident = experiment.confidence()

        print("%s:" % experiment.name)
        print("- creation date: %s" % experiment.metadata["creation_date"])
        print("- default choice: %s" % experiment.default_choice)
        print("- chi squared: %s" % csq)
        print("- confident: %s" % confident)
        print("- choices:")

        for choice in experiment.choices:
            print(" - %s: plays=%s, rewards=%s, performance=%s" %
                  (choice.name, choice.plays, choice.rewards,
                   choice.performance))
python
{ "resource": "" }
q39060
sync_experiments_from_spec
train
def sync_experiments_from_spec(filename):
    """
    Takes the path to a JSON file declaring experiment specifications,
    and modifies the experiments stored in redis to match the spec. A
    spec looks like this:

    {
        "experiment 1": ["choice 1", "choice 2", "choice 3"],
        "experiment 2": ["choice 1", "choice 2"]
    }
    """
    redis = oz.redis.create_connection()

    with open(filename, "r") as f:
        schema = escape.json_decode(f.read())

    oz.bandit.sync_from_spec(redis, schema)
python
{ "resource": "" }
q39061
Report.add_chapter
train
def add_chapter(self, title):
    '''
    Adds a new chapter to the report.

    :param str title: Title of the chapter.
    '''
    anchor = 'chap%s' % self.chap_counter
    self.chap_counter += 1
    # Build the sidebar link and the in-body heading for this chapter.
    link = '<a href="#%s" class="list-group-item">%s</a>\n' % (anchor, title)
    heading = '<h1 id="%s">%s</h1>\n' % (anchor, title)
    self.sidebar += link
    self.body += heading
python
{ "resource": "" }
q39062
Report.write_report
train
def write_report(self, force=False):
    '''
    Writes the report to a file named "<title>.html".

    :param bool force: forwarded to write_file -- presumably controls
        overwriting an existing file; confirm against write_file.
    '''
    path = self.title + '.html'
    value = self._template.format(
        title=self.title, body=self.body, sidebar=self.sidebar)
    write_file(path, value, force=force)
    # Re-enable matplotlib interactive mode after report generation.
    plt.ion()
python
{ "resource": "" }
q39063
diff
train
def diff(a, b):
    """
    Performs a longest common substring diff.

    :Parameters:
        a : sequence of `comparable`
            Initial sequence
        b : sequence of `comparable`
            Changed sequence

    :Returns:
        An `iterable` of operations.
    """
    # Materialize both sequences so SequenceMatcher can index them.
    a, b = list(a), list(b)
    opcodes = SM(None, a, b).get_opcodes()
    return parse_opcodes(opcodes)
python
{ "resource": "" }
q39064
convert_endpoint
train
async def convert_endpoint(url_string, ts, is_just_checking):
    '''
    Main logic for HTTP endpoint.

    Serves the converted resource from cache when available; otherwise
    queues download + conversion work and replies with either a status
    payload (when `is_just_checking`) or a streamed placeholder.
    '''
    response = singletons.server.response

    # Prep ForeignResource and ensure does not validate security settings
    singletons.settings
    foreign_res = ForeignResource(url_string)

    target_ts = TypeString(ts)
    target_resource = TypedResource(url_string, target_ts)

    # Send back cache if it exists
    if target_resource.cache_exists():
        if is_just_checking:
            return _just_checking_response(True, target_resource)
        return await response.file(target_resource.cache_path, headers={
            'Content-Type': target_ts.mimetype,
        })

    # Check if already downloaded. If not, queue up download.
    if not foreign_res.cache_exists():
        singletons.workers.enqueue_download(foreign_res)

    # Queue up a single function that will in turn queue up conversion
    # process
    singletons.workers.enqueue_sync(
        enqueue_conversion_path,
        url_string,
        str(target_ts),
        singletons.workers.enqueue_convert
    )

    if is_just_checking:
        return _just_checking_response(False, target_resource)

    # Respond with placeholder
    return singletons.placeholders.stream_response(target_ts, response)
python
{ "resource": "" }
q39065
apply_command_list_template
train
def apply_command_list_template(command_list, in_path, out_path, args):
    '''
    Perform necessary substitutions on a command list to create a
    CLI-ready list to launch a conversion or download process via
    system binary.

    $IN and $OUT map to the input/output paths; $0, $1, ... map to the
    positional `args`.  Tokens whose substitution is empty are dropped.
    '''
    substitutions = {
        '$IN': in_path,
        '$OUT': out_path,
    }
    # Positional arguments become $0, $1, etc.
    substitutions.update(
        ('$%d' % index, value) for index, value in enumerate(args))
    final_command = []
    for token in command_list:
        replaced = substitutions.get(token, token)
        if replaced:  # discard falsy (e.g. empty-string) results
            final_command.append(replaced)
    return final_command
python
{ "resource": "" }
q39066
convert_local
train
async def convert_local(path, to_type): ''' Given an absolute path to a local file, convert to a given to_type ''' # Now find path between types typed_foreign_res = TypedLocalResource(path) original_ts = typed_foreign_res.typestring conversion_path = singletons.converter_graph.find_path( original_ts, to_type) # print('Conversion path: ', conversion_path) # Loop through each step in graph path and convert for is_first, is_last, path_step in first_last_iterator(conversion_path): converter_class, from_ts, to_ts = path_step converter = converter_class() in_resource = TypedLocalResource(path, from_ts) if is_first: # Ensure first resource is just the source one in_resource = typed_foreign_res out_resource = TypedLocalResource(path, to_ts) if is_last: out_resource = TypedPathedLocalResource(path, to_ts) await converter.convert(in_resource, out_resource)
python
{ "resource": "" }
q39067
enqueue_conversion_path
train
def enqueue_conversion_path(url_string, to_type, enqueue_convert):
    '''
    Given a URL string that has already been downloaded, enqueue
    necessary conversion to get to target type
    '''
    target_ts = TypeString(to_type)
    foreign_res = ForeignResource(url_string)

    # Determine the file type of the foreign resource
    typed_foreign_res = foreign_res.guess_typed()

    if not typed_foreign_res.cache_exists():
        # Symlink to new location that includes typed extension
        typed_foreign_res.symlink_from(foreign_res)

    # Now find path between types
    original_ts = typed_foreign_res.typestring
    path = singletons.converter_graph.find_path(original_ts, target_ts)

    # Loop through each step in graph path and convert
    is_first = True
    for converter_class, from_ts, to_ts in path:
        converter = converter_class()
        in_resource = TypedResource(url_string, from_ts)
        if is_first:
            # Ensure first resource is just the source one
            in_resource = TypedForeignResource(url_string, from_ts)
        out_resource = TypedResource(url_string, to_ts)
        enqueue_convert(converter, in_resource, out_resource)
        is_first = False
python
{ "resource": "" }
q39068
check_path
train
def check_path(path, otherwise):
    """
    Checks whether *path* exists on disk.

    When the path is absent, the `otherwise` callback is invoked with
    the path; when it is present, a warning is printed and nothing
    else happens.
    """
    if not os.path.exists(path):
        otherwise(path)
    else:
        print("WARNING: Path '%s' already exists; skipping" % path)
python
{ "resource": "" }
q39069
config_maker
train
def config_maker(project_name, path):
    """Creates a config file based on the project name

    Reads the skeleton config template, substitutes the project-name
    placeholder, and writes the result to `path`.
    """
    with open(skeleton_path("config.py"), "r") as config_source:
        config_content = config_source.read()

    config_content = config_content.replace("__PROJECT_NAME__", project_name)

    with open(path, "w") as config_dest:
        config_dest.write(config_content)
python
{ "resource": "" }
q39070
skeleton_path
train
def skeleton_path(parts):
    """Gets the path to a skeleton asset bundled inside the oz package"""
    return os.path.join(os.path.dirname(oz.__file__), "skeleton", parts)
python
{ "resource": "" }
q39071
server
train
def server():
    """Runs the server

    Dispatches on oz.settings["server_type"] (None, "wsgi", "asyncio",
    "twisted"), configures SSL and worker forking, optionally installs
    graceful-shutdown signal handlers, then starts the chosen ioloop.
    """
    tornado.log.enable_pretty_logging()

    # Get and validate the server_type
    server_type = oz.settings["server_type"]
    if server_type not in [None, "wsgi", "asyncio", "twisted"]:
        raise Exception("Unknown server type: %s" % server_type)

    # Install the correct ioloop if necessary
    if server_type == "asyncio":
        from tornado.platform.asyncio import AsyncIOMainLoop
        AsyncIOMainLoop().install()
    elif server_type == "twisted":
        from tornado.platform.twisted import TwistedIOLoop
        TwistedIOLoop().install()

    if server_type == "wsgi":
        wsgi_app = tornado.wsgi.WSGIApplication(oz._routes, **oz.settings)
        wsgi_srv = wsgiref.simple_server.make_server(
            "", oz.settings["port"], wsgi_app)
        wsgi_srv.serve_forever()
    else:
        web_app = tornado.web.Application(oz._routes, **oz.settings)

        # Idiom fix: `is not None` rather than `!= None`.
        if (oz.settings["ssl_cert_file"] is not None and
                oz.settings["ssl_key_file"] is not None):
            ssl_options = {
                "certfile": oz.settings["ssl_cert_file"],
                "keyfile": oz.settings["ssl_key_file"],
                "cert_reqs": oz.settings["ssl_cert_reqs"],
                "ca_certs": oz.settings["ssl_ca_certs"]
            }
        else:
            ssl_options = None

        http_srv = tornado.httpserver.HTTPServer(
            web_app,
            ssl_options=ssl_options,
            body_timeout=oz.settings["body_timeout"],
            xheaders=oz.settings["xheaders"]
        )
        http_srv.bind(oz.settings["port"])

        server_workers = oz.settings["server_workers"]
        if server_workers > 1:
            if oz.settings["debug"]:
                print("WARNING: Debug is enabled, but multiple server workers have been configured. Only one server worker can run in debug mode.")
                server_workers = 1
            elif (server_type == "asyncio" or server_type == "twisted"):
                # Fixed: this warning string was broken in two by a
                # stray newline.
                print("WARNING: A non-default server type is being used, but multiple server workers have been configured. Only one server worker can run on a non-default server type.")
                server_workers = 1

        # Forks multiple sub-processes if server_workers > 1
        http_srv.start(server_workers)

        # Registers signal handles for graceful server shutdown
        if oz.settings.get("use_graceful_shutdown"):
            if server_type == "asyncio" or server_type == "twisted":
                print("WARNING: Cannot enable graceful shutdown for asyncio or twisted server types.")
            else:
                # NOTE: Do not expect any logging to with certain tools
                # (e.g., invoker), because they may quiet logs on
                # SIGINT/SIGTERM
                signal.signal(signal.SIGTERM, functools.partial(
                    _shutdown_tornado_ioloop, http_srv))
                signal.signal(signal.SIGINT, functools.partial(
                    _shutdown_tornado_ioloop, http_srv))

        # Starts the ioloops
        if server_type == "asyncio":
            import asyncio
            asyncio.get_event_loop().run_forever()
        elif server_type == "twisted":
            from twisted.internet import reactor
            reactor.run()
        else:
            from tornado import ioloop
            ioloop.IOLoop.instance().start()
python
{ "resource": "" }
q39072
repl
train
def repl(): """Runs an IPython repl with some context""" try: import IPython except: print("ERROR: IPython is not installed. Please install it to use the repl.", file=sys.stderr) raise IPython.embed(user_ns=dict( settings=oz.settings, actions=oz._actions, uimodules=oz._uimodules, routes=oz._routes, ))
python
{ "resource": "" }
q39073
ResolverGraph.find_resource_url_basename
train
def find_resource_url_basename(self, resource_url):
    ''' Figure out path basename for given resource_url

    Returns None (implicitly) for unrecognized schemes; git URLs fall
    back to a URL-derived basename when their subpath is empty or '/'.
    '''
    scheme = resource_url.parsed.scheme
    if scheme in ('http', 'https', 'file'):
        return _get_basename_based_on_url(resource_url)
    elif scheme in ('git', 'git+https', 'git+http'):
        if len(resource_url.args) == 2:
            # For now, git has 2 positional args, hash and path
            git_tree, subpath = resource_url.args
            basename = os.path.basename(subpath)
            if basename:
                return basename  # subpath was not '/' or ''
        return _get_basename_based_on_url(resource_url)
python
{ "resource": "" }
q39074
ResolverGraph.find_destination_type
train
def find_destination_type(self, resource_url):
    ''' Given a resource_url, figure out what it would resolve into

    Returns the first resolver-reported destination type, or None
    (implicitly) when no resolver has an opinion.
    '''
    resolvers = self.converters.values()
    for resolver in resolvers:
        # Not all resolvers are opinionated about destination types
        if not hasattr(resolver, 'get_destination_type'):
            continue
        destination_type = resolver.get_destination_type(resource_url)
        if destination_type:
            return destination_type
python
{ "resource": "" }
q39075
ResolverGraph.download
train
async def download(self, resource_url):
    ''' Download given Resource URL by finding path through graph and
    applying each step
    '''
    resolver_path = self.find_path_from_url(resource_url)
    await self.apply_resolver_path(resource_url, resolver_path)
python
{ "resource": "" }
q39076
insert_sequences_into_tree
train
def insert_sequences_into_tree(aln, moltype, params={}): """Returns a tree from placement of sequences """ # convert aln to phy since seq_names need fixed to run through parsinsert new_aln=get_align_for_phylip(StringIO(aln)) # convert aln to fasta in case it is not already a fasta file aln2 = Alignment(new_aln) seqs = aln2.toFasta() parsinsert_app = ParsInsert(params=params) result = parsinsert_app(seqs) # parse tree tree = DndParser(result['Tree'].read(), constructor=PhyloNode) # cleanup files result.cleanUp() return tree
python
{ "resource": "" }
q39077
ParsInsert._get_result_paths
train
def _get_result_paths(self, data):
    """ Get the resulting tree

    The tree path is the input filename with its extension replaced
    by '.tree'.
    """
    result = {}
    result['Tree'] = ResultPath(Path=splitext(self._input_filename)[0] +
                                '.tree')
    return result
python
{ "resource": "" }
q39078
download
train
async def download(resource_url):
    ''' Download given resource_url

    Dispatches to the protocol-specific downloader based on the URL
    scheme.

    :raises ValueError: for schemes other than http(s)/git variants
    '''
    scheme = resource_url.parsed.scheme
    if scheme in ('http', 'https'):
        await download_http(resource_url)
    elif scheme in ('git', 'git+https', 'git+http'):
        await download_git(resource_url)
    else:
        raise ValueError('Unknown URL scheme: "%s"' % scheme)
python
{ "resource": "" }
q39079
IIIVZincBlendeAlloy.F
train
def F(self, **kwargs):
    '''
    Returns the Kane remote-band parameter, `F`, calculated from
    `Eg_Gamma_0`, `Delta_SO`, `Ep`, and `meff_e_Gamma_0`.
    '''
    Eg = self.Eg_Gamma_0(**kwargs)
    Delta_SO = self.Delta_SO(**kwargs)
    Ep = self.Ep(**kwargs)
    meff = self.meff_e_Gamma_0(**kwargs)
    # Inverts the Gamma-point effective-mass expression to solve for F.
    return (1./meff-1-(Ep*(Eg+2.*Delta_SO/3.))/(Eg*(Eg+Delta_SO)))/2
python
{ "resource": "" }
q39080
IIIVZincBlendeAlloy.nonparabolicity
train
def nonparabolicity(self, **kwargs):
    '''
    Returns the Kane band nonparabolicity parameter for the
    Gamma-valley.

    Uses the Gamma-valley gap and electron effective mass; the
    temperature `T` defaults to 300 K when not supplied in kwargs.
    '''
    Eg = self.Eg_Gamma(**kwargs)
    meff = self.meff_e_Gamma(**kwargs)
    T = kwargs.get('T', 300.)
    return k*T/Eg * (1 - meff)**2
python
{ "resource": "" }
q39081
ConverterGraph._setup_converter_graph
train
def _setup_converter_graph(self, converter_list, prune_converters):
    '''
    Set up directed conversion graph, pruning unavailable converters
    as necessary
    '''
    for converter in converter_list:
        if prune_converters:
            # configure() probes converter availability; skip this
            # converter entirely if it reports itself unavailable.
            try:
                converter.configure()
            except ConverterUnavailable as e:
                log.warning('%s unavailable: %s' %
                            (converter.__class__.__name__, str(e)))
                continue

        # Register every (input, output) pair as a weighted edge.
        for in_ in converter.inputs:
            for out in converter.outputs:
                self.dgraph.add_edge(in_, out, converter.cost)
                self.converters[(in_, out)] = converter

        if hasattr(converter, 'direct_outputs'):
            self._setup_direct_converter(converter)
python
{ "resource": "" }
q39082
ConverterGraph._setup_preferred_paths
train
def _setup_preferred_paths(self, preferred_conversion_paths):
    '''
    Add given valid preferred conversion paths

    A path is rejected (with a warning) if any adjacent pair of steps
    has no registered converter.
    '''
    for path in preferred_conversion_paths:
        for pair in pair_looper(path):
            if pair not in self.converters:
                log.warning('Invalid conversion path %s, unknown step %s'
                            % (repr(path), repr(pair)))
                break
        else:
            # If it did not break, then add to dgraph
            self.dgraph.add_preferred_path(*path)
python
{ "resource": "" }
q39083
ConverterGraph._setup_profiles
train
def _setup_profiles(self, conversion_profiles): ''' Add given conversion profiles checking for invalid profiles ''' # Check for invalid profiles for key, path in conversion_profiles.items(): if isinstance(path, str): path = (path, ) for left, right in pair_looper(path): pair = (_format(left), _format(right)) if pair not in self.converters: msg = 'Invalid conversion profile %s, unknown step %s' log.warning(msg % (repr(key), repr(pair))) break else: # If it did not break, then add to conversion profiles self.conversion_profiles[key] = path
python
{ "resource": "" }
q39084
ConverterGraph._setup_direct_converter
train
def _setup_direct_converter(self, converter):
    '''
    Given a converter, set up the direct_output routes for
    conversions, which is used for transcoding between similar
    datatypes.
    '''
    # Prefer explicit direct_inputs when the converter declares them;
    # otherwise fall back to its regular inputs.
    inputs = (
        converter.direct_inputs
        if hasattr(converter, 'direct_inputs')
        else converter.inputs
    )
    for in_ in inputs:
        for out in converter.direct_outputs:
            self.direct_converters[(in_, out)] = converter
python
{ "resource": "" }
q39085
ConverterGraph.find_path
train
def find_path(self, in_, out):
    '''
    Given an input and output TypeString, produce a graph traversal,
    keeping in mind special options like Conversion Profiles,
    Preferred Paths, and Direct Conversions.

    Returns a list of (converter, from_TypeString, to_TypeString)
    triples.
    '''
    if in_.arguments:
        raise ValueError('Cannot originate path in argumented TypeString')

    # Determine conversion profile. This is either simply the output,
    # OR, if a custom profile has been specified for this output, that
    # custom path or type is used.
    profile = self.conversion_profiles.get(str(out), str(out))
    if isinstance(profile, str):
        profile = (profile, )
    types_by_format = {_format(s): TypeString(s) for s in profile}

    # Normalize input and output types to string
    in_str = str(in_)
    out_str = _format(profile[0])

    # First check for direct conversions, returning immediately if found
    direct_converter = self.direct_converters.get((in_str, out_str))
    if direct_converter:
        out_ts = types_by_format.get(out_str, TypeString(out_str))
        return [(direct_converter, TypeString(in_str), out_ts)]

    # No direct conversions was found, so find path through graph.
    # If profile was plural, add in extra steps.
    path = self.dgraph.shortest_path(in_str, out_str)
    path += profile[1:]

    # Loop through each edge traversal, adding converters and type
    # string pairs as we go along. This is to ensure conversion
    # profiles that have arguments mid-profile get included.
    results = []
    for left, right in pair_looper(path):
        converter = self.converters.get((_format(left), _format(right)))
        right_typestring = types_by_format.get(right, TypeString(right))
        results.append((converter, TypeString(left), right_typestring))
    return results
python
{ "resource": "" }
q39086
ConverterGraph.find_path_with_profiles
train
def find_path_with_profiles(self, conversion_profiles, in_, out):
    '''
    Like find_path, except forces the conversion profiles to be the
    given conversion profile setting. Useful for "temporarily
    overriding" the global conversion profiles with your own.

    conversion_profiles: mapping to install (via _setup_profiles) for the
        duration of this call.
    Returns whatever find_path returns for (in_, out).
    '''
    original_profiles = dict(self.conversion_profiles)
    self._setup_profiles(conversion_profiles)
    try:
        return self.find_path(in_, out)
    finally:
        # Always restore the global profiles, even when find_path raises,
        # so a failed lookup cannot leave the graph permanently overridden
        # (the original version skipped restoration on exception).
        self.conversion_profiles = original_profiles
python
{ "resource": "" }
q39087
get_frames
train
def get_frames(tback, is_breakpoint):
    """Builds a list of ErrorFrame objects from a traceback"""
    frames = []
    current = tback
    while current is not None:
        # At a breakpoint the innermost frame is the stopping point itself,
        # so it is left out of the report.
        if is_breakpoint and current.tb_next is None:
            break
        code = current.tb_frame.f_code
        line_index = current.tb_lineno - 1  # 0-based line for display
        (pre_context_lineno, pre_context,
         context_line, post_context) = get_lines_from_file(
            code.co_filename, line_index + 1, 7)
        frames.append(ErrorFrame(
            current, code.co_filename, code.co_name, line_index,
            current.tb_frame.f_locals, id(current),
            pre_context, context_line, post_context, pre_context_lineno))
        current = current.tb_next
    return frames
python
{ "resource": "" }
q39088
prettify_object
train
def prettify_object(obj):
    """Makes a pretty string for an object for nice output"""
    try:
        return pprint.pformat(str(obj))
    except UnicodeDecodeError:
        # Decode failures are propagated unchanged so the caller can
        # handle them separately.
        raise
    except Exception as exc:
        # Anything else (e.g. a broken __str__) degrades to a placeholder.
        return "[could not display: <%s: %s>]" % (exc.__class__.__name__, str(exc))
python
{ "resource": "" }
q39089
render_from_repo
train
def render_from_repo(repo_path, to_path, template_params, settings_dir):
    """Render every template in the repo into the target directory.

    Generic deploy files land under `to_path`; files inside the
    'deployer_project' placeholder folder are rendered into `settings_dir`.
    """
    placeholder = 'deployer_project'
    repo_root = repo_path.rstrip('/')
    target_root = to_path.rstrip('/')

    # Generic deploy files: everything except the settings placeholder folder.
    for src in get_template_filelist(repo_root, ignore_folders=[placeholder]):
        render_from_single_file(src, src.replace(repo_root, target_root),
                                template_params)

    # Settings files live under the placeholder folder and are rendered
    # into the project's settings directory instead.
    settings_root = os.path.join(repo_root, placeholder)
    for src in get_template_filelist(settings_root):
        render_from_single_file(src, src.replace(settings_root, settings_dir),
                                template_params)
python
{ "resource": "" }
q39090
gen_headers
train
def gen_headers() -> Dict[str, str]:
    """Generate a header pairing."""
    # Single-entry pool today; random.choice keeps a uniform pick if more
    # user agents are added later.
    agents: List[str] = [
        'Mozilla/5.0 (Windows NT 6.3; WOW64) AppleWebKit/537.36 '
        '(KHTML, like Gecko) Chrome/33.0.1750.117 Safari/537.36'
    ]
    return {'User-Agent': random.choice(agents)}
python
{ "resource": "" }
q39091
extract_emails
train
def extract_emails(results: str, domain: str, fuzzy: bool) -> List[str]:
    """Grab email addresses from raw text data."""
    email_re: Pattern = re.compile(r'([\w.-]+@[\w.-]+)')
    candidates: List[str] = email_re.findall(results)
    if fuzzy:
        # Fuzzy mode keeps any address whose host part contains the
        # domain's first label (e.g. "example" for "example.com").
        # The containment check is done on the raw (pre-lowercase) match,
        # mirroring the original case-sensitive behavior.
        label = domain.split('.')[0]
        matched = [addr.lower() for addr in candidates
                   if label in addr.split('@')[1]]
    else:
        matched = [addr.lower() for addr in candidates
                   if addr.endswith(domain)]
    return list(set(matched))
python
{ "resource": "" }
q39092
seqs_to_stream
train
def seqs_to_stream(seqs, ih):
    """Converts seqs into stream of FASTA records, depending on input handler.

    Each FASTA record will be a list of lines.

    NOTE: Python 2 module (uses the old ``raise TypeError, msg`` syntax).
    """
    if ih == '_input_as_multiline_string':
        # One big FASTA-formatted string: split into lines first.
        recs = FastaFinder(seqs.split('\n'))
    elif ih == '_input_as_string':
        # Treated as a filename; FastaFinder consumes the open file handle.
        recs = FastaFinder(open(seqs))
    elif ih == '_input_as_seqs':
        # Bare sequences: synthesize ">0", ">1", ... labels per sequence.
        recs = [['>'+str(i), s] for i, s in enumerate(seqs)]
    elif ih == '_input_as_lines':
        # Already a list of FASTA lines.
        recs = FastaFinder(seqs)
    else:
        raise TypeError, "Unknown input handler %s" % ih
    return recs
python
{ "resource": "" }
q39093
blast_seqs
train
def blast_seqs(seqs,
               blast_constructor,
               blast_db=None,
               blast_mat_root=None,
               params=None,
               add_seq_names=True,
               out_filename=None,
               WorkingDir=None,
               SuppressStderr=None,
               SuppressStdout=None,
               input_handler=None,
               HALT_EXEC=False
               ):
    """Blast list of sequences.

    seqs: either file name or list of sequence objects or list of strings
    or single multiline string containing sequences.

    WARNING: DECISION RULES FOR INPUT HANDLING HAVE CHANGED. Decision rules
    for data are as follows. If it's s list, treat as lines, unless
    add_seq_names is true (in which case treat as list of seqs). If it's a
    string, test whether it has newlines. If it doesn't have newlines,
    assume it's a filename. If it does have newlines, it can't be a
    filename, so assume it's a multiline string containing sequences.

    If you want to skip the detection and force a specific type of input
    handler, use input_handler='your_favorite_handler'.

    add_seq_names: boolean. if True, sequence names are inserted in the list
        of sequences. if False, it assumes seqs is a list of lines of some
        proper format that the program can handle
    """
    # Copy the params dict: the old default `params={}` was a shared mutable
    # default that this function mutates ('-d'/'-o'), leaking state between
    # calls; copying also avoids mutating a caller-supplied dict.
    params = dict(params) if params else {}
    if blast_db:
        params["-d"] = blast_db
    if out_filename:
        params["-o"] = out_filename
    ih = input_handler or guess_input_handler(seqs, add_seq_names)

    blast_app = blast_constructor(
        params=params,
        blast_mat_root=blast_mat_root,
        InputHandler=ih,
        WorkingDir=WorkingDir,
        SuppressStderr=SuppressStderr,
        SuppressStdout=SuppressStdout,
        HALT_EXEC=HALT_EXEC)

    return blast_app(seqs)
python
{ "resource": "" }
q39094
fasta_cmd_get_seqs
train
def fasta_cmd_get_seqs(acc_list, blast_db=None, is_protein=None,
                       out_filename=None, params=None,
                       WorkingDir=tempfile.gettempdir(),
                       SuppressStderr=None, SuppressStdout=None):
    """Retrieve sequences for list of accessions

    acc_list: sequence of accession strings to fetch.
    is_protein: True -> protein db, False -> nucleotide, None -> guess.
    Returns the result of running the FastaCmd application.
    """
    # Copy the params dict: the old default `params={}` was a shared mutable
    # default that this function mutates, leaking state between calls;
    # copying also avoids mutating a caller-supplied dict.
    params = dict(params) if params else {}
    if is_protein is None:
        params["-p"] = 'G'  # presumably "guess" — confirm against fastacmd docs
    elif is_protein:
        params["-p"] = 'T'
    else:
        params["-p"] = 'F'

    if blast_db:
        params["-d"] = blast_db
    if out_filename:
        params["-o"] = out_filename

    # turn off duplicate accessions
    params["-a"] = "F"

    # create FastaCmd application controller
    fasta_cmd = FastaCmd(params=params,
                         InputHandler='_input_as_string',
                         WorkingDir=WorkingDir,
                         SuppressStderr=SuppressStderr,
                         SuppressStdout=SuppressStdout)

    # fastacmd takes a single quoted, comma-separated accession string
    return fasta_cmd("\"%s\"" % ','.join(acc_list))
python
{ "resource": "" }
q39095
psiblast_n_neighbors
train
def psiblast_n_neighbors(seqs, n=100, blast_db=None, core_threshold=1e-50,
    extra_threshold=1e-10, lower_threshold=1e-6, step=100,
    method="two-step", blast_mat_root=None, params={},
    add_seq_names=False, WorkingDir=None, SuppressStderr=None,
    SuppressStdout=None, input_handler=None,
    scorer=3,  #shotgun with 3 hits needed to keep
    second_db=None
    ):
    """PsiBlasts sequences, stopping when n neighbors are reached.

    core_threshold: threshold for the core profile (default: 1e-50)
    extra_threshold: threshold for pulling in additional seqs (default:1e-10)
    lower_threshold: threshold for seqs in final round (default:1e-6)

    seqs: either file name or list of sequence objects or list of strings
    or single multiline string containing sequences.

    If you want to skip the detection and force a specific type of input
    handler, use input_handler='your_favorite_handler'.

    add_seq_names: boolean. if True, sequence names are inserted in the list
        of sequences. if False, it assumes seqs is a list of lines of some
        proper format that the program can handle

    Returns {query_id: per-method result} for each input record.
    NOTE: Python 2 module (uses the old ``raise TypeError, msg`` syntax).
    """
    # NOTE(review): `params={}` is a shared mutable default that is mutated
    # below ('-d', '-j'), so state can leak between calls — worth fixing.
    if blast_db:
        params["-d"] = blast_db
    ih = input_handler or guess_input_handler(seqs, add_seq_names)
    recs = seqs_to_stream(seqs, ih) #checkpointing can only handle one seq...
    #set up the parameters for the core and additional runs
    # NOTE(review): raises KeyError if '-j' (iteration count) was not supplied
    # in params — presumably callers always set it; confirm.
    max_iterations = params['-j']
    params['-j'] = 2 #won't checkpoint with single iteration
    app = PsiBlast(params=params,
        blast_mat_root=blast_mat_root,
        InputHandler='_input_as_lines',
        WorkingDir=WorkingDir,
        SuppressStderr=SuppressStderr,
        SuppressStdout=SuppressStdout,
        )
    result = {}
    for seq in recs:
        # Query id = first whitespace-delimited token of the FASTA label.
        query_id = seq[0][1:].split(None,1)[0]
        if method == "two-step":
            result[query_id] = ids_from_seq_two_step(seq, n, max_iterations,
                app, core_threshold, extra_threshold, lower_threshold,
                second_db)
        elif method == "lower_threshold":
            result[query_id] = ids_from_seq_lower_threshold(seq, n,
                max_iterations, app, core_threshold, lower_threshold, step)
        elif method == "iterative":
            result[query_id] = ids_from_seqs_iterative(seq, app,
                QMEPsiBlast9, scorer, params['-j'], n)
        else:
            raise TypeError, "Got unknown method %s" % method
    # Restore the caller-visible iteration count mutated above.
    params['-j'] = max_iterations
    return result
python
{ "resource": "" }
q39096
ids_from_seq_two_step
train
def ids_from_seq_two_step(seq, n, max_iterations, app, core_threshold,
    extra_threshold, lower_threshold, second_db=None):
    """Returns ids that match a seq, using a 2-tiered strategy.

    Phase 1 builds a strict "core" profile; phase 2 relaxes the thresholds
    (and optionally switches databases) to pull in extra hits, restarting
    from the last PSI-BLAST checkpoint file written in phase 1.

    Optionally uses a second database for the second search.
    """
    #first time through: reset 'h' and 'e' to core
    #-h is the e-value threshold for including seqs in the score matrix model
    app.Parameters['-h'].on(core_threshold)
    #-e is the e-value threshold for the final blast
    app.Parameters['-e'].on(core_threshold)
    checkpoints = []
    ids = []
    last_num_ids = None
    for i in range(max_iterations):
        if checkpoints:
            # Restart from the previous iteration's stored score model.
            app.Parameters['-R'].on(checkpoints[-1])
        curr_check = 'checkpoint_%s.chk' % i
        app.Parameters['-C'].on(curr_check)
        output = app(seq)
        #if we didn't write a checkpoint, bail out
        if not access(curr_check, F_OK):
            break
        #if we got here, we wrote a checkpoint file
        checkpoints.append(curr_check)
        result = list(output.get('BlastOut', output['StdOut']))
        output.cleanUp()
        if result:
            ids = LastProteinIds9(result,keep_values=True,filter_identity=False)
            num_ids = len(ids)
            if num_ids >= n:
                break
            if num_ids == last_num_ids:
                # Converged: no new hits since last iteration.
                break
            last_num_ids = num_ids
    #if we didn't write any checkpoints, second run won't work, so return ids
    if not checkpoints:
        return ids
    #if we got too many ids and don't have a second database, return the ids we got
    # NOTE(review): num_ids is unbound here if every iteration produced an
    # empty result — would raise NameError; confirm whether that can happen.
    if (not second_db) and num_ids >= n:
        return ids
    #second time through: reset 'h' and 'e' to get extra hits, and switch the
    #database if appropriate
    app.Parameters['-h'].on(extra_threshold)
    app.Parameters['-e'].on(lower_threshold)
    if second_db:
        app.Parameters['-d'].on(second_db)
    for i in range(max_iterations):
        #will always have last_check if we get here
        app.Parameters['-R'].on(checkpoints[-1])
        curr_check = 'checkpoint_b_%s.chk' % i
        app.Parameters['-C'].on(curr_check)
        output = app(seq)
        #bail out if we couldn't write a checkpoint
        if not access(curr_check, F_OK):
            break
        #if we got here, the checkpoint worked
        checkpoints.append(curr_check)
        result = list(output.get('BlastOut',
            output['StdOut']))
        if result:
            ids = LastProteinIds9(result,keep_values=True,filter_identity=False)
            num_ids = len(ids)
            if num_ids >= n:
                break
            if num_ids == last_num_ids:
                break
            last_num_ids = num_ids
    #return the ids we got. may not be as many as we wanted.
    # Clean up all checkpoint files written during either phase.
    for c in checkpoints:
        remove(c)
    return ids
python
{ "resource": "" }
q39097
ids_from_seq_lower_threshold
train
def ids_from_seq_lower_threshold(seq, n, max_iterations, app, core_threshold,
    lower_threshold, step=100):
    """Returns ids that match a seq, decreasing the sensitivity.

    Repeatedly runs PSI-BLAST, multiplying the e-value threshold by `step`
    each round (making it less strict) until n ids are found, the threshold
    exceeds lower_threshold, max_iterations is hit, or checkpointing fails.
    Control flow exits via the ThresholdFound exception in every case.

    Returns (ids, iterations_run, all_ids) where all_ids maps
    iteration number -> (threshold used, list of matching ids).
    """
    last_num_ids = None
    checkpoints = []
    # Unique fragment for this call's checkpoint filenames, so concurrent
    # or repeated runs don't collide.
    cp_name_base = make_unique_str()
    # cache ids for each iteration
    # store { iteration_num:(core_threshold, [list of matching ids]) }
    all_ids = {}
    try:
        i=0
        while 1:
            #-h is the e-value threshold for inclusion in the score matrix model
            app.Parameters['-h'].on(core_threshold)
            app.Parameters['-e'].on(core_threshold)
            if core_threshold > lower_threshold:
                # Relaxed past the floor: stop with whatever we have.
                raise ThresholdFound
            if checkpoints:
                #-R restarts from a previously stored file
                app.Parameters['-R'].on(checkpoints[-1])
            #store the score model from this iteration
            curr_check = 'checkpoint_' + cp_name_base + '_' + str(i) + \
                '.chk'
            app.Parameters['-C'].on(curr_check)
            output = app(seq)
            result = list(output.get('BlastOut', output['StdOut']))
            #sometimes fails on first try -- don't know why, but this seems
            #to fix problem
            # NOTE(review): this retry loop never terminates if the app keeps
            # returning empty output — worth a retry cap.
            while not result:
                output = app(seq)
                result = list(output.get('BlastOut', output['StdOut']))
            ids = LastProteinIds9(result,keep_values=True,filter_identity=False)
            output.cleanUp()
            # Iteration numbers are 1-based in the returned cache.
            all_ids[i + 1] = (core_threshold, copy(ids))
            if not access(curr_check, F_OK):
                # No checkpoint written: cannot continue iterating.
                raise ThresholdFound
            checkpoints.append(curr_check)
            num_ids = len(ids)
            if num_ids >= n:
                raise ThresholdFound
            last_num_ids = num_ids
            # Relax the threshold for the next round.
            core_threshold *= step
            if i >= max_iterations - 1:
                #because max_iterations is 1-based
                raise ThresholdFound
            i += 1
    except ThresholdFound:
        # Remove all checkpoint files written during this call.
        for c in checkpoints:
            remove(c)
        #turn app.Parameters['-R'] off so that for the next file it does not
        #try and read in a checkpoint file that is not there
        app.Parameters['-R'].off()
        return ids, i + 1, all_ids
python
{ "resource": "" }
q39098
make_unique_str
train
def make_unique_str(num_chars=20):
    """Return a random alphanumeric string for a temp filename.

    num_chars: length of the generated string (default 20).
    """
    # Fixed the original character pool, which misspelled the alphabet
    # ('abcdefghigklmnopqrstuvwxyz' duplicated 'g' and dropped 'j') and
    # doubled '0' in the digits, skewing the distribution.
    lowercase = 'abcdefghijklmnopqrstuvwxyz'
    pool = lowercase + lowercase.upper() + '0123456789'
    return ''.join(choice(pool) for _ in range(num_chars))
python
{ "resource": "" }
q39099
keep_everything_scorer
train
def keep_everything_scorer(checked_ids):
    """Returns every query and every match in checked_ids, with best score.

    checked_ids: dict mapping query id -> dict of match id -> score data.
    Returns a deduplicated list containing every query id and every
    match id.
    """
    # The original relied on Python 2's list-returning dict.keys() and then
    # called .extend on it, which breaks on Python 3 (keys() is a view).
    # Build an explicit list so the code runs on both.
    result = list(checked_ids)
    for matches in checked_ids.values():
        result.extend(matches)
    # dict.fromkeys dedupes while (on modern Pythons) preserving order.
    return list(dict.fromkeys(result))
python
{ "resource": "" }