sentence1 (string): lengths 52 to 3.87M
sentence2 (string): lengths 1 to 47.2k
label (string): 1 class (entailment)
def rename_motifs(motifs, stats=None): """Rename motifs to GimmeMotifs_1..GimmeMotifs_N. If a stats object is passed, the stats will be copied.""" final_motifs = [] for i, motif in enumerate(motifs): old = str(motif) motif.id = "GimmeMotifs_{}".format(i + 1) final_motifs.append(motif) if stats: stats[str(motif)] = stats[old].copy() if stats: return final_motifs, stats else: return final_motifs
Rename motifs to GimmeMotifs_1..GimmeMotifs_N. If a stats object is passed, the stats will be copied.
entailment
def gimme_motifs(inputfile, outdir, params=None, filter_significant=True, cluster=True, create_report=True): """De novo motif prediction based on an ensemble of different tools. Parameters ---------- inputfile : str Filename of input. Can be either BED, narrowPeak or FASTA. outdir : str Name of output directory. params : dict, optional Optional parameters. filter_significant : bool, optional Filter motifs for significance using the validation set. cluster : bool, optional Cluster similar predicted (and significant) motifs. create_report : bool, optional Create output reports (both .txt and .html). Returns ------- motifs : list List of predicted motifs. Examples -------- >>> from gimmemotifs.denovo import gimme_motifs >>> gimme_motifs("input.fa", "motifs.out") """ if outdir is None: outdir = "gimmemotifs_{}".format(datetime.date.today().strftime("%d_%m_%Y")) # Create output directories tmpdir = os.path.join(outdir, "intermediate") for d in [outdir, tmpdir]: if not os.path.exists(d): os.mkdir(d) # setup logfile logger = logging.getLogger("gimme") # Log to file logfile = os.path.join(outdir, "gimmemotifs.log") fh = logging.FileHandler(logfile, "w") fh.setLevel(logging.DEBUG) file_formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s") fh.setFormatter(file_formatter) logger.addHandler(fh) logger = logging.getLogger("gimme.denovo.gimme_motifs") # Initialize parameters params = parse_denovo_params(params) # Check the input files input_type, background = check_denovo_input(inputfile, params) logger.info("starting full motif analysis") logger.debug("Using temporary directory %s", mytmpdir()) # Create the necessary files for motif prediction and validation if input_type == "bed": prepare_denovo_input_bed(inputfile, params, tmpdir) elif input_type == "narrowpeak": prepare_denovo_input_narrowpeak(inputfile, params, tmpdir) elif input_type == "fasta": prepare_denovo_input_fa(inputfile, params, tmpdir) else: logger.error("Unknown input file.") sys.exit(1) # Create the background FASTA files background = create_backgrounds( tmpdir, background, params.get("genome", None), params["width"], params.get("custom_background", None) ) # Predict de novo motifs result = predict_motifs( os.path.join(tmpdir, "prediction.fa"), os.path.join(tmpdir, "prediction.bg.fa"), os.path.join(tmpdir, "all_motifs.pfm"), params=params, stats_fg=os.path.join(tmpdir, 'validation.fa'), stats_bg=background, ) if len(result.motifs) == 0: logger.info("finished") return [] # Write statistics stats_file = os.path.join(tmpdir, "stats.{}.txt") write_stats(result.stats, stats_file) bg = sorted(background, key=lambda x: BG_RANK[x])[0] if filter_significant: motifs = filter_significant_motifs( os.path.join(tmpdir, "significant_motifs.pfm"), result, bg) if len(motifs) == 0: logger.info("no significant motifs") return [] pwmfile = os.path.join(tmpdir, "significant_motifs.pfm") else: logger.info("not filtering for significance") motifs = result.motifs pwmfile = os.path.join(tmpdir, "all_motifs.pfm") if cluster: clusters = cluster_motifs_with_report( pwmfile, os.path.join(tmpdir, "clustered_motifs.pfm"), outdir, 0.95, title=inputfile) # Determine best motif in cluster best_motifs = best_motif_in_cluster( pwmfile, os.path.join(tmpdir, "clustered_motifs.pfm"), clusters, os.path.join(tmpdir, 'validation.fa'), background, result.stats) final_motifs, stats = rename_motifs(best_motifs, result.stats) else: logger.info("not clustering") rank = rank_motifs(result.stats) sorted_motifs = sorted(motifs, key=lambda x: rank[str(x)], reverse=True) final_motifs, stats = rename_motifs(sorted_motifs, result.stats) with open(os.path.join(outdir, "motifs.pwm"), "w") as f: for m in final_motifs: f.write("{}\n".format(m.to_pwm())) if create_report: bg = dict([(b, os.path.join(tmpdir, "bg.{}.fa".format(b))) for b in background]) create_denovo_motif_report( inputfile, os.path.join(outdir, "motifs.pwm"), os.path.join(tmpdir, "validation.fa"), bg, os.path.join(tmpdir, "localization.fa"), outdir, params, stats, ) with open(os.path.join(outdir, "params.txt"), "w") as f: for k, v in params.items(): f.write("{}\t{}\n".format(k, v)) if not params.get("keep_intermediate"): logger.debug( "Deleting intermediate files. " "Please specify the -k option if you want to keep these files.") shutil.rmtree(tmpdir) logger.info("finished") logger.info("output dir: %s", outdir) if cluster: logger.info("report: %s", os.path.join(outdir, "motif_report.html")) return final_motifs
De novo motif prediction based on an ensemble of different tools. Parameters ---------- inputfile : str Filename of input. Can be either BED, narrowPeak or FASTA. outdir : str Name of output directory. params : dict, optional Optional parameters. filter_significant : bool, optional Filter motifs for significance using the validation set. cluster : bool, optional Cluster similar predicted (and significant) motifs. create_report : bool, optional Create output reports (both .txt and .html). Returns ------- motifs : list List of predicted motifs. Examples -------- >>> from gimmemotifs.denovo import gimme_motifs >>> gimme_motifs("input.fa", "motifs.out")
entailment
def register_db(cls, dbname): """Register method to keep list of dbs.""" def decorator(subclass): """Register as decorator function.""" cls._dbs[dbname] = subclass subclass.name = dbname return subclass return decorator
Register method to keep list of dbs.
entailment
def moap(inputfile, method="hypergeom", scoring=None, outfile=None, motiffile=None, pwmfile=None, genome=None, fpr=0.01, ncpus=None, subsample=None): """Run a single motif activity prediction algorithm. Parameters ---------- inputfile : str File with regions (chr:start-end) in first column and either cluster name in second column or a table with values. method : str, optional Motif activity method to use. Any of 'hypergeom', 'lasso', 'lightningclassification', 'lightningregressor', 'bayesianridge', 'rf', 'xgboost'. Default is 'hypergeom'. scoring : str, optional Either 'score' or 'count'. outfile : str, optional Name of outputfile to save the fitted activity values. motiffile : str, optional Table with motif scan results. First column should be exactly the same regions as in the inputfile. pwmfile : str, optional File with motifs in pwm format. Required when motiffile is not supplied. genome : str, optional Genome name, as indexed by gimme. Required when motiffile is not supplied. fpr : float, optional FPR for motif scanning. ncpus : int, optional Number of threads to use. Default is the number specified in the config. Returns ------- pandas DataFrame with motif activity """ if scoring and scoring not in ['score', 'count']: raise ValueError("valid values are 'score' and 'count'") config = MotifConfig() if inputfile.endswith("feather"): df = pd.read_feather(inputfile) df = df.set_index(df.columns[0]) else: # read data df = pd.read_table(inputfile, index_col=0, comment="#") clf = Moap.create(method, ncpus=ncpus) if clf.ptype == "classification": if df.shape[1] != 1: raise ValueError("1 column expected for {}".format(method)) else: if np.dtype('object') in set(df.dtypes): raise ValueError( "columns should all be numeric for {}".format(method)) if motiffile is None: if genome is None: raise ValueError("need a genome") pwmfile = pwmfile_location(pwmfile) try: motifs = read_motifs(pwmfile) except Exception: sys.stderr.write("can't read motifs from {}\n".format(pwmfile)) raise # initialize scanner s = Scanner(ncpus=ncpus) sys.stderr.write(pwmfile + "\n") s.set_motifs(pwmfile) s.set_genome(genome) s.set_background(genome=genome) # scan for motifs sys.stderr.write("scanning for motifs\n") motif_names = [m.id for m in read_motifs(pwmfile)] scores = [] if method == 'classic' or scoring == "count": s.set_threshold(fpr=fpr) for row in s.count(list(df.index)): scores.append(row) else: for row in s.best_score(list(df.index), normalize=True): scores.append(row) motifs = pd.DataFrame(scores, index=df.index, columns=motif_names) else: motifs = pd.read_table(motiffile, index_col=0, comment="#") if outfile and os.path.exists(outfile): out = pd.read_table(outfile, index_col=0, comment="#") ncols = df.shape[1] if ncols == 1: ncols = len(df.iloc[:,0].unique()) if out.shape[0] == motifs.shape[1] and out.shape[1] == ncols: logger.warning("%s output already exists... skipping", method) return out if subsample is not None: n = int(subsample * df.shape[0]) logger.debug("Subsampling %d regions", n) df = df.sample(n) motifs = motifs.loc[df.index] if method == "lightningregressor": outdir = os.path.dirname(outfile) tmpname = os.path.join(outdir, ".lightning.tmp") clf.fit(motifs, df, tmpdir=tmpname) shutil.rmtree(tmpname) else: clf.fit(motifs, df) if outfile: with open(outfile, "w") as f: f.write("# maelstrom - GimmeMotifs version {}\n".format(__version__)) f.write("# method: {} with scoring: {}\n".format(method, scoring)) if genome: f.write("# genome: {}\n".format(genome)) if motiffile: f.write("# motif table: {}\n".format(motiffile)) f.write("# {}\n".format(clf.act_description)) with open(outfile, "a") as f: clf.act_.to_csv(f, sep="\t") return clf.act_
Run a single motif activity prediction algorithm. Parameters ---------- inputfile : str File with regions (chr:start-end) in first column and either cluster name in second column or a table with values. method : str, optional Motif activity method to use. Any of 'hypergeom', 'lasso', 'lightningclassification', 'lightningregressor', 'bayesianridge', 'rf', 'xgboost'. Default is 'hypergeom'. scoring : str, optional Either 'score' or 'count'. outfile : str, optional Name of outputfile to save the fitted activity values. motiffile : str, optional Table with motif scan results. First column should be exactly the same regions as in the inputfile. pwmfile : str, optional File with motifs in pwm format. Required when motiffile is not supplied. genome : str, optional Genome name, as indexed by gimme. Required when motiffile is not supplied. fpr : float, optional FPR for motif scanning. ncpus : int, optional Number of threads to use. Default is the number specified in the config. Returns ------- pandas DataFrame with motif activity
entailment
def create(cls, name, ncpus=None): """Create a Moap instance based on the predictor name. Parameters ---------- name : str Name of the predictor (eg. Xgboost, BayesianRidge, ...) ncpus : int, optional Number of threads. Default is the number specified in the config. Returns ------- moap : Moap instance moap instance. """ try: return cls._predictors[name.lower()](ncpus=ncpus) except KeyError: raise Exception("Unknown class")
Create a Moap instance based on the predictor name. Parameters ---------- name : str Name of the predictor (eg. Xgboost, BayesianRidge, ...) ncpus : int, optional Number of threads. Default is the number specified in the config. Returns ------- moap : Moap instance moap instance.
entailment
def register_predictor(cls, name): """Register method to keep list of predictors.""" def decorator(subclass): """Register as decorator function.""" cls._predictors[name.lower()] = subclass subclass.name = name.lower() return subclass return decorator
Register method to keep list of predictors.
entailment
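register_db and register_predictor above are the same decorator-based registry pattern: each subclass registers itself in a class-level dict, and create() looks the class up by name. A minimal self-contained sketch of that pattern; the HypergeomMoap subclass here is illustrative, not the actual gimmemotifs implementation:

class Moap(object):
    _predictors = {}

    @classmethod
    def create(cls, name, ncpus=None):
        # Look up the registered subclass by (case-insensitive) name.
        try:
            return cls._predictors[name.lower()](ncpus=ncpus)
        except KeyError:
            raise Exception("Unknown class")

    @classmethod
    def register_predictor(cls, name):
        def decorator(subclass):
            cls._predictors[name.lower()] = subclass
            subclass.name = name.lower()
            return subclass
        return decorator

@Moap.register_predictor("Hypergeom")
class HypergeomMoap(object):
    def __init__(self, ncpus=None):
        self.ncpus = ncpus

clf = Moap.create("hypergeom")  # instantiates HypergeomMoap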
def list_classification_predictors(self): """List available classification predictors.""" preds = [self.create(x) for x in self._predictors.keys()] return [x.name for x in preds if x.ptype == "classification"]
List available classification predictors.
entailment
def _activate(self): """Activates the stream.""" if six.callable(self.streamer): # If it's a function, create the stream. self.stream_ = self.streamer(*(self.args), **(self.kwargs)) else: # If it's iterable, use it directly. self.stream_ = iter(self.streamer)
Activates the stream.
entailment
def iterate(self, max_iter=None): '''Instantiate an iterator. Parameters ---------- max_iter : None or int > 0 Maximum number of iterations to yield. If ``None``, exhaust the stream. Yields ------ obj : Objects yielded by the streamer provided on init. See Also -------- cycle : force an infinite stream. ''' # Use self as context manager / calls __enter__() => _activate() with self as active_streamer: for n, obj in enumerate(active_streamer.stream_): if max_iter is not None and n >= max_iter: break yield obj
Instantiate an iterator. Parameters ---------- max_iter : None or int > 0 Maximum number of iterations to yield. If ``None``, exhaust the stream. Yields ------ obj : Objects yielded by the streamer provided on init. See Also -------- cycle : force an infinite stream.
entailment
def cycle(self, max_iter=None): '''Iterate from the streamer infinitely. This function will force an infinite stream, restarting the streamer even if a StopIteration is raised. Parameters ---------- max_iter : None or int > 0 Maximum number of iterations to yield. If `None`, iterate indefinitely. Yields ------ obj : Objects yielded by the streamer provided on init. ''' count = 0 while True: for obj in self.iterate(): count += 1 if max_iter is not None and count > max_iter: return yield obj
Iterate from the streamer infinitely. This function will force an infinite stream, restarting the streamer even if a StopIteration is raised. Parameters ---------- max_iter : None or int > 0 Maximum number of iterations to yield. If `None`, iterate indefinitely. Yields ------ obj : Objects yielded by the streamer provided on init.
entailment
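A hedged usage sketch of iterate() and cycle(), assuming these methods belong to pescador's Streamer class (as the _activate() signature above suggests); the generator is a toy example:

import pescador

def gen(n):
    for i in range(n):
        yield {"x": i}

stream = pescador.Streamer(gen, 3)

# iterate() stops when the underlying generator is exhausted
print([d["x"] for d in stream.iterate()])          # [0, 1, 2]

# cycle() restarts the generator on StopIteration; bound it with max_iter
print([d["x"] for d in stream.cycle(max_iter=7)])  # [0, 1, 2, 0, 1, 2, 0]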
def calc_stats_iterator(motifs, fg_file, bg_file, genome=None, stats=None, ncpus=None): """Calculate motif enrichment metrics. Parameters ---------- motifs : str, list or Motif instance A file with motifs in pwm format, a list of Motif instances or a single Motif instance. fg_file : str Filename of a FASTA, BED or region file with positive sequences. bg_file : str Filename of a FASTA, BED or region file with negative sequences. genome : str, optional Genome or index directory in case of BED/regions. stats : list, optional Names of metrics to calculate. See gimmemotifs.rocmetrics.__all__ for available metrics. ncpus : int, optional Number of cores to use. Yields ------ result : dict Dictionary with results per chunk of motifs, where keys are motif ids and the values are dictionaries with metric name and value pairs. """ if not stats: stats = rocmetrics.__all__ if isinstance(motifs, Motif): all_motifs = [motifs] elif isinstance(motifs, list): all_motifs = motifs else: all_motifs = read_motifs(motifs, fmt="pwm") if ncpus is None: ncpus = int(MotifConfig().get_default_params()["ncpus"]) chunksize = 240 for i in range(0, len(all_motifs), chunksize): result = {} logger.debug("chunk %s of %s", (i // chunksize) + 1, len(all_motifs) // chunksize + 1) motifs = all_motifs[i:i + chunksize] fg_total = scan_to_best_match(fg_file, motifs, ncpus=ncpus, genome=genome) bg_total = scan_to_best_match(bg_file, motifs, ncpus=ncpus, genome=genome) logger.debug("calculating statistics") if ncpus == 1: it = _single_stats(motifs, stats, fg_total, bg_total) else: it = _mp_stats(motifs, stats, fg_total, bg_total, ncpus) for motif_id, s, ret in it: if motif_id not in result: result[motif_id] = {} result[motif_id][s] = ret yield result
Calculate motif enrichment metrics. Parameters ---------- motifs : str, list or Motif instance A file with motifs in pwm format, a list of Motif instances or a single Motif instance. fg_file : str Filename of a FASTA, BED or region file with positive sequences. bg_file : str Filename of a FASTA, BED or region file with negative sequences. genome : str, optional Genome or index directory in case of BED/regions. stats : list, optional Names of metrics to calculate. See gimmemotifs.rocmetrics.__all__ for available metrics. ncpus : int, optional Number of cores to use. Yields ------ result : dict Dictionary with results per chunk of motifs, where keys are motif ids and the values are dictionaries with metric name and value pairs.
entailment
def calc_stats(motifs, fg_file, bg_file, genome=None, stats=None, ncpus=None): """Calculate motif enrichment metrics. Parameters ---------- motifs : str, list or Motif instance A file with motifs in pwm format, a list of Motif instances or a single Motif instance. fg_file : str Filename of a FASTA, BED or region file with positive sequences. bg_file : str Filename of a FASTA, BED or region file with negative sequences. genome : str, optional Genome or index directory in case of BED/regions. stats : list, optional Names of metrics to calculate. See gimmemotifs.rocmetrics.__all__ for available metrics. ncpus : int, optional Number of cores to use. Returns ------- result : dict Dictionary with results where keys are motif ids and the values are dictionaries with metric name and value pairs. """ result = {} for batch_result in calc_stats_iterator(motifs, fg_file, bg_file, genome=genome, stats=stats, ncpus=ncpus): for motif_id in batch_result: if motif_id not in result: result[motif_id] = {} for s, ret in batch_result[motif_id].items(): result[motif_id][s] = ret return result
Calculate motif enrichment metrics. Parameters ---------- motifs : str, list or Motif instance A file with motifs in pwm format, a list of Motif instances or a single Motif instance. fg_file : str Filename of a FASTA, BED or region file with positive sequences. bg_file : str Filename of a FASTA, BED or region file with negative sequences. genome : str, optional Genome or index directory in case of BED/regions. stats : list, optional Names of metrics to calculate. See gimmemotifs.rocmetrics.__all__ for available metrics. ncpus : int, optional Number of cores to use. Returns ------- result : dict Dictionary with results where keys are motif ids and the values are dictionaries with metric name and value pairs.
entailment
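A short usage sketch of calc_stats(); the file names are placeholders and the module path (gimmemotifs.stats) is an assumption:

from gimmemotifs.stats import calc_stats  # assumed module path

stats = calc_stats(
    "motifs.pwm",   # motif file, list of Motif instances, or one Motif
    "peaks.fa",     # foreground (positive) sequences
    "random.fa",    # background (negative) sequences
    stats=["roc_auc", "recall_at_fdr"],
    ncpus=4,
)
for motif_id, metrics in stats.items():
    print(motif_id, metrics["roc_auc"], metrics["recall_at_fdr"])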
def rank_motifs(stats, metrics=("roc_auc", "recall_at_fdr")): """Determine mean rank of motifs based on metrics.""" rank = {} combined_metrics = [] motif_ids = stats.keys() background = list(stats.values())[0].keys() for metric in metrics: mean_metric_stats = [np.mean( [stats[m][bg][metric] for bg in background]) for m in motif_ids] ranked_metric_stats = rankdata(mean_metric_stats) combined_metrics.append(ranked_metric_stats) for motif, val in zip(motif_ids, np.mean(combined_metrics, 0)): rank[motif] = val return rank
Determine mean rank of motifs based on metrics.
entailment
def write_stats(stats, fname, header=None): """Write motif statistics to text file.""" # Write stats output to file for bg in list(stats.values())[0].keys(): f = open(fname.format(bg), "w") if header: f.write(header) stat_keys = sorted(list(list(stats.values())[0].values())[0].keys()) f.write("{}\t{}\n".format("Motif", "\t".join(stat_keys))) for motif in stats: m_stats = stats.get(str(motif), {}).get(bg) if m_stats: f.write("{}\t{}\n".format( "_".join(motif.split("_")[:-1]), "\t".join([str(m_stats[k]) for k in stat_keys]) )) else: logger.warning("No stats for motif {0}, skipping this motif!".format(motif)) f.close() return
Write motif statistics to text file.
entailment
def get_roc_values(motif, fg_file, bg_file): """Calculate ROC AUC values for ROC plots.""" try: stats = calc_stats(motif, fg_file, bg_file, stats=["roc_values"], ncpus=1) (x, y) = list(stats.values())[0]["roc_values"] return None, x, y except Exception as e: print(motif) print(motif.id) return e, [], []
Calculate ROC AUC values for ROC plots.
entailment
def create_roc_plots(pwmfile, fgfa, background, outdir): """Make ROC plots for all motifs.""" motifs = read_motifs(pwmfile, fmt="pwm", as_dict=True) ncpus = int(MotifConfig().get_default_params()['ncpus']) pool = Pool(processes=ncpus) jobs = {} for bg,fname in background.items(): for m_id, m in motifs.items(): k = "{}_{}".format(str(m), bg) jobs[k] = pool.apply_async( get_roc_values, (motifs[m_id], fgfa, fname,) ) imgdir = os.path.join(outdir, "images") if not os.path.exists(imgdir): os.mkdir(imgdir) roc_img_file = os.path.join(outdir, "images", "{}_roc.{}.png") for motif in motifs.values(): for bg in background: k = "{}_{}".format(str(motif), bg) error, x, y = jobs[k].get() if error: logger.error("Error in thread: %s", error) logger.error("Motif: %s", motif) sys.exit(1) roc_plot(roc_img_file.format(motif.id, bg), x, y)
Make ROC plots for all motifs.
entailment
def _create_text_report(inputfile, motifs, closest_match, stats, outdir): """Create text report of motifs with statistics and database match.""" my_stats = {} for motif in motifs: match = closest_match[motif.id] my_stats[str(motif)] = {} for bg in list(stats.values())[0].keys(): if str(motif) not in stats: logger.error("####") logger.error("{} not found".format(str(motif))) for s in sorted(stats.keys()): logger.error(s) logger.error("####") else: my_stats[str(motif)][bg] = stats[str(motif)][bg].copy() my_stats[str(motif)][bg]["best_match"] = "_".join(match[0].split("_")[:-1]) my_stats[str(motif)][bg]["best_match_pvalue"] = match[1][-1] header = ("# GimmeMotifs version {}\n" "# Inputfile: {}\n" ).format(__version__, inputfile) write_stats(my_stats, os.path.join(outdir, "stats.{}.txt"), header=header)
Create text report of motifs with statistics and database match.
entailment
def _create_graphical_report(inputfile, pwm, background, closest_match, outdir, stats, best_id=None): """Create main gimme_motifs output html report.""" if best_id is None: best_id = {} logger.debug("Creating graphical report") class ReportMotif(object): """Placeholder for motif stats.""" pass config = MotifConfig() imgdir = os.path.join(outdir, "images") if not os.path.exists(imgdir): os.mkdir(imgdir) motifs = read_motifs(pwm, fmt="pwm") roc_img_file = "%s_roc.%s" dbpwm = config.get_default_params()["motif_db"] pwmdir = config.get_motif_dir() dbmotifs = read_motifs(os.path.join(pwmdir, dbpwm), as_dict=True) report_motifs = [] for motif in motifs: rm = ReportMotif() rm.id = motif.id rm.id_href = {"href": "#%s" % motif.id} rm.id_name = {"name": motif.id} rm.img = {"src": os.path.join("images", "%s.png" % motif.id)} motif.to_img(os.path.join(outdir, "images/{}.png".format(motif.id)), fmt="PNG") # TODO: fix best ID rm.best = "Gimme"#best_id[motif.id] rm.consensus = motif.to_consensus() rm.stars = int(np.mean( [stats[str(motif)][bg].get("stars", 0) for bg in background] ) + 0.5) rm.bg = {} for bg in background: rm.bg[bg] = {} this_stats = stats.get(str(motif), {}).get(bg) # TODO: fix these stats rm.bg[bg]["e"] = "%0.2f" % this_stats.get("enr_at_fpr", 1.0) rm.bg[bg]["p"] = "%0.2f" % this_stats.get("phyper_at_fpr", 1.0) rm.bg[bg]["auc"] = "%0.3f" % this_stats.get("roc_auc", 0.5) rm.bg[bg]["mncp"] = "%0.3f" % this_stats.get("mncp", 1.0) rm.bg[bg]["roc_img"] = {"src": "images/" + os.path.basename(roc_img_file % (motif.id, bg)) + ".png"} rm.bg[bg][u"roc_img_link"] = {u"href": "images/" + os.path.basename(roc_img_file % (motif.id, bg)) + ".png"} rm.histogram_img = {"data":"images/%s_histogram.svg" % motif.id} rm.histogram_link= {"href":"images/%s_histogram.svg" % motif.id} match_id = closest_match[motif.id][0] dbmotifs[match_id].to_img(os.path.join(outdir, "images/{}.png".format(match_id)), fmt="PNG") rm.match_img = {"src": "images/{}.png".format(match_id)} rm.match_id = closest_match[motif.id][0] rm.match_pval = "%0.2e" % closest_match[motif.id][1][-1] report_motifs.append(rm) total_report = os.path.join(outdir, "motif_report.html") star_img = os.path.join(config.get_template_dir(), "star.png") shutil.copyfile(star_img, os.path.join(outdir, "images", "star.png")) env = jinja2.Environment(loader=jinja2.FileSystemLoader([config.get_template_dir()])) template = env.get_template("report_template.jinja.html") # TODO: title result = template.render( motifs=report_motifs, inputfile=inputfile, date=datetime.today().strftime("%d/%m/%Y"), version=__version__, bg_types=list(background.keys())) with open(total_report, "wb") as f: f.write(result.encode('utf-8'))
Create main gimme_motifs output html report.
entailment
def create_denovo_motif_report(inputfile, pwmfile, fgfa, background, locfa, outdir, params, stats=None): """Create text and graphical (.html) motif reports.""" logger.info("creating reports") motifs = read_motifs(pwmfile, fmt="pwm") # ROC plots create_roc_plots(pwmfile, fgfa, background, outdir) # Closest match in database mc = MotifComparer() closest_match = mc.get_closest_match(motifs) if stats is None: stats = {} for bg, bgfa in background.items(): for m, s in calc_stats(motifs, fgfa, bgfa).items(): if m not in stats: stats[m] = {} stats[m][bg] = s stats = add_star(stats) if not params: params = {} cutoff_fpr = params.get('cutoff_fpr', 0.9) lwidth = np.median([len(seq) for seq in Fasta(locfa).seqs]) # Location plots logger.debug("Creating localization plots") for motif in motifs: logger.debug(" {} {}".format(motif.id, motif)) outfile = os.path.join(outdir, "images/{}_histogram.svg".format(motif.id)) motif_localization(locfa, motif, lwidth, outfile, cutoff=cutoff_fpr) # Create reports _create_text_report(inputfile, motifs, closest_match, stats, outdir) _create_graphical_report(inputfile, pwmfile, background, closest_match, outdir, stats)
Create text and graphical (.html) motif reports.
entailment
def axes_off(ax): """Get rid of all axis ticks, lines, etc. """ ax.set_frame_on(False) ax.axes.get_yaxis().set_visible(False) ax.axes.get_xaxis().set_visible(False)
Get rid of all axis ticks, lines, etc.
entailment
def match_plot(plotdata, outfile): """Plot list of motifs with database match and p-value. :param plotdata: list of (motif, dbmotif, pval) tuples """ fig_h = 2 fig_w = 7 nrows = len(plotdata) ncols = 2 fig = plt.figure(figsize=(fig_w, nrows * fig_h)) for i, (motif, dbmotif, pval) in enumerate(plotdata): text = "Motif: %s\nBest match: %s\np-value: %0.2e" % (motif.id, dbmotif.id, pval) grid = ImageGrid(fig, (nrows, ncols, i * 2 + 1), nrows_ncols=(2, 1), axes_pad=0, ) for j in range(2): axes_off(grid[j]) tmp = NamedTemporaryFile(dir=mytmpdir(), suffix=".png") motif.to_img(tmp.name, fmt="PNG", height=6) grid[0].imshow(plt.imread(tmp.name), interpolation="none") tmp = NamedTemporaryFile(dir=mytmpdir(), suffix=".png") dbmotif.to_img(tmp.name, fmt="PNG") grid[1].imshow(plt.imread(tmp.name), interpolation="none") ax = plt.subplot(nrows, ncols, i * 2 + 2) axes_off(ax) ax.text(0, 0.5, text, horizontalalignment='left', verticalalignment='center') plt.savefig(outfile, dpi=300, bbox_inches='tight') plt.close(fig)
Plot list of motifs with database match and p-value. :param plotdata: list of (motif, dbmotif, pval) tuples
entailment
def motif_tree_plot(outfile, tree, data, circle=True, vmin=None, vmax=None, dpi=300): """ Plot a "phylogenetic" tree """ try: from ete3 import Tree, faces, AttrFace, TreeStyle, NodeStyle except ImportError: print("Please install ete3 to use this functionality") sys.exit(1) # Define the tree t, ts = _get_motif_tree(tree, data, circle, vmin, vmax) # Save image t.render(outfile, tree_style=ts, w=100, dpi=dpi, units="mm"); # Remove the bottom (empty) half of the figure if circle: img = Image.open(outfile) size = img.size[0] spacer = 50 img.crop((0,0,size,size/2 + spacer)).save(outfile)
Plot a "phylogenetic" tree
entailment
def check_bed_file(fname): """ Check if the inputfile is a valid bed-file """ if not os.path.exists(fname): logger.error("Inputfile %s does not exist!", fname) sys.exit(1) for i, line in enumerate(open(fname)): if line.startswith("#") or line.startswith("track") or line.startswith("browser"): # comment or BED specific stuff pass else: vals = line.strip().split("\t") if len(vals) < 3: logger.error("Expecting tab-separated values (chromosome<tab>start<tab>end) on line %s of file %s", i + 1, fname) sys.exit(1) try: start, end = int(vals[1]), int(vals[2]) except ValueError: logger.error("No valid integer coordinates on line %s of file %s", i + 1, fname) sys.exit(1) if len(vals) > 3: try: float(vals[3]) except ValueError: pass
Check if the inputfile is a valid bed-file
entailment
def check_denovo_input(inputfile, params): """ Check if an input file is valid, which means BED, narrowPeak or FASTA """ background = params["background"] input_type = determine_file_type(inputfile) if input_type == "fasta": valid_bg = FA_VALID_BGS elif input_type in ["bed", "narrowpeak"]: genome = params["genome"] valid_bg = BED_VALID_BGS if "genomic" in background or "gc" in background: Genome(genome) # is it a valid bed-file etc. check_bed_file(inputfile) # bed-specific, will also work for narrowPeak else: sys.stderr.write("Format of inputfile {} not recognized.\n".format(inputfile)) sys.stderr.write("Input should be FASTA, BED or narrowPeak.\n") sys.stderr.write("See https://genome.ucsc.edu/FAQ/FAQformat.html for specifications.\n") sys.exit(1) for bg in background: if not bg in valid_bg: logger.info("Input type is %s, ignoring background type '%s'", input_type, bg) background = [bg for bg in background if bg in valid_bg] if len(background) == 0: logger.error("No valid backgrounds specified!") sys.exit(1) return input_type, background
Check if an input file is valid, which means BED, narrowPeak or FASTA
entailment
def scan_to_best_match(fname, motifs, ncpus=None, genome=None, score=False): """Scan a FASTA file with motifs. Scan a FASTA file and return a dictionary with the best match per motif. Parameters ---------- fname : str Filename of a sequence file in FASTA format. motifs : list List of motif instances. Returns ------- result : dict Dictionary with motif scanning results. """ # Initialize scanner s = Scanner(ncpus=ncpus) s.set_motifs(motifs) s.set_threshold(threshold=0.0) if genome: s.set_genome(genome) if isinstance(motifs, six.string_types): motifs = read_motifs(motifs) logger.debug("scanning %s...", fname) result = dict([(m.id, []) for m in motifs]) if score: it = s.best_score(fname) else: it = s.best_match(fname) for scores in it: for motif,score in zip(motifs, scores): result[motif.id].append(score) # Close the pool and reclaim memory del s return result
Scan a FASTA file with motifs. Scan a FASTA file and return a dictionary with the best match per motif. Parameters ---------- fname : str Filename of a sequence file in FASTA format. motifs : list List of motif instances. Returns ------- result : dict Dictionary with motif scanning results.
entailment
def set_background(self, fname=None, genome=None, length=200, nseq=10000): """Set the background to use for FPR and z-score calculations. Background can be specified either as a genome name or as the name of a FASTA file. Parameters ---------- fname : str, optional Name of FASTA file to use as background. genome : str, optional Name of genome to use to retrieve random sequences. length : int, optional Length of genomic sequences to retrieve. The default is 200. nseq : int, optional Number of genomic sequences to retrieve. """ length = int(length) if genome and fname: raise ValueError("Provide either a genome or a filename for the background, not both.") if fname: if not os.path.exists(fname): raise IOError("Background file {} does not exist!".format(fname)) self.background = Fasta(fname) self.background_hash = file_checksum(fname) return if not genome: if self.genome: genome = self.genome logger.info("Using default background: genome {} with length {}".format( genome, length)) else: raise ValueError("Need either genome or filename for background.") logger.info("Using background: genome {} with length {}".format(genome, length)) with Cache(CACHE_DIR) as cache: self.background_hash = "{}\{}".format(genome, int(length)) fa = cache.get(self.background_hash) if not fa: fa = RandomGenomicFasta(genome, length, nseq) cache.set(self.background_hash, fa) self.background = fa
Set the background to use for FPR and z-score calculations. Background can be specified either as a genome name or as the name of a FASTA file. Parameters ---------- fname : str, optional Name of FASTA file to use as background. genome : str, optional Name of genome to use to retrieve random sequences. length : int, optional Length of genomic sequences to retrieve. The default is 200. nseq : int, optional Number of genomic sequences to retrieve.
entailment
def set_threshold(self, fpr=None, threshold=None): """Set motif scanning threshold based on background sequences. Parameters ---------- fpr : float, optional Desired FPR, between 0.0 and 1.0. threshold : float or str, optional Desired motif threshold, expressed as the fraction of the difference between minimum and maximum score of the PWM. Should either be a float between 0.0 and 1.0 or a filename with thresholds as created by 'gimme threshold'. """ if threshold and fpr: raise ValueError("Provide either fpr or threshold, not both.") if fpr: fpr = float(fpr) if not (0.0 < fpr < 1.0): raise ValueError("Parameter fpr should be between 0 and 1") if not self.motifs: raise ValueError("please run set_motifs() first") thresholds = {} motifs = read_motifs(self.motifs) if threshold is not None: self.threshold = parse_threshold_values(self.motifs, threshold) return if not self.background: try: self.set_background() except Exception: raise ValueError("please run set_background() first") seqs = self.background.seqs with Cache(CACHE_DIR) as cache: scan_motifs = [] for motif in motifs: k = "{}|{}|{:.4f}".format(motif.hash(), self.background_hash, fpr) threshold = cache.get(k) if threshold is None: scan_motifs.append(motif) else: if np.isclose(threshold, motif.pwm_max_score()): thresholds[motif.id] = None elif np.isclose(threshold, motif.pwm_min_score()): thresholds[motif.id] = 0.0 else: thresholds[motif.id] = threshold if len(scan_motifs) > 0: logger.info("Determining FPR-based threshold") for motif, threshold in self._threshold_from_seqs(scan_motifs, seqs, fpr): k = "{}|{}|{:.4f}".format(motif.hash(), self.background_hash, fpr) cache.set(k, threshold) if np.isclose(threshold, motif.pwm_max_score()): thresholds[motif.id] = None elif np.isclose(threshold, motif.pwm_min_score()): thresholds[motif.id] = 0.0 else: thresholds[motif.id] = threshold self.threshold_str = "{}_{}_{}".format(fpr, threshold, self.background_hash) self.threshold = thresholds
Set motif scanning threshold based on background sequences. Parameters ---------- fpr : float, optional Desired FPR, between 0.0 and 1.0. threshold : float or str, optional Desired motif threshold, expressed as the fraction of the difference between minimum and maximum score of the PWM. Should either be a float between 0.0 and 1.0 or a filename with thresholds as created by 'gimme threshold'.
entailment
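Taken together, set_background() and set_threshold() imply a setup order for the scanner: motifs first, then genome and background, then threshold. A hedged sketch of that order; the module path, file name, and genome name are placeholders:

from gimmemotifs.scanner import Scanner  # assumed module path

s = Scanner(ncpus=4)
s.set_motifs("motifs.pwm")
s.set_genome("hg38")
s.set_background(genome="hg38", length=200, nseq=10000)
s.set_threshold(fpr=0.01)  # FPR-based cutoff, estimated on the background

# count() yields one list of per-motif match counts per input region
for counts in s.count(["chr1:100-300", "chr2:500-700"]):
    print(counts)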
def count(self, seqs, nreport=100, scan_rc=True): """ count the number of matches above the cutoff returns an iterator of lists containing integer counts """ for matches in self.scan(seqs, nreport, scan_rc): counts = [len(m) for m in matches] yield counts
count the number of matches above the cutoff returns an iterator of lists containing integer counts
entailment
def total_count(self, seqs, nreport=100, scan_rc=True): """ count the number of matches above the cutoff returns a numpy array with the total count per motif, summed over all sequences """ count_table = [counts for counts in self.count(seqs, nreport, scan_rc)] return np.sum(np.array(count_table), 0)
count the number of matches above the cutoff returns a numpy array with the total count per motif, summed over all sequences
entailment
def best_score(self, seqs, scan_rc=True, normalize=False): """ give the score of the best match of each motif in each sequence returns an iterator of lists containing floats """ self.set_threshold(threshold=0.0) if normalize and len(self.meanstd) == 0: self.set_meanstd() means = np.array([self.meanstd[m][0] for m in self.motif_ids]) stds = np.array([self.meanstd[m][1] for m in self.motif_ids]) for matches in self.scan(seqs, 1, scan_rc): scores = np.array([sorted(m, key=lambda x: x[0])[0][0] for m in matches if len(m) > 0]) if normalize: scores = (scores - means) / stds yield scores
give the score of the best match of each motif in each sequence returns an iterator of lists containing floats
entailment
def best_match(self, seqs, scan_rc=True): """ give the best match of each motif in each sequence returns an iterator of nested lists containing tuples: (score, position, strand) """ self.set_threshold(threshold=0.0) for matches in self.scan(seqs, 1, scan_rc): yield [m[0] for m in matches]
give the best match of each motif in each sequence returns an iterator of nested lists containing tuples: (score, position, strand)
entailment
def scan(self, seqs, nreport=100, scan_rc=True, normalize=False): """ scan a set of regions / sequences """ if not self.threshold: sys.stderr.write( "Using default threshold of 0.95. " "This is likely not optimal!\n" ) self.set_threshold(threshold=0.95) seqs = as_fasta(seqs, genome=self.genome) it = self._scan_sequences(seqs.seqs, nreport, scan_rc) if normalize: if len(self.meanstd) == 0: self.set_meanstd() mean_std = [self.meanstd.get(m_id) for m_id in self.motif_ids] means = [x[0] for x in mean_std] stds = [x[1] for x in mean_std] for result in it: if normalize: zresult = [] for i,mrow in enumerate(result): mrow = [((x[0] - means[i]) / stds[i], x[1], x[2]) for x in mrow] zresult.append(mrow) yield zresult else: yield result
scan a set of regions / sequences
entailment
def roc(args): """ Calculate ROC AUC and other metrics and optionally plot ROC curve.""" outputfile = args.outfile # Default extension for image if outputfile and not outputfile.endswith(".png"): outputfile += ".png" motifs = read_motifs(args.pwmfile, fmt="pwm") ids = [] if args.ids: ids = args.ids.split(",") else: ids = [m.id for m in motifs] motifs = [m for m in motifs if (m.id in ids)] stats = [ "phyper_at_fpr", "roc_auc", "pr_auc", "enr_at_fpr", "recall_at_fdr", "roc_values", "matches_at_fpr", ] plot_x = [] plot_y = [] legend = [] f_out = sys.stdout if args.outdir: if not os.path.exists(args.outdir): os.makedirs(args.outdir) f_out = open(args.outdir + "/gimme.roc.report.txt", "w") # Print the metrics f_out.write("Motif\t# matches\t# matches background\tP-value\tlog10 P-value\tROC AUC\tPR AUC\tEnr. at 1% FPR\tRecall at 10% FDR\n") for motif_stats in calc_stats_iterator(motifs, args.sample, args.background, genome=args.genome, stats=stats, ncpus=args.ncpus): for motif in motifs: if str(motif) in motif_stats: if outputfile: x, y = motif_stats[str(motif)]["roc_values"] plot_x.append(x) plot_y.append(y) legend.append(motif.id) log_pvalue = np.inf if motif_stats[str(motif)]["phyper_at_fpr"] > 0: log_pvalue = -np.log10(motif_stats[str(motif)]["phyper_at_fpr"]) f_out.write("{}\t{:d}\t{:d}\t{:.2e}\t{:.3f}\t{:.3f}\t{:.3f}\t{:.2f}\t{:0.4f}\n".format( motif.id, motif_stats[str(motif)]["matches_at_fpr"][0], motif_stats[str(motif)]["matches_at_fpr"][1], motif_stats[str(motif)]["phyper_at_fpr"], log_pvalue, motif_stats[str(motif)]["roc_auc"], motif_stats[str(motif)]["pr_auc"], motif_stats[str(motif)]["enr_at_fpr"], motif_stats[str(motif)]["recall_at_fdr"], )) if args.outdir: f_out.close() html_report( args.outdir, args.outdir + "/gimme.roc.report.txt", args.pwmfile, 0.01, ) # Plot the ROC curve if outputfile: roc_plot(outputfile, plot_x, plot_y, ids=legend)
Calculate ROC AUC and other metrics and optionally plot ROC curve.
entailment
def ssd(p1, p2): """Calculates motif position similarity based on sum of squared distances. Parameters ---------- p1 : list Motif position 1. p2 : list Motif position 2. Returns ------- score : float """ return 2 - np.sum([(a-b)**2 for a,b in zip(p1,p2)])
Calculates motif position similarity based on sum of squared distances. Parameters ---------- p1 : list Motif position 1. p2 : list Motif position 2. Returns ------- score : float
entailment
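A worked example of ssd(): identical positions score the maximum of 2, and the score drops by the summed squared differences between the two position vectors:

import numpy as np

def ssd(p1, p2):
    return 2 - np.sum([(a - b) ** 2 for a, b in zip(p1, p2)])

p1 = [0.25, 0.25, 0.25, 0.25]  # uniform position
p2 = [0.40, 0.20, 0.20, 0.20]
# squared differences: 0.15**2 + 3 * 0.05**2 = 0.0225 + 0.0075 = 0.03
print(ssd(p1, p1))  # 2.0
print(ssd(p1, p2))  # 1.97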
def seqcor(m1, m2, seq=None): """Calculates motif similarity based on Pearson correlation of scores. Based on Kielbasa (2015) and Grau (2015). Scores are calculated based on scanning a de Bruijn sequence of 7-mers. This sequence is taken from ShortCAKE (Orenstein & Shamir, 2015). Optionally another sequence can be given as an argument. Parameters ---------- m1 : Motif instance Motif 1 to compare. m2 : Motif instance Motif 2 to compare. seq : str, optional Sequence to use for scanning instead of k=7 de Bruijn sequence. Returns ------- score, position, strand """ l1 = len(m1) l2 = len(m2) l = max(l1, l2) if seq is None: seq = RCDB L = len(seq) # Scan RC de Bruijn sequence result1 = pfmscan(seq, m1.pwm, m1.pwm_min_score(), len(seq), False, True) result2 = pfmscan(seq, m2.pwm, m2.pwm_min_score(), len(seq), False, True) # Reverse complement of motif 2 result3 = pfmscan(seq, m2.rc().pwm, m2.rc().pwm_min_score(), len(seq), False, True) result1 = np.array(result1) result2 = np.array(result2) result3 = np.array(result3) # Return maximum correlation c = [] for i in range(l1 - l1 // 3): c.append([1 - distance.correlation(result1[:L-l-i],result2[i:L-l]), i, 1]) c.append([1 - distance.correlation(result1[:L-l-i],result3[i:L-l]), i, -1]) for i in range(l2 - l2 // 3): c.append([1 - distance.correlation(result1[i:L-l],result2[:L-l-i]), -i, 1]) c.append([1 - distance.correlation(result1[i:L-l],result3[:L-l-i]), -i, -1]) return sorted(c, key=lambda x: x[0])[-1]
Calculates motif similarity based on Pearson correlation of scores. Based on Kielbasa (2015) and Grau (2015). Scores are calculated based on scanning a de Bruijn sequence of 7-mers. This sequence is taken from ShortCAKE (Orenstein & Shamir, 2015). Optionally another sequence can be given as an argument. Parameters ---------- m1 : Motif instance Motif 1 to compare. m2 : Motif instance Motif 2 to compare. seq : str, optional Sequence to use for scanning instead of k=7 de Bruijn sequence. Returns ------- score, position, strand
entailment
def compare_motifs(self, m1, m2, match="total", metric="wic", combine="mean", pval=False): """Compare two motifs. The similarity metric can be any of seqcor, pcc, ed, distance, wic, chisq, akl or ssd. If match is 'total' the similarity score is calculated for the whole match, including positions that are not present in both motifs. If match is 'partial' or 'subtotal', only the matching positions are used to calculate the score. The scores of individual positions are combined using either the mean or the sum. Note that the match and combine parameters have no effect on the seqcor similarity metric. Parameters ---------- m1 : Motif instance Motif instance 1. m2 : Motif instance Motif instance 2. match : str, optional Match can be "partial", "subtotal" or "total". Not all metrics use this. metric : str, optional Distance metric. combine : str, optional Combine positional scores using "mean" or "sum". Not all metrics use this. pval : bool, optional Calculate p-value of match. Returns ------- score, position, strand """ if metric == "seqcor": return seqcor(m1, m2) elif match == "partial": if pval: return self.pvalue(m1, m2, "total", metric, combine, self.max_partial(m1.pwm, m2.pwm, metric, combine)) elif metric in ["pcc", "ed", "distance", "wic", "chisq", "ssd"]: return self.max_partial(m1.pwm, m2.pwm, metric, combine) else: return self.max_partial(m1.pfm, m2.pfm, metric, combine) elif match == "total": if pval: return self.pvalue(m1, m2, match, metric, combine, self.max_total(m1.pwm, m2.pwm, metric, combine)) elif metric in ["pcc", 'akl']: # Slightly randomize the weight matrix return self.max_total(m1.wiggle_pwm(), m2.wiggle_pwm(), metric, combine) elif metric in ["ed", "distance", "wic", "chisq", "pcc", "ssd"]: return self.max_total(m1.pwm, m2.pwm, metric, combine) else: return self.max_total(m1.pfm, m2.pfm, metric, combine) elif match == "subtotal": if metric in ["pcc", "ed", "distance", "wic", "chisq", "ssd"]: return self.max_subtotal(m1.pwm, m2.pwm, metric, combine) else: return self.max_subtotal(m1.pfm, m2.pfm, metric, combine)
Compare two motifs. The similarity metric can be any of seqcor, pcc, ed, distance, wic, chisq, akl or ssd. If match is 'total' the similarity score is calculated for the whole match, including positions that are not present in both motifs. If match is 'partial' or 'subtotal', only the matching positions are used to calculate the score. The scores of individual positions are combined using either the mean or the sum. Note that the match and combine parameters have no effect on the seqcor similarity metric. Parameters ---------- m1 : Motif instance Motif instance 1. m2 : Motif instance Motif instance 2. match : str, optional Match can be "partial", "subtotal" or "total". Not all metrics use this. metric : str, optional Distance metric. combine : str, optional Combine positional scores using "mean" or "sum". Not all metrics use this. pval : bool, optional Calculate p-value of match. Returns ------- score, position, strand
entailment
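A hedged usage sketch of compare_motifs() via MotifComparer; the module paths are assumptions and motif_from_consensus is used here only to build two toy motifs:

from gimmemotifs.comparison import MotifComparer   # assumed module paths
from gimmemotifs.motif import motif_from_consensus

m1 = motif_from_consensus("TGASTCA")  # AP-1-like consensus
m2 = motif_from_consensus("TGACTCA")

mc = MotifComparer()
score, pos, strand = mc.compare_motifs(m1, m2, match="total",
                                       metric="seqcor", combine="mean")
print(score, pos, strand)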
def get_all_scores(self, motifs, dbmotifs, match, metric, combine, pval=False, parallel=True, trim=None, ncpus=None): """Pairwise comparison of a set of motifs compared to reference motifs. Parameters ---------- motifs : list List of Motif instances. dbmotifs : list List of Motif instances. match : str Match can be "partial", "subtotal" or "total". Not all metrics use this. metric : str Distance metric. combine : str Combine positional scores using "mean" or "sum". Not all metrics use this. pval : bool, optional Calculate p-value of match. parallel : bool, optional Use multiprocessing for parallel execution. True by default. trim : float or None If a float value is specified, motifs are trimmed using this IC cutoff before comparison. ncpus : int or None Specifies the number of cores to use for parallel execution. Returns ------- scores : dict Dictionary with scores. """ # trim motifs first, if specified if trim: for m in motifs: m.trim(trim) for m in dbmotifs: m.trim(trim) # hash of result scores scores = {} if parallel: # Divide the job into big chunks, to keep parallel overhead to minimum # Number of chunks = number of processors available if ncpus is None: ncpus = int(MotifConfig().get_default_params()["ncpus"]) pool = Pool(processes=ncpus, maxtasksperchild=1000) batch_len = len(dbmotifs) // ncpus if batch_len <= 0: batch_len = 1 jobs = [] for i in range(0, len(dbmotifs), batch_len): # submit jobs to the job server p = pool.apply_async(_get_all_scores, args=(self, motifs, dbmotifs[i: i + batch_len], match, metric, combine, pval)) jobs.append(p) pool.close() for job in jobs: # Get the job result result = job.get() # and update the result score for m1, v in result.items(): for m2, s in v.items(): if m1 not in scores: scores[m1] = {} scores[m1][m2] = s pool.join() else: # Do the whole thing at once if we don't want parallel scores = _get_all_scores(self, motifs, dbmotifs, match, metric, combine, pval) return scores
Pairwise comparison of a set of motifs compared to reference motifs. Parameters ---------- motifs : list List of Motif instances. dbmotifs : list List of Motif instances. match : str Match can be "partial", "subtotal" or "total". Not all metrics use this. metric : str Distance metric. combine : str Combine positional scores using "mean" or "sum". Not all metrics use this. pval : bool, optional Calculate p-value of match. parallel : bool, optional Use multiprocessing for parallel execution. True by default. trim : float or None If a float value is specified, motifs are trimmed using this IC cutoff before comparison. ncpus : int or None Specifies the number of cores to use for parallel execution. Returns ------- scores : dict Dictionary with scores.
entailment
def get_closest_match(self, motifs, dbmotifs=None, match="partial", metric="wic",combine="mean", parallel=True, ncpus=None): """Return best match in database for motifs. Parameters ---------- motifs : list or str Filename of motifs or list of motifs. dbmotifs : list or str, optional Database motifs, default will be used if not specified. match : str, optional metric : str, optional combine : str, optional ncpus : int, optional Number of threads to use. Returns ------- closest_match : dict """ if dbmotifs is None: pwm = self.config.get_default_params()["motif_db"] pwmdir = self.config.get_motif_dir() dbmotifs = os.path.join(pwmdir, pwm) motifs = parse_motifs(motifs) dbmotifs = parse_motifs(dbmotifs) dbmotif_lookup = dict([(m.id, m) for m in dbmotifs]) scores = self.get_all_scores(motifs, dbmotifs, match, metric, combine, parallel=parallel, ncpus=ncpus) for motif in scores: scores[motif] = sorted( scores[motif].items(), key=lambda x:x[1][0] )[-1] for motif in motifs: dbmotif, score = scores[motif.id] pval, pos, orient = self.compare_motifs( motif, dbmotif_lookup[dbmotif], match, metric, combine, True) scores[motif.id] = [dbmotif, (list(score) + [pval])] return scores
Return best match in database for motifs. Parameters ---------- motifs : list or str Filename of motifs or list of motifs. dbmotifs : list or str, optional Database motifs, default will be used if not specified. match : str, optional metric : str, optional combine : str, optional ncpus : int, optional Number of threads to use. Returns ------- closest_match : dict
entailment
def list_regions(service): """ List regions for the service """ for region in service.regions(): print('%(name)s: %(endpoint)s' % { 'name': region.name, 'endpoint': region.endpoint, })
List regions for the service
entailment
def elb_table(balancers): """ Return a nice-looking table of information from a list of load balancers """ t = prettytable.PrettyTable(['Name', 'DNS', 'Ports', 'Zones', 'Created']) t.align = 'l' for b in balancers: ports = ['%s: %s -> %s' % (l[2], l[0], l[1]) for l in b.listeners] ports = '\n'.join(ports) zones = '\n'.join(b.availability_zones) t.add_row([b.name, b.dns_name, ports, zones, b.created_time]) return t
Return a nice-looking table of information from a list of load balancers
entailment
def ec2_table(instances): """ Return a nice-looking table of information from a list of instances """ t = prettytable.PrettyTable(['ID', 'State', 'Monitored', 'Image', 'Name', 'Type', 'SSH key', 'DNS']) t.align = 'l' for i in instances: name = i.tags.get('Name', '') t.add_row([i.id, i.state, i.monitored, i.image_id, name, i.instance_type, i.key_name, i.dns_name]) return t
Return a nice-looking table of information from a list of instances
entailment
def ec2_image_table(images): """ Return a nice-looking table of information from images """ t = prettytable.PrettyTable(['ID', 'State', 'Name', 'Owner', 'Root device', 'Is public', 'Description']) t.align = 'l' for i in images: t.add_row([i.id, i.state, i.name, i.ownerId, i.root_device_type, i.is_public, i.description]) return t
Return a nice-looking table of information from images
entailment
def ec2_fab(service, args): """ Run Fabric commands against EC2 instances """ instance_ids = args.instances instances = service.list(elb=args.elb, instance_ids=instance_ids) hosts = service.resolve_hosts(instances) fab.env.hosts = hosts fab.env.key_filename = settings.get('SSH', 'KEY_FILE') fab.env.user = settings.get('SSH', 'USER', getpass.getuser()) fab.env.parallel = True fabfile = find_fabfile(args.file) if not fabfile: print('Couldn\'t find any fabfiles!') return fab.env.real_fabfile = fabfile docstring, callables, default = load_fabfile(fabfile) fab_state.commands.update(callables) commands_to_run = parse_arguments(args.methods) for name, args, kwargs, arg_hosts, arg_roles, arg_exclude_hosts in commands_to_run: fab.execute(name, hosts=arg_hosts, roles=arg_roles, exclude_hosts=arg_exclude_hosts, *args, **kwargs)
Run Fabric commands against EC2 instances
entailment
def main(): """ AWS support script's main method """ p = argparse.ArgumentParser(description='Manage Amazon AWS services', prog='aws') p.add_argument('--version', action='version', version=__version__) subparsers = p.add_subparsers(help='Select Amazon AWS service to use') # Auto Scaling as_service = subparsers.add_parser('as', help='Amazon Auto Scaling') as_subparsers = as_service.add_subparsers(help='Perform action') as_service_list = as_subparsers.add_parser('list', help='List Auto Scaling groups') as_service_list.set_defaults(func=as_list_handler) # Elastic Cloud Computing ec2_service = subparsers.add_parser('ec2', help='Amazon Elastic Compute Cloud') ec2_subparsers = ec2_service.add_subparsers(help='Perform action') ec2_service_list = ec2_subparsers.add_parser('list', help='List items') ec2_service_list.add_argument('--elb', '-e', help='Filter instances inside this ELB instance') ec2_service_list.add_argument('--instances', '-i', nargs='*', metavar=('id', 'id'), help='List of instance IDs to use as filter') ec2_service_list.add_argument('--type', default='instances', choices=['instances', 'regions', 'images'], help='List items of this type') ec2_service_list.set_defaults(func=ec2_list_handler) ec2_service_fab = ec2_subparsers.add_parser('fab', help='Run Fabric commands') ec2_service_fab.add_argument('--elb', '-e', help='Run against EC2 instances for this ELB') ec2_service_fab.add_argument('--instances', '-i', nargs='*', metavar=('id', 'id'), help='List of instance IDs to use as filter') ec2_service_fab.add_argument('--file', '-f', nargs='+', help='Define fabfile to use') ec2_service_fab.add_argument('methods', metavar='method:arg1,arg2=val2,host=foo,hosts=\'h1;h2\',', nargs='+', help='Specify one or more methods to execute.') ec2_service_fab.set_defaults(func=ec2_fab_handler) ec2_service_create = ec2_subparsers.add_parser('create', help='Create and start new instances') ec2_service_create.set_defaults(func=ec2_create_handler) ec2_service_start = ec2_subparsers.add_parser('start', help='Start existing instances') ec2_service_start.add_argument('instance', nargs='+', help='ID of an instance to start') ec2_service_start.set_defaults(func=ec2_start_handler) ec2_service_stop = ec2_subparsers.add_parser('stop', help='Stop instances') ec2_service_stop.add_argument('instance', nargs='+', help='ID of an instance to stop') ec2_service_stop.add_argument('--force', '-f', action='store_true', help='Force stop') ec2_service_stop.set_defaults(func=ec2_stop_handler) ec2_service_terminate = ec2_subparsers.add_parser('terminate', help='Terminate instances') ec2_service_terminate.add_argument('instance', nargs='+', help='ID of an instance to terminate') ec2_service_terminate.set_defaults(func=ec2_terminate_handler) ec2_service_images = ec2_subparsers.add_parser('images', help='List AMI images') ec2_service_images.add_argument('image', nargs='*', help='Image ID to use as filter') ec2_service_images.set_defaults(func=ec2_images_handler) ec2_service_create_image = ec2_subparsers.add_parser('create-image', help='Create AMI image from instance') ec2_service_create_image.add_argument('instance', help='ID of an instance to image') ec2_service_create_image.add_argument('name', help='The name of the image') ec2_service_create_image.add_argument('--description', '-d', help='Optional description for the image') ec2_service_create_image.add_argument('--noreboot', action='store_true', default=False, help='Do not shut down the instance before creating the image. Note: system integrity might suffer if used.') ec2_service_create_image.set_defaults(func=ec2_create_image_handler) # Elastic Load Balancing elb_service = subparsers.add_parser('elb', help='Amazon Elastic Load Balancing') elb_subparsers = elb_service.add_subparsers(help='Perform action') elb_service_list = elb_subparsers.add_parser('list', help='List items') elb_service_list.add_argument('--type', default='balancers', choices=['balancers', 'regions'], help='List items of this type') elb_service_list.set_defaults(func=elb_list_handler) elb_service_instances = elb_subparsers.add_parser('instances', help='List registered instances') elb_service_instances.add_argument('balancer', help='Name of the Load Balancer') elb_service_instances.set_defaults(func=elb_instances_handler) elb_service_register = elb_subparsers.add_parser('register', help='Register instances to balancer') elb_service_register.add_argument('balancer', help='Name of the load balancer') elb_service_register.add_argument('instance', nargs='+', help='ID of an instance to register') elb_service_register.set_defaults(func=elb_register_handler) elb_service_deregister = elb_subparsers.add_parser('deregister', help='Deregister instances of balancer') elb_service_deregister.add_argument('balancer', help='Name of the Load Balancer') elb_service_deregister.add_argument('instance', nargs='+', help='ID of an instance to deregister') elb_service_deregister.set_defaults(func=elb_deregister_handler) elb_service_zones = elb_subparsers.add_parser('zones', help='Enable or disable availability zones') elb_service_zones.add_argument('balancer', help='Name of the Load Balancer') elb_service_zones.add_argument('zone', nargs='+', help='Name of the availability zone') elb_service_zones.add_argument('status', help='Disable or enable zones', choices=['enable', 'disable']) elb_service_zones.set_defaults(func=elb_zones_handler) elb_service_delete = elb_subparsers.add_parser('delete', help='Delete Load Balancer') elb_service_delete.add_argument('balancer', help='Name of the Load Balancer') elb_service_delete.set_defaults(func=elb_delete_handler) # elb_service_create = elb_subparsers.add_parser('create', help='Create new Load Balancer') # elb_service_delete = elb_subparsers.add_parser('delete', help='Delete Load Balancer') # elb_service_register = elb_subparsers.add_parser('register', help='Register EC2 instance') # elb_service_zone = elb_subparsers.add_parser('zone', help='Enable or disable region') arguments = p.parse_args() arguments.func(p, arguments)
AWS support script's main method
entailment
def buffer_stream(stream, buffer_size, partial=False, axis=None): '''Buffer "data" from a stream into one data object. Parameters ---------- stream : stream The stream to buffer buffer_size : int > 0 The number of examples to retain per batch. partial : bool, default=False If True, yield a final partial batch on under-run. axis : int or None If `None` (default), concatenate data along a new 0th axis. Otherwise, concatenation is performed along the specified axis. This is primarily useful when combining data that already has a dimension for buffer index, e.g., when buffering buffers. Yields ------ batch A batch of size at most `buffer_size` Raises ------ DataError If the stream contains items that are not data-like. ''' data = [] count = 0 for item in stream: data.append(item) count += 1 if count < buffer_size: continue try: yield __stack_data(data, axis=axis) except (TypeError, AttributeError): raise DataError("Malformed data stream: {}".format(data)) finally: data = [] count = 0 if data and partial: yield __stack_data(data, axis=axis)
Buffer "data" from a stream into one data object. Parameters ---------- stream : stream The stream to buffer buffer_size : int > 0 The number of examples to retain per batch. partial : bool, default=False If True, yield a final partial batch on under-run. axis : int or None If `None` (default), concatenate data along a new 0th axis. Otherwise, concatenation is performed along the specified axis. This is primarily useful when combining data that already has a dimension for buffer index, e.g., when buffering buffers. Yields ------ batch A batch of size at most `buffer_size` Raises ------ DataError If the stream contains items that are not data-like.
entailment
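A minimal usage sketch, assuming pescador-style data objects (dicts of np.ndarray); the `noise_stream` generator and its shapes are invented for illustration:

    import numpy as np

    def noise_stream():
        # hypothetical stream: one sample per data object, shape (1, 3)
        while True:
            yield {'X': np.random.randn(1, 3)}

    batches = buffer_stream(noise_stream(), buffer_size=16)
    batch = next(batches)
    print(batch['X'].shape)  # (16, 1, 3): 16 samples stacked along a new 0th axis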
def tuples(stream, *keys): """Reformat data as tuples. Parameters ---------- stream : iterable Stream of data objects. *keys : strings Keys to use for ordering data. Yields ------ items : tuple of np.ndarrays Data object reformatted as a tuple. Raises ------ DataError If the stream contains items that are not data-like. KeyError If a data object does not contain the requested key. """ if not keys: raise PescadorError('Unable to generate tuples from ' 'an empty item set') for data in stream: try: yield tuple(data[key] for key in keys) except TypeError: raise DataError("Malformed data stream: {}".format(data))
Reformat data as tuples. Parameters ---------- stream : iterable Stream of data objects. *keys : strings Keys to use for ordering data. Yields ------ items : tuple of np.ndarrays Data object reformatted as a tuple. Raises ------ DataError If the stream contains items that are not data-like. KeyError If a data object does not contain the requested key.
entailment
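For illustration, a short sketch with made-up data objects, showing how the keys select and order the tuple fields:

    import numpy as np

    stream = iter([{'X': np.array([1, 2]), 'Y': np.array([0])},
                   {'X': np.array([3, 4]), 'Y': np.array([1])}])
    for x, y in tuples(stream, 'X', 'Y'):
        print(x, y)  # [1 2] [0], then [3 4] [1]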
def keras_tuples(stream, inputs=None, outputs=None): """Reformat data objects as keras-compatible tuples. For more detail: https://keras.io/models/model/#fit Parameters ---------- stream : iterable Stream of data objects. inputs : string or iterable of strings, default=None Keys to use for ordered input data. If not specified, returns `None` in its place. outputs : string or iterable of strings, default=None Keys to use for ordered output data. If not specified, returns `None` in its place. Yields ------ x : np.ndarray, list of np.ndarray, or None If `inputs` is a string, `x` is a single np.ndarray. If `inputs` is an iterable of strings, `x` is a list of np.ndarrays. If `inputs` is a null type, `x` is None. y : np.ndarray, list of np.ndarray, or None If `outputs` is a string, `y` is a single np.ndarray. If `outputs` is an iterable of strings, `y` is a list of np.ndarrays. If `outputs` is a null type, `y` is None. Raises ------ DataError If the stream contains items that are not data-like. """ flatten_inputs, flatten_outputs = False, False if inputs and isinstance(inputs, six.string_types): inputs = [inputs] flatten_inputs = True if outputs and isinstance(outputs, six.string_types): outputs = [outputs] flatten_outputs = True inputs, outputs = (inputs or []), (outputs or []) if not inputs + outputs: raise PescadorError('At least one key must be given for ' '`inputs` or `outputs`') for data in stream: try: x = list(data[key] for key in inputs) or None if len(inputs) == 1 and flatten_inputs: x = x[0] y = list(data[key] for key in outputs) or None if len(outputs) == 1 and flatten_outputs: y = y[0] yield (x, y) except TypeError: raise DataError("Malformed data stream: {}".format(data))
Reformat data objects as keras-compatible tuples. For more detail: https://keras.io/models/model/#fit Parameters ---------- stream : iterable Stream of data objects. inputs : string or iterable of strings, default=None Keys to use for ordered input data. If not specified, returns `None` in its place. outputs : string or iterable of strings, default=None Keys to use for ordered output data. If not specified, returns `None` in its place. Yields ------ x : np.ndarray, list of np.ndarray, or None If `inputs` is a string, `x` is a single np.ndarray. If `inputs` is an iterable of strings, `x` is a list of np.ndarrays. If `inputs` is a null type, `x` is None. y : np.ndarray, list of np.ndarray, or None If `outputs` is a string, `y` is a single np.ndarray. If `outputs` is an iterable of strings, `y` is a list of np.ndarrays. If `outputs` is a null type, `y` is None. Raises ------ DataError If the stream contains items that are not data-like.
entailment
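A rough sketch of the string vs. iterable behavior described above (sample data invented):

    import numpy as np

    stream = iter([{'X': np.zeros((1, 10)), 'Y': np.array([1])}])
    x, y = next(keras_tuples(stream, inputs='X', outputs='Y'))
    # inputs/outputs were strings, so x and y are single ndarrays,
    # matching keras' fit(x, y) convention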
def location(args): """ Creates histogram of motif location. Parameters ---------- args : argparse object Command line arguments. """ fastafile = args.fastafile pwmfile = args.pwmfile lwidth = args.width if not lwidth: f = Fasta(fastafile) lwidth = len(f.items()[0][1]) f = None jobs = [] motifs = pwmfile_to_motifs(pwmfile) ids = [motif.id for motif in motifs] if args.ids: ids = args.ids.split(",") n_cpus = int(MotifConfig().get_default_params()["ncpus"]) pool = Pool(processes=n_cpus, maxtasksperchild=1000) for motif in motifs: if motif.id in ids: outfile = os.path.join("%s_histogram" % motif.id) jobs.append( pool.apply_async( motif_localization, (fastafile,motif,lwidth,outfile, args.cutoff) )) for job in jobs: job.get()
Creates histogram of motif location. Parameters ---------- args : argparse object Command line arguments.
entailment
def which(fname): """Find location of executable.""" if "PATH" not in os.environ or not os.environ["PATH"]: path = os.defpath else: path = os.environ["PATH"] for p in [fname] + [os.path.join(x, fname) for x in path.split(os.pathsep)]: p = os.path.abspath(p) if os.access(p, os.X_OK) and not os.path.isdir(p): return p p = sp.Popen("locate %s" % fname, shell=True, stdout=sp.PIPE, stderr=sp.PIPE) (stdout, stderr) = p.communicate() if not stderr: for p in stdout.decode().split("\n"): if (os.path.basename(p) == fname) and ( os.access(p, os.X_OK)) and ( not os.path.isdir(p)): return p
Find location of executable.
entailment
def find_by_ext(dirname, ext): """Find all files in a directory by extension.""" # List all files in the directory try: files = os.listdir(dirname) except OSError: if os.path.exists(dirname): cmd = "find {0} -maxdepth 1 -name \"*\"".format(dirname) p = sp.Popen(cmd, shell=True, stdout=sp.PIPE, stderr=sp.PIPE) stdout, _stderr = p.communicate() files = [os.path.basename(fname) for fname in stdout.decode().splitlines()] else: raise retfiles = [os.path.join(dirname, fname) for fname in files if os.path.splitext(fname)[-1] in ext] return retfiles
Find all files in a directory by extension.
entailment
def default_motifs(): """Return list of Motif instances from default motif database.""" config = MotifConfig() d = config.get_motif_dir() m = config.get_default_params()['motif_db'] if not d or not m: raise ValueError("default motif database not configured") fname = os.path.join(d, m) with open(fname) as f: motifs = read_motifs(f) return motifs
Return list of Motif instances from default motif database.
entailment
def motif_from_align(align): """Convert alignment to motif. Converts a list with sequences to a motif. Sequences should be the same length. Parameters ---------- align : list List with sequences (A,C,G,T). Returns ------- m : Motif instance Motif created from the aligned sequences. """ width = len(align[0]) nucs = {"A":0,"C":1,"G":2,"T":3} pfm = [[0 for _ in range(4)] for _ in range(width)] for row in align: for i in range(len(row)): pfm[i][nucs[row[i]]] += 1 m = Motif(pfm) m.align = align[:] return m
Convert alignment to motif. Converts a list with sequences to a motif. Sequences should be the same length. Parameters ---------- align : list List with sequences (A,C,G,T). Returns ------- m : Motif instance Motif created from the aligned sequences.
entailment
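A small worked example (alignment invented): four 4-mers yield a 4-position PFM with per-column nucleotide counts.

    m = motif_from_align(["CATG", "CATG", "CACG", "CATG"])
    # column 3 saw one C and three Ts, so m.pfm[2] == [0, 1, 0, 3]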
def motif_from_consensus(cons, n=12): """Convert consensus sequence to motif. Converts a consensus sequence using the nucleotide IUPAC alphabet to a motif. Parameters ---------- cons : str Consensus sequence using the IUPAC alphabet. n : int , optional Count used to convert the sequence to a PFM. Returns ------- m : Motif instance Motif created from the consensus. """ width = len(cons) nucs = {"A":0,"C":1,"G":2,"T":3} pfm = [[0 for _ in range(4)] for _ in range(width)] m = Motif() for i,char in enumerate(cons): for nuc in m.iupac[char.upper()]: pfm[i][nucs[nuc]] = n / len(m.iupac[char.upper()]) m = Motif(pfm) m.id = cons return m
Convert consensus sequence to motif. Converts a consensus sequence using the nucleotide IUPAC alphabet to a motif. Parameters ---------- cons : str Consensus sequence using the IUPAC alphabet. n : int , optional Count used to convert the sequence to a PFM. Returns ------- m : Motif instance Motif created from the consensus.
entailment
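For example, with the default n=12, a degenerate IUPAC letter splits its count over the allowed bases:

    m = motif_from_consensus("NTGASTCA")
    # at the 'S' position, C and G each get 12 / 2 = 6 counts;
    # at 'N', all four bases get 3
    print(m.id)  # 'NTGASTCA'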
def parse_motifs(motifs): """Parse motifs in a variety of formats to return a list of motifs. Parameters ---------- motifs : list or str Filename of motif, list of motifs or single Motif instance. Returns ------- motifs : list List of Motif instances. """ if isinstance(motifs, six.string_types): with open(motifs) as f: if motifs.endswith("pwm") or motifs.endswith("pfm"): motifs = read_motifs(f, fmt="pwm") elif motifs.endswith("transfac"): motifs = read_motifs(f, fmt="transfac") else: motifs = read_motifs(f) elif isinstance(motifs, Motif): motifs = [motifs] else: if not isinstance(list(motifs)[0], Motif): raise ValueError("Not a list of motifs") return list(motifs)
Parse motifs in a variety of formats to return a list of motifs. Parameters ---------- motifs : list or str Filename of motif, list of motifs or single Motif instance. Returns ------- motifs : list List of Motif instances.
entailment
def _read_motifs_from_filehandle(handle, fmt): """ Read motifs from a file-like object. Parameters ---------- handle : file-like object Motifs. fmt : string Motif format, can be 'pwm', 'transfac', 'xxmotif', 'jaspar' or 'align'. Returns ------- motifs : list List of Motif instances. """ fmt = fmt.lower() if fmt == "pwm": motifs = _read_motifs_pwm(handle) elif fmt == "transfac": motifs = _read_motifs_transfac(handle) elif fmt == "xxmotif": motifs = _read_motifs_xxmotif(handle) elif fmt == "align": motifs = _read_motifs_align(handle) elif fmt == "jaspar": motifs = _read_motifs_jaspar(handle) else: raise ValueError("Unknown motif format: {}".format(fmt)) if getattr(handle, "name", None): base = os.path.splitext(handle.name)[0] map_file = base + ".motif2factors.txt" if os.path.exists(map_file): m2f_direct = {} m2f_indirect = {} for line in open(map_file): try: motif,*factor_info = line.strip().split("\t") if len(factor_info) == 1: m2f_direct[motif] = factor_info[0].split(",") elif len(factor_info) == 3: if factor_info[2] == "Y": m2f_direct[motif] = m2f_direct.get(motif, []) + [factor_info[0]] else: m2f_indirect[motif] = m2f_indirect.get(motif, []) + [factor_info[0]] except: pass for motif in motifs: if motif.id in m2f_direct: motif.factors[DIRECT_NAME] = m2f_direct[motif.id] if motif.id in m2f_indirect: motif.factors[INDIRECT_NAME] = m2f_indirect[motif.id] for motif in motifs: for n in [DIRECT_NAME, INDIRECT_NAME]: motif.factors[n] = list(set(motif.factors[n])) return motifs
Read motifs from a file-like object. Parameters ---------- handle : file-like object Motifs. fmt : string Motif format, can be 'pwm', 'transfac', 'xxmotif', 'jaspar' or 'align'. Returns ------- motifs : list List of Motif instances.
entailment
def read_motifs(infile=None, fmt="pwm", as_dict=False): """ Read motifs from a file or file-like object. Parameters ---------- infile : string or file-like object, optional Motif database, filename of motif file or file-like object. If infile is not specified the default motifs as specified in the config file will be returned. fmt : string, optional Motif format, can be 'pwm', 'transfac', 'xxmotif', 'jaspar' or 'align'. as_dict : boolean, optional Return motifs as a dictionary with motif_id, motif pairs. Returns ------- motifs : list List of Motif instances. If as_dict is set to True, motifs is a dictionary. """ if infile is None or isinstance(infile, six.string_types): infile = pwmfile_location(infile) with open(infile) as f: motifs = _read_motifs_from_filehandle(f, fmt) else: motifs = _read_motifs_from_filehandle(infile, fmt) if as_dict: motifs = {m.id:m for m in motifs} return motifs
Read motifs from a file or file-like object. Parameters ---------- infile : string or file-like object, optional Motif database, filename of motif file or file-like object. If infile is not specified the default motifs as specified in the config file will be returned. fmt : string, optional Motif format, can be 'pwm', 'transfac', 'xxmotif', 'jaspar' or 'align'. as_dict : boolean, optional Return motifs as a dictionary with motif_id, motif pairs. Returns ------- motifs : list List of Motif instances. If as_dict is set to True, motifs is a dictionary.
entailment
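Typical calls might look like this (the filename is hypothetical):

    motifs = read_motifs("example.pfm")                    # list of Motif instances
    motif_dict = read_motifs("example.pfm", as_dict=True)  # maps motif ids to Motifs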
def information_content(self): """Return the total information content of the motif. Returns ------- ic : float Motif information content. """ ic = 0 for row in self.pwm: ic += 2.0 + np.sum([row[x] * log(row[x])/log(2) for x in range(4) if row[x] > 0]) return ic
Return the total information content of the motif. Returns ------- ic : float Motif information content.
entailment
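As a sanity check on the formula: a uniform column contributes 2 + 4 * 0.25 * log2(0.25) = 0 bits, while a fully specified column contributes close to the 2-bit maximum. A sketch (exact values depend on the pseudocounts used when the pwm is built from the pfm):

    m = motif_from_consensus("A", n=100)
    print(m.information_content())  # just under 2.0 bits for one near-certain position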
def pwm_min_score(self): """Return the minimum PWM score. Returns ------- score : float Minimum PWM score. """ if self.min_score is None: score = 0 for row in self.pwm: score += log(min(row) / 0.25 + 0.01) self.min_score = score return self.min_score
Return the minimum PWM score. Returns ------- score : float Minimum PWM score.
entailment
def pwm_max_score(self): """Return the maximum PWM score. Returns ------- score : float Maximum PWM score. """ if self.max_score is None: score = 0 for row in self.pwm: score += log(max(row) / 0.25 + 0.01) self.max_score = score return self.max_score
Return the maximum PWM score. Returns ------- score : float Maximum PWM score.
entailment
def score_kmer(self, kmer): """Calculate the log-odds score for a specific k-mer. Parameters ---------- kmer : str String representing a kmer. Should be the same length as the motif. Returns ------- score : float Log-odds score. """ if len(kmer) != len(self.pwm): raise Exception("incorrect k-mer length") score = 0.0 d = {"A":0, "C":1, "G":2, "T":3} for nuc, row in zip(kmer.upper(), self.pwm): score += log(row[d[nuc]] / 0.25 + 0.01) return score
Calculate the log-odds score for a specific k-mer. Parameters ---------- kmer : str String representing a kmer. Should be the same length as the motif. Returns ------- score : float Log-odds score.
entailment
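For instance (motif invented), scoring the consensus against itself gives the motif's maximum, while an all-mismatch k-mer scores far lower:

    m = motif_from_consensus("CATG")
    best = m.score_kmer("CATG")   # equals m.pwm_max_score()
    worst = m.score_kmer("GTAC")  # mismatch at every position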
def pfm_to_pwm(self, pfm, pseudo=0.001): """Convert PFM with counts to a PWM with fractions. Parameters ---------- pfm : list 2-dimensional list with counts. pseudo : float Pseudocount used in conversion. Returns ------- pwm : list 2-dimensional list with fractions. """ return [[(x + pseudo)/(float(np.sum(row)) + pseudo * 4) for x in row] for row in pfm]
Convert PFM with counts to a PWM with fractions. Parameters ---------- pfm : list 2-dimensional list with counts. pseudo : float Pseudocount used in conversion. Returns ------- pwm : list 2-dimensional list with fractions.
entailment
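A quick worked example: with the default pseudocount, counts are normalized to frequencies that sum to approximately 1 per position.

    m = Motif()
    print(m.pfm_to_pwm([[10, 0, 0, 0], [5, 5, 0, 0]]))
    # ~[[0.9997, 0.0001, 0.0001, 0.0001], [0.4999, 0.4999, 0.0001, 0.0001]]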
def to_motevo(self): """Return motif formatted in MotEvo (TRANSFAC-like) format Returns ------- m : str String of motif in MotEvo format. """ m = "//\n" m += "NA {}\n".format(self.id) m += "P0\tA\tC\tG\tT\n" for i, row in enumerate(self.pfm): m += "{}\t{}\n".format(i, "\t".join([str(int(x)) for x in row])) m += "//" return m
Return motif formatted in MotEvo (TRANSFAC-like) format Returns ------- m : str String of motif in MotEvo format.
entailment
def to_transfac(self): """Return motif formatted in TRANSFAC format Returns ------- m : str String of motif in TRANSFAC format. """ m = "%s\t%s\t%s\n" % ("DE", self.id, "unknown") for i, (row, cons) in enumerate(zip(self.pfm, self.to_consensus())): m += "%i\t%s\t%s\n" % (i, "\t".join([str(int(x)) for x in row]), cons) m += "XX" return m
Return motif formatted in TRANSFAC format Returns ------- m : str String of motif in TRANSFAC format.
entailment
def to_meme(self): """Return motif formatted in MEME format Returns ------- m : str String of motif in MEME format. """ motif_id = self.id.replace(" ", "_") m = "MOTIF %s\n" % motif_id m += "BL MOTIF %s width=0 seqs=0\n"% motif_id m += "letter-probability matrix: alength= 4 w= %s nsites= %s E= 0\n" % (len(self), np.sum(self.pfm[0])) m +="\n".join(["\t".join(["%s" % x for x in row]) for row in self.pwm]) return m
Return motif formatted in MEME format Returns ------- m : str String of motif in MEME format.
entailment
def ic_pos(self, row1, row2=None): """Calculate the information content of one position. Returns ------- score : float Information content. """ if row2 is None: row2 = [0.25,0.25,0.25,0.25] score = 0 for a,b in zip(row1, row2): if a > 0: score += a * log(a / b) / log(2) return score
Calculate the information content of one position. Returns ------- score : float Information content.
entailment
def pcc_pos(self, row1, row2): """Calculate the Pearson correlation coefficient of one position compared to another position. Returns ------- score : float Pearson correlation coefficient. """ mean1 = np.mean(row1) mean2 = np.mean(row2) a = 0 x = 0 y = 0 for n1, n2 in zip(row1, row2): a += (n1 - mean1) * (n2 - mean2) x += (n1 - mean1) ** 2 y += (n2 - mean2) ** 2 if a == 0: return 0 else: return a / sqrt(x * y)
Calculate the Pearson correlation coefficient of one position compared to another position. Returns ------- score : float Pearson correlation coefficient.
entailment
def rc(self): """Return the reverse complemented motif. Returns ------- m : Motif instance New Motif instance with the reverse complement of the input motif. """ m = Motif() m.pfm = [row[::-1] for row in self.pfm[::-1]] m.pwm = [row[::-1] for row in self.pwm[::-1]] m.id = self.id + "_revcomp" return m
Return the reverse complemented motif. Returns ------- m : Motif instance New Motif instance with the reverse complement of the input motif.
entailment
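As an example, the AP-1-like consensus TGASTCA is its own reverse complement, so only the id changes:

    m = motif_from_consensus("TGASTCA")
    rc = m.rc()
    print(rc.id)  # 'TGASTCA_revcomp'
    # rc.pfm is m.pfm reversed, with each column's A/T and C/G counts swapped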
def trim(self, edge_ic_cutoff=0.4): """Trim positions with an information content lower than the threshold. The default threshold is set to 0.4. The Motif will be changed in-place. Parameters ---------- edge_ic_cutoff : float, optional Information content threshold. All motif positions at the flanks with an information content lower than this will be removed. Returns ------- m : Motif instance """ pwm = self.pwm[:] while len(pwm) > 0 and self.ic_pos(pwm[0]) < edge_ic_cutoff: pwm = pwm[1:] self.pwm = self.pwm[1:] self.pfm = self.pfm[1:] while len(pwm) > 0 and self.ic_pos(pwm[-1]) < edge_ic_cutoff: pwm = pwm[:-1] self.pwm = self.pwm[:-1] self.pfm = self.pfm[:-1] self.consensus = None self.min_score = None self.max_score = None self.wiggled_pwm = None return self
Trim positions with an information content lower than the threshold. The default threshold is set to 0.4. The Motif will be changed in-place. Parameters ---------- edge_ic_cutoff : float, optional Information content threshold. All motif positions at the flanks with an information content lower than this will be removed. Returns ------- m : Motif instance
entailment
def consensus_scan(self, fa): """Scan FASTA with the motif as a consensus sequence. Parameters ---------- fa : Fasta object Fasta object to scan Returns ------- matches : dict Dictionary with matches. """ regexp = "".join(["[" + "".join(self.iupac[x.upper()]) + "]" for x in self.to_consensusv2()]) p = re.compile(regexp) matches = {} for name,seq in fa.items(): matches[name] = [] for match in p.finditer(seq): middle = (match.span()[1] + match.span()[0]) / 2 matches[name].append(middle) return matches
Scan FASTA with the motif as a consensus sequence. Parameters ---------- fa : Fasta object Fasta object to scan Returns ------- matches : dict Dictionary with matches.
entailment
def pwm_scan(self, fa, cutoff=0.9, nreport=50, scan_rc=True): """Scan sequences with this motif. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. Only the position of the matches is returned. Parameters ---------- fa : Fasta object Fasta object to scan. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif length. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. Returns ------- matches : dict Dictionary with motif matches. Only the position of the matches is returned. """ c = self.pwm_min_score() + ( self.pwm_max_score() - self.pwm_min_score()) * cutoff pwm = self.pwm matches = {} for name, seq in fa.items(): matches[name] = [] result = pfmscan(seq.upper(), pwm, c, nreport, scan_rc) for _,pos,_ in result: matches[name].append(pos) return matches
Scan sequences with this motif. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. Only the position of the matches is returned. Parameters ---------- fa : Fasta object Fasta object to scan. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif length. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. Returns ------- matches : dict Dictionary with motif matches. Only the position of the matches is returned.
entailment
def pwm_scan_all(self, fa, cutoff=0.9, nreport=50, scan_rc=True): """Scan sequences with this motif. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. The score, position and strand for every match is returned. Parameters ---------- fa : Fasta object Fasta object to scan. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif length. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. Returns ------- matches : dict Dictionary with motif matches. The score, position and strand for every match is returned. """ c = self.pwm_min_score() + (self.pwm_max_score() - self.pwm_min_score()) * cutoff pwm = self.pwm matches = {} for name, seq in fa.items(): matches[name] = [] result = pfmscan(seq.upper(), pwm, c, nreport, scan_rc) for score,pos,strand in result: matches[name].append((pos,score,strand)) return matches
Scan sequences with this motif. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. The score, position and strand for every match is returned. Parameters ---------- fa : Fasta object Fasta object to scan. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif length. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. Returns ------- matches : dict Dictionary with motif matches. The score, position and strand for every match is returned.
entailment
def pwm_scan_to_gff(self, fa, gfffile, cutoff=0.9, nreport=50, scan_rc=True, append=False): """Scan sequences with this motif and save to a GFF file. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. The output is saved to a file in GFF format. Parameters ---------- fa : Fasta object Fasta object to scan. gfffile : str Filename of GFF output file. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif length. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. append : bool , optional Append to GFF file instead of overwriting it. False by default. """ if append: out = open(gfffile, "a") else: out = open(gfffile, "w") c = self.pwm_min_score() + (self.pwm_max_score() - self.pwm_min_score()) * cutoff pwm = self.pwm strandmap = {-1:"-","-1":"-","-":"-","1":"+",1:"+","+":"+"} gff_line = ("{}\tpfmscan\tmisc_feature\t{}\t{}\t{:.3f}\t{}\t.\t" "motif_name \"{}\" ; motif_instance \"{}\"\n") for name, seq in fa.items(): result = pfmscan(seq.upper(), pwm, c, nreport, scan_rc) for score, pos, strand in result: out.write(gff_line.format( name, pos, pos + len(pwm), score, strandmap[strand], self.id, seq[pos:pos + len(pwm)] )) out.close()
Scan sequences with this motif and save to a GFF file. Scan sequences from a FASTA object with this motif. Less efficient than using a Scanner object. By setting the cutoff to 0.0 and nreport to 1, the best match for every sequence will be returned. The output is saved to a file in GFF format. Parameters ---------- fa : Fasta object Fasta object to scan. gfffile : str Filename of GFF output file. cutoff : float , optional Cutoff to use for motif scanning. This cutoff is not specifically optimized and the strictness will vary a lot with motif length. nreport : int , optional Maximum number of matches to report. scan_rc : bool , optional Scan the reverse complement. True by default. append : bool , optional Append to GFF file instead of overwriting it. False by default.
entailment
def average_motifs(self, other, pos, orientation, include_bg=False): """Return the average of two motifs. Combine this motif with another motif and return the average as a new Motif object. The position and orientation need to be supplied. The pos parameter is the position of the second motif relative to this motif. For example, take the following two motifs: Motif 1: CATGYT Motif 2: GGCTTGY With position -2, the motifs are averaged as follows: xxCATGYT GGCTTGYx Parameters ---------- other : Motif object Other Motif object. pos : int Position of the second motif relative to this motif. orientation : int Orientation, should be 1 or -1. If the orientation is -1 then the reverse complement of the other motif is used for averaging. include_bg : bool , optional Extend both motifs with background frequencies (0.25) before averaging. False by default. Returns ------- motif : motif object New Motif object containing average motif. """ # xxCATGYT # GGCTTGYx # pos = -2 pfm1 = self.pfm[:] pfm2 = other.pfm[:] if orientation < 0: pfm2 = [row[::-1] for row in pfm2[::-1]] pfm1_count = float(np.sum(pfm1[0])) pfm2_count = float(np.sum(pfm2[0])) if include_bg: if len(pfm1) > len(pfm2) + pos: pfm2 += [[pfm2_count / 4.0 for x in range(4)] for i in range(-(len(pfm1) - len(pfm2) - pos), 0)] elif len(pfm2) + pos > len(pfm1): pfm1 += [[pfm1_count / 4.0 for x in range(4)] for i in range(-(len(pfm2) - len(pfm1) + pos), 0)] if pos < 0: pfm1 = [[pfm1_count / 4.0 for x in range(4)] for i in range(-pos)] + pfm1 elif pos > 0: pfm2 = [[pfm2_count / 4.0 for x in range(4)] for i in range(pos)] + pfm2 else: if len(pfm1) > len(pfm2) + pos: pfm2 += [[pfm1[i][x] / pfm1_count * (pfm2_count) for x in range(4)] for i in range(-(len(pfm1) - len(pfm2) - pos), 0)] elif len(pfm2) + pos > len(pfm1): pfm1 += [[pfm2[i][x] / pfm2_count * (pfm1_count) for x in range(4)] for i in range(-(len(pfm2) - len(pfm1) + pos), 0)] if pos < 0: pfm1 = [[pfm2[i][x] / pfm2_count * (pfm1_count) for x in range(4)] for i in range(-pos)] + pfm1 elif pos > 0: pfm2 = [[pfm1[i][x] / pfm1_count * (pfm2_count) for x in range(4)] for i in range(pos)] + pfm2 pfm = [[a + b for a,b in zip(x,y)] for x,y in zip(pfm1, pfm2)] m = Motif(pfm) m.id = m.to_consensus() return m
Return the average of two motifs. Combine this motif with another motif and return the average as a new Motif object. The position and orientation need to be supplied. The pos parameter is the position of the second motif relative to this motif. For example, take the following two motifs: Motif 1: CATGYT Motif 2: GGCTTGY With position -2, the motifs are averaged as follows: xxCATGYT GGCTTGYx Parameters ---------- other : Motif object Other Motif object. pos : int Position of the second motif relative to this motif. orientation : int Orientation, should be 1 or -1. If the orientation is -1 then the reverse complement of the other motif is used for averaging. include_bg : bool , optional Extend both motifs with background frequencies (0.25) before averaging. False by default. Returns ------- motif : motif object New Motif object containing average motif.
entailment
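Reusing the docstring's own example motifs, a sketch of averaging at pos=-2 in the forward orientation:

    m1 = motif_from_consensus("CATGYT")
    m2 = motif_from_consensus("GGCTTGY")
    avg = m1.average_motifs(m2, pos=-2, orientation=1)
    # aligned as xxCATGYT / GGCTTGYx; avg.id is set to the new consensus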
def _pwm_to_str(self, precision=4): """Return string representation of pwm. Parameters ---------- precision : int, optional, default 4 Floating-point precision. Returns ------- pwm_string : str """ if not self.pwm: return "" fmt = "{{:.{:d}f}}".format(precision) return "\n".join( ["\t".join([fmt.format(p) for p in row]) for row in self.pwm] )
Return string representation of pwm. Parameters ---------- precision : int, optional, default 4 Floating-point precision. Returns ------- pwm_string : str
entailment
def to_pwm(self, precision=4, extra_str=""): """Return pwm as string. Parameters ---------- precision : int, optional, default 4 Floating-point precision. extra_str : str, optional Extra text to include with motif id line. Returns ------- motif_str : str Motif formatted in PWM format. """ motif_id = self.id if extra_str: motif_id += "_%s" % extra_str if not self.pwm: self.pwm = [self.iupac_pwm[char] for char in self.consensus.upper()] return ">%s\n%s" % ( motif_id, self._pwm_to_str(precision) )
Return pwm as string. Parameters ---------- precision : int, optional, default 4 Floating-point precision. extra_str : str, optional Extra text to include with motif id line. Returns ------- motif_str : str Motif formatted in PWM format.
entailment
def to_img(self, fname, fmt="PNG", add_left=0, seqlogo=None, height=6): """Create a sequence logo using seqlogo. Create a sequence logo and save it to a file. Valid formats are: PNG, EPS, GIF and PDF. Parameters ---------- fname : str Output filename. fmt : str , optional Output format (case-insensitive). Valid formats are PNG, EPS, GIF and PDF. add_left : int , optional Pad motif with empty positions on the left side. seqlogo : str Location of the seqlogo executable. By default the seqlogo version that is included with GimmeMotifs is used. height : float Height of the image """ if not seqlogo: seqlogo = self.seqlogo if not seqlogo: raise ValueError("seqlogo not specified or configured") #TODO: split to_align function VALID_FORMATS = ["EPS", "GIF", "PDF", "PNG"] N = 1000 fmt = fmt.upper() if not fmt in VALID_FORMATS: sys.stderr.write("Invalid motif format\n") return if fname[-4:].upper() == (".%s" % fmt): fname = fname[:-4] seqs = [] if add_left == 0: seqs = ["" for i in range(N)] else: for nuc in ["A", "C", "T", "G"]: seqs += [nuc * add_left for i in range(N // 4)] for pos in range(len(self.pwm)): vals = [self.pwm[pos][0] * N] for i in range(1,4): vals.append(vals[i-1] + self.pwm[pos][i] * N) if vals[3] - N != 0: #print "Motif weights don't add up to 1! Error of %s%%" % ((vals[3] - n)/ n * 100) vals[3] = N for i in range(N): if i <= vals[0]: seqs[i] += "A" elif i <= vals[1]: seqs[i] += "C" elif i <= vals[2]: seqs[i] += "G" elif i <= vals[3]: seqs[i] += "T" f = NamedTemporaryFile(mode="w", dir=mytmpdir()) for seq in seqs: f.write("%s\n" % seq) f.flush() makelogo = "{0} -f {1} -F {2} -c -a -h {3} -w {4} -o {5} -b -n -Y" cmd = makelogo.format( seqlogo, f.name, fmt, height, len(self) + add_left, fname) sp.call(cmd, shell=True)
Create a sequence logo using seqlogo. Create a sequence logo and save it to a file. Valid formats are: PNG, EPS, GIF and PDF. Parameters ---------- fname : str Output filename. fmt : str , optional Output format (case-insensitive). Valid formats are PNG, EPS, GIF and PDF. add_left : int , optional Pad motif with empty positions on the left side. seqlogo : str Location of the seqlogo executable. By default the seqlogo version that is included with GimmeMotifs is used. height : float Height of the image
entailment
def randomize(self): """Create a new motif with shuffled positions. Shuffle the positions of this motif and return a new Motif instance. Returns ------- m : Motif instance Motif instance with shuffled positions. """ random_pfm = [[c for c in row] for row in self.pfm] random.shuffle(random_pfm) m = Motif(pfm=random_pfm) m.id = "random" return m
Create a new motif with shuffled positions. Shuffle the positions of this motif and return a new Motif instance. Returns ------- m : Motif instance Motif instance with shuffled positions.
entailment
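For example (motif invented), the shuffled motif keeps the same columns in a new order:

    m = motif_from_consensus("TGASTCA")
    r = m.randomize()
    print(r.id)  # 'random'
    # sorted(r.pfm) == sorted(m.pfm): same columns, shuffled positions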
def maelstrom(args): """Run the maelstrom method.""" infile = args.inputfile genome = args.genome outdir = args.outdir pwmfile = args.pwmfile methods = args.methods ncpus = args.ncpus if not os.path.exists(infile): raise ValueError("file {} does not exist".format(infile)) if methods: methods = [x.strip() for x in methods.split(",")] run_maelstrom(infile, genome, outdir, pwmfile, methods=methods, ncpus=ncpus)
Run the maelstrom method.
entailment
def zmq_send_data(socket, data, flags=0, copy=True, track=False): """Send data, e.g. {key: np.ndarray}, with metadata""" header, payload = [], [] for key in sorted(data.keys()): arr = data[key] if not isinstance(arr, np.ndarray): raise DataError('Only ndarray types can be serialized') header.append(dict(dtype=str(arr.dtype), shape=arr.shape, key=key, aligned=arr.flags['ALIGNED'])) # Force contiguity so the raw buffer round-trips correctly payload.append(np.ascontiguousarray(arr)) # Send the header msg = [json.dumps(header).encode('ascii')] msg.extend(payload) return socket.send_multipart(msg, flags, copy=copy, track=track)
Send data, e.g. {key: np.ndarray}, with metadata
entailment
def zmq_recv_data(socket, flags=0, copy=True, track=False): """Receive data over a socket.""" data = dict() msg = socket.recv_multipart(flags=flags, copy=copy, track=track) headers = json.loads(msg[0].decode('ascii')) if len(headers) == 0: raise StopIteration for header, payload in zip(headers, msg[1:]): # memoryview works on both python 2.7 and 3, replacing the py2-only buffer() data[header['key']] = np.frombuffer(memoryview(payload), dtype=header['dtype']) data[header['key']].shape = header['shape'] if six.PY2: # Legacy python won't let us preserve alignment, skip this step continue data[header['key']].flags['ALIGNED'] = header['aligned'] return data
Receive data over a socket.
entailment
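A rough local round-trip sketch over a PAIR socket pair (the addresses and the example array are arbitrary):

    import numpy as np
    import zmq

    context = zmq.Context()
    sender = context.socket(zmq.PAIR)
    receiver = context.socket(zmq.PAIR)
    port = sender.bind_to_random_port('tcp://127.0.0.1')
    receiver.connect('tcp://127.0.0.1:{}'.format(port))

    zmq_send_data(sender, {'X': np.arange(6.0).reshape(2, 3)})
    data = zmq_recv_data(receiver)
    print(data['X'].shape)  # (2, 3): dtype and shape restored from the header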
def iterate(self, max_iter=None): """ Note: A ZMQStreamer does not activate its stream, but allows the zmq_worker to do that. Yields ------ data : dict Data drawn from `streamer(max_iter)`. """ context = zmq.Context() if six.PY2: warnings.warn('zmq_stream cannot preserve numpy array alignment ' 'in Python 2', RuntimeWarning) try: socket = context.socket(zmq.PAIR) port = socket.bind_to_random_port('tcp://*', min_port=self.min_port, max_port=self.max_port, max_tries=self.max_tries) terminate = mp.Event() worker = mp.Process(target=SafeFunction(zmq_worker), args=[port, self.streamer, terminate], kwargs=dict(copy=self.copy, max_iter=max_iter)) worker.daemon = True worker.start() # Yield from the queue as long as it's open while True: yield zmq_recv_data(socket) except StopIteration: pass except: # pylint: disable-msg=W0702 six.reraise(*sys.exc_info()) finally: terminate.set() worker.join(self.timeout) if worker.is_alive(): worker.terminate() context.destroy()
Note: A ZMQStreamer does not activate its stream, but allows the zmq_worker to do that. Yields ------ data : dict Data drawn from `streamer(max_iter)`.
entailment
def hardmask(self): """ Mask all lowercase nucleotides with N's """ p = re.compile("a|c|g|t|n") for seq_id in self.fasta_dict.keys(): self.fasta_dict[seq_id] = p.sub("N", self.fasta_dict[seq_id]) return self
Mask all lowercase nucleotides with N's
entailment
def get_random(self, n, l=None): """ Return n random sequences from this Fasta object """ random_f = Fasta() if l: ids = self.ids[:] random.shuffle(ids) i = 0 while (i < n) and (len(ids) > 0): seq_id = ids.pop() if (len(self[seq_id]) >= l): start = random.randint(0, len(self[seq_id]) - l) random_f["random%s" % (i + 1)] = self[seq_id][start:start+l] i += 1 if len(random_f) != n: sys.stderr.write("Not enough sequences of required length\n") return else: return random_f else: choice = random.sample(self.ids, n) for i in range(n): random_f[choice[i]] = self[choice[i]] return random_f
Return n random sequences from this Fasta object
entailment
def writefasta(self, fname): """ Write sequences to FASTA formatted file""" with open(fname, "w") as f: fa_str = "\n".join([">%s\n%s" % (id, self._format_seq(seq)) for id, seq in self.items()]) f.write(fa_str)
Write sequences to FASTA formatted file
entailment
def cluster_motifs(motifs, match="total", metric="wic", combine="mean", pval=True, threshold=0.95, trim_edges=False, edge_ic_cutoff=0.2, include_bg=True, progress=True, ncpus=None): """ Clusters a set of sequence motifs. Required arg 'motifs' is a file containing positional frequency matrices or an array with motifs. Optional args: 'match', 'metric' and 'combine' specify the method used to compare and score the motifs. By default the WIC score is used (metric='wic'), using the score over the whole alignment (match='total'), with the total motif score calculated as the mean score of all positions (combine='mean'). 'match' can be either 'total' for the total alignment or 'subtotal' for the maximum scoring subsequence of the alignment. 'metric' can be any metric defined in MotifComparer, currently: 'pcc', 'ed', 'distance', 'wic' or 'chisq' 'combine' determines how the total score is calculated from the score of individual positions and can be either 'sum' or 'mean' 'pval' can be True or False and determines if the score should be converted to an empirical p-value 'threshold' determines the score (or p-value) cutoff If 'trim_edges' is set to True, all motif edges with an IC below 'edge_ic_cutoff' will be removed before clustering When computing the average of two motifs 'include_bg' determines if, at a position only present in one motif, the information in that motif should be kept, or if it should be averaged with background frequencies. Should probably be left set to True. """ # First read pfm or pfm formatted motiffile if not isinstance(motifs, list): motifs = read_motifs(motifs, fmt="pwm") mc = MotifComparer() # Trim edges with low information content if trim_edges: for motif in motifs: motif.trim(edge_ic_cutoff) # Make a MotifTree node for every motif nodes = [MotifTree(m) for m in motifs] # Determine all pairwise scores and maxscore per motif scores = {} motif_nodes = dict([(n.motif.id,n) for n in nodes]) motifs = [n.motif for n in nodes] if progress: sys.stderr.write("Calculating initial scores\n") result = mc.get_all_scores(motifs, motifs, match, metric, combine, pval, parallel=True, ncpus=ncpus) for m1, other_motifs in result.items(): for m2, score in other_motifs.items(): if m1 == m2: if pval: motif_nodes[m1].maxscore = 1 - score[0] else: motif_nodes[m1].maxscore = score[0] else: if pval: score = [1 - score[0]] + score[1:] scores[(motif_nodes[m1],motif_nodes[m2])] = score cluster_nodes = [node for node in nodes] ave_count = 1 total = len(cluster_nodes) while len(cluster_nodes) > 1: l = sorted(scores.keys(), key=lambda x: scores[x][0]) i = -1 (n1, n2) = l[i] while n1 not in cluster_nodes or n2 not in cluster_nodes: i -= 1 (n1,n2) = l[i] if len(n1.motif) > 0 and len(n2.motif) > 0: (score, pos, orientation) = scores[(n1,n2)] ave_motif = n1.motif.average_motifs(n2.motif, pos, orientation, include_bg=include_bg) ave_motif.trim(edge_ic_cutoff) # Check if the motif is not empty if len(ave_motif) == 0: ave_motif = Motif([[0.25,0.25,0.25,0.25]]) ave_motif.id = "Average_%s" % ave_count ave_count += 1 new_node = MotifTree(ave_motif) if pval: new_node.maxscore = 1 - mc.compare_motifs(new_node.motif, new_node.motif, match, metric, combine, pval)[0] else: new_node.maxscore = mc.compare_motifs(new_node.motif, new_node.motif, match, metric, combine, pval)[0] new_node.mergescore = score #print "%s + %s = %s with score %s" % (n1.motif.id, n2.motif.id, ave_motif.id, score) n1.parent = new_node n2.parent = new_node new_node.left = n1 new_node.right = n2 cmp_nodes = dict([(node.motif, node) for node in nodes if not node.parent]) if progress: pct = (1 - len(cmp_nodes) / float(total)) * 100 sys.stderr.write('\rClustering [{0}{1}] {2}%'.format( '#' * (int(pct) // 10), " " * (10 - int(pct) // 10), int(pct))) result = mc.get_all_scores( [new_node.motif], list(cmp_nodes.keys()), match, metric, combine, pval, parallel=True) for motif, n in cmp_nodes.items(): x = result[new_node.motif.id][motif.id] if pval: x = [1 - x[0]] + x[1:] scores[(new_node, n)] = x nodes.append(new_node) cluster_nodes = [node for node in nodes if not node.parent] if progress: sys.stderr.write("\n") root = nodes[-1] for node in [node for node in nodes if not node.left]: node.parent.checkMerge(root, threshold) return root
Clusters a set of sequence motifs. Required arg 'motifs' is a file containing positional frequency matrices or an array with motifs. Optional args: 'match', 'metric' and 'combine' specify the method used to compare and score the motifs. By default the WIC score is used (metric='wic'), using the score over the whole alignment (match='total'), with the total motif score calculated as the mean score of all positions (combine='mean'). 'match' can be either 'total' for the total alignment or 'subtotal' for the maximum scoring subsequence of the alignment. 'metric' can be any metric defined in MotifComparer, currently: 'pcc', 'ed', 'distance', 'wic' or 'chisq' 'combine' determines how the total score is calculated from the score of individual positions and can be either 'sum' or 'mean' 'pval' can be True or False and determines if the score should be converted to an empirical p-value 'threshold' determines the score (or p-value) cutoff If 'trim_edges' is set to True, all motif edges with an IC below 'edge_ic_cutoff' will be removed before clustering When computing the average of two motifs 'include_bg' determines if, at a position only present in one motif, the information in that motif should be kept, or if it should be averaged with background frequencies. Should probably be left set to True.
entailment
def batch_length(batch): '''Determine the number of samples in a batch. Parameters ---------- batch : dict A batch dictionary. Each value must implement `len`. All values must have the same `len`. Returns ------- n : int >= 0 or None The number of samples in this batch. If the batch has no fields, n is None. Raises ------ PescadorError If any two values have unequal lengths ''' n = None for value in six.itervalues(batch): if n is None: n = len(value) elif len(value) != n: raise PescadorError('Unequal field lengths') return n
Determine the number of samples in a batch. Parameters ---------- batch : dict A batch dictionary. Each value must implement `len`. All values must have the same `len`. Returns ------- n : int >= 0 or None The number of samples in this batch. If the batch has no fields, n is None. Raises ------ PescadorError If any two values have unequal lengths
entailment
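For instance (arrays invented):

    import numpy as np

    batch = {'X': np.zeros((8, 3)), 'Y': np.zeros(8)}
    print(batch_length(batch))  # 8
    # batch_length({'X': np.zeros((8, 3)), 'Y': np.zeros(7)}) raises PescadorError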
def _activate(self): """Activates a number of streams""" self.distribution_ = 1. / self.n_streams * np.ones(self.n_streams) self.valid_streams_ = np.ones(self.n_streams, dtype=bool) self.streams_ = [None] * self.k self.stream_weights_ = np.zeros(self.k) self.stream_counts_ = np.zeros(self.k, dtype=int) # Array of pointers into `self.streamers` self.stream_idxs_ = np.zeros(self.k, dtype=int) for idx in range(self.k): if not (self.distribution_ > 0).any(): break self.stream_idxs_[idx] = self.rng.choice( self.n_streams, p=self.distribution_) self.streams_[idx], self.stream_weights_[idx] = ( self._new_stream(self.stream_idxs_[idx])) self.weight_norm_ = np.sum(self.stream_weights_)
Activates a number of streams
entailment
def _new_stream(self, idx): '''Randomly select and create a stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace ''' # instantiate if self.rate is not None: n_stream = 1 + self.rng.poisson(lam=self.rate) else: n_stream = None # If we're sampling without replacement, zero this one out # This effectively disables this stream as soon as it is chosen, # preventing it from being chosen again (unless it is revived) if not self.with_replacement: self.distribution_[idx] = 0.0 # Correct the distribution if (self.distribution_ > 0).any(): self.distribution_[:] /= np.sum(self.distribution_) return (self.streamers[idx].iterate(max_iter=n_stream), self.weights[idx])
Randomly select and create a stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace
entailment
def iterate(self, max_iter=None): """Yields items from the mux, and handles stream exhaustion and replacement. """ if max_iter is None: max_iter = np.inf # Calls Streamer's __enter__, which calls activate() with self as active_mux: # Main sampling loop n = 0 while n < max_iter and active_mux._streamers_available(): # Pick a stream from the active set idx = active_mux._next_sample_index() # Can we sample from it? try: # Then yield the sample yield six.advance_iterator(active_mux.streams_[idx]) # Increment the sample counter n += 1 active_mux.stream_counts_[idx] += 1 except StopIteration: # Oops, this stream is exhausted. # Call child-class exhausted-stream behavior active_mux._on_stream_exhausted(idx) # Setup a new stream for this index active_mux._replace_stream(idx)
Yields items from the mux, and handles stream exhaustion and replacement.
entailment
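This iterate/replace loop is what drives the public mux interface. A rough usage sketch with pescador's StochasticMux (the generator and its parameters are invented):

    import numpy as np
    import pescador

    def noise(mean):
        while True:
            yield {'X': np.random.randn(1, 2) + mean}

    streamers = [pescador.Streamer(noise, mu) for mu in (0, 10)]
    mux = pescador.StochasticMux(streamers, n_active=2, rate=4)
    for sample in mux.iterate(max_iter=8):
        print(sample['X'].mean())  # samples interleave the two streams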
def _next_sample_index(self): """StochasticMux chooses its next sample stream randomly""" return self.rng.choice(self.n_active, p=(self.stream_weights_ / self.weight_norm_))
StochasticMux chooses its next sample stream randomly
entailment
def _activate_stream(self, idx): '''Randomly select and create a stream. StochasticMux adds mode handling to _activate_stream, making it so that if we're not sampling "with_replacement", the distribution for this chosen streamer is set to 0, causing the streamer not to be available until it is exhausted. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace ''' # Get the number of samples for this streamer. n_samples_to_stream = None if self.rate is not None: n_samples_to_stream = 1 + self.rng.poisson(lam=self.rate) # instantiate a new streamer streamer = self.streamers[idx].iterate(max_iter=n_samples_to_stream) weight = self.weights[idx] # If we're sampling without replacement, zero this one out # This effectively disables this stream as soon as it is chosen, # preventing it from being chosen again (unless it is revived) # if not self.with_replacement: if self.mode != "with_replacement": self.distribution_[idx] = 0.0 # Correct the distribution if (self.distribution_ > 0).any(): self.distribution_[:] /= np.sum(self.distribution_) return streamer, weight
Randomly select and create a stream. StochasticMux adds mode handling to _activate_stream, making it so that if we're not sampling "with_replacement", the distribution for this chosen streamer is set to 0, causing the streamer not to be available until it is exhausted. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace
entailment
def _new_stream(self, idx): '''Randomly select and create a new stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace ''' # Choose the stream index from the candidate pool self.stream_idxs_[idx] = self.rng.choice( self.n_streams, p=self.distribution_) # Activate the Streamer, and get the weights self.streams_[idx], self.stream_weights_[idx] = self._activate_stream( self.stream_idxs_[idx]) # Reset the sample count to zero self.stream_counts_[idx] = 0
Randomly select and create a new stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace
entailment
def _activate(self): """ShuffledMux's activate is similar to StochasticMux, but there is no 'n_active', since all the streams are always available. """ self.streams_ = [None] * self.n_streams # Weights of the active streams. # Once a stream is exhausted, it is set to 0. # Upon activation, this is just a copy of self.weights. self.stream_weights_ = np.array(self.weights, dtype=float) # How many samples have been drawn from each (active) stream. self.stream_counts_ = np.zeros(self.n_streams, dtype=int) # Initialize each active stream. for idx in range(self.n_streams): # Setup a new streamer at this index. self._new_stream(idx) self.weight_norm_ = np.sum(self.stream_weights_)
ShuffledMux's activate is similar to StochasticMux, but there is no 'n_active', since all the streams are always available.
entailment
def _next_sample_index(self): """ShuffledMux chooses its next sample stream randomly, conditioned on the stream weights. """ return self.rng.choice(self.n_streams, p=(self.stream_weights_ / self.weight_norm_))
ShuffledMux chooses its next sample stream randomly, conditioned on the stream weights.
entailment
def _new_stream(self, idx): '''Randomly select and create a new stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace ''' # Don't activate the stream if the weight is 0 or None if self.stream_weights_[idx]: self.streams_[idx] = self.streamers[idx].iterate() else: self.streams_[idx] = None # Reset the sample count to zero self.stream_counts_[idx] = 0
Randomly select and create a new stream. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace
entailment
def _next_sample_index(self): """Rotates through each active sampler by incrementing the index""" # Return the next streamer index where the streamer is not None, # wrapping around. idx = self.active_index_ self.active_index_ += 1 if self.active_index_ >= len(self.streams_): self.active_index_ = 0 # Continue to increment if this streamer is exhausted (None) # This should never be infinite looping; # the `_streamers_available` check happens immediately # before this, so there should always be at least one not-None # streamer. while self.streams_[idx] is None: idx = self.active_index_ self.active_index_ += 1 if self.active_index_ >= len(self.streams_): self.active_index_ = 0 return idx
Rotates through each active sampler by incrementing the index
entailment
def _new_stream(self, idx): """Activate a new stream, given the index into the stream pool. BaseMux's _new_stream simply chooses a new stream and activates it. For special behavior (i.e., weighted streams), you must override this in a child class. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace """ # Get the stream index from the candidate pool stream_index = self.stream_idxs_[idx] # Activate the Streamer, and get the weights self.streams_[idx] = self.streamers[stream_index].iterate() # Reset the sample count to zero self.stream_counts_[idx] = 0
Activate a new stream, given the index into the stream pool. BaseMux's _new_stream simply chooses a new stream and activates it. For special behavior (i.e., weighted streams), you must override this in a child class. Parameters ---------- idx : int, [0:n_streams - 1] The stream index to replace
entailment
def _replace_stream(self, idx=None): """Called by `BaseMux`'s iterate() when a stream is exhausted. Set the stream to None so it is ignored once exhausted. Parameters ---------- idx : int or None Raises ------ StopIteration If all streams are consumed, and `mode`=="exhaustive" """ self.streams_[idx] = None # Check if we've now exhausted all the streams. if not self._streamers_available(): if self.mode == 'exhaustive': pass elif self.mode == "cycle": self._setup_streams(permute=False) elif self.mode == "permuted_cycle": self._setup_streams(permute=True)
Called by `BaseMux`'s iterate() when a stream is exhausted. Set the stream to None so it is ignored once exhausted. Parameters ---------- idx : int or None Raises ------ StopIteration If all streams are consumed, and `mode`=="exhaustive"
entailment
def _new_stream(self): '''Grab the next stream from the input streamers, and start it. Raises ------ StopIteration When the input list or generator of streamers is complete, will raise a StopIteration. If `mode == cycle`, it will instead restart iterating from the beginning of the sequence. ''' try: # Advance the stream_generator_ to get the next available stream. # If successful, this will make self.chain_streamer_.active True next_stream = six.advance_iterator(self.stream_generator_) except StopIteration: # If running with cycle, restart the chain_streamer_ after # exhaustion. if self.mode == "cycle": self.stream_generator_ = self.chain_streamer_.iterate() # Try again to get the next stream; # if it fails this time, just let it raise the StopIteration; # this means the streams are probably dead or empty. next_stream = six.advance_iterator(self.stream_generator_) # If running in exhaustive mode else: # self.chain_streamer_ should no longer be active, so # the outer loop should fall out without running. next_stream = None if next_stream is not None: # Start that stream, and return it. streamer = next_stream.iterate() # Activate the Streamer self.streams_[0] = streamer # Reset the sample count to zero self.stream_counts_[0] = 0
Grab the next stream from the input streamers, and start it. Raises ------ StopIteration When the input list or generator of streamers is complete, will raise a StopIteration. If `mode == cycle`, it will instead restart iterating from the beginning of the sequence.
entailment
def split_and_save_datasets(X, Y, paths): """Shuffle X and Y, split them into len(paths) datasets of roughly n / len(paths) samples each, and save them to disk at the locations provided in paths. """ shuffled_idxs = np.random.permutation(np.arange(len(X))) for i in range(len(paths)): # Take every len(paths)-th item, starting at i. # e.g. with len(paths) == 3, this takes [0::3], [1::3], [2::3] X_i = X[shuffled_idxs[i::len(paths)]] Y_i = Y[shuffled_idxs[i::len(paths)]] np.savez(paths[i], X=X_i, Y=Y_i)
Shuffle X and Y, split them into len(paths) datasets of roughly n / len(paths) samples each, and save them to disk at the locations provided in paths.
entailment
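For example (filenames hypothetical), 90 samples are shuffled and strided into three disjoint 30-sample files:

    import numpy as np

    X = np.random.randn(90, 4)
    Y = np.random.randint(2, size=90)
    split_and_save_datasets(X, Y, ['train.npz', 'valid.npz', 'test.npz'])
    # np.load('train.npz')['X'].shape == (30, 4)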