Dataset schema (column name: type, observed value range):

repository_name: string, 7 to 55 chars
func_path_in_repository: string, 4 to 223 chars
func_name: string, 1 to 134 chars
whole_func_string: string, 75 to 104k chars
language: string, 1 distinct value
func_code_string: string, 75 to 104k chars
func_code_tokens: list, 19 to 28.4k tokens
func_documentation_string: string, 1 to 46.9k chars
func_documentation_tokens: list, 1 to 1.97k tokens
split_name: string, 1 distinct value
func_code_url: string, 87 to 315 chars
pescadores/pescador
examples/mux/mux_files_example.py
npz_generator
def npz_generator(npz_path):
    """Generate data from an npz file."""
    npz_data = np.load(npz_path)
    X = npz_data['X']
    # Y is a binary matrix with shape=(n, k), each y will have shape=(k,)
    y = npz_data['Y']
    n = X.shape[0]
    while True:
        i = np.random.randint(0, n)
        yield {'X': X[i], 'Y': y[i]}
python
Generate data from an npz file.
[ "Generate", "data", "from", "an", "npz", "file", "." ]
train
https://github.com/pescadores/pescador/blob/786e2b5f882d13ea563769fbc7ad0a0a10c3553d/examples/mux/mux_files_example.py#L60-L71
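A minimal usage sketch for this record's function, assuming pescador is installed and using a hypothetical data.npz with matching X and Y arrays; the Streamer wrapper and iterate() call follow pescador's documented API, but treat the details as an assumption:

import numpy as np
import pescador

# Hypothetical npz file with 100 samples (an assumption for this sketch).
np.savez('data.npz', X=np.random.randn(100, 10),
         Y=np.eye(5)[np.random.randint(5, size=100)])

# Wrap the generator so pescador can activate it lazily and bound the stream.
stream = pescador.Streamer(npz_generator, 'data.npz')
for sample in stream.iterate(max_iter=3):
    print(sample['X'].shape, sample['Y'].shape)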
vanheeringen-lab/gimmemotifs
gimmemotifs/utils.py
phyper
def phyper(k, good, bad, N):
    """Current hypergeometric implementation in scipy is broken,
    so here's the correct version"""
    pvalues = [phyper_single(x, good, bad, N) for x in range(k + 1, N + 1)]
    return np.sum(pvalues)
python
Current hypergeometric implementation in scipy is broken, so here's the correct version
[ "Current", "hypergeometric", "implementation", "in", "scipy", "is", "broken", "so", "here", "s", "the", "correct", "version" ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/utils.py#L77-L80
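The sum over the upper tail is the survival function of a hypergeometric distribution; for reference, a sketch of the same quantity in the modern scipy parameterization (assuming a recent scipy where the issue mentioned in the docstring no longer applies):

from scipy.stats import hypergeom

def phyper_scipy(k, good, bad, N):
    # P(X > k) for a population of good+bad objects with good successes,
    # drawing N without replacement.
    return hypergeom.sf(k, good + bad, good, N)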
vanheeringen-lab/gimmemotifs
gimmemotifs/utils.py
write_equalwidth_bedfile
def write_equalwidth_bedfile(bedfile, width, outfile):
    """Read input from <bedfile>, set the width of all entries to <width> and
    write the result to <outfile>.
    Input file needs to be in BED or WIG format."""
    BUFSIZE = 10000
    f = open(bedfile)
    out = open(outfile, "w")
    lines = f.readlines(BUFSIZE)
    line_count = 0
    while lines:
        for line in lines:
            line_count += 1
            if not line.startswith("#") and not line.startswith("track") and not line.startswith("browser"):
                vals = line.strip().split("\t")
                try:
                    start, end = int(vals[1]), int(vals[2])
                except ValueError:
                    print("Error on line %s while reading %s. Is the file in BED or WIG format?" % (line_count, bedfile))
                    sys.exit(1)
                # This shifts the center, but ensures the width is identical... maybe not ideal
                start = (start + end) // 2 - (width // 2)
                if start < 0:
                    start = 0
                end = start + width
                # Keep all the other information in the bedfile if it's there
                if len(vals) > 3:
                    out.write("%s\t%s\t%s\t%s\n" % (vals[0], start, end, "\t".join(vals[3:])))
                else:
                    out.write("%s\t%s\t%s\n" % (vals[0], start, end))
        lines = f.readlines(BUFSIZE)
    out.close()
    f.close()
python
Read input from <bedfile>, set the width of all entries to <width> and write the result to <outfile>. Input file needs to be in BED or WIG format.
[ "Read", "input", "from", "<bedfile", ">", "set", "the", "width", "of", "all", "entries", "to", "<width", ">", "and", "write", "the", "result", "to", "<outfile", ">", ".", "Input", "file", "needs", "to", "be", "in", "BED", "or", "WIG", "format", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/utils.py#L143-L177
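A small usage sketch with hypothetical file names: every interval keeps its original midpoint and any extra columns, but gets a fixed 200-bp width.

with open("peaks.bed", "w") as f:
    f.write("chr1\t100\t500\tpeak1\nchr1\t900\t950\tpeak2\n")

write_equalwidth_bedfile("peaks.bed", 200, "peaks.200bp.bed")
# peaks.200bp.bed now contains:
#   chr1  200  400   peak1   (midpoint 300, width 200)
#   chr1  825  1025  peak2   (midpoint 925, width 200)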
vanheeringen-lab/gimmemotifs
gimmemotifs/utils.py
calc_motif_enrichment
def calc_motif_enrichment(sample, background, mtc=None, len_sample=None, len_back=None):
    """Calculate enrichment based on hypergeometric distribution"""
    INF = "Inf"
    if mtc not in [None, "Bonferroni", "Benjamini-Hochberg", "None"]:
        raise RuntimeError("Unknown correction: %s" % mtc)

    sig = {}
    p_value = {}
    n_sample = {}
    n_back = {}
    if not len_sample:
        len_sample = sample.seqn()
    if not len_back:
        len_back = background.seqn()

    for motif in sample.motifs.keys():
        p = "NA"
        s = "NA"
        q = len(sample.motifs[motif])
        m = 0
        if background.motifs.get(motif):
            m = len(background.motifs[motif])
            n = len_back - m
            k = len_sample
            p = phyper(q - 1, m, n, k)
            if p != 0:
                s = -(log(p) / log(10))
            else:
                s = INF
        else:
            s = INF
            p = 0.0
        sig[motif] = s
        p_value[motif] = p
        n_sample[motif] = q
        n_back[motif] = m

    if mtc == "Bonferroni":
        for motif in p_value.keys():
            if p_value[motif] != "NA":
                p_value[motif] = p_value[motif] * len(p_value.keys())
                if p_value[motif] > 1:
                    p_value[motif] = 1
    elif mtc == "Benjamini-Hochberg":
        motifs = sorted(p_value.keys(), key=lambda x: -p_value[x])
        l = len(p_value)
        c = l
        for m in motifs:
            if p_value[m] != "NA":
                p_value[m] = p_value[m] * l / c
            c -= 1

    return (sig, p_value, n_sample, n_back)
python
Calculate enrichment based on hypergeometric distribution
[ "Calculate", "enrichment", "based", "on", "hypergeometric", "distribution" ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/utils.py#L264-L321
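The Benjamini-Hochberg branch above applies the step-up scaling p * l / c with c counting down from the largest p-value; a standalone sketch of that arithmetic on three numeric p-values (note it omits the running-minimum step of the textbook procedure):

p = {"m1": 0.01, "m2": 0.04, "m3": 0.03}
motifs = sorted(p, key=lambda x: -p[x])  # largest p first: m2, m3, m1
l = len(p)
c = l
for m in motifs:
    p[m] = p[m] * l / c  # scale by total count over rank
    c -= 1
# m2: 0.04*3/3 = 0.04, m3: 0.03*3/2 = 0.045, m1: 0.01*3/1 = 0.03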
vanheeringen-lab/gimmemotifs
gimmemotifs/utils.py
parse_cutoff
def parse_cutoff(motifs, cutoff, default=0.9):
    """Provide either a file with one cutoff per motif or a single cutoff.
    Returns a hash with motif id as key and cutoff as value."""
    cutoffs = {}
    if os.path.isfile(str(cutoff)):
        for i, line in enumerate(open(cutoff)):
            if line != "Motif\tScore\tCutoff\n":
                try:
                    motif, _, c = line.strip().split("\t")
                    c = float(c)
                    cutoffs[motif] = c
                except Exception as e:
                    sys.stderr.write("Error parsing cutoff file, line {0}: {1}\n".format(i + 1, e))
                    sys.exit(1)
    else:
        for motif in motifs:
            cutoffs[motif.id] = float(cutoff)

    for motif in motifs:
        if motif.id not in cutoffs:
            sys.stderr.write("No cutoff found for {0}, using default {1}\n".format(motif.id, default))
            cutoffs[motif.id] = default
    return cutoffs
python
Provide either a file with one cutoff per motif or a single cutoff returns a hash with motif id as key and cutoff as value
[ "Provide", "either", "a", "file", "with", "one", "cutoff", "per", "motif", "or", "a", "single", "cutoff", "returns", "a", "hash", "with", "motif", "id", "as", "key", "and", "cutoff", "as", "value" ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/utils.py#L400-L424
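A usage sketch with a hypothetical cutoff file; the motifs argument is assumed to be a list of objects with an id attribute, as the loop over motif.id implies:

# Hypothetical tab-separated cutoff file with the expected header line.
with open("cutoffs.txt", "w") as f:
    f.write("Motif\tScore\tCutoff\n")
    f.write("AP1\t8.5\t0.92\n")

cutoffs = parse_cutoff(motifs, "cutoffs.txt", default=0.9)
# Motifs without a line in the file fall back to the default, with a warning on stderr.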
vanheeringen-lab/gimmemotifs
gimmemotifs/utils.py
determine_file_type
def determine_file_type(fname):
    """
    Detect file type.

    The following file types are supported:
    BED, narrowPeak, FASTA, list of chr:start-end regions

    If the extension is bed, fa, fasta or narrowPeak, we will believe this
    without checking!

    Parameters
    ----------
    fname : str
        File name.

    Returns
    -------
    filetype : str
        File type (lower-case).
    """
    # 'unicode' is assumed to be aliased at module level for Python 2/3 compatibility.
    if not (isinstance(fname, str) or isinstance(fname, unicode)):
        raise ValueError("{} is not a file name!".format(fname))
    if not os.path.isfile(fname):
        raise ValueError("{} is not a file!".format(fname))

    # splitext keeps the leading dot, so strip it before comparing
    ext = os.path.splitext(fname)[1].lower().lstrip(".")
    if ext in ["bed"]:
        return "bed"
    elif ext in ["fa", "fasta"]:
        return "fasta"
    elif ext in ["narrowpeak"]:
        return "narrowpeak"

    try:
        Fasta(fname)
        return "fasta"
    except Exception:
        pass

    # Read first line that is not a comment or a UCSC-specific line
    p = re.compile(r'^(#|track|browser)')
    with open(fname) as f:
        for line in f.readlines():
            line = line.strip()
            if not p.search(line):
                break

    region_p = re.compile(r'^(.+):(\d+)-(\d+)$')
    if region_p.search(line):
        return "region"
    else:
        vals = line.split("\t")
        if len(vals) >= 3:
            try:
                _, _ = int(vals[1]), int(vals[2])
            except ValueError:
                return "unknown"
            if len(vals) == 10:
                try:
                    _, _ = int(vals[4]), int(vals[9])
                    return "narrowpeak"
                except ValueError:
                    # As far as I know there is no 10-column BED format
                    return "unknown"
            return "bed"
    # Catch-all
    return "unknown"
python
Detect file type. The following file types are supported: BED, narrowPeak, FASTA, list of chr:start-end regions If the extension is bed, fa, fasta or narrowPeak, we will believe this without checking! Parameters ---------- fname : str File name. Returns ------- filetype : str File type (lower-case).
[ "Detect", "file", "type", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/utils.py#L495-L562
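A short usage sketch (hypothetical path); callers are expected to branch on the returned string, since "unknown" is a normal return value rather than an exception:

ftype = determine_file_type("regions.bed")
if ftype == "unknown":
    raise ValueError("unsupported input format")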
vanheeringen-lab/gimmemotifs
gimmemotifs/utils.py
get_seqs_type
def get_seqs_type(seqs):
    """
    automagically determine input type
    the following types are detected:
        - Fasta object
        - FASTA file
        - list of regions
        - region file
        - BED file
    """
    region_p = re.compile(r'^(.+):(\d+)-(\d+)$')
    if isinstance(seqs, Fasta):
        return "fasta"
    elif isinstance(seqs, list):
        if len(seqs) == 0:
            raise ValueError("empty list of sequences to scan")
        else:
            if region_p.search(seqs[0]):
                return "regions"
            else:
                raise ValueError("unknown region type")
    elif isinstance(seqs, str) or isinstance(seqs, unicode):
        if os.path.isfile(seqs):
            ftype = determine_file_type(seqs)
            if ftype == "unknown":
                raise ValueError("unknown type")
            elif ftype == "narrowpeak":
                raise ValueError("narrowPeak not yet supported in this function")
            else:
                return ftype + "file"
        else:
            raise ValueError("no file found with name {}".format(seqs))
    else:
        raise ValueError("unknown type {}".format(type(seqs).__name__))
python
automagically determine input type the following types are detected: - Fasta object - FASTA file - list of regions - region file - BED file
[ "automagically", "determine", "input", "type", "the", "following", "types", "are", "detected", ":", "-", "Fasta", "object", "-", "FASTA", "file", "-", "list", "of", "regions", "-", "region", "file", "-", "BED", "file" ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/utils.py#L565-L598
vanheeringen-lab/gimmemotifs
gimmemotifs/utils.py
file_checksum
def file_checksum(fname):
    """Return md5 checksum of file.

    Note: only works for files < 4GB.

    Parameters
    ----------
    fname : str
        File used to calculate checksum.

    Returns
    -------
    checksum : str
    """
    size = os.path.getsize(fname)
    with open(fname, "r+") as f:
        checksum = hashlib.md5(mmap.mmap(f.fileno(), size)).hexdigest()
    return checksum
python
Return md5 checksum of file. Note: only works for files < 4GB. Parameters ---------- fname : str File used to calculate checksum. Returns ------- checksum : str
[ "Return", "md5", "checksum", "of", "file", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/utils.py#L616-L633
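A sketch checking the mmap-based digest against a plain full-read digest on a hypothetical file; the two agree for any non-empty file that fits in the address space:

import hashlib

with open("motifs.pwm", "rb") as f:
    expected = hashlib.md5(f.read()).hexdigest()
assert file_checksum("motifs.pwm") == expected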
vanheeringen-lab/gimmemotifs
gimmemotifs/genome_index.py
download_annotation
def download_annotation(genomebuild, gene_file):
    """
    Download gene annotation from UCSC based on genomebuild.

    Will check UCSC, Ensembl and RefSeq annotation.

    Parameters
    ----------
    genomebuild : str
        UCSC genome name.
    gene_file : str
        Output file name.
    """
    pred_bin = "genePredToBed"
    pred = find_executable(pred_bin)
    if not pred:
        sys.stderr.write("{} not found in path!\n".format(pred_bin))
        sys.exit(1)

    tmp = NamedTemporaryFile(delete=False, suffix=".gz")
    anno = []
    f = urlopen(UCSC_GENE_URL.format(genomebuild))
    p = re.compile(r'\w+.Gene.txt.gz')
    for line in f.readlines():
        m = p.search(line.decode())
        if m:
            anno.append(m.group(0))

    sys.stderr.write("Retrieving gene annotation for {}\n".format(genomebuild))
    url = ""
    for a in ANNOS:
        if a in anno:
            url = UCSC_GENE_URL.format(genomebuild) + a
            break
    if url:
        sys.stderr.write("Using {}\n".format(url))
        urlretrieve(url, tmp.name)
        with gzip.open(tmp.name) as f:
            cols = f.readline().decode(errors='ignore').split("\t")
        start_col = 1
        for i, col in enumerate(cols):
            if col == "+" or col == "-":
                start_col = i - 1
                break
        end_col = start_col + 10
        cmd = "zcat {} | cut -f{}-{} | {} /dev/stdin {}"
        print(cmd.format(tmp.name, start_col, end_col, pred, gene_file))
        sp.call(cmd.format(tmp.name, start_col, end_col, pred, gene_file), shell=True)
    else:
        sys.stderr.write("No annotation found!")
python
Download gene annotation from UCSC based on genomebuild. Will check UCSC, Ensembl and RefSeq annotation. Parameters ---------- genomebuild : str UCSC genome name. gene_file : str Output file name.
[ "Download", "gene", "annotation", "from", "UCSC", "based", "on", "genomebuild", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/genome_index.py#L98-L157
vanheeringen-lab/gimmemotifs
gimmemotifs/genome_index.py
GenomeIndex._check_dir
def _check_dir(self, dirname):
    """Check if dir exists, if not: give warning and die"""
    if not os.path.exists(dirname):
        print("Directory %s does not exist!" % dirname)
        sys.exit(1)
python
Check if dir exists, if not: give warning and die
[ "Check", "if", "dir", "exists", "if", "not", ":", "give", "warning", "and", "die" ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/genome_index.py#L278-L282
vanheeringen-lab/gimmemotifs
gimmemotifs/genome_index.py
GenomeIndex._make_index
def _make_index(self, fasta, index):
    """Index a single, one-sequence fasta-file"""
    out = open(index, "wb")
    f = open(fasta)
    # Skip first line of fasta-file
    line = f.readline()
    offset = f.tell()
    line = f.readline()
    while line:
        out.write(pack(self.pack_char, offset))
        offset = f.tell()
        line = f.readline()
    f.close()
    out.close()
python
Index a single, one-sequence fasta-file
[ "Index", "a", "single", "one", "-", "sequence", "fasta", "-", "file" ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/genome_index.py#L284-L297
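The index written above is a flat array of packed byte offsets, one per sequence line after the FASTA header; a sketch of reading entry i back, assuming pack_char is 'Q' (8-byte unsigned):

from struct import calcsize, unpack

def read_offset(index_path, i, pack_char="Q"):
    # Entry i is the byte offset of the i-th sequence line (header excluded).
    size = calcsize(pack_char)
    with open(index_path, "rb") as idx:
        idx.seek(i * size)
        return unpack(pack_char, idx.read(size))[0]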
vanheeringen-lab/gimmemotifs
gimmemotifs/genome_index.py
GenomeIndex.create_index
def create_index(self, fasta_dir=None, index_dir=None):
    """Index all fasta-files in fasta_dir (one sequence per file!)
    and store the results in index_dir"""
    # Use default directories if they are not supplied
    if not fasta_dir:
        fasta_dir = self.fasta_dir
    if not index_dir:
        index_dir = self.index_dir

    # Can't continue if we still don't have an index_dir or fasta_dir
    if not fasta_dir:
        print("fasta_dir not defined!")
        sys.exit(1)
    if not index_dir:
        print("index_dir not defined!")
        sys.exit(1)

    index_dir = os.path.abspath(index_dir)
    fasta_dir = os.path.abspath(fasta_dir)
    self.index_dir = index_dir

    # Prepare index directory
    if not os.path.exists(index_dir):
        try:
            os.mkdir(index_dir)
        except OSError as e:
            if e.args[0] == 13:
                sys.stderr.write("No permission to create index directory. Superuser access needed?\n")
                sys.exit()
            else:
                sys.stderr.write(str(e))

    # Directories need to exist
    self._check_dir(fasta_dir)
    self._check_dir(index_dir)

    # Get all fasta-files
    fastafiles = find_by_ext(fasta_dir, FASTA_EXT)
    if not fastafiles:
        msg = "No fastafiles found in {} with extension in {}".format(
            fasta_dir, ",".join(FASTA_EXT))
        raise IOError(msg)

    # param_file will hold all the information about the location of the
    # fasta-files, indices and length of the sequences
    param_file = os.path.join(index_dir, self.param_file)
    size_file = os.path.join(index_dir, self.size_file)
    try:
        out = open(param_file, "w")
    except IOError as e:
        if e.args[0] == 13:
            sys.stderr.write("No permission to create files in index directory. Superuser access needed?\n")
            sys.exit()
        else:
            sys.stderr.write(str(e))

    s_out = open(size_file, "w")
    for fasta_file in fastafiles:
        f = open(fasta_file)
        line = f.readline()
        if not line.startswith(">"):
            sys.stderr.write("%s is not a valid FASTA file, expected > at first line\n" % fasta_file)
            sys.exit()
        seqname = line.strip().replace(">", "")
        line = f.readline()
        line_size = len(line.strip())
        total_size = 0
        while line:
            line = line.strip()
            if line.startswith(">"):
                sys.stderr.write("Sorry, can only index genomes with "
                                 "one sequence per FASTA file\n%s contains multiple "
                                 "sequences\n" % fasta_file)
                sys.exit()
            total_size += len(line)
            line = f.readline()
        index_file = os.path.join(index_dir, "%s.index" % seqname)
        out.write("{}\t{}\t{}\t{}\t{}\n".format(
            seqname, fasta_file, index_file, line_size, total_size))
        s_out.write("{}\t{}\n".format(seqname, total_size))
        self._make_index(fasta_file, index_file)
        f.close()
    out.close()
    s_out.close()

    # Read the index we just made so we can immediately use it
    self._read_index_file()
python
Index all fasta-files in fasta_dir (one sequence per file!) and store the results in index_dir
[ "Index", "all", "fasta", "-", "files", "in", "fasta_dir", "(", "one", "sequence", "per", "file!", ")", "and", "store", "the", "results", "in", "index_dir" ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/genome_index.py#L299-L398
vanheeringen-lab/gimmemotifs
gimmemotifs/genome_index.py
GenomeIndex._read_index_file
def _read_index_file(self):
    """read the param_file, index_dir should already be set"""
    param_file = os.path.join(self.index_dir, self.param_file)
    with open(param_file) as f:
        for line in f.readlines():
            (name, fasta_file, index_file, line_size, total_size) = line.strip().split("\t")
            self.size[name] = int(total_size)
            self.fasta_file[name] = fasta_file
            self.index_file[name] = index_file
            self.line_size[name] = int(line_size)
python
read the param_file, index_dir should already be set
[ "read", "the", "param_file", "index_dir", "should", "already", "be", "set" ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/genome_index.py#L400-L409
vanheeringen-lab/gimmemotifs
gimmemotifs/genome_index.py
GenomeIndex._read_seq_from_fasta
def _read_seq_from_fasta(self, fasta, offset, nr_lines):
    """retrieve a number of lines from a fasta file-object, starting at offset"""
    fasta.seek(offset)
    lines = [fasta.readline().strip() for _ in range(nr_lines)]
    return "".join(lines)
python
retrieve a number of lines from a fasta file-object, starting at offset
[ "retrieve", "a", "number", "of", "lines", "from", "a", "fasta", "file", "-", "object", "starting", "at", "offset" ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/genome_index.py#L411-L415
vanheeringen-lab/gimmemotifs
gimmemotifs/genome_index.py
GenomeIndex.get_sequences
def get_sequences(self, chr, coords):
    """Retrieve multiple sequences from same chr (RC not possible yet)"""
    # Check if we have an index_dir
    if not self.index_dir:
        print("Index dir is not defined!")
        sys.exit()

    # retrieve all information for this specific sequence
    fasta_file = self.fasta_file[chr]
    index_file = self.index_file[chr]
    line_size = self.line_size[chr]
    total_size = self.size[chr]

    index = open(index_file, "rb")
    fasta = open(fasta_file)
    seqs = []
    for coordset in coords:
        seq = ""
        for (start, end) in coordset:
            if start > total_size:
                raise ValueError("%s: %s, invalid start, greater than sequence length!" % (chr, start))
            if start < 0:
                raise ValueError("Invalid start, < 0!")
            if end > total_size:
                raise ValueError("Invalid end, greater than sequence length!")
            seq += self._read(index, fasta, start, end, line_size)
        seqs.append(seq)
    index.close()
    fasta.close()
    return seqs
python
Retrieve multiple sequences from same chr (RC not possible yet)
[ "Retrieve", "multiple", "sequences", "from", "same", "chr", "(", "RC", "not", "possible", "yet", ")" ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/genome_index.py#L461-L495
vanheeringen-lab/gimmemotifs
gimmemotifs/genome_index.py
GenomeIndex.get_sequence
def get_sequence(self, chrom, start, end, strand=None):
    """Retrieve a sequence"""
    # Check if we have an index_dir
    if not self.index_dir:
        print("Index dir is not defined!")
        sys.exit()

    # retrieve all information for this specific sequence
    fasta_file = self.fasta_file[chrom]
    index_file = self.index_file[chrom]
    line_size = self.line_size[chrom]
    total_size = self.size[chrom]

    if start > total_size:
        raise ValueError(
            "Invalid start {0}, greater than sequence length {1} of {2}!".format(start, total_size, chrom))
    if start < 0:
        raise ValueError("Invalid start, < 0!")
    if end > total_size:
        raise ValueError(
            "Invalid end {0}, greater than sequence length {1} of {2}!".format(end, total_size, chrom))

    index = open(index_file, "rb")
    fasta = open(fasta_file)
    seq = self._read(index, fasta, start, end, line_size)
    index.close()
    fasta.close()

    if strand == "-":
        seq = rc(seq)
    return seq
python
Retrieve a sequence
[ "Retrieve", "a", "sequence" ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/genome_index.py#L498-L532
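A usage sketch, assuming a GenomeIndex constructed from an existing index directory (the constructor signature is an assumption here):

g = GenomeIndex("/path/to/index_dir")             # hypothetical index directory
seq = g.get_sequence("chr1", 1000, 1050)          # forward strand
rc_seq = g.get_sequence("chr1", 1000, 1050, "-")  # reverse complement via rc()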
vanheeringen-lab/gimmemotifs
gimmemotifs/genome_index.py
GenomeIndex.get_size
def get_size(self, chrom=None):
    """Return the sizes of all sequences in the index, or the size of
    chrom if specified as an optional argument"""
    if len(self.size) == 0:
        raise LookupError("no chromosomes in index, is the index correct?")

    if chrom:
        if chrom in self.size:
            return self.size[chrom]
        else:
            raise KeyError("chromosome {} not in index".format(chrom))

    total = 0
    for size in self.size.values():
        total += size
    return total
python
Return the sizes of all sequences in the index, or the size of chrom if specified as an optional argument
[ "Return", "the", "sizes", "of", "all", "sequences", "in", "the", "index", "or", "the", "size", "of", "chrom", "if", "specified", "as", "an", "optional", "argument" ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/genome_index.py#L538-L553
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
get_tool
def get_tool(name):
    """
    Returns an instance of a specific tool.

    Parameters
    ----------
    name : str
        Name of the tool (case-insensitive).

    Returns
    -------
    tool : MotifProgram instance
    """
    tool = name.lower()
    if tool not in __tools__:
        raise ValueError("Tool {0} not found!\n".format(name))

    t = __tools__[tool]()
    if not t.is_installed():
        sys.stderr.write("Tool {0} not installed!\n".format(tool))
    if not t.is_configured():
        sys.stderr.write("Tool {0} not configured!\n".format(tool))
    return t
python
Returns an instance of a specific tool. Parameters ---------- name : str Name of the tool (case-insensitive). Returns ------- tool : MotifProgram instance
[ "Returns", "an", "instance", "of", "a", "specific", "tool", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L33-L58
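A usage sketch for get_tool. Per the record, the name lookup is case-insensitive, an unknown tool name raises ValueError, and a known-but-uninstalled tool only triggers warnings on stderr rather than an exception.

from gimmemotifs.tools import get_tool

t = get_tool("hOmEr")      # lookup is case-insensitive
if not t.is_installed():   # missing binaries warn on stderr, they do not raise here
    print("Homer binary not available")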
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
locate_tool
def locate_tool(name, verbose=True): """ Returns the binary of a tool. Parameters ---------- name : str Name of the tool (case-insensitive). Returns ------- tool_bin : str Binary of tool. """ m = get_tool(name) tool_bin = which(m.cmd) if tool_bin: if verbose: print("Found {} in {}".format(m.name, tool_bin)) return tool_bin else: print("Couldn't find {}".format(m.name))
python
def locate_tool(name, verbose=True): """ Returns the binary of a tool. Parameters ---------- name : str Name of the tool (case-insensitive). Returns ------- tool_bin : str Binary of tool. """ m = get_tool(name) tool_bin = which(m.cmd) if tool_bin: if verbose: print("Found {} in {}".format(m.name, tool_bin)) return tool_bin else: print("Couldn't find {}".format(m.name))
[ "def", "locate_tool", "(", "name", ",", "verbose", "=", "True", ")", ":", "m", "=", "get_tool", "(", "name", ")", "tool_bin", "=", "which", "(", "m", ".", "cmd", ")", "if", "tool_bin", ":", "if", "verbose", ":", "print", "(", "\"Found {} in {}\"", ".", "format", "(", "m", ".", "name", ",", "tool_bin", ")", ")", "return", "tool_bin", "else", ":", "print", "(", "\"Couldn't find {}\"", ".", "format", "(", "m", ".", "name", ")", ")" ]
Returns the binary of a tool. Parameters ---------- name : str Name of the tool (case-insensitive). Returns ------- tool_bin : str Binary of tool.
[ "Returns", "the", "binary", "of", "a", "tool", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L60-L81
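A usage sketch for locate_tool. With verbose=True it prints where the binary was found; when `which` cannot locate the binary, the function prints a message and implicitly returns None.

from gimmemotifs.tools import locate_tool

path = locate_tool("BioProspector", verbose=False)
if path is None:
    print("BioProspector not on PATH")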
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
MotifProgram.bin
def bin(self): """ Get the command used to run the tool. Returns ------- command : str The tool system command. """ if self.local_bin: return self.local_bin else: return self.config.bin(self.name)
python
def bin(self): """ Get the command used to run the tool. Returns ------- command : str The tool system command. """ if self.local_bin: return self.local_bin else: return self.config.bin(self.name)
[ "def", "bin", "(", "self", ")", ":", "if", "self", ".", "local_bin", ":", "return", "self", ".", "local_bin", "else", ":", "return", "self", ".", "config", ".", "bin", "(", "self", ".", "name", ")" ]
Get the command used to run the tool. Returns ------- command : str The tool system command.
[ "Get", "the", "command", "used", "to", "run", "the", "tool", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L93-L105
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
MotifProgram.is_installed
def is_installed(self): """ Check if the tool is installed. Returns ------- is_installed : bool True if the tool is installed. """ return self.is_configured() and os.access(self.bin(), os.X_OK)
python
def is_installed(self): """ Check if the tool is installed. Returns ------- is_installed : bool True if the tool is installed. """ return self.is_configured() and os.access(self.bin(), os.X_OK)
[ "def", "is_installed", "(", "self", ")", ":", "return", "self", ".", "is_configured", "(", ")", "and", "os", ".", "access", "(", "self", ".", "bin", "(", ")", ",", "os", ".", "X_OK", ")" ]
Check if the tool is installed. Returns ------- is_installed : bool True if the tool is installed.
[ "Check", "if", "the", "tool", "is", "installed", ".", "Returns", "-------", "is_installed", ":", "bool", "True", "if", "the", "tool", "is", "installed", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L129-L138
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
MotifProgram.run
def run(self, fastafile, params=None, tmp=None): """ Run the tool and predict motifs from a FASTA file. Parameters ---------- fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. tmp : str, optional Directory to use for creation of temporary files. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ if not self.is_configured(): raise ValueError("%s is not configured" % self.name) if not self.is_installed(): raise ValueError("%s is not installed or not correctly configured" % self.name) self.tmpdir = mkdtemp(prefix="{0}.".format(self.name), dir=tmp) fastafile = os.path.abspath(fastafile) try: return self._run_program(self.bin(), fastafile, params) except KeyboardInterrupt: return ([], "Killed", "Killed")
python
def run(self, fastafile, params=None, tmp=None): """ Run the tool and predict motifs from a FASTA file. Parameters ---------- fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. tmp : str, optional Directory to use for creation of temporary files. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ if not self.is_configured(): raise ValueError("%s is not configured" % self.name) if not self.is_installed(): raise ValueError("%s is not installed or not correctly configured" % self.name) self.tmpdir = mkdtemp(prefix="{0}.".format(self.name), dir=tmp) fastafile = os.path.abspath(fastafile) try: return self._run_program(self.bin(), fastafile, params) except KeyboardInterrupt: return ([], "Killed", "Killed")
[ "def", "run", "(", "self", ",", "fastafile", ",", "params", "=", "None", ",", "tmp", "=", "None", ")", ":", "if", "not", "self", ".", "is_configured", "(", ")", ":", "raise", "ValueError", "(", "\"%s is not configured\"", "%", "self", ".", "name", ")", "if", "not", "self", ".", "is_installed", "(", ")", ":", "raise", "ValueError", "(", "\"%s is not installed or not correctly configured\"", "%", "self", ".", "name", ")", "self", ".", "tmpdir", "=", "mkdtemp", "(", "prefix", "=", "\"{0}.\"", ".", "format", "(", "self", ".", "name", ")", ",", "dir", "=", "tmp", ")", "fastafile", "=", "os", ".", "path", ".", "abspath", "(", "fastafile", ")", "try", ":", "return", "self", ".", "_run_program", "(", "self", ".", "bin", "(", ")", ",", "fastafile", ",", "params", ")", "except", "KeyboardInterrupt", ":", "return", "(", "[", "]", ",", "\"Killed\"", ",", "\"Killed\"", ")" ]
Run the tool and predict motifs from a FASTA file. Parameters ---------- fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. tmp : str, optional Directory to use for creation of temporary files. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool.
[ "Run", "the", "tool", "and", "predict", "motifs", "from", "a", "FASTA", "file", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L140-L179
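A usage sketch for MotifProgram.run, the central prediction entry point. File names and the parameter dict are placeholders; per the record, run() makes a per-tool temporary directory, returns (motifs, stdout, stderr), and raises ValueError when the tool is not configured or not installed. The "single" key is taken from XXmotif's recorded defaults.

from gimmemotifs.tools import get_tool

xxmotif = get_tool("XXmotif")
motifs, stdout, stderr = xxmotif.run("peaks.fa",
                                     params={"single": True},  # single-stranded: skip --revcomp
                                     tmp="/scratch")
print(len(motifs), "motifs predicted")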
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
XXmotif._parse_params
def _parse_params(self, params=None): """ Parse parameters. Combine default and user-defined parameters. """ prm = self.default_params.copy() if params is not None: prm.update(params) if prm["background"]: # Absolute path, just to be sure prm["background"] = os.path.abspath(prm["background"]) prm["background"] = " --negSet {0} ".format( prm["background"]) prm["strand"] = "" if not prm["single"]: prm["strand"] = " --revcomp " return prm
python
def _parse_params(self, params=None): """ Parse parameters. Combine default and user-defined parameters. """ prm = self.default_params.copy() if params is not None: prm.update(params) if prm["background"]: # Absolute path, just to be sure prm["background"] = os.path.abspath(prm["background"]) prm["background"] = " --negSet {0} ".format( prm["background"]) prm["strand"] = "" if not prm["single"]: prm["strand"] = " --revcomp " return prm
[ "def", "_parse_params", "(", "self", ",", "params", "=", "None", ")", ":", "prm", "=", "self", ".", "default_params", ".", "copy", "(", ")", "if", "params", "is", "not", "None", ":", "prm", ".", "update", "(", "params", ")", "if", "prm", "[", "\"background\"", "]", ":", "# Absolute path, just to be sure", "prm", "[", "\"background\"", "]", "=", "os", ".", "path", ".", "abspath", "(", "prm", "[", "\"background\"", "]", ")", "prm", "[", "\"background\"", "]", "=", "\" --negSet {0} \"", ".", "format", "(", "prm", "[", "\"background\"", "]", ")", "prm", "[", "\"strand\"", "]", "=", "\"\"", "if", "not", "prm", "[", "\"single\"", "]", ":", "prm", "[", "\"strand\"", "]", "=", "\" --revcomp \"", "return", "prm" ]
Parse parameters. Combine default and user-defined parameters.
[ "Parse", "parameters", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L203-L223
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
XXmotif._run_program
def _run_program(self, bin, fastafile, params=None): """ Run XXmotif and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ params = self._parse_params(params) outfile = os.path.join( self.tmpdir, os.path.basename(fastafile.replace(".fa", ".pwm"))) stdout = "" stderr = "" cmd = "%s %s %s --localization --batch %s %s" % ( bin, self.tmpdir, fastafile, params["background"], params["strand"], ) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) out,err = p.communicate() stdout += out.decode() stderr += err.decode() motifs = [] if os.path.exists(outfile): motifs = read_motifs(outfile, fmt="xxmotif") for m in motifs: m.id = "{0}_{1}".format(self.name, m.id) else: stdout += "\nMotif file {0} not found!\n".format(outfile) stderr += "\nMotif file {0} not found!\n".format(outfile) return motifs, stdout, stderr
python
def _run_program(self, bin, fastafile, params=None): """ Run XXmotif and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ params = self._parse_params(params) outfile = os.path.join( self.tmpdir, os.path.basename(fastafile.replace(".fa", ".pwm"))) stdout = "" stderr = "" cmd = "%s %s %s --localization --batch %s %s" % ( bin, self.tmpdir, fastafile, params["background"], params["strand"], ) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) out,err = p.communicate() stdout += out.decode() stderr += err.decode() motifs = [] if os.path.exists(outfile): motifs = read_motifs(outfile, fmt="xxmotif") for m in motifs: m.id = "{0}_{1}".format(self.name, m.id) else: stdout += "\nMotif file {0} not found!\n".format(outfile) stderr += "\nMotif file {0} not found!\n".format(outfile) return motifs, stdout, stderr
[ "def", "_run_program", "(", "self", ",", "bin", ",", "fastafile", ",", "params", "=", "None", ")", ":", "params", "=", "self", ".", "_parse_params", "(", "params", ")", "outfile", "=", "os", ".", "path", ".", "join", "(", "self", ".", "tmpdir", ",", "os", ".", "path", ".", "basename", "(", "fastafile", ".", "replace", "(", "\".fa\"", ",", "\".pwm\"", ")", ")", ")", "stdout", "=", "\"\"", "stderr", "=", "\"\"", "cmd", "=", "\"%s %s %s --localization --batch %s %s\"", "%", "(", "bin", ",", "self", ".", "tmpdir", ",", "fastafile", ",", "params", "[", "\"background\"", "]", ",", "params", "[", "\"strand\"", "]", ",", ")", "p", "=", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "out", ",", "err", "=", "p", ".", "communicate", "(", ")", "stdout", "+=", "out", ".", "decode", "(", ")", "stderr", "+=", "err", ".", "decode", "(", ")", "motifs", "=", "[", "]", "if", "os", ".", "path", ".", "exists", "(", "outfile", ")", ":", "motifs", "=", "read_motifs", "(", "outfile", ",", "fmt", "=", "\"xxmotif\"", ")", "for", "m", "in", "motifs", ":", "m", ".", "id", "=", "\"{0}_{1}\"", ".", "format", "(", "self", ".", "name", ",", "m", ".", "id", ")", "else", ":", "stdout", "+=", "\"\\nMotif file {0} not found!\\n\"", ".", "format", "(", "outfile", ")", "stderr", "+=", "\"\\nMotif file {0} not found!\\n\"", ".", "format", "(", "outfile", ")", "return", "motifs", ",", "stdout", ",", "stderr" ]
Run XXmotif and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool.
[ "Run", "XXmotif", "and", "predict", "motifs", "from", "a", "FASTA", "file", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L225-L284
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Homer._parse_params
def _parse_params(self, params=None): """ Parse parameters. Combine default and user-defined parameters. """ prm = self.default_params.copy() if params is not None: prm.update(params) # Background file is essential! if not prm["background"]: print("Background file needed!") sys.exit() prm["background"] = os.path.abspath(prm["background"]) prm["strand"] = "" if prm["single"]: prm["strand"] = " -strand + " return prm
python
def _parse_params(self, params=None): """ Parse parameters. Combine default and user-defined parameters. """ prm = self.default_params.copy() if params is not None: prm.update(params) # Background file is essential! if not prm["background"]: print("Background file needed!") sys.exit() prm["background"] = os.path.abspath(prm["background"]) prm["strand"] = "" if prm["single"]: prm["strand"] = " -strand + " return prm
[ "def", "_parse_params", "(", "self", ",", "params", "=", "None", ")", ":", "prm", "=", "self", ".", "default_params", ".", "copy", "(", ")", "if", "params", "is", "not", "None", ":", "prm", ".", "update", "(", "params", ")", "# Background file is essential!", "if", "not", "prm", "[", "\"background\"", "]", ":", "print", "(", "\"Background file needed!\"", ")", "sys", ".", "exit", "(", ")", "prm", "[", "\"background\"", "]", "=", "os", ".", "path", ".", "abspath", "(", "prm", "[", "\"background\"", "]", ")", "prm", "[", "\"strand\"", "]", "=", "\"\"", "if", "prm", "[", "\"single\"", "]", ":", "prm", "[", "\"strand\"", "]", "=", "\" -strand + \"", "return", "prm" ]
Parse parameters. Combine default and user-defined parameters.
[ "Parse", "parameters", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L305-L326
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Homer._run_program
def _run_program(self, bin, fastafile, params=None): """ Run Homer and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ params = self._parse_params(params) outfile = NamedTemporaryFile( mode="w", dir=self.tmpdir, prefix= "homer_w{}.".format(params["width"]) ).name cmd = "%s denovo -i %s -b %s -len %s -S %s %s -o %s -p 8" % ( bin, fastafile, params["background"], params["width"], params["number"], params["strand"], outfile) stderr = "" stdout = "Running command:\n{}\n".format(cmd) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, cwd=self.tmpdir) out,err = p.communicate() stdout += out.decode() stderr += err.decode() motifs = [] if os.path.exists(outfile): motifs = read_motifs(outfile, fmt="pwm") for i, m in enumerate(motifs): m.id = "{}_{}_{}".format(self.name, params["width"], i + 1) return motifs, stdout, stderr
python
def _run_program(self, bin, fastafile, params=None): """ Run Homer and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ params = self._parse_params(params) outfile = NamedTemporaryFile( mode="w", dir=self.tmpdir, prefix= "homer_w{}.".format(params["width"]) ).name cmd = "%s denovo -i %s -b %s -len %s -S %s %s -o %s -p 8" % ( bin, fastafile, params["background"], params["width"], params["number"], params["strand"], outfile) stderr = "" stdout = "Running command:\n{}\n".format(cmd) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, cwd=self.tmpdir) out,err = p.communicate() stdout += out.decode() stderr += err.decode() motifs = [] if os.path.exists(outfile): motifs = read_motifs(outfile, fmt="pwm") for i, m in enumerate(motifs): m.id = "{}_{}_{}".format(self.name, params["width"], i + 1) return motifs, stdout, stderr
[ "def", "_run_program", "(", "self", ",", "bin", ",", "fastafile", ",", "params", "=", "None", ")", ":", "params", "=", "self", ".", "_parse_params", "(", "params", ")", "outfile", "=", "NamedTemporaryFile", "(", "mode", "=", "\"w\"", ",", "dir", "=", "self", ".", "tmpdir", ",", "prefix", "=", "\"homer_w{}.\"", ".", "format", "(", "params", "[", "\"width\"", "]", ")", ")", ".", "name", "cmd", "=", "\"%s denovo -i %s -b %s -len %s -S %s %s -o %s -p 8\"", "%", "(", "bin", ",", "fastafile", ",", "params", "[", "\"background\"", "]", ",", "params", "[", "\"width\"", "]", ",", "params", "[", "\"number\"", "]", ",", "params", "[", "\"strand\"", "]", ",", "outfile", ")", "stderr", "=", "\"\"", "stdout", "=", "\"Running command:\\n{}\\n\"", ".", "format", "(", "cmd", ")", "p", "=", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ",", "cwd", "=", "self", ".", "tmpdir", ")", "out", ",", "err", "=", "p", ".", "communicate", "(", ")", "stdout", "+=", "out", ".", "decode", "(", ")", "stderr", "+=", "err", ".", "decode", "(", ")", "motifs", "=", "[", "]", "if", "os", ".", "path", ".", "exists", "(", "outfile", ")", ":", "motifs", "=", "read_motifs", "(", "outfile", ",", "fmt", "=", "\"pwm\"", ")", "for", "i", ",", "m", "in", "enumerate", "(", "motifs", ")", ":", "m", ".", "id", "=", "\"{}_{}_{}\"", ".", "format", "(", "self", ".", "name", ",", "params", "[", "\"width\"", "]", ",", "i", "+", "1", ")", "return", "motifs", ",", "stdout", ",", "stderr" ]
Run Homer and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool.
[ "Run", "Homer", "and", "predict", "motifs", "from", "a", "FASTA", "file", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L328-L387
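A usage sketch for the Homer wrapper. Homer's recorded parameter parser exits when no background file is supplied, so a background FASTA is mandatory; all file names below are placeholders, and "width"/"number" are the parameter keys the recorded command line consumes.

from gimmemotifs.tools import get_tool

homer = get_tool("Homer")
motifs, stdout, stderr = homer.run(
    "peaks.fa",
    params={"background": "shuffled_peaks.fa", "width": 10, "number": 5},
)
print(len(motifs), "motifs predicted")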
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
BioProspector.parse
def parse(self, fo): """ Convert BioProspector output to motifs Parameters ---------- fo : file-like File object containing BioProspector output. Returns ------- motifs : list List of Motif instances. """ motifs = [] p = re.compile(r'^\d+\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)') pwm = [] motif_id = "" for line in fo.readlines(): if line.startswith("Motif #"): if pwm: m = Motif(pwm) m.id = "BioProspector_w%s_%s" % (len(m), motif_id) motifs.append(m) motif_id = line.split("#")[1].split(":")[0] pwm = [] else: m = p.search(line) if m: pwm.append([float(m.group(x))/100.0 for x in range(1,5)]) if pwm: m = Motif(pwm) m.id = "BioProspector_w%s_%s" % (len(m), motif_id) motifs.append(m) return motifs
python
def parse(self, fo): """ Convert BioProspector output to motifs Parameters ---------- fo : file-like File object containing BioProspector output. Returns ------- motifs : list List of Motif instances. """ motifs = [] p = re.compile(r'^\d+\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)') pwm = [] motif_id = "" for line in fo.readlines(): if line.startswith("Motif #"): if pwm: m = Motif(pwm) m.id = "BioProspector_w%s_%s" % (len(m), motif_id) motifs.append(m) motif_id = line.split("#")[1].split(":")[0] pwm = [] else: m = p.search(line) if m: pwm.append([float(m.group(x))/100.0 for x in range(1,5)]) if pwm: m = Motif(pwm) m.id = "BioProspector_w%s_%s" % (len(m), motif_id) motifs.append(m) return motifs
[ "def", "parse", "(", "self", ",", "fo", ")", ":", "motifs", "=", "[", "]", "p", "=", "re", ".", "compile", "(", "r'^\\d+\\s+(\\d+\\.\\d+)\\s+(\\d+\\.\\d+)\\s+(\\d+\\.\\d+)\\s+(\\d+\\.\\d+)'", ")", "pwm", "=", "[", "]", "motif_id", "=", "\"\"", "for", "line", "in", "fo", ".", "readlines", "(", ")", ":", "if", "line", ".", "startswith", "(", "\"Motif #\"", ")", ":", "if", "pwm", ":", "m", "=", "Motif", "(", "pwm", ")", "m", ".", "id", "=", "\"BioProspector_w%s_%s\"", "%", "(", "len", "(", "m", ")", ",", "motif_id", ")", "motifs", ".", "append", "(", "m", ")", "motif_id", "=", "line", ".", "split", "(", "\"#\"", ")", "[", "1", "]", ".", "split", "(", "\":\"", ")", "[", "0", "]", "pwm", "=", "[", "]", "else", ":", "m", "=", "p", ".", "search", "(", "line", ")", "if", "m", ":", "pwm", ".", "append", "(", "[", "float", "(", "m", ".", "group", "(", "x", ")", ")", "/", "100.0", "for", "x", "in", "range", "(", "1", ",", "5", ")", "]", ")", "if", "pwm", ":", "m", "=", "Motif", "(", "pwm", ")", "m", ".", "id", "=", "\"BioProspector_w%s_%s\"", "%", "(", "len", "(", "m", ")", ",", "motif_id", ")", "motifs", ".", "append", "(", "m", ")", "return", "motifs" ]
Convert BioProspector output to motifs Parameters ---------- fo : file-like File object containing BioProspector output. Returns ------- motifs : list List of Motif instances.
[ "Convert", "BioProspector", "output", "to", "motifs", "Parameters", "----------", "fo", ":", "file", "-", "like", "File", "object", "containing", "BioProspector", "output", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L488-L524
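A self-contained sketch of BioProspector.parse fed a hand-written snippet. The two matrix rows are fabricated to match the parser's regex (a position index followed by four percentages); real BioProspector output carries more columns and headers.

from io import StringIO
from gimmemotifs.tools import BioProspector

fake = StringIO(
    "Motif #1: placeholder header\n"
    "1 90.0 5.0 2.5 2.5\n"
    "2 25.0 25.0 25.0 25.0\n"
)
motifs = BioProspector().parse(fake)
# Percentages are rescaled to 0-1; expected id: BioProspector_w2_1
print(motifs[0].id)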
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Hms._run_program
def _run_program(self, bin, fastafile, params=None): """ Run HMS and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ params = self._parse_params(params) default_params = {"width":10} if params is not None: default_params.update(params) fgfile, summitfile, outfile = self._prepare_files(fastafile) current_path = os.getcwd() os.chdir(self.tmpdir) cmd = "{} -i {} -w {} -dna 4 -iteration 50 -chain 20 -seqprop -0.1 -strand 2 -peaklocation {} -t_dof 3 -dep 2".format( bin, fgfile, params['width'], summitfile) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) stdout,stderr = p.communicate() os.chdir(current_path) motifs = [] if os.path.exists(outfile): with open(outfile) as f: motifs = self.parse(f) for i,m in enumerate(motifs): m.id = "HMS_w{}_{}".format(params['width'], i + 1) return motifs, stdout, stderr
python
def _run_program(self, bin, fastafile, params=None): """ Run HMS and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ params = self._parse_params(params) default_params = {"width":10} if params is not None: default_params.update(params) fgfile, summitfile, outfile = self._prepare_files(fastafile) current_path = os.getcwd() os.chdir(self.tmpdir) cmd = "{} -i {} -w {} -dna 4 -iteration 50 -chain 20 -seqprop -0.1 -strand 2 -peaklocation {} -t_dof 3 -dep 2".format( bin, fgfile, params['width'], summitfile) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) stdout,stderr = p.communicate() os.chdir(current_path) motifs = [] if os.path.exists(outfile): with open(outfile) as f: motifs = self.parse(f) for i,m in enumerate(motifs): m.id = "HMS_w{}_{}".format(params['width'], i + 1) return motifs, stdout, stderr
[ "def", "_run_program", "(", "self", ",", "bin", ",", "fastafile", ",", "params", "=", "None", ")", ":", "params", "=", "self", ".", "_parse_params", "(", "params", ")", "default_params", "=", "{", "\"width\"", ":", "10", "}", "if", "params", "is", "not", "None", ":", "default_params", ".", "update", "(", "params", ")", "fgfile", ",", "summitfile", ",", "outfile", "=", "self", ".", "_prepare_files", "(", "fastafile", ")", "current_path", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "self", ".", "tmpdir", ")", "cmd", "=", "\"{} -i {} -w {} -dna 4 -iteration 50 -chain 20 -seqprop -0.1 -strand 2 -peaklocation {} -t_dof 3 -dep 2\"", ".", "format", "(", "bin", ",", "fgfile", ",", "params", "[", "'width'", "]", ",", "summitfile", ")", "p", "=", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "stdout", ",", "stderr", "=", "p", ".", "communicate", "(", ")", "os", ".", "chdir", "(", "current_path", ")", "motifs", "=", "[", "]", "if", "os", ".", "path", ".", "exists", "(", "outfile", ")", ":", "with", "open", "(", "outfile", ")", "as", "f", ":", "motifs", "=", "self", ".", "parse", "(", "f", ")", "for", "i", ",", "m", "in", "enumerate", "(", "motifs", ")", ":", "m", ".", "id", "=", "\"HMS_w{}_{}\"", ".", "format", "(", "params", "[", "'width'", "]", ",", "i", "+", "1", ")", "return", "motifs", ",", "stdout", ",", "stderr" ]
Run HMS and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool.
[ "Run", "HMS", "and", "predict", "motifs", "from", "a", "FASTA", "file", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L575-L631
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Hms.parse
def parse(self, fo): """ Convert HMS output to motifs Parameters ---------- fo : file-like File object containing HMS output. Returns ------- motifs : list List of Motif instances. """ motifs = [] m = [[float(x) for x in fo.readline().strip().split(" ")] for i in range(4)] matrix = [[m[0][i], m[1][i],m[2][i],m[3][i]] for i in range(len(m[0]))] motifs = [Motif(matrix)] motifs[-1].id = self.name return motifs
python
def parse(self, fo): """ Convert HMS output to motifs Parameters ---------- fo : file-like File object containing HMS output. Returns ------- motifs : list List of Motif instances. """ motifs = [] m = [[float(x) for x in fo.readline().strip().split(" ")] for i in range(4)] matrix = [[m[0][i], m[1][i],m[2][i],m[3][i]] for i in range(len(m[0]))] motifs = [Motif(matrix)] motifs[-1].id = self.name return motifs
[ "def", "parse", "(", "self", ",", "fo", ")", ":", "motifs", "=", "[", "]", "m", "=", "[", "[", "float", "(", "x", ")", "for", "x", "in", "fo", ".", "readline", "(", ")", ".", "strip", "(", ")", ".", "split", "(", "\" \"", ")", "]", "for", "i", "in", "range", "(", "4", ")", "]", "matrix", "=", "[", "[", "m", "[", "0", "]", "[", "i", "]", ",", "m", "[", "1", "]", "[", "i", "]", ",", "m", "[", "2", "]", "[", "i", "]", ",", "m", "[", "3", "]", "[", "i", "]", "]", "for", "i", "in", "range", "(", "len", "(", "m", "[", "0", "]", ")", ")", "]", "motifs", "=", "[", "Motif", "(", "matrix", ")", "]", "motifs", "[", "-", "1", "]", ".", "id", "=", "self", ".", "name", "return", "motifs" ]
Convert HMS output to motifs Parameters ---------- fo : file-like File object containing HMS output. Returns ------- motifs : list List of Motif instances.
[ "Convert", "HMS", "output", "to", "motifs", "Parameters", "----------", "fo", ":", "file", "-", "like", "File", "object", "containing", "HMS", "output", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L633-L653
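A self-contained sketch of Hms.parse. The input is four single-space-separated rows, one per nucleotide (A, C, G, T), with one column per motif position; parse() transposes them into a position-by-nucleotide matrix.

from io import StringIO
from gimmemotifs.tools import Hms

fo = StringIO(
    "0.7 0.1\n"  # A row, one value per position
    "0.1 0.1\n"  # C
    "0.1 0.1\n"  # G
    "0.1 0.7\n"  # T
)
motifs = Hms().parse(fo)
# The single parsed motif gets the tool's name attribute as its id,
# and its matrix is [[0.7, 0.1, 0.1, 0.1], [0.1, 0.1, 0.1, 0.7]].
print(motifs[0].id)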
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Amd._run_program
def _run_program(self, bin, fastafile, params=None): """ Run AMD and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ params = self._parse_params(params) fgfile = os.path.join(self.tmpdir, "AMD.in.fa") outfile = fgfile + ".Matrix" shutil.copy(fastafile, fgfile) current_path = os.getcwd() os.chdir(self.tmpdir) stdout = "" stderr = "" cmd = "%s -F %s -B %s" % ( bin, fgfile, params["background"], ) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) out,err = p.communicate() stdout += out.decode() stderr += err.decode() os.chdir(current_path) motifs = [] if os.path.exists(outfile): f = open(outfile) motifs = self.parse(f) f.close() return motifs, stdout, stderr
python
def _run_program(self, bin, fastafile, params=None): """ Run AMD and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ params = self._parse_params(params) fgfile = os.path.join(self.tmpdir, "AMD.in.fa") outfile = fgfile + ".Matrix" shutil.copy(fastafile, fgfile) current_path = os.getcwd() os.chdir(self.tmpdir) stdout = "" stderr = "" cmd = "%s -F %s -B %s" % ( bin, fgfile, params["background"], ) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) out,err = p.communicate() stdout += out.decode() stderr += err.decode() os.chdir(current_path) motifs = [] if os.path.exists(outfile): f = open(outfile) motifs = self.parse(f) f.close() return motifs, stdout, stderr
[ "def", "_run_program", "(", "self", ",", "bin", ",", "fastafile", ",", "params", "=", "None", ")", ":", "params", "=", "self", ".", "_parse_params", "(", "params", ")", "fgfile", "=", "os", ".", "path", ".", "join", "(", "self", ".", "tmpdir", ",", "\"AMD.in.fa\"", ")", "outfile", "=", "fgfile", "+", "\".Matrix\"", "shutil", ".", "copy", "(", "fastafile", ",", "fgfile", ")", "current_path", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "self", ".", "tmpdir", ")", "stdout", "=", "\"\"", "stderr", "=", "\"\"", "cmd", "=", "\"%s -F %s -B %s\"", "%", "(", "bin", ",", "fgfile", ",", "params", "[", "\"background\"", "]", ",", ")", "p", "=", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "out", ",", "err", "=", "p", ".", "communicate", "(", ")", "stdout", "+=", "out", ".", "decode", "(", ")", "stderr", "+=", "err", ".", "decode", "(", ")", "os", ".", "chdir", "(", "current_path", ")", "motifs", "=", "[", "]", "if", "os", ".", "path", ".", "exists", "(", "outfile", ")", ":", "f", "=", "open", "(", "outfile", ")", "motifs", "=", "self", ".", "parse", "(", "f", ")", "f", ".", "close", "(", ")", "return", "motifs", ",", "stdout", ",", "stderr" ]
Run AMD and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool.
[ "Run", "AMD", "and", "predict", "motifs", "from", "a", "FASTA", "file", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L688-L744
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Amd.parse
def parse(self, fo): """ Convert AMD output to motifs Parameters ---------- fo : file-like File object containing AMD output. Returns ------- motifs : list List of Motif instances. """ motifs = [] #160: 112 CACGTGC 7.25 chr14:32308489-32308689 p = re.compile(r'\d+\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)') wm = [] name = "" for line in fo.readlines(): if line.startswith("Motif") and line.strip().endswith(":"): if name: motifs.append(Motif(wm)) motifs[-1].id = name name = "" wm = [] name = "%s_%s" % (self.name, line.split(":")[0]) else: m = p.search(line) if m: wm.append([float(m.group(x)) for x in range(1,5)]) motifs.append(Motif(wm)) motifs[-1].id = name return motifs
python
def parse(self, fo): """ Convert AMD output to motifs Parameters ---------- fo : file-like File object containing AMD output. Returns ------- motifs : list List of Motif instances. """ motifs = [] #160: 112 CACGTGC 7.25 chr14:32308489-32308689 p = re.compile(r'\d+\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)\s+([\d.]+)') wm = [] name = "" for line in fo.readlines(): if line.startswith("Motif") and line.strip().endswith(":"): if name: motifs.append(Motif(wm)) motifs[-1].id = name name = "" wm = [] name = "%s_%s" % (self.name, line.split(":")[0]) else: m = p.search(line) if m: wm.append([float(m.group(x)) for x in range(1,5)]) motifs.append(Motif(wm)) motifs[-1].id = name return motifs
[ "def", "parse", "(", "self", ",", "fo", ")", ":", "motifs", "=", "[", "]", "#160: 112 CACGTGC 7.25 chr14:32308489-32308689", "p", "=", "re", ".", "compile", "(", "r'\\d+\\s+([\\d.]+)\\s+([\\d.]+)\\s+([\\d.]+)\\s+([\\d.]+)'", ")", "wm", "=", "[", "]", "name", "=", "\"\"", "for", "line", "in", "fo", ".", "readlines", "(", ")", ":", "if", "line", ".", "startswith", "(", "\"Motif\"", ")", "and", "line", ".", "strip", "(", ")", ".", "endswith", "(", "\":\"", ")", ":", "if", "name", ":", "motifs", ".", "append", "(", "Motif", "(", "wm", ")", ")", "motifs", "[", "-", "1", "]", ".", "id", "=", "name", "name", "=", "\"\"", "wm", "=", "[", "]", "name", "=", "\"%s_%s\"", "%", "(", "self", ".", "name", ",", "line", ".", "split", "(", "\":\"", ")", "[", "0", "]", ")", "else", ":", "m", "=", "p", ".", "search", "(", "line", ")", "if", "m", ":", "wm", ".", "append", "(", "[", "float", "(", "m", ".", "group", "(", "x", ")", ")", "for", "x", "in", "range", "(", "1", ",", "5", ")", "]", ")", "motifs", ".", "append", "(", "Motif", "(", "wm", ")", ")", "motifs", "[", "-", "1", "]", ".", "id", "=", "name", "return", "motifs" ]
Convert AMD output to motifs Parameters ---------- fo : file-like File object containing AMD output. Returns ------- motifs : list List of Motif instances.
[ "Convert", "AMD", "output", "to", "motifs", "Parameters", "----------", "fo", ":", "file", "-", "like", "File", "object", "containing", "AMD", "output", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L746-L781
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Improbizer.parse
def parse(self, fo): """ Convert Improbizer output to motifs Parameters ---------- fo : file-like File object containing Improbizer output. Returns ------- motifs : list List of Motif instances. """ motifs = [] p = re.compile(r'\d+\s+@\s+\d+\.\d+\s+sd\s+\d+\.\d+\s+(\w+)$') line = fo.readline() while line and line.find("Color") == -1: m = p.search(line) if m: pwm_data = {} for i in range(4): vals = [x.strip() for x in fo.readline().strip().split(" ") if x] pwm_data[vals[0].upper()] = vals[1:] pwm = [] for i in range(len(pwm_data["A"])): pwm.append([float(pwm_data[x][i]) for x in ["A","C","G","T"]]) motifs.append(Motif(pwm)) motifs[-1].id = "%s_%s" % (self.name, m.group(1)) line = fo.readline() return motifs
python
def parse(self, fo): """ Convert Improbizer output to motifs Parameters ---------- fo : file-like File object containing Improbizer output. Returns ------- motifs : list List of Motif instances. """ motifs = [] p = re.compile(r'\d+\s+@\s+\d+\.\d+\s+sd\s+\d+\.\d+\s+(\w+)$') line = fo.readline() while line and line.find("Color") == -1: m = p.search(line) if m: pwm_data = {} for i in range(4): vals = [x.strip() for x in fo.readline().strip().split(" ") if x] pwm_data[vals[0].upper()] = vals[1:] pwm = [] for i in range(len(pwm_data["A"])): pwm.append([float(pwm_data[x][i]) for x in ["A","C","G","T"]]) motifs.append(Motif(pwm)) motifs[-1].id = "%s_%s" % (self.name, m.group(1)) line = fo.readline() return motifs
[ "def", "parse", "(", "self", ",", "fo", ")", ":", "motifs", "=", "[", "]", "p", "=", "re", ".", "compile", "(", "r'\\d+\\s+@\\s+\\d+\\.\\d+\\s+sd\\s+\\d+\\.\\d+\\s+(\\w+)$'", ")", "line", "=", "fo", ".", "readline", "(", ")", "while", "line", "and", "line", ".", "find", "(", "\"Color\"", ")", "==", "-", "1", ":", "m", "=", "p", ".", "search", "(", "line", ")", "if", "m", ":", "pwm_data", "=", "{", "}", "for", "i", "in", "range", "(", "4", ")", ":", "vals", "=", "[", "x", ".", "strip", "(", ")", "for", "x", "in", "fo", ".", "readline", "(", ")", ".", "strip", "(", ")", ".", "split", "(", "\" \"", ")", "if", "x", "]", "pwm_data", "[", "vals", "[", "0", "]", ".", "upper", "(", ")", "]", "=", "vals", "[", "1", ":", "]", "pwm", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "pwm_data", "[", "\"A\"", "]", ")", ")", ":", "pwm", ".", "append", "(", "[", "float", "(", "pwm_data", "[", "x", "]", "[", "i", "]", ")", "for", "x", "in", "[", "\"A\"", ",", "\"C\"", ",", "\"G\"", ",", "\"T\"", "]", "]", ")", "motifs", ".", "append", "(", "Motif", "(", "pwm", ")", ")", "motifs", "[", "-", "1", "]", ".", "id", "=", "\"%s_%s\"", "%", "(", "self", ".", "name", ",", "m", ".", "group", "(", "1", ")", ")", "line", "=", "fo", ".", "readline", "(", ")", "return", "motifs" ]
Convert Improbizer output to motifs Parameters ---------- fo : file-like File object containing Improbizer output. Returns ------- motifs : list List of Motif instances.
[ "Convert", "Improbizer", "output", "to", "motifs", "Parameters", "----------", "fo", ":", "file", "-", "like", "File", "object", "containing", "Improbizer", "output", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L873-L905
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Trawler._run_program
def _run_program(self, bin, fastafile, params=None): """ Run Trawler and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ params = self._parse_params(params) tmp = NamedTemporaryFile(mode="w", dir=self.tmpdir, delete=False) shutil.copy(fastafile, tmp.name) fastafile = tmp.name current_path = os.getcwd() os.chdir(self.dir()) motifs = [] stdout = "" stderr = "" for wildcard in [0,1,2]: cmd = "%s -sample %s -background %s -directory %s -strand %s -wildcard %s" % ( bin, fastafile, params["background"], self.tmpdir, params["strand"], wildcard, ) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) out,err = p.communicate() stdout += out.decode() stderr += err.decode() os.chdir(current_path) pwmfiles = glob.glob("{}/tmp*/result/*pwm".format(self.tmpdir)) if len(pwmfiles) > 0: out_file = pwmfiles[0] stdout += "\nOutfile: {}".format(out_file) my_motifs = [] if os.path.exists(out_file): my_motifs = read_motifs(out_file, fmt="pwm") for m in motifs: m.id = "{}_{}".format(self.name, m.id) stdout += "\nTrawler: {} motifs".format(len(motifs)) # remove temporary files if os.path.exists(tmp.name): os.unlink(tmp.name) for motif in my_motifs: motif.id = "{}_{}_{}".format(self.name, wildcard, motif.id) motifs += my_motifs else: stderr += "\nNo outfile found" return motifs, stdout, stderr
python
def _run_program(self, bin, fastafile, params=None): """ Run Trawler and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ params = self._parse_params(params) tmp = NamedTemporaryFile(mode="w", dir=self.tmpdir, delete=False) shutil.copy(fastafile, tmp.name) fastafile = tmp.name current_path = os.getcwd() os.chdir(self.dir()) motifs = [] stdout = "" stderr = "" for wildcard in [0,1,2]: cmd = "%s -sample %s -background %s -directory %s -strand %s -wildcard %s" % ( bin, fastafile, params["background"], self.tmpdir, params["strand"], wildcard, ) p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) out,err = p.communicate() stdout += out.decode() stderr += err.decode() os.chdir(current_path) pwmfiles = glob.glob("{}/tmp*/result/*pwm".format(self.tmpdir)) if len(pwmfiles) > 0: out_file = pwmfiles[0] stdout += "\nOutfile: {}".format(out_file) my_motifs = [] if os.path.exists(out_file): my_motifs = read_motifs(out_file, fmt="pwm") for m in motifs: m.id = "{}_{}".format(self.name, m.id) stdout += "\nTrawler: {} motifs".format(len(motifs)) # remove temporary files if os.path.exists(tmp.name): os.unlink(tmp.name) for motif in my_motifs: motif.id = "{}_{}_{}".format(self.name, wildcard, motif.id) motifs += my_motifs else: stderr += "\nNo outfile found" return motifs, stdout, stderr
[ "def", "_run_program", "(", "self", ",", "bin", ",", "fastafile", ",", "params", "=", "None", ")", ":", "params", "=", "self", ".", "_parse_params", "(", "params", ")", "tmp", "=", "NamedTemporaryFile", "(", "mode", "=", "\"w\"", ",", "dir", "=", "self", ".", "tmpdir", ",", "delete", "=", "False", ")", "shutil", ".", "copy", "(", "fastafile", ",", "tmp", ".", "name", ")", "fastafile", "=", "tmp", ".", "name", "current_path", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "self", ".", "dir", "(", ")", ")", "motifs", "=", "[", "]", "stdout", "=", "\"\"", "stderr", "=", "\"\"", "for", "wildcard", "in", "[", "0", ",", "1", ",", "2", "]", ":", "cmd", "=", "\"%s -sample %s -background %s -directory %s -strand %s -wildcard %s\"", "%", "(", "bin", ",", "fastafile", ",", "params", "[", "\"background\"", "]", ",", "self", ".", "tmpdir", ",", "params", "[", "\"strand\"", "]", ",", "wildcard", ",", ")", "p", "=", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "out", ",", "err", "=", "p", ".", "communicate", "(", ")", "stdout", "+=", "out", ".", "decode", "(", ")", "stderr", "+=", "err", ".", "decode", "(", ")", "os", ".", "chdir", "(", "current_path", ")", "pwmfiles", "=", "glob", ".", "glob", "(", "\"{}/tmp*/result/*pwm\"", ".", "format", "(", "self", ".", "tmpdir", ")", ")", "if", "len", "(", "pwmfiles", ")", ">", "0", ":", "out_file", "=", "pwmfiles", "[", "0", "]", "stdout", "+=", "\"\\nOutfile: {}\"", ".", "format", "(", "out_file", ")", "my_motifs", "=", "[", "]", "if", "os", ".", "path", ".", "exists", "(", "out_file", ")", ":", "my_motifs", "=", "read_motifs", "(", "out_file", ",", "fmt", "=", "\"pwm\"", ")", "for", "m", "in", "motifs", ":", "m", ".", "id", "=", "\"{}_{}\"", ".", "format", "(", "self", ".", "name", ",", "m", ".", "id", ")", "stdout", "+=", "\"\\nTrawler: {} motifs\"", ".", "format", "(", "len", "(", "motifs", ")", ")", "# remove temporary files", "if", "os", ".", "path", ".", "exists", "(", "tmp", ".", "name", ")", ":", "os", ".", "unlink", "(", "tmp", ".", "name", ")", "for", "motif", "in", "my_motifs", ":", "motif", ".", "id", "=", "\"{}_{}_{}\"", ".", "format", "(", "self", ".", "name", ",", "wildcard", ",", "motif", ".", "id", ")", "motifs", "+=", "my_motifs", "else", ":", "stderr", "+=", "\"\\nNo outfile found\"", "return", "motifs", ",", "stdout", ",", "stderr" ]
Run Trawler and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool.
[ "Run", "Trawler", "and", "predict", "motifs", "from", "a", "FASTA", "file", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L945-L1023
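A quirk worth noting in the Trawler record above: inside the wildcard loop, the first renaming loop iterates over the accumulator `motifs` rather than the freshly parsed `my_motifs`, so motifs collected in earlier iterations are re-prefixed with the tool name on every pass (and the logged count reads `len(motifs)` rather than the size of the new batch). A self-contained sketch of the id bookkeeping as it was presumably intended, using a stand-in class instead of gimmemotifs' Motif:

class FakeMotif:  # stand-in for Motif, only to show the id handling
    def __init__(self, id):
        self.id = id

motifs = []
for wildcard in [0, 1, 2]:
    my_motifs = [FakeMotif("m1")]      # pretend read_motifs() result
    for motif in my_motifs:            # label only the fresh batch
        motif.id = "trawler_{}_{}".format(wildcard, motif.id)
    motifs += my_motifs
print([m.id for m in motifs])
# ['trawler_0_m1', 'trawler_1_m1', 'trawler_2_m1']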
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Weeder._run_program
def _run_program(self, bin,fastafile, params=None): """ Run Weeder and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ params = self._parse_params(params) organism = params["organism"] weeder_organisms = { "hg18":"HS", "hg19":"HS", "hg38":"HS", "mm9":"MM", "mm10":"MM", "dm3":"DM", "dm5":"DM", "dm6":"DM", "yeast":"SC", "sacCer2":"SC", "sacCer3":"SC", "TAIR10":"AT", "TAIR11":"AT", } weeder_organism = weeder_organisms.get(organism, "HS") tmp = NamedTemporaryFile(dir=self.tmpdir) name = tmp.name tmp.close() shutil.copy(fastafile, name) fastafile = name cmd = "{} -f {} -O".format( self.cmd, fastafile, weeder_organism, ) if params["single"]: cmd += " -ss" #print cmd stdout, stderr = "", "" p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, cwd=self.tmpdir) out,err = p.communicate() stdout += out.decode() stderr += err.decode() motifs = [] if os.path.exists(fastafile + ".matrix.w2"): f = open(fastafile + ".matrix.w2") motifs = self.parse(f) f.close() for m in motifs: m.id = "{}_{}".format(self.name, m.id.split("\t")[0]) for ext in [".w2", ".matrix.w2" ]: if os.path.exists(fastafile + ext): os.unlink(fastafile + ext) return motifs, stdout, stderr
python
def _run_program(self, bin,fastafile, params=None): """ Run Weeder and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ params = self._parse_params(params) organism = params["organism"] weeder_organisms = { "hg18":"HS", "hg19":"HS", "hg38":"HS", "mm9":"MM", "mm10":"MM", "dm3":"DM", "dm5":"DM", "dm6":"DM", "yeast":"SC", "sacCer2":"SC", "sacCer3":"SC", "TAIR10":"AT", "TAIR11":"AT", } weeder_organism = weeder_organisms.get(organism, "HS") tmp = NamedTemporaryFile(dir=self.tmpdir) name = tmp.name tmp.close() shutil.copy(fastafile, name) fastafile = name cmd = "{} -f {} -O".format( self.cmd, fastafile, weeder_organism, ) if params["single"]: cmd += " -ss" #print cmd stdout, stderr = "", "" p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE, cwd=self.tmpdir) out,err = p.communicate() stdout += out.decode() stderr += err.decode() motifs = [] if os.path.exists(fastafile + ".matrix.w2"): f = open(fastafile + ".matrix.w2") motifs = self.parse(f) f.close() for m in motifs: m.id = "{}_{}".format(self.name, m.id.split("\t")[0]) for ext in [".w2", ".matrix.w2" ]: if os.path.exists(fastafile + ext): os.unlink(fastafile + ext) return motifs, stdout, stderr
[ "def", "_run_program", "(", "self", ",", "bin", ",", "fastafile", ",", "params", "=", "None", ")", ":", "params", "=", "self", ".", "_parse_params", "(", "params", ")", "organism", "=", "params", "[", "\"organism\"", "]", "weeder_organisms", "=", "{", "\"hg18\"", ":", "\"HS\"", ",", "\"hg19\"", ":", "\"HS\"", ",", "\"hg38\"", ":", "\"HS\"", ",", "\"mm9\"", ":", "\"MM\"", ",", "\"mm10\"", ":", "\"MM\"", ",", "\"dm3\"", ":", "\"DM\"", ",", "\"dm5\"", ":", "\"DM\"", ",", "\"dm6\"", ":", "\"DM\"", ",", "\"yeast\"", ":", "\"SC\"", ",", "\"sacCer2\"", ":", "\"SC\"", ",", "\"sacCer3\"", ":", "\"SC\"", ",", "\"TAIR10\"", ":", "\"AT\"", ",", "\"TAIR11\"", ":", "\"AT\"", ",", "}", "weeder_organism", "=", "weeder_organisms", ".", "get", "(", "organism", ",", "\"HS\"", ")", "tmp", "=", "NamedTemporaryFile", "(", "dir", "=", "self", ".", "tmpdir", ")", "name", "=", "tmp", ".", "name", "tmp", ".", "close", "(", ")", "shutil", ".", "copy", "(", "fastafile", ",", "name", ")", "fastafile", "=", "name", "cmd", "=", "\"{} -f {} -O\"", ".", "format", "(", "self", ".", "cmd", ",", "fastafile", ",", "weeder_organism", ",", ")", "if", "params", "[", "\"single\"", "]", ":", "cmd", "+=", "\" -ss\"", "#print cmd", "stdout", ",", "stderr", "=", "\"\"", ",", "\"\"", "p", "=", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ",", "cwd", "=", "self", ".", "tmpdir", ")", "out", ",", "err", "=", "p", ".", "communicate", "(", ")", "stdout", "+=", "out", ".", "decode", "(", ")", "stderr", "+=", "err", ".", "decode", "(", ")", "motifs", "=", "[", "]", "if", "os", ".", "path", ".", "exists", "(", "fastafile", "+", "\".matrix.w2\"", ")", ":", "f", "=", "open", "(", "fastafile", "+", "\".matrix.w2\"", ")", "motifs", "=", "self", ".", "parse", "(", "f", ")", "f", ".", "close", "(", ")", "for", "m", "in", "motifs", ":", "m", ".", "id", "=", "\"{}_{}\"", ".", "format", "(", "self", ".", "name", ",", "m", ".", "id", ".", "split", "(", "\"\\t\"", ")", "[", "0", "]", ")", "for", "ext", "in", "[", "\".w2\"", ",", "\".matrix.w2\"", "]", ":", "if", "os", ".", "path", ".", "exists", "(", "fastafile", "+", "ext", ")", ":", "os", ".", "unlink", "(", "fastafile", "+", "ext", ")", "return", "motifs", ",", "stdout", ",", "stderr" ]
Run Weeder and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool.
[ "Run", "Weeder", "and", "predict", "motifs", "from", "a", "FASTA", "file", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1055-L1137
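The Weeder record above appears to drop the organism from the command line: the format string "{} -f {} -O" has two placeholders but format() receives three arguments, and str.format silently ignores the extra positional argument, so weeder_organism never reaches the shell. A runnable demonstration together with the presumable fix (the binary and file names are placeholders, not taken from the record):

cmd_bin, fastafile, weeder_organism = "weeder2", "peaks.fa", "HS"
buggy = "{} -f {} -O".format(cmd_bin, fastafile, weeder_organism)
fixed = "{} -f {} -O {}".format(cmd_bin, fastafile, weeder_organism)
print(buggy)  # weeder2 -f peaks.fa -O      <- organism code missing
print(fixed)  # weeder2 -f peaks.fa -O HS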
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
MotifSampler._parse_params
def _parse_params(self, params=None): """ Parse parameters. Combine default and user-defined parameters. """ prm = self.default_params.copy() if params is not None: prm.update(params) if prm["background_model"]: # Absolute path, just to be sure prm["background_model"] = os.path.abspath(prm["background_model"]) else: if prm.get("organism", None): prm["background_model"] = os.path.join( self.config.get_bg_dir(), "{}.{}.bg".format( prm["organism"], "MotifSampler")) else: raise Exception("No background specified for {}".format(self.name)) prm["strand"] = 1 if prm["single"]: prm["strand"] = 0 tmp = NamedTemporaryFile(dir=self.tmpdir) prm["pwmfile"] = tmp.name tmp2 = NamedTemporaryFile(dir=self.tmpdir) prm["outfile"] = tmp2.name return prm
python
def _parse_params(self, params=None): """ Parse parameters. Combine default and user-defined parameters. """ prm = self.default_params.copy() if params is not None: prm.update(params) if prm["background_model"]: # Absolute path, just to be sure prm["background_model"] = os.path.abspath(prm["background_model"]) else: if prm.get("organism", None): prm["background_model"] = os.path.join( self.config.get_bg_dir(), "{}.{}.bg".format( prm["organism"], "MotifSampler")) else: raise Exception("No background specified for {}".format(self.name)) prm["strand"] = 1 if prm["single"]: prm["strand"] = 0 tmp = NamedTemporaryFile(dir=self.tmpdir) prm["pwmfile"] = tmp.name tmp2 = NamedTemporaryFile(dir=self.tmpdir) prm["outfile"] = tmp2.name return prm
[ "def", "_parse_params", "(", "self", ",", "params", "=", "None", ")", ":", "prm", "=", "self", ".", "default_params", ".", "copy", "(", ")", "if", "params", "is", "not", "None", ":", "prm", ".", "update", "(", "params", ")", "if", "prm", "[", "\"background_model\"", "]", ":", "# Absolute path, just to be sure", "prm", "[", "\"background_model\"", "]", "=", "os", ".", "path", ".", "abspath", "(", "prm", "[", "\"background_model\"", "]", ")", "else", ":", "if", "prm", ".", "get", "(", "\"organism\"", ",", "None", ")", ":", "prm", "[", "\"background_model\"", "]", "=", "os", ".", "path", ".", "join", "(", "self", ".", "config", ".", "get_bg_dir", "(", ")", ",", "\"{}.{}.bg\"", ".", "format", "(", "prm", "[", "\"organism\"", "]", ",", "\"MotifSampler\"", ")", ")", "else", ":", "raise", "Exception", "(", "\"No background specified for {}\"", ".", "format", "(", "self", ".", "name", ")", ")", "prm", "[", "\"strand\"", "]", "=", "1", "if", "prm", "[", "\"single\"", "]", ":", "prm", "[", "\"strand\"", "]", "=", "0", "tmp", "=", "NamedTemporaryFile", "(", "dir", "=", "self", ".", "tmpdir", ")", "prm", "[", "\"pwmfile\"", "]", "=", "tmp", ".", "name", "tmp2", "=", "NamedTemporaryFile", "(", "dir", "=", "self", ".", "tmpdir", ")", "prm", "[", "\"outfile\"", "]", "=", "tmp2", ".", "name", "return", "prm" ]
Parse parameters. Combine default and user-defined parameters.
[ "Parse", "parameters", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1173-L1206
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
MotifSampler._run_program
def _run_program(self, bin, fastafile, params=None): """ Run MotifSampler and predict motifs from a FASTA file. Parameters ---------- bin : str Command used to run the tool. fastafile : str Name of the FASTA input file. params : dict, optional Optional parameters. For some of the tools required parameters are passed using this dictionary. Returns ------- motifs : list of Motif instances The predicted motifs. stdout : str Standard out of the tool. stderr : str Standard error of the tool. """ params = self._parse_params(params) # TODO: test organism #cmd = "%s -f %s -b %s -m %s -w %s -n %s -o %s -s %s > /dev/null 2>&1" % ( cmd = "%s -f %s -b %s -m %s -w %s -n %s -o %s -s %s" % ( bin, fastafile, params["background_model"], params["pwmfile"], params["width"], params["number"], params["outfile"], params["strand"], ) #print cmd p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE) stdout, stderr = p.communicate() #stdout,stderr = "","" #p = Popen(cmd, shell=True) #p.wait() motifs = [] if os.path.exists(params["outfile"]): with open(params["outfile"]) as f: motifs = self.parse_out(f) for motif in motifs: motif.id = "%s_%s" % (self.name, motif.id) return motifs, stdout, stderr
python
def _run_program(self, bin, fastafile, params=None):
    """
    Run MotifSampler and predict motifs from a FASTA file.

    Parameters
    ----------
    bin : str
        Command used to run the tool.
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.

    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.
    """
    params = self._parse_params(params)
    # TODO: test organism
    #cmd = "%s -f %s -b %s -m %s -w %s -n %s -o %s -s %s > /dev/null 2>&1" % (
    cmd = "%s -f %s -b %s -m %s -w %s -n %s -o %s -s %s" % (
        bin,
        fastafile,
        params["background_model"],
        params["pwmfile"],
        params["width"],
        params["number"],
        params["outfile"],
        params["strand"],
    )
    #print cmd
    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    stdout, stderr = p.communicate()

    #stdout,stderr = "",""
    #p = Popen(cmd, shell=True)
    #p.wait()

    motifs = []
    if os.path.exists(params["outfile"]):
        with open(params["outfile"]) as f:
            motifs = self.parse_out(f)

    for motif in motifs:
        motif.id = "%s_%s" % (self.name, motif.id)

    return motifs, stdout, stderr
[ "def", "_run_program", "(", "self", ",", "bin", ",", "fastafile", ",", "params", "=", "None", ")", ":", "params", "=", "self", ".", "_parse_params", "(", "params", ")", "# TODO: test organism", "#cmd = \"%s -f %s -b %s -m %s -w %s -n %s -o %s -s %s > /dev/null 2>&1\" % (", "cmd", "=", "\"%s -f %s -b %s -m %s -w %s -n %s -o %s -s %s\"", "%", "(", "bin", ",", "fastafile", ",", "params", "[", "\"background_model\"", "]", ",", "params", "[", "\"pwmfile\"", "]", ",", "params", "[", "\"width\"", "]", ",", "params", "[", "\"number\"", "]", ",", "params", "[", "\"outfile\"", "]", ",", "params", "[", "\"strand\"", "]", ",", ")", "#print cmd", "p", "=", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "stdout", ",", "stderr", "=", "p", ".", "communicate", "(", ")", "#stdout,stderr = \"\",\"\"", "#p = Popen(cmd, shell=True)", "#p.wait()", "motifs", "=", "[", "]", "if", "os", ".", "path", ".", "exists", "(", "params", "[", "\"outfile\"", "]", ")", ":", "with", "open", "(", "params", "[", "\"outfile\"", "]", ")", "as", "f", ":", "motifs", "=", "self", ".", "parse_out", "(", "f", ")", "for", "motif", "in", "motifs", ":", "motif", ".", "id", "=", "\"%s_%s\"", "%", "(", "self", ".", "name", ",", "motif", ".", "id", ")", "return", "motifs", ",", "stdout", ",", "stderr" ]
Run MotifSampler and predict motifs from a FASTA file.

Parameters
----------
bin : str
    Command used to run the tool.
fastafile : str
    Name of the FASTA input file.
params : dict, optional
    Optional parameters. For some of the tools required parameters
    are passed using this dictionary.

Returns
-------
motifs : list of Motif instances
    The predicted motifs.
stdout : str
    Standard out of the tool.
stderr : str
    Standard error of the tool.
[ "Run", "MotifSampler", "and", "predict", "motifs", "from", "a", "FASTA", "file", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1208-L1264
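A hedged sketch of the Popen pattern this wrapper relies on: communicate() blocks until the child process exits and returns bytes, which is why the callers in this module decode before doing any string tests. The command here is a stand-in, not a real MotifSampler invocation.

from subprocess import Popen, PIPE

# Stand-in command; a real run would build the MotifSampler command line.
p = Popen("echo motif-run-done", shell=True, stdout=PIPE, stderr=PIPE)
stdout, stderr = p.communicate()   # blocks until the process exits
print(stdout.decode().strip())     # bytes -> str before any string handling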
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
MotifSampler.parse
def parse(self, fo):
    """
    Convert MotifSampler output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing MotifSampler output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    motifs = []

    pwm = []
    info = {}
    for line in fo.readlines():
        if line.startswith("#"):
            vals = line.strip()[1:].split(" = ")
            if len(vals) > 1:
                info[vals[0]] = vals[1]
        elif len(line) > 1:
            pwm.append([float(x) for x in line.strip().split("\t")])
        else:
            motifs.append(Motif())
            motifs[-1].consensus = info["Consensus"]
            motifs[-1].width = info["W"]
            motifs[-1].id = info["ID"]
            motifs[-1].pwm = pwm[:]
            pwm = []

    return motifs
python
def parse(self, fo):
    """
    Convert MotifSampler output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing MotifSampler output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    motifs = []

    pwm = []
    info = {}
    for line in fo.readlines():
        if line.startswith("#"):
            vals = line.strip()[1:].split(" = ")
            if len(vals) > 1:
                info[vals[0]] = vals[1]
        elif len(line) > 1:
            pwm.append([float(x) for x in line.strip().split("\t")])
        else:
            motifs.append(Motif())
            motifs[-1].consensus = info["Consensus"]
            motifs[-1].width = info["W"]
            motifs[-1].id = info["ID"]
            motifs[-1].pwm = pwm[:]
            pwm = []

    return motifs
[ "def", "parse", "(", "self", ",", "fo", ")", ":", "motifs", "=", "[", "]", "pwm", "=", "[", "]", "info", "=", "{", "}", "for", "line", "in", "fo", ".", "readlines", "(", ")", ":", "if", "line", ".", "startswith", "(", "\"#\"", ")", ":", "vals", "=", "line", ".", "strip", "(", ")", "[", "1", ":", "]", ".", "split", "(", "\" = \"", ")", "if", "len", "(", "vals", ")", ">", "1", ":", "info", "[", "vals", "[", "0", "]", "]", "=", "vals", "[", "1", "]", "elif", "len", "(", "line", ")", ">", "1", ":", "pwm", ".", "append", "(", "[", "float", "(", "x", ")", "for", "x", "in", "line", ".", "strip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "]", ")", "else", ":", "motifs", ".", "append", "(", "Motif", "(", ")", ")", "motifs", "[", "-", "1", "]", ".", "consensus", "=", "info", "[", "\"Consensus\"", "]", "motifs", "[", "-", "1", "]", ".", "width", "=", "info", "[", "\"W\"", "]", "motifs", "[", "-", "1", "]", ".", "id", "=", "info", "[", "\"ID\"", "]", "motifs", "[", "-", "1", "]", ".", "pwm", "=", "pwm", "[", ":", "]", "pwm", "=", "[", "]", "return", "motifs" ]
Convert MotifSampler output to motifs

Parameters
----------
fo : file-like
    File object containing MotifSampler output.

Returns
-------
motifs : list
    List of Motif instances.
[ "Convert", "MotifSampler", "output", "to", "motifs", "Parameters", "----------", "fo", ":", "file", "-", "like", "File", "object", "containing", "MotifSampler", "output", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1266-L1299
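The same header/matrix/blank-line state machine can be exercised standalone. The sample below is a hypothetical input written in the layout parse() expects ('#key = value' headers, tab-separated matrix rows, a blank line closing each motif), not real MotifSampler output.

import io

# Hypothetical two-position motif in the format the parser consumes.
sample = io.StringIO(
    "#ID = motif_1\n"
    "#W = 2\n"
    "#Consensus = CA\n"
    "0.1\t0.7\t0.1\t0.1\n"
    "0.8\t0.1\t0.05\t0.05\n"
    "\n"
)

info, pwm, motifs = {}, [], []
for line in sample:
    if line.startswith("#"):
        key, _, value = line.strip()[1:].partition(" = ")
        info[key] = value
    elif len(line) > 1:
        pwm.append([float(x) for x in line.strip().split("\t")])
    else:  # blank line terminates a motif block
        motifs.append((info["ID"], pwm[:]))
        pwm = []

print(motifs)  # [('motif_1', [[0.1, 0.7, 0.1, 0.1], [0.8, 0.1, 0.05, 0.05]])]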
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
MotifSampler.parse_out
def parse_out(self, fo):
    """
    Convert MotifSampler output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing MotifSampler output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    motifs = []
    nucs = {"A":0,"C":1,"G":2,"T":3}
    pseudo = 0.0 # Should be 1/sqrt(# of seqs)
    aligns = {}
    for line in fo.readlines():
        if line.startswith("#"):
            pass
        elif len(line) > 1:
            vals = line.strip().split("\t")
            m_id, site = [x.strip().split(" ")[1].replace('"',"") for x in vals[8].split(";") if x]
            #if vals[6] == "+":
            if site.upper().find("N") == -1:
                aligns.setdefault(m_id, []).append(site)
            #else:
            #    print site, rc(site)
            #    aligns.setdefault(id, []).append(rc(site))

    for m_id, align in aligns.items():
        #print id, len(align)
        width = len(align[0])
        pfm = [[0 for x in range(4)] for x in range(width)]
        for row in align:
            for i in range(len(row)):
                pfm[i][nucs[row[i]]] += 1
        total = float(len(align))
        pwm = [[(x + pseudo/4)/total+(pseudo) for x in row] for row in pfm]
        m = Motif()
        m.align = align[:]
        m.pwm = pwm[:]
        m.pfm = pfm[:]
        m.id = m_id
        motifs.append(m)

    return motifs
python
def parse_out(self, fo):
    """
    Convert MotifSampler output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing MotifSampler output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    motifs = []
    nucs = {"A":0,"C":1,"G":2,"T":3}
    pseudo = 0.0 # Should be 1/sqrt(# of seqs)
    aligns = {}
    for line in fo.readlines():
        if line.startswith("#"):
            pass
        elif len(line) > 1:
            vals = line.strip().split("\t")
            m_id, site = [x.strip().split(" ")[1].replace('"',"") for x in vals[8].split(";") if x]
            #if vals[6] == "+":
            if site.upper().find("N") == -1:
                aligns.setdefault(m_id, []).append(site)
            #else:
            #    print site, rc(site)
            #    aligns.setdefault(id, []).append(rc(site))

    for m_id, align in aligns.items():
        #print id, len(align)
        width = len(align[0])
        pfm = [[0 for x in range(4)] for x in range(width)]
        for row in align:
            for i in range(len(row)):
                pfm[i][nucs[row[i]]] += 1
        total = float(len(align))
        pwm = [[(x + pseudo/4)/total+(pseudo) for x in row] for row in pfm]
        m = Motif()
        m.align = align[:]
        m.pwm = pwm[:]
        m.pfm = pfm[:]
        m.id = m_id
        motifs.append(m)

    return motifs
[ "def", "parse_out", "(", "self", ",", "fo", ")", ":", "motifs", "=", "[", "]", "nucs", "=", "{", "\"A\"", ":", "0", ",", "\"C\"", ":", "1", ",", "\"G\"", ":", "2", ",", "\"T\"", ":", "3", "}", "pseudo", "=", "0.0", "# Should be 1/sqrt(# of seqs)", "aligns", "=", "{", "}", "for", "line", "in", "fo", ".", "readlines", "(", ")", ":", "if", "line", ".", "startswith", "(", "\"#\"", ")", ":", "pass", "elif", "len", "(", "line", ")", ">", "1", ":", "vals", "=", "line", ".", "strip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "m_id", ",", "site", "=", "[", "x", ".", "strip", "(", ")", ".", "split", "(", "\" \"", ")", "[", "1", "]", ".", "replace", "(", "'\"'", ",", "\"\"", ")", "for", "x", "in", "vals", "[", "8", "]", ".", "split", "(", "\";\"", ")", "if", "x", "]", "#if vals[6] == \"+\":", "if", "site", ".", "upper", "(", ")", ".", "find", "(", "\"N\"", ")", "==", "-", "1", ":", "aligns", ".", "setdefault", "(", "m_id", ",", "[", "]", ")", ".", "append", "(", "site", ")", "#else:", "# print site, rc(site)", "# aligns.setdefault(id, []).append(rc(site))", "for", "m_id", ",", "align", "in", "aligns", ".", "items", "(", ")", ":", "#print id, len(align)", "width", "=", "len", "(", "align", "[", "0", "]", ")", "pfm", "=", "[", "[", "0", "for", "x", "in", "range", "(", "4", ")", "]", "for", "x", "in", "range", "(", "width", ")", "]", "for", "row", "in", "align", ":", "for", "i", "in", "range", "(", "len", "(", "row", ")", ")", ":", "pfm", "[", "i", "]", "[", "nucs", "[", "row", "[", "i", "]", "]", "]", "+=", "1", "total", "=", "float", "(", "len", "(", "align", ")", ")", "pwm", "=", "[", "[", "(", "x", "+", "pseudo", "/", "4", ")", "/", "total", "+", "(", "pseudo", ")", "for", "x", "in", "row", "]", "for", "row", "in", "pfm", "]", "m", "=", "Motif", "(", ")", "m", ".", "align", "=", "align", "[", ":", "]", "m", ".", "pwm", "=", "pwm", "[", ":", "]", "m", ".", "pfm", "=", "pfm", "[", ":", "]", "m", ".", "id", "=", "m_id", "motifs", ".", "append", "(", "m", ")", "return", "motifs" ]
Convert MotifSampler output to motifs

Parameters
----------
fo : file-like
    File object containing MotifSampler output.

Returns
-------
motifs : list
    List of Motif instances.
[ "Convert", "MotifSampler", "output", "to", "motifs", "Parameters", "----------", "fo", ":", "file", "-", "like", "File", "object", "containing", "MotifSampler", "output", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1301-L1348
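The core of parse_out() is building a position frequency matrix (PFM) from aligned sites and normalizing it into a PWM. A self-contained sketch of that step, with made-up sites:

nucs = {"A": 0, "C": 1, "G": 2, "T": 3}
align = ["ACGT", "ACGA", "ACGT"]    # hypothetical aligned binding sites

width = len(align[0])
pfm = [[0] * 4 for _ in range(width)]   # one [A, C, G, T] count row per position
for site in align:
    for i, base in enumerate(site):
        pfm[i][nucs[base]] += 1

total = float(len(align))
pwm = [[count / total for count in row] for row in pfm]
print(pwm[3])  # position 4: [0.333..., 0.0, 0.0, 0.666...] (one A, two T)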
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
MDmodule._run_program
def _run_program(self, bin, fastafile, params=None):
    """
    Run MDmodule and predict motifs from a FASTA file.

    Parameters
    ----------
    bin : str
        Command used to run the tool.
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.

    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.
    """
    default_params = {"width":10, "number":10}
    if params is not None:
        default_params.update(params)

    new_file = os.path.join(self.tmpdir, "mdmodule_in.fa")
    shutil.copy(fastafile, new_file)

    fastafile = new_file
    pwmfile = fastafile + ".out"

    width = default_params['width']
    number = default_params['number']

    current_path = os.getcwd()
    os.chdir(self.tmpdir)
    cmd = "%s -i %s -a 1 -o %s -w %s -t 100 -r %s" % (bin, fastafile, pwmfile, width, number)
    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    stdout,stderr = p.communicate()
    stdout = "cmd: {}\n".format(cmd) + stdout.decode()

    motifs = []
    if os.path.exists(pwmfile):
        with open(pwmfile) as f:
            motifs = self.parse(f)

    os.chdir(current_path)

    for motif in motifs:
        motif.id = "%s_%s" % (self.name, motif.id)

    return motifs, stdout, stderr
python
def _run_program(self, bin, fastafile, params=None):
    """
    Run MDmodule and predict motifs from a FASTA file.

    Parameters
    ----------
    bin : str
        Command used to run the tool.
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.

    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.
    """
    default_params = {"width":10, "number":10}
    if params is not None:
        default_params.update(params)

    new_file = os.path.join(self.tmpdir, "mdmodule_in.fa")
    shutil.copy(fastafile, new_file)

    fastafile = new_file
    pwmfile = fastafile + ".out"

    width = default_params['width']
    number = default_params['number']

    current_path = os.getcwd()
    os.chdir(self.tmpdir)
    cmd = "%s -i %s -a 1 -o %s -w %s -t 100 -r %s" % (bin, fastafile, pwmfile, width, number)
    p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
    stdout,stderr = p.communicate()
    stdout = "cmd: {}\n".format(cmd) + stdout.decode()

    motifs = []
    if os.path.exists(pwmfile):
        with open(pwmfile) as f:
            motifs = self.parse(f)

    os.chdir(current_path)

    for motif in motifs:
        motif.id = "%s_%s" % (self.name, motif.id)

    return motifs, stdout, stderr
[ "def", "_run_program", "(", "self", ",", "bin", ",", "fastafile", ",", "params", "=", "None", ")", ":", "default_params", "=", "{", "\"width\"", ":", "10", ",", "\"number\"", ":", "10", "}", "if", "params", "is", "not", "None", ":", "default_params", ".", "update", "(", "params", ")", "new_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "tmpdir", ",", "\"mdmodule_in.fa\"", ")", "shutil", ".", "copy", "(", "fastafile", ",", "new_file", ")", "fastafile", "=", "new_file", "pwmfile", "=", "fastafile", "+", "\".out\"", "width", "=", "default_params", "[", "'width'", "]", "number", "=", "default_params", "[", "'number'", "]", "current_path", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "self", ".", "tmpdir", ")", "cmd", "=", "\"%s -i %s -a 1 -o %s -w %s -t 100 -r %s\"", "%", "(", "bin", ",", "fastafile", ",", "pwmfile", ",", "width", ",", "number", ")", "p", "=", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "stdout", ",", "stderr", "=", "p", ".", "communicate", "(", ")", "stdout", "=", "\"cmd: {}\\n\"", ".", "format", "(", "cmd", ")", "+", "stdout", ".", "decode", "(", ")", "motifs", "=", "[", "]", "if", "os", ".", "path", ".", "exists", "(", "pwmfile", ")", ":", "with", "open", "(", "pwmfile", ")", "as", "f", ":", "motifs", "=", "self", ".", "parse", "(", "f", ")", "os", ".", "chdir", "(", "current_path", ")", "for", "motif", "in", "motifs", ":", "motif", ".", "id", "=", "\"%s_%s\"", "%", "(", "self", ".", "name", ",", "motif", ".", "id", ")", "return", "motifs", ",", "stdout", ",", "stderr" ]
Run MDmodule and predict motifs from a FASTA file.

Parameters
----------
bin : str
    Command used to run the tool.
fastafile : str
    Name of the FASTA input file.
params : dict, optional
    Optional parameters. For some of the tools required parameters
    are passed using this dictionary.

Returns
-------
motifs : list of Motif instances
    The predicted motifs.
stdout : str
    Standard out of the tool.
stderr : str
    Standard error of the tool.
[ "Run", "MDmodule", "and", "predict", "motifs", "from", "a", "FASTA", "file", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1378-L1436
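The wrapper changes into the temp directory because MDmodule writes auxiliary files to the working directory. A common hardening of that save/chdir/restore pattern, sketched here as a hypothetical helper, restores the old directory even if the run raises:

import os

def run_in_dir(workdir, func, *args):
    # Save and restore the working directory around a tool run;
    # try/finally guarantees the restore even when func() raises.
    old = os.getcwd()
    os.chdir(workdir)
    try:
        return func(*args)
    finally:
        os.chdir(old)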
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
MDmodule.parse
def parse(self, fo):
    """
    Convert MDmodule output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing MDmodule output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    motifs = []
    nucs = {"A":0,"C":1,"G":2,"T":3}

    p = re.compile(r'(\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)')
    pf = re.compile(r'>.+\s+[bf]\d+\s+(\w+)')

    pwm = []
    pfm = []
    align = []
    m_id = ""
    for line in fo.readlines():
        if line.startswith("Motif"):
            if m_id:
                motifs.append(Motif())
                motifs[-1].id = m_id
                motifs[-1].pwm = pwm
                motifs[-1].pfm = pfm
                motifs[-1].align = align
                pwm = []
                pfm = []
                align = []
            m_id = line.split("\t")[0]
        else:
            m = p.search(line)
            if m:
                pwm.append([float(m.group(x))/100 for x in [2,3,4,5]])
            m = pf.search(line)
            if m:
                if not pfm:
                    pfm = [[0 for x in range(4)] for x in range(len(m.group(1)))]
                for i in range(len(m.group(1))):
                    pfm[i][nucs[m.group(1)[i]]] += 1
                align.append(m.group(1))

    if pwm:
        motifs.append(Motif())
        motifs[-1].id = m_id
        motifs[-1].pwm = pwm
        motifs[-1].pfm = pfm
        motifs[-1].align = align

    return motifs
python
def parse(self, fo):
    """
    Convert MDmodule output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing MDmodule output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    motifs = []
    nucs = {"A":0,"C":1,"G":2,"T":3}

    p = re.compile(r'(\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)')
    pf = re.compile(r'>.+\s+[bf]\d+\s+(\w+)')

    pwm = []
    pfm = []
    align = []
    m_id = ""
    for line in fo.readlines():
        if line.startswith("Motif"):
            if m_id:
                motifs.append(Motif())
                motifs[-1].id = m_id
                motifs[-1].pwm = pwm
                motifs[-1].pfm = pfm
                motifs[-1].align = align
                pwm = []
                pfm = []
                align = []
            m_id = line.split("\t")[0]
        else:
            m = p.search(line)
            if m:
                pwm.append([float(m.group(x))/100 for x in [2,3,4,5]])
            m = pf.search(line)
            if m:
                if not pfm:
                    pfm = [[0 for x in range(4)] for x in range(len(m.group(1)))]
                for i in range(len(m.group(1))):
                    pfm[i][nucs[m.group(1)[i]]] += 1
                align.append(m.group(1))

    if pwm:
        motifs.append(Motif())
        motifs[-1].id = m_id
        motifs[-1].pwm = pwm
        motifs[-1].pfm = pfm
        motifs[-1].align = align

    return motifs
[ "def", "parse", "(", "self", ",", "fo", ")", ":", "motifs", "=", "[", "]", "nucs", "=", "{", "\"A\"", ":", "0", ",", "\"C\"", ":", "1", ",", "\"G\"", ":", "2", ",", "\"T\"", ":", "3", "}", "p", "=", "re", ".", "compile", "(", "r'(\\d+)\\s+(\\d+\\.\\d+)\\s+(\\d+\\.\\d+)\\s+(\\d+\\.\\d+)\\s+(\\d+\\.\\d+)'", ")", "pf", "=", "re", ".", "compile", "(", "r'>.+\\s+[bf]\\d+\\s+(\\w+)'", ")", "pwm", "=", "[", "]", "pfm", "=", "[", "]", "align", "=", "[", "]", "m_id", "=", "\"\"", "for", "line", "in", "fo", ".", "readlines", "(", ")", ":", "if", "line", ".", "startswith", "(", "\"Motif\"", ")", ":", "if", "m_id", ":", "motifs", ".", "append", "(", "Motif", "(", ")", ")", "motifs", "[", "-", "1", "]", ".", "id", "=", "m_id", "motifs", "[", "-", "1", "]", ".", "pwm", "=", "pwm", "motifs", "[", "-", "1", "]", ".", "pfm", "=", "pfm", "motifs", "[", "-", "1", "]", ".", "align", "=", "align", "pwm", "=", "[", "]", "pfm", "=", "[", "]", "align", "=", "[", "]", "m_id", "=", "line", ".", "split", "(", "\"\\t\"", ")", "[", "0", "]", "else", ":", "m", "=", "p", ".", "search", "(", "line", ")", "if", "m", ":", "pwm", ".", "append", "(", "[", "float", "(", "m", ".", "group", "(", "x", ")", ")", "/", "100", "for", "x", "in", "[", "2", ",", "3", ",", "4", ",", "5", "]", "]", ")", "m", "=", "pf", ".", "search", "(", "line", ")", "if", "m", ":", "if", "not", "pfm", ":", "pfm", "=", "[", "[", "0", "for", "x", "in", "range", "(", "4", ")", "]", "for", "x", "in", "range", "(", "len", "(", "m", ".", "group", "(", "1", ")", ")", ")", "]", "for", "i", "in", "range", "(", "len", "(", "m", ".", "group", "(", "1", ")", ")", ")", ":", "pfm", "[", "i", "]", "[", "nucs", "[", "m", ".", "group", "(", "1", ")", "[", "i", "]", "]", "]", "+=", "1", "align", ".", "append", "(", "m", ".", "group", "(", "1", ")", ")", "if", "pwm", ":", "motifs", ".", "append", "(", "Motif", "(", ")", ")", "motifs", "[", "-", "1", "]", ".", "id", "=", "m_id", "motifs", "[", "-", "1", "]", ".", "pwm", "=", "pwm", "motifs", "[", "-", "1", "]", ".", "pfm", "=", "pfm", "motifs", "[", "-", "1", "]", ".", "align", "=", "align", "return", "motifs" ]
Convert MDmodule output to motifs

Parameters
----------
fo : file-like
    File object containing MDmodule output.

Returns
-------
motifs : list
    List of Motif instances.
[ "Convert", "MDmodule", "output", "to", "motifs", "Parameters", "----------", "fo", ":", "file", "-", "like", "File", "object", "containing", "MDmodule", "output", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1438-L1493
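What the two patterns in parse() capture can be shown on hypothetical MDmodule-style lines; the sample strings below are illustrations, not real tool output.

import re

p = re.compile(r'(\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)\s+(\d+\.\d+)')
pf = re.compile(r'>.+\s+[bf]\d+\s+(\w+)')

# A matrix row: groups 2-5 are the A/C/G/T percentages, scaled to fractions.
m = p.search("1   10.0  20.0  30.0  40.0")
print([float(m.group(x)) / 100 for x in [2, 3, 4, 5]])  # [0.1, 0.2, 0.3, 0.4]

# A site line: group 1 is the aligned sequence.
m = pf.search(">seq12 f1 ACGTACGTAC")
print(m.group(1))  # ACGTACGTAC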
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
ChIPMunk._parse_params
def _parse_params(self, params=None):
    """
    Parse parameters.

    Combine default and user-defined parameters.
    """
    prm = self.default_params.copy()
    if params is not None:
        prm.update(params)

    return prm
python
def _parse_params(self, params=None):
    """
    Parse parameters.

    Combine default and user-defined parameters.
    """
    prm = self.default_params.copy()
    if params is not None:
        prm.update(params)

    return prm
[ "def", "_parse_params", "(", "self", ",", "params", "=", "None", ")", ":", "prm", "=", "self", ".", "default_params", ".", "copy", "(", ")", "if", "params", "is", "not", "None", ":", "prm", ".", "update", "(", "params", ")", "return", "prm" ]
Parse parameters. Combine default and user-defined parameters.
[ "Parse", "parameters", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1509-L1519
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
ChIPMunk._run_program
def _run_program(self, bin, fastafile, params=None):
    """
    Run ChIPMunk and predict motifs from a FASTA file.

    Parameters
    ----------
    bin : str
        Command used to run the tool.
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.

    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.
    """
    params = self._parse_params(params)
    basename = "munk_in.fa"

    new_file = os.path.join(self.tmpdir, basename)
    out = open(new_file, "w")
    f = Fasta(fastafile)
    for seq in f.seqs:
        header = len(seq) // 2
        out.write(">%s\n" % header)
        out.write("%s\n" % seq)
    out.close()

    fastafile = new_file
    outfile = fastafile + ".out"

    current_path = os.getcwd()
    os.chdir(self.dir())

    motifs = []
    # Max recommended by ChIPMunk userguide
    ncpus = 4
    stdout = ""
    stderr = ""
    for zoops_factor in ["oops", 0.0, 0.5, 1.0]:
        cmd = "{} {} {} y {} m:{} 100 10 1 {} 1>{}".format(
            bin,
            params.get("width", 8),
            params.get("width", 20),
            zoops_factor,
            fastafile,
            ncpus,
            outfile
        )
        #print("command: ", cmd)
        p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
        std = p.communicate()
        stdout = stdout + std[0].decode()
        stderr = stderr + std[1].decode()

        if "RuntimeException" in stderr:
            return [], stdout, stderr

        if os.path.exists(outfile):
            with open(outfile) as f:
                motifs += self.parse(f)

    os.chdir(current_path)

    return motifs, stdout, stderr
python
def _run_program(self, bin, fastafile, params=None):
    """
    Run ChIPMunk and predict motifs from a FASTA file.

    Parameters
    ----------
    bin : str
        Command used to run the tool.
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.

    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.
    """
    params = self._parse_params(params)
    basename = "munk_in.fa"

    new_file = os.path.join(self.tmpdir, basename)
    out = open(new_file, "w")
    f = Fasta(fastafile)
    for seq in f.seqs:
        header = len(seq) // 2
        out.write(">%s\n" % header)
        out.write("%s\n" % seq)
    out.close()

    fastafile = new_file
    outfile = fastafile + ".out"

    current_path = os.getcwd()
    os.chdir(self.dir())

    motifs = []
    # Max recommended by ChIPMunk userguide
    ncpus = 4
    stdout = ""
    stderr = ""
    for zoops_factor in ["oops", 0.0, 0.5, 1.0]:
        cmd = "{} {} {} y {} m:{} 100 10 1 {} 1>{}".format(
            bin,
            params.get("width", 8),
            params.get("width", 20),
            zoops_factor,
            fastafile,
            ncpus,
            outfile
        )
        #print("command: ", cmd)
        p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
        std = p.communicate()
        stdout = stdout + std[0].decode()
        stderr = stderr + std[1].decode()

        if "RuntimeException" in stderr:
            return [], stdout, stderr

        if os.path.exists(outfile):
            with open(outfile) as f:
                motifs += self.parse(f)

    os.chdir(current_path)

    return motifs, stdout, stderr
[ "def", "_run_program", "(", "self", ",", "bin", ",", "fastafile", ",", "params", "=", "None", ")", ":", "params", "=", "self", ".", "_parse_params", "(", "params", ")", "basename", "=", "\"munk_in.fa\"", "new_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "tmpdir", ",", "basename", ")", "out", "=", "open", "(", "new_file", ",", "\"w\"", ")", "f", "=", "Fasta", "(", "fastafile", ")", "for", "seq", "in", "f", ".", "seqs", ":", "header", "=", "len", "(", "seq", ")", "//", "2", "out", ".", "write", "(", "\">%s\\n\"", "%", "header", ")", "out", ".", "write", "(", "\"%s\\n\"", "%", "seq", ")", "out", ".", "close", "(", ")", "fastafile", "=", "new_file", "outfile", "=", "fastafile", "+", "\".out\"", "current_path", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "self", ".", "dir", "(", ")", ")", "motifs", "=", "[", "]", "# Max recommended by ChIPMunk userguide", "ncpus", "=", "4", "stdout", "=", "\"\"", "stderr", "=", "\"\"", "for", "zoops_factor", "in", "[", "\"oops\"", ",", "0.0", ",", "0.5", ",", "1.0", "]", ":", "cmd", "=", "\"{} {} {} y {} m:{} 100 10 1 {} 1>{}\"", ".", "format", "(", "bin", ",", "params", ".", "get", "(", "\"width\"", ",", "8", ")", ",", "params", ".", "get", "(", "\"width\"", ",", "20", ")", ",", "zoops_factor", ",", "fastafile", ",", "ncpus", ",", "outfile", ")", "#print(\"command: \", cmd)", "p", "=", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "std", "=", "p", ".", "communicate", "(", ")", "stdout", "=", "stdout", "+", "std", "[", "0", "]", ".", "decode", "(", ")", "stderr", "=", "stderr", "+", "std", "[", "1", "]", ".", "decode", "(", ")", "if", "\"RuntimeException\"", "in", "stderr", ":", "return", "[", "]", ",", "stdout", ",", "stderr", "if", "os", ".", "path", ".", "exists", "(", "outfile", ")", ":", "with", "open", "(", "outfile", ")", "as", "f", ":", "motifs", "+=", "self", ".", "parse", "(", "f", ")", "os", ".", "chdir", "(", "current_path", ")", "return", "motifs", ",", "stdout", ",", "stderr" ]
Run ChIPMunk and predict motifs from a FASTA file.

Parameters
----------
bin : str
    Command used to run the tool.
fastafile : str
    Name of the FASTA input file.
params : dict, optional
    Optional parameters. For some of the tools required parameters
    are passed using this dictionary.

Returns
-------
motifs : list of Motif instances
    The predicted motifs.
stdout : str
    Standard out of the tool.
stderr : str
    Standard error of the tool.
[ "Run", "ChIPMunk", "and", "predict", "motifs", "from", "a", "FASTA", "file", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1521-L1596
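The command string above redirects ChIPMunk's stdout to the output file with "1>", which is why the call needs shell=True. A shell-free variant, sketched here with a hypothetical argv list, passes a file handle to Popen instead:

from subprocess import Popen, PIPE

# Hypothetical argv; a real run would substitute the ChIPMunk invocation.
argv = ["chipmunk_wrapper", "8", "20", "y", "1.0", "m:peaks.fa", "100", "10", "1", "4"]
with open("munk_in.fa.out", "w") as out:
    p = Popen(argv, stdout=out, stderr=PIPE)  # stdout goes straight to the file
    _, stderr = p.communicate()
if b"RuntimeException" in stderr:
    print("ChIPMunk failed:", stderr.decode()[:200])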
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
ChIPMunk.parse
def parse(self, fo):
    """
    Convert ChIPMunk output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing ChIPMunk output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    #KDIC|6.124756232026243
    #A|517.9999999999999 42.99999999999999 345.99999999999994 25.999999999999996 602.9999999999999 155.99999999999997 2.9999999999999996 91.99999999999999
    #C|5.999999999999999 4.999999999999999 2.9999999999999996 956.9999999999999 91.99999999999999 17.999999999999996 22.999999999999996 275.99999999999994
    #G|340.99999999999994 943.9999999999999 630.9999999999999 6.999999999999999 16.999999999999996 48.99999999999999 960.9999999999999 14.999999999999998
    #T|134.99999999999997 7.999999999999999 19.999999999999996 9.999999999999998 287.99999999999994 776.9999999999999 12.999999999999998 616.9999999999999
    #N|999.9999999999998
    line = fo.readline()
    if not line:
        return []

    while not line.startswith("A|"):
        line = fo.readline()

    matrix = []
    for _ in range(4):
        matrix.append([float(x) for x in line.strip().split("|")[1].split(" ")])
        line = fo.readline()

    #print matrix
    matrix = [[matrix[x][y] for x in range(4)] for y in range(len(matrix[0]))]
    #print matrix
    m = Motif(matrix)
    m.id = "ChIPMunk_w%s" % len(m)
    return [m]
python
def parse(self, fo):
    """
    Convert ChIPMunk output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing ChIPMunk output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    #KDIC|6.124756232026243
    #A|517.9999999999999 42.99999999999999 345.99999999999994 25.999999999999996 602.9999999999999 155.99999999999997 2.9999999999999996 91.99999999999999
    #C|5.999999999999999 4.999999999999999 2.9999999999999996 956.9999999999999 91.99999999999999 17.999999999999996 22.999999999999996 275.99999999999994
    #G|340.99999999999994 943.9999999999999 630.9999999999999 6.999999999999999 16.999999999999996 48.99999999999999 960.9999999999999 14.999999999999998
    #T|134.99999999999997 7.999999999999999 19.999999999999996 9.999999999999998 287.99999999999994 776.9999999999999 12.999999999999998 616.9999999999999
    #N|999.9999999999998
    line = fo.readline()
    if not line:
        return []

    while not line.startswith("A|"):
        line = fo.readline()

    matrix = []
    for _ in range(4):
        matrix.append([float(x) for x in line.strip().split("|")[1].split(" ")])
        line = fo.readline()

    #print matrix
    matrix = [[matrix[x][y] for x in range(4)] for y in range(len(matrix[0]))]
    #print matrix
    m = Motif(matrix)
    m.id = "ChIPMunk_w%s" % len(m)
    return [m]
[ "def", "parse", "(", "self", ",", "fo", ")", ":", "#KDIC|6.124756232026243", "#A|517.9999999999999 42.99999999999999 345.99999999999994 25.999999999999996 602.9999999999999 155.99999999999997 2.9999999999999996 91.99999999999999", "#C|5.999999999999999 4.999999999999999 2.9999999999999996 956.9999999999999 91.99999999999999 17.999999999999996 22.999999999999996 275.99999999999994", "#G|340.99999999999994 943.9999999999999 630.9999999999999 6.999999999999999 16.999999999999996 48.99999999999999 960.9999999999999 14.999999999999998", "#T|134.99999999999997 7.999999999999999 19.999999999999996 9.999999999999998 287.99999999999994 776.9999999999999 12.999999999999998 616.9999999999999", "#N|999.9999999999998", "line", "=", "fo", ".", "readline", "(", ")", "if", "not", "line", ":", "return", "[", "]", "while", "not", "line", ".", "startswith", "(", "\"A|\"", ")", ":", "line", "=", "fo", ".", "readline", "(", ")", "matrix", "=", "[", "]", "for", "_", "in", "range", "(", "4", ")", ":", "matrix", ".", "append", "(", "[", "float", "(", "x", ")", "for", "x", "in", "line", ".", "strip", "(", ")", ".", "split", "(", "\"|\"", ")", "[", "1", "]", ".", "split", "(", "\" \"", ")", "]", ")", "line", "=", "fo", ".", "readline", "(", ")", "#print matrix", "matrix", "=", "[", "[", "matrix", "[", "x", "]", "[", "y", "]", "for", "x", "in", "range", "(", "4", ")", "]", "for", "y", "in", "range", "(", "len", "(", "matrix", "[", "0", "]", ")", ")", "]", "#print matrix", "m", "=", "Motif", "(", "matrix", ")", "m", ".", "id", "=", "\"ChIPMunk_w%s\"", "%", "len", "(", "m", ")", "return", "[", "m", "]" ]
Convert ChIPMunk output to motifs

Parameters
----------
fo : file-like
    File object containing ChIPMunk output.

Returns
-------
motifs : list
    List of Motif instances.
[ "Convert", "ChIPMunk", "output", "to", "motifs", "Parameters", "----------", "fo", ":", "file", "-", "like", "File", "object", "containing", "ChIPMunk", "output", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1598-L1633
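The nested list comprehension in parse() is a transpose: ChIPMunk prints one row per nucleotide, while Motif expects one row per position. A minimal illustration with made-up counts:

# One row per nucleotide (A, C, G, T), two positions each.
by_nuc = [
    [518.0, 6.0],    # A counts for positions 1..2
    [6.0, 5.0],      # C
    [341.0, 944.0],  # G
    [135.0, 8.0],    # T
]
by_pos = [[by_nuc[x][y] for x in range(4)] for y in range(len(by_nuc[0]))]
print(by_pos)  # [[518.0, 6.0, 341.0, 135.0], [6.0, 5.0, 944.0, 8.0]]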
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Posmo._run_program
def _run_program(self, bin, fastafile, params=None):
    """
    Run Posmo and predict motifs from a FASTA file.

    Parameters
    ----------
    bin : str
        Command used to run the tool.
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.

    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.
    """
    default_params = {}
    if params is not None:
        default_params.update(params)

    # Read the width from the merged parameters; calling params.get()
    # directly would raise an AttributeError when params is None.
    width = default_params.get("width", 8)

    basename = "posmo_in.fa"

    new_file = os.path.join(self.tmpdir, basename)
    shutil.copy(fastafile, new_file)

    fastafile = new_file
    #pwmfile = fastafile + ".pwm"

    motifs = []
    current_path = os.getcwd()
    os.chdir(self.tmpdir)
    for n_ones in range(4, min(width, 11), 2):
        x = "1" * n_ones
        outfile = "%s.%s.out" % (fastafile, x)
        cmd = "%s 5000 %s %s 1.6 2.5 %s 200" % (bin, x, fastafile, width)
        p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderr = p.communicate()
        stdout = stdout.decode()
        stderr = stderr.decode()

        context_file = fastafile.replace(basename, "context.%s.%s.txt" % (basename, x))
        cmd = "%s %s %s simi.txt 0.88 10 2 10" % (bin.replace("posmo","clusterwd"), context_file, outfile)
        p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        stdout += out.decode()
        stderr += err.decode()

        if os.path.exists(outfile):
            with open(outfile) as f:
                motifs += self.parse(f, width, n_ones)

    os.chdir(current_path)

    return motifs, stdout, stderr
python
def _run_program(self, bin, fastafile, params=None):
    """
    Run Posmo and predict motifs from a FASTA file.

    Parameters
    ----------
    bin : str
        Command used to run the tool.
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.

    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.
    """
    default_params = {}
    if params is not None:
        default_params.update(params)

    # Read the width from the merged parameters; calling params.get()
    # directly would raise an AttributeError when params is None.
    width = default_params.get("width", 8)

    basename = "posmo_in.fa"

    new_file = os.path.join(self.tmpdir, basename)
    shutil.copy(fastafile, new_file)

    fastafile = new_file
    #pwmfile = fastafile + ".pwm"

    motifs = []
    current_path = os.getcwd()
    os.chdir(self.tmpdir)
    for n_ones in range(4, min(width, 11), 2):
        x = "1" * n_ones
        outfile = "%s.%s.out" % (fastafile, x)
        cmd = "%s 5000 %s %s 1.6 2.5 %s 200" % (bin, x, fastafile, width)
        p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
        stdout, stderr = p.communicate()
        stdout = stdout.decode()
        stderr = stderr.decode()

        context_file = fastafile.replace(basename, "context.%s.%s.txt" % (basename, x))
        cmd = "%s %s %s simi.txt 0.88 10 2 10" % (bin.replace("posmo","clusterwd"), context_file, outfile)
        p = Popen(cmd, shell=True, stdout=PIPE, stderr=PIPE)
        out, err = p.communicate()
        stdout += out.decode()
        stderr += err.decode()

        if os.path.exists(outfile):
            with open(outfile) as f:
                motifs += self.parse(f, width, n_ones)

    os.chdir(current_path)

    return motifs, stdout, stderr
[ "def", "_run_program", "(", "self", ",", "bin", ",", "fastafile", ",", "params", "=", "None", ")", ":", "default_params", "=", "{", "}", "if", "params", "is", "not", "None", ":", "default_params", ".", "update", "(", "params", ")", "width", "=", "params", ".", "get", "(", "\"width\"", ",", "8", ")", "basename", "=", "\"posmo_in.fa\"", "new_file", "=", "os", ".", "path", ".", "join", "(", "self", ".", "tmpdir", ",", "basename", ")", "shutil", ".", "copy", "(", "fastafile", ",", "new_file", ")", "fastafile", "=", "new_file", "#pwmfile = fastafile + \".pwm\"", "motifs", "=", "[", "]", "current_path", "=", "os", ".", "getcwd", "(", ")", "os", ".", "chdir", "(", "self", ".", "tmpdir", ")", "for", "n_ones", "in", "range", "(", "4", ",", "min", "(", "width", ",", "11", ")", ",", "2", ")", ":", "x", "=", "\"1\"", "*", "n_ones", "outfile", "=", "\"%s.%s.out\"", "%", "(", "fastafile", ",", "x", ")", "cmd", "=", "\"%s 5000 %s %s 1.6 2.5 %s 200\"", "%", "(", "bin", ",", "x", ",", "fastafile", ",", "width", ")", "p", "=", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "stdout", ",", "stderr", "=", "p", ".", "communicate", "(", ")", "stdout", "=", "stdout", ".", "decode", "(", ")", "stderr", "=", "stderr", ".", "decode", "(", ")", "context_file", "=", "fastafile", ".", "replace", "(", "basename", ",", "\"context.%s.%s.txt\"", "%", "(", "basename", ",", "x", ")", ")", "cmd", "=", "\"%s %s %s simi.txt 0.88 10 2 10\"", "%", "(", "bin", ".", "replace", "(", "\"posmo\"", ",", "\"clusterwd\"", ")", ",", "context_file", ",", "outfile", ")", "p", "=", "Popen", "(", "cmd", ",", "shell", "=", "True", ",", "stdout", "=", "PIPE", ",", "stderr", "=", "PIPE", ")", "out", ",", "err", "=", "p", ".", "communicate", "(", ")", "stdout", "+=", "out", ".", "decode", "(", ")", "stderr", "+=", "err", ".", "decode", "(", ")", "if", "os", ".", "path", ".", "exists", "(", "outfile", ")", ":", "with", "open", "(", "outfile", ")", "as", "f", ":", "motifs", "+=", "self", ".", "parse", "(", "f", ",", "width", ",", "n_ones", ")", "os", ".", "chdir", "(", "current_path", ")", "return", "motifs", ",", "stdout", ",", "stderr" ]
Run Posmo and predict motifs from a FASTA file.

Parameters
----------
bin : str
    Command used to run the tool.
fastafile : str
    Name of the FASTA input file.
params : dict, optional
    Optional parameters. For some of the tools required parameters
    are passed using this dictionary.

Returns
-------
motifs : list of Motif instances
    The predicted motifs.
stdout : str
    Standard out of the tool.
stderr : str
    Standard error of the tool.
[ "Run", "Posmo", "and", "predict", "motifs", "from", "a", "FASTA", "file", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1664-L1729
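Posmo is run once per seed mask: runs of '1' of increasing length, capped by the motif width (and 11), stepping by two. A two-line illustration of the masks generated for the default width:

width = 8
for n_ones in range(4, min(width, 11), 2):
    print("1" * n_ones)   # prints '1111', then '111111'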
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Posmo.parse
def parse(self, fo, width, seed=None):
    """
    Convert Posmo output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing Posmo output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    motifs = []

    lines = [fo.readline() for x in range(6)]
    while lines[0]:
        matrix = [[float(x) for x in line.strip().split("\t")] for line in lines[2:]]
        matrix = [[matrix[x][y] for x in range(4)] for y in range(len(matrix[0]))]
        m = Motif(matrix)
        m.trim(0.1)
        m.id = lines[0].strip().split(" ")[-1]
        motifs.append(m)
        lines = [fo.readline() for x in range(6)]

    for i,motif in enumerate(motifs):
        if seed:
            motif.id = "%s_w%s.%s_%s" % (self.name, width, seed, i + 1)
        else:
            motif.id = "%s_w%s_%s" % (self.name, width, i + 1)
        motif.trim(0.25)

    return motifs
python
def parse(self, fo, width, seed=None):
    """
    Convert Posmo output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing Posmo output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    motifs = []

    lines = [fo.readline() for x in range(6)]
    while lines[0]:
        matrix = [[float(x) for x in line.strip().split("\t")] for line in lines[2:]]
        matrix = [[matrix[x][y] for x in range(4)] for y in range(len(matrix[0]))]
        m = Motif(matrix)
        m.trim(0.1)
        m.id = lines[0].strip().split(" ")[-1]
        motifs.append(m)
        lines = [fo.readline() for x in range(6)]

    for i,motif in enumerate(motifs):
        if seed:
            motif.id = "%s_w%s.%s_%s" % (self.name, width, seed, i + 1)
        else:
            motif.id = "%s_w%s_%s" % (self.name, width, i + 1)
        motif.trim(0.25)

    return motifs
[ "def", "parse", "(", "self", ",", "fo", ",", "width", ",", "seed", "=", "None", ")", ":", "motifs", "=", "[", "]", "lines", "=", "[", "fo", ".", "readline", "(", ")", "for", "x", "in", "range", "(", "6", ")", "]", "while", "lines", "[", "0", "]", ":", "matrix", "=", "[", "[", "float", "(", "x", ")", "for", "x", "in", "line", ".", "strip", "(", ")", ".", "split", "(", "\"\\t\"", ")", "]", "for", "line", "in", "lines", "[", "2", ":", "]", "]", "matrix", "=", "[", "[", "matrix", "[", "x", "]", "[", "y", "]", "for", "x", "in", "range", "(", "4", ")", "]", "for", "y", "in", "range", "(", "len", "(", "matrix", "[", "0", "]", ")", ")", "]", "m", "=", "Motif", "(", "matrix", ")", "m", ".", "trim", "(", "0.1", ")", "m", ".", "id", "=", "lines", "[", "0", "]", ".", "strip", "(", ")", ".", "split", "(", "\" \"", ")", "[", "-", "1", "]", "motifs", ".", "append", "(", "m", ")", "lines", "=", "[", "fo", ".", "readline", "(", ")", "for", "x", "in", "range", "(", "6", ")", "]", "for", "i", ",", "motif", "in", "enumerate", "(", "motifs", ")", ":", "if", "seed", ":", "motif", ".", "id", "=", "\"%s_w%s.%s_%s\"", "%", "(", "self", ".", "name", ",", "width", ",", "seed", ",", "i", "+", "1", ")", "else", ":", "motif", ".", "id", "=", "\"%s_w%s_%s\"", "%", "(", "self", ".", "name", ",", "width", ",", "i", "+", "1", ")", "motif", ".", "trim", "(", "0.25", ")", "return", "motifs" ]
Convert Posmo output to motifs

Parameters
----------
fo : file-like
    File object containing Posmo output.

Returns
-------
motifs : list
    List of Motif instances.
[ "Convert", "Posmo", "output", "to", "motifs", "Parameters", "----------", "fo", ":", "file", "-", "like", "File", "object", "containing", "Posmo", "output", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1731-L1764
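parse() consumes its input in fixed blocks of six lines per motif, relying on readline() returning '' at EOF. The toy reader below shows the same pattern on hypothetical data:

import io

# Twelve dummy lines -> two six-line blocks.
fake = io.StringIO("".join("line%d\n" % i for i in range(12)))

blocks = []
lines = [fake.readline() for _ in range(6)]
while lines[0]:                    # readline() returns '' once the file is exhausted
    blocks.append(lines)
    lines = [fake.readline() for _ in range(6)]
print(len(blocks))  # 2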
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Gadem.parse
def parse(self, fo):
    """
    Convert GADEM output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing GADEM output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    motifs = []
    nucs = {"A":0,"C":1,"G":2,"T":3}

    lines = fo.readlines()
    for i in range(0, len(lines), 5):
        align = []
        pwm = []
        pfm = []
        m_id = ""
        line = lines[i].strip()
        m_id = line[1:]
        number = m_id.split("_")[0][1:]
        if os.path.exists("%s.seq" % number):
            with open("%s.seq" % number) as f:
                for l in f:
                    if "x" not in l and "n" not in l:
                        l = l.strip().upper()
                        align.append(l)
                        if not pfm:
                            pfm = [[0 for x in range(4)] for x in range(len(l))]
                        for p in range(len(l)):
                            pfm[p][nucs[l[p]]] += 1

        m = [l.strip().split(" ")[1].split("\t") for l in lines[i + 1: i + 5]]

        pwm = [[float(m[x][y]) for x in range(4)] for y in range(len(m[0]))]

        motifs.append(Motif(pwm))
        motifs[-1].id = "{}_{}".format(self.name, m_id)
        #motifs[-1].pwm = pwm
        if align:
            motifs[-1].pfm = pfm
            motifs[-1].align = align

    return motifs
python
def parse(self, fo):
    """
    Convert GADEM output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing GADEM output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    motifs = []
    nucs = {"A":0,"C":1,"G":2,"T":3}

    lines = fo.readlines()
    for i in range(0, len(lines), 5):
        align = []
        pwm = []
        pfm = []
        m_id = ""
        line = lines[i].strip()
        m_id = line[1:]
        number = m_id.split("_")[0][1:]
        if os.path.exists("%s.seq" % number):
            with open("%s.seq" % number) as f:
                for l in f:
                    if "x" not in l and "n" not in l:
                        l = l.strip().upper()
                        align.append(l)
                        if not pfm:
                            pfm = [[0 for x in range(4)] for x in range(len(l))]
                        for p in range(len(l)):
                            pfm[p][nucs[l[p]]] += 1

        m = [l.strip().split(" ")[1].split("\t") for l in lines[i + 1: i + 5]]

        pwm = [[float(m[x][y]) for x in range(4)] for y in range(len(m[0]))]

        motifs.append(Motif(pwm))
        motifs[-1].id = "{}_{}".format(self.name, m_id)
        #motifs[-1].pwm = pwm
        if align:
            motifs[-1].pfm = pfm
            motifs[-1].align = align

    return motifs
[ "def", "parse", "(", "self", ",", "fo", ")", ":", "motifs", "=", "[", "]", "nucs", "=", "{", "\"A\"", ":", "0", ",", "\"C\"", ":", "1", ",", "\"G\"", ":", "2", ",", "\"T\"", ":", "3", "}", "lines", "=", "fo", ".", "readlines", "(", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "lines", ")", ",", "5", ")", ":", "align", "=", "[", "]", "pwm", "=", "[", "]", "pfm", "=", "[", "]", "m_id", "=", "\"\"", "line", "=", "lines", "[", "i", "]", ".", "strip", "(", ")", "m_id", "=", "line", "[", "1", ":", "]", "number", "=", "m_id", ".", "split", "(", "\"_\"", ")", "[", "0", "]", "[", "1", ":", "]", "if", "os", ".", "path", ".", "exists", "(", "\"%s.seq\"", "%", "number", ")", ":", "with", "open", "(", "\"%s.seq\"", "%", "number", ")", "as", "f", ":", "for", "l", "in", "f", ":", "if", "\"x\"", "not", "in", "l", "and", "\"n\"", "not", "in", "l", ":", "l", "=", "l", ".", "strip", "(", ")", ".", "upper", "(", ")", "align", ".", "append", "(", "l", ")", "if", "not", "pfm", ":", "pfm", "=", "[", "[", "0", "for", "x", "in", "range", "(", "4", ")", "]", "for", "x", "in", "range", "(", "len", "(", "l", ")", ")", "]", "for", "p", "in", "range", "(", "len", "(", "l", ")", ")", ":", "pfm", "[", "p", "]", "[", "nucs", "[", "l", "[", "p", "]", "]", "]", "+=", "1", "m", "=", "[", "l", ".", "strip", "(", ")", ".", "split", "(", "\" \"", ")", "[", "1", "]", ".", "split", "(", "\"\\t\"", ")", "for", "l", "in", "lines", "[", "i", "+", "1", ":", "i", "+", "5", "]", "]", "pwm", "=", "[", "[", "float", "(", "m", "[", "x", "]", "[", "y", "]", ")", "for", "x", "in", "range", "(", "4", ")", "]", "for", "y", "in", "range", "(", "len", "(", "m", "[", "0", "]", ")", ")", "]", "motifs", ".", "append", "(", "Motif", "(", "pwm", ")", ")", "motifs", "[", "-", "1", "]", ".", "id", "=", "\"{}_{}\"", ".", "format", "(", "self", ".", "name", ",", "m_id", ")", "#motifs[-1].pwm = pwm", "if", "align", ":", "motifs", "[", "-", "1", "]", ".", "pfm", "=", "pfm", "motifs", "[", "-", "1", "]", ".", "align", "=", "align", "return", "motifs" ]
Convert GADEM output to motifs

Parameters
----------
fo : file-like
    File object containing GADEM output.

Returns
-------
motifs : list
    List of Motif instances.
[ "Convert", "GADEM", "output", "to", "motifs", "Parameters", "----------", "fo", ":", "file", "-", "like", "File", "object", "containing", "GADEM", "output", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1847-L1896
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Jaspar._run_program
def _run_program(self, bin, fastafile, params=None):
    """
    Get enriched JASPAR motifs in a FASTA file.

    Parameters
    ----------
    bin : str
        Command used to run the tool.
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.

    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.
    """
    fname = os.path.join(self.config.get_motif_dir(), "JASPAR2010_vertebrate.pwm")
    motifs = read_motifs(fname, fmt="pwm")

    for motif in motifs:
        motif.id = "JASPAR_%s" % motif.id

    return motifs, "", ""
python
def _run_program(self, bin, fastafile, params=None):
    """
    Get enriched JASPAR motifs in a FASTA file.

    Parameters
    ----------
    bin : str
        Command used to run the tool.
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.

    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.
    """
    fname = os.path.join(self.config.get_motif_dir(), "JASPAR2010_vertebrate.pwm")
    motifs = read_motifs(fname, fmt="pwm")

    for motif in motifs:
        motif.id = "JASPAR_%s" % motif.id

    return motifs, "", ""
[ "def", "_run_program", "(", "self", ",", "bin", ",", "fastafile", ",", "params", "=", "None", ")", ":", "fname", "=", "os", ".", "path", ".", "join", "(", "self", ".", "config", ".", "get_motif_dir", "(", ")", ",", "\"JASPAR2010_vertebrate.pwm\"", ")", "motifs", "=", "read_motifs", "(", "fname", ",", "fmt", "=", "\"pwm\"", ")", "for", "motif", "in", "motifs", ":", "motif", ".", "id", "=", "\"JASPAR_%s\"", "%", "motif", ".", "id", "return", "motifs", ",", "\"\"", ",", "\"\"" ]
Get enriched JASPAR motifs in a FASTA file.

Parameters
----------
bin : str
    Command used to run the tool.
fastafile : str
    Name of the FASTA input file.
params : dict, optional
    Optional parameters. For some of the tools required parameters
    are passed using this dictionary.

Returns
-------
motifs : list of Motif instances
    The predicted motifs.
stdout : str
    Standard out of the tool.
stderr : str
    Standard error of the tool.
[ "Get", "enriched", "JASPAR", "motifs", "in", "a", "FASTA", "file", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1919-L1951
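This "predictor" simply loads a bundled motif database with read_motifs(), the same call shown in the code above. A hedged usage sketch, assuming gimmemotifs is installed; the file name here is a placeholder:

from gimmemotifs.motif import read_motifs

# Placeholder path; the wrapper resolves the real file inside the motif dir.
motifs = read_motifs("JASPAR2010_vertebrate.pwm", fmt="pwm")
for motif in motifs[:3]:
    print(motif.id, len(motif))   # motif id and its width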
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Meme._run_program
def _run_program(self, bin, fastafile, params=None):
    """
    Run MEME and predict motifs from a FASTA file.

    Parameters
    ----------
    bin : str
        Command used to run the tool.
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.

    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.
    """
    default_params = {"width":10, "single":False, "number":10}
    if params is not None:
        default_params.update(params)

    tmp = NamedTemporaryFile(dir=self.tmpdir)
    tmpname = tmp.name

    strand = "-revcomp"
    width = default_params["width"]
    number = default_params["number"]

    cmd = [bin, fastafile, "-text","-dna","-nostatus","-mod", "zoops","-nmotifs", "%s" % number, "-w","%s" % width, "-maxsize", "10000000"]
    if not default_params["single"]:
        cmd.append(strand)

    #sys.stderr.write(" ".join(cmd) + "\n")
    p = Popen(cmd, bufsize=1, stderr=PIPE, stdout=PIPE)
    stdout,stderr = p.communicate()

    motifs = []
    motifs = self.parse(io.StringIO(stdout.decode()))

    # Delete temporary files
    tmp.close()

    return motifs, stdout, stderr
python
def _run_program(self, bin, fastafile, params=None):
    """
    Run MEME and predict motifs from a FASTA file.

    Parameters
    ----------
    bin : str
        Command used to run the tool.
    fastafile : str
        Name of the FASTA input file.
    params : dict, optional
        Optional parameters. For some of the tools required parameters
        are passed using this dictionary.

    Returns
    -------
    motifs : list of Motif instances
        The predicted motifs.
    stdout : str
        Standard out of the tool.
    stderr : str
        Standard error of the tool.
    """
    default_params = {"width":10, "single":False, "number":10}
    if params is not None:
        default_params.update(params)

    tmp = NamedTemporaryFile(dir=self.tmpdir)
    tmpname = tmp.name

    strand = "-revcomp"
    width = default_params["width"]
    number = default_params["number"]

    cmd = [bin, fastafile, "-text","-dna","-nostatus","-mod", "zoops","-nmotifs", "%s" % number, "-w","%s" % width, "-maxsize", "10000000"]
    if not default_params["single"]:
        cmd.append(strand)

    #sys.stderr.write(" ".join(cmd) + "\n")
    p = Popen(cmd, bufsize=1, stderr=PIPE, stdout=PIPE)
    stdout,stderr = p.communicate()

    motifs = []
    motifs = self.parse(io.StringIO(stdout.decode()))

    # Delete temporary files
    tmp.close()

    return motifs, stdout, stderr
[ "def", "_run_program", "(", "self", ",", "bin", ",", "fastafile", ",", "params", "=", "None", ")", ":", "default_params", "=", "{", "\"width\"", ":", "10", ",", "\"single\"", ":", "False", ",", "\"number\"", ":", "10", "}", "if", "params", "is", "not", "None", ":", "default_params", ".", "update", "(", "params", ")", "tmp", "=", "NamedTemporaryFile", "(", "dir", "=", "self", ".", "tmpdir", ")", "tmpname", "=", "tmp", ".", "name", "strand", "=", "\"-revcomp\"", "width", "=", "default_params", "[", "\"width\"", "]", "number", "=", "default_params", "[", "\"number\"", "]", "cmd", "=", "[", "bin", ",", "fastafile", ",", "\"-text\"", ",", "\"-dna\"", ",", "\"-nostatus\"", ",", "\"-mod\"", ",", "\"zoops\"", ",", "\"-nmotifs\"", ",", "\"%s\"", "%", "number", ",", "\"-w\"", ",", "\"%s\"", "%", "width", ",", "\"-maxsize\"", ",", "\"10000000\"", "]", "if", "not", "default_params", "[", "\"single\"", "]", ":", "cmd", ".", "append", "(", "strand", ")", "#sys.stderr.write(\" \".join(cmd) + \"\\n\")", "p", "=", "Popen", "(", "cmd", ",", "bufsize", "=", "1", ",", "stderr", "=", "PIPE", ",", "stdout", "=", "PIPE", ")", "stdout", ",", "stderr", "=", "p", ".", "communicate", "(", ")", "motifs", "=", "[", "]", "motifs", "=", "self", ".", "parse", "(", "io", ".", "StringIO", "(", "stdout", ".", "decode", "(", ")", ")", ")", "# Delete temporary files", "tmp", ".", "close", "(", ")", "return", "motifs", ",", "stdout", ",", "stderr" ]
Run MEME and predict motifs from a FASTA file.

Parameters
----------
bin : str
    Command used to run the tool.
fastafile : str
    Name of the FASTA input file.
params : dict, optional
    Optional parameters. For some of the tools required parameters
    are passed using this dictionary.

Returns
-------
motifs : list of Motif instances
    The predicted motifs.
stdout : str
    Standard out of the tool.
stderr : str
    Standard error of the tool.
[ "Run", "MEME", "and", "predict", "motifs", "from", "a", "FASTA", "file", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L1981-L2033
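Unlike most wrappers in this module, the MEME call passes Popen an argument list instead of a shell string, so file names with spaces or shell metacharacters cannot break the command. A minimal illustration of that design choice (Unix echo used as a stand-in command):

from subprocess import Popen, PIPE

# With a list argv there is no shell parsing step, so this argument is
# passed through verbatim rather than interpreted.
cmd = ["echo", "-n", "my peaks;rm -rf.fa"]
p = Popen(cmd, stdout=PIPE, stderr=PIPE)
stdout, _ = p.communicate()
print(stdout.decode())  # printed verbatim, never executed by a shell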
vanheeringen-lab/gimmemotifs
gimmemotifs/tools.py
Meme.parse
def parse(self, fo):
    """
    Convert MEME output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing MEME output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    motifs = []
    nucs = {"A":0,"C":1,"G":2,"T":3}

    # Raw strings avoid invalid-escape warnings for the \d and \s classes.
    p = re.compile(r'MOTIF.+MEME-(\d+)\s*width\s*=\s*(\d+)\s+sites\s*=\s*(\d+)')
    pa = re.compile(r'\)\s+([A-Z]+)')
    line = fo.readline()
    while line:
        m = p.search(line)
        align = []
        pfm = None
        if m:
            #print(m.group(0))
            id = "%s_%s_w%s" % (self.name, m.group(1), m.group(2))
            while not line.startswith("//"):
                ma = pa.search(line)
                if ma:
                    #print(ma.group(0))
                    l = ma.group(1)
                    align.append(l)
                    if not pfm:
                        pfm = [[0 for x in range(4)] for x in range(len(l))]
                    for pos in range(len(l)):
                        if l[pos] in nucs:
                            pfm[pos][nucs[l[pos]]] += 1
                        else:
                            for i in range(4):
                                pfm[pos][i] += 0.25
                line = fo.readline()
            motifs.append(Motif(pfm[:]))
            motifs[-1].id = id
            motifs[-1].align = align[:]
        line = fo.readline()

    return motifs
python
def parse(self, fo):
    """
    Convert MEME output to motifs

    Parameters
    ----------
    fo : file-like
        File object containing MEME output.

    Returns
    -------
    motifs : list
        List of Motif instances.
    """
    motifs = []
    nucs = {"A":0,"C":1,"G":2,"T":3}

    # Raw strings avoid invalid-escape warnings for the \d and \s classes.
    p = re.compile(r'MOTIF.+MEME-(\d+)\s*width\s*=\s*(\d+)\s+sites\s*=\s*(\d+)')
    pa = re.compile(r'\)\s+([A-Z]+)')
    line = fo.readline()
    while line:
        m = p.search(line)
        align = []
        pfm = None
        if m:
            #print(m.group(0))
            id = "%s_%s_w%s" % (self.name, m.group(1), m.group(2))
            while not line.startswith("//"):
                ma = pa.search(line)
                if ma:
                    #print(ma.group(0))
                    l = ma.group(1)
                    align.append(l)
                    if not pfm:
                        pfm = [[0 for x in range(4)] for x in range(len(l))]
                    for pos in range(len(l)):
                        if l[pos] in nucs:
                            pfm[pos][nucs[l[pos]]] += 1
                        else:
                            for i in range(4):
                                pfm[pos][i] += 0.25
                line = fo.readline()
            motifs.append(Motif(pfm[:]))
            motifs[-1].id = id
            motifs[-1].align = align[:]
        line = fo.readline()

    return motifs
[ "def", "parse", "(", "self", ",", "fo", ")", ":", "motifs", "=", "[", "]", "nucs", "=", "{", "\"A\"", ":", "0", ",", "\"C\"", ":", "1", ",", "\"G\"", ":", "2", ",", "\"T\"", ":", "3", "}", "p", "=", "re", ".", "compile", "(", "'MOTIF.+MEME-(\\d+)\\s*width\\s*=\\s*(\\d+)\\s+sites\\s*=\\s*(\\d+)'", ")", "pa", "=", "re", ".", "compile", "(", "'\\)\\s+([A-Z]+)'", ")", "line", "=", "fo", ".", "readline", "(", ")", "while", "line", ":", "m", "=", "p", ".", "search", "(", "line", ")", "align", "=", "[", "]", "pfm", "=", "None", "if", "m", ":", "#print(m.group(0))", "id", "=", "\"%s_%s_w%s\"", "%", "(", "self", ".", "name", ",", "m", ".", "group", "(", "1", ")", ",", "m", ".", "group", "(", "2", ")", ")", "while", "not", "line", ".", "startswith", "(", "\"//\"", ")", ":", "ma", "=", "pa", ".", "search", "(", "line", ")", "if", "ma", ":", "#print(ma.group(0))", "l", "=", "ma", ".", "group", "(", "1", ")", "align", ".", "append", "(", "l", ")", "if", "not", "pfm", ":", "pfm", "=", "[", "[", "0", "for", "x", "in", "range", "(", "4", ")", "]", "for", "x", "in", "range", "(", "len", "(", "l", ")", ")", "]", "for", "pos", "in", "range", "(", "len", "(", "l", ")", ")", ":", "if", "l", "[", "pos", "]", "in", "nucs", ":", "pfm", "[", "pos", "]", "[", "nucs", "[", "l", "[", "pos", "]", "]", "]", "+=", "1", "else", ":", "for", "i", "in", "range", "(", "4", ")", ":", "pfm", "[", "pos", "]", "[", "i", "]", "+=", "0.25", "line", "=", "fo", ".", "readline", "(", ")", "motifs", ".", "append", "(", "Motif", "(", "pfm", "[", ":", "]", ")", ")", "motifs", "[", "-", "1", "]", ".", "id", "=", "id", "motifs", "[", "-", "1", "]", ".", "align", "=", "align", "[", ":", "]", "line", "=", "fo", ".", "readline", "(", ")", "return", "motifs" ]
Convert MEME output to motifs

Parameters
----------
fo : file-like
    File object containing MEME output.

Returns
-------
motifs : list
    List of Motif instances.
[ "Convert", "MEME", "output", "to", "motifs", "Parameters", "----------", "fo", ":", "file", "-", "like", "File", "object", "containing", "MEME", "output", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/tools.py#L2035-L2084
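The header pattern from parse() can be tried on a hypothetical MEME-text-style line to show which groups carry the motif number, width, and site count (the sample line is an illustration; real MEME headers vary slightly between versions):

import re

p = re.compile(r'MOTIF.+MEME-(\d+)\s*width\s*=\s*(\d+)\s+sites\s*=\s*(\d+)')

line = "MOTIF AAGTGT MEME-1  width =   6  sites =  24  llr = 241  E-value = 1.3e-012"
m = p.search(line)
print(m.group(1), m.group(2), m.group(3))  # 1 6 24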
vanheeringen-lab/gimmemotifs
gimmemotifs/maelstrom.py
scan_to_table
def scan_to_table(input_table, genome, scoring, pwmfile=None, ncpus=None): """Scan regions in input table with motifs. Parameters ---------- input_table : str Filename of input table. Can be either a tab-separated text file or a feather file. genome : str Genome name. Can be either the name of a FASTA-formatted file or a genomepy genome name. scoring : str "count" or "score" pwmfile : str, optional Specify a PFM file for scanning. ncpus : int, optional If defined this specifies the number of cores to use. Returns ------- table : pandas.DataFrame DataFrame with motif ids as column names and regions as index. Values are either counts or scores depending on the 'scoring' parameter. """ config = MotifConfig() if pwmfile is None: pwmfile = config.get_default_params().get("motif_db", None) if pwmfile is not None: pwmfile = os.path.join(config.get_motif_dir(), pwmfile) if pwmfile is None: raise ValueError("no pwmfile given and no default database specified") logger.info("reading table") if input_table.endswith("feather"): df = pd.read_feather(input_table) idx = df.iloc[:,0].values else: df = pd.read_table(input_table, index_col=0, comment="#") idx = df.index regions = list(idx) s = Scanner(ncpus=ncpus) s.set_motifs(pwmfile) s.set_genome(genome) s.set_background(genome=genome) nregions = len(regions) scores = [] if scoring == "count": logger.info("setting threshold") s.set_threshold(fpr=FPR) logger.info("creating count table") for row in s.count(regions): scores.append(row) logger.info("done") else: s.set_threshold(threshold=0.0) logger.info("creating score table") for row in s.best_score(regions, normalize=True): scores.append(row) logger.info("done") motif_names = [m.id for m in read_motifs(pwmfile)] logger.info("creating dataframe") return pd.DataFrame(scores, index=idx, columns=motif_names)
python
def scan_to_table(input_table, genome, scoring, pwmfile=None, ncpus=None): """Scan regions in input table with motifs. Parameters ---------- input_table : str Filename of input table. Can be either a tab-separated text file or a feather file. genome : str Genome name. Can be either the name of a FASTA-formatted file or a genomepy genome name. scoring : str "count" or "score" pwmfile : str, optional Specify a PFM file for scanning. ncpus : int, optional If defined this specifies the number of cores to use. Returns ------- table : pandas.DataFrame DataFrame with motif ids as column names and regions as index. Values are either counts or scores depending on the 'scoring' parameter. """ config = MotifConfig() if pwmfile is None: pwmfile = config.get_default_params().get("motif_db", None) if pwmfile is not None: pwmfile = os.path.join(config.get_motif_dir(), pwmfile) if pwmfile is None: raise ValueError("no pwmfile given and no default database specified") logger.info("reading table") if input_table.endswith("feather"): df = pd.read_feather(input_table) idx = df.iloc[:,0].values else: df = pd.read_table(input_table, index_col=0, comment="#") idx = df.index regions = list(idx) s = Scanner(ncpus=ncpus) s.set_motifs(pwmfile) s.set_genome(genome) s.set_background(genome=genome) nregions = len(regions) scores = [] if scoring == "count": logger.info("setting threshold") s.set_threshold(fpr=FPR) logger.info("creating count table") for row in s.count(regions): scores.append(row) logger.info("done") else: s.set_threshold(threshold=0.0) logger.info("creating score table") for row in s.best_score(regions, normalize=True): scores.append(row) logger.info("done") motif_names = [m.id for m in read_motifs(pwmfile)] logger.info("creating dataframe") return pd.DataFrame(scores, index=idx, columns=motif_names)
[ "def", "scan_to_table", "(", "input_table", ",", "genome", ",", "scoring", ",", "pwmfile", "=", "None", ",", "ncpus", "=", "None", ")", ":", "config", "=", "MotifConfig", "(", ")", "if", "pwmfile", "is", "None", ":", "pwmfile", "=", "config", ".", "get_default_params", "(", ")", ".", "get", "(", "\"motif_db\"", ",", "None", ")", "if", "pwmfile", "is", "not", "None", ":", "pwmfile", "=", "os", ".", "path", ".", "join", "(", "config", ".", "get_motif_dir", "(", ")", ",", "pwmfile", ")", "if", "pwmfile", "is", "None", ":", "raise", "ValueError", "(", "\"no pwmfile given and no default database specified\"", ")", "logger", ".", "info", "(", "\"reading table\"", ")", "if", "input_table", ".", "endswith", "(", "\"feather\"", ")", ":", "df", "=", "pd", ".", "read_feather", "(", "input_table", ")", "idx", "=", "df", ".", "iloc", "[", ":", ",", "0", "]", ".", "values", "else", ":", "df", "=", "pd", ".", "read_table", "(", "input_table", ",", "index_col", "=", "0", ",", "comment", "=", "\"#\"", ")", "idx", "=", "df", ".", "index", "regions", "=", "list", "(", "idx", ")", "s", "=", "Scanner", "(", "ncpus", "=", "ncpus", ")", "s", ".", "set_motifs", "(", "pwmfile", ")", "s", ".", "set_genome", "(", "genome", ")", "s", ".", "set_background", "(", "genome", "=", "genome", ")", "nregions", "=", "len", "(", "regions", ")", "scores", "=", "[", "]", "if", "scoring", "==", "\"count\"", ":", "logger", ".", "info", "(", "\"setting threshold\"", ")", "s", ".", "set_threshold", "(", "fpr", "=", "FPR", ")", "logger", ".", "info", "(", "\"creating count table\"", ")", "for", "row", "in", "s", ".", "count", "(", "regions", ")", ":", "scores", ".", "append", "(", "row", ")", "logger", ".", "info", "(", "\"done\"", ")", "else", ":", "s", ".", "set_threshold", "(", "threshold", "=", "0.0", ")", "logger", ".", "info", "(", "\"creating score table\"", ")", "for", "row", "in", "s", ".", "best_score", "(", "regions", ",", "normalize", "=", "True", ")", ":", "scores", ".", "append", "(", "row", ")", "logger", ".", "info", "(", "\"done\"", ")", "motif_names", "=", "[", "m", ".", "id", "for", "m", "in", "read_motifs", "(", "pwmfile", ")", "]", "logger", ".", "info", "(", "\"creating dataframe\"", ")", "return", "pd", ".", "DataFrame", "(", "scores", ",", "index", "=", "idx", ",", "columns", "=", "motif_names", ")" ]
Scan regions in input table with motifs. Parameters ---------- input_table : str Filename of input table. Can be either a tab-separated text file or a feather file. genome : str Genome name. Can be either the name of a FASTA-formatted file or a genomepy genome name. scoring : str "count" or "score" pwmfile : str, optional Specify a PFM file for scanning. ncpus : int, optional If defined this specifies the number of cores to use. Returns ------- table : pandas.DataFrame DataFrame with motif ids as column names and regions as index. Values are either counts or scores depending on the 'scoring' parameter.
[ "Scan", "regions", "in", "input", "table", "with", "motifs", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/maelstrom.py#L52-L123
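A usage sketch, assuming gimmemotifs is installed; the table filename and genome name below are placeholders.
from gimmemotifs.maelstrom import scan_to_table

# Count motif matches (thresholded at the module's FPR) per region.
counts = scan_to_table("regions.txt", "hg19", "count", ncpus=4)

# Or take the best normalized motif score per region.
scores = scan_to_table("regions.txt", "hg19", "score")
print(counts.shape)  # (n_regions, n_motifs)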
vanheeringen-lab/gimmemotifs
gimmemotifs/maelstrom.py
run_maelstrom
def run_maelstrom(infile, genome, outdir, pwmfile=None, plot=True, cluster=False, score_table=None, count_table=None, methods=None, ncpus=None): """Run maelstrom on an input table. Parameters ---------- infile : str Filename of input table. Can be either a tab-separated text file or a feather file. genome : str Genome name. Can be either the name of a FASTA-formatted file or a genomepy genome name. outdir : str Output directory for all results. pwmfile : str, optional Specify a PFM file for scanning. plot : bool, optional Create heatmaps. cluster : bool, optional If True and if the input table has more than one column, the data is clustered and the cluster activity methods are also run. Not well-tested. score_table : str, optional Filename of pre-calculated table with motif scores. count_table : str, optional Filename of pre-calculated table with motif counts. methods : list, optional Activity methods to use. By default, all are used. ncpus : int, optional If defined this specifies the number of cores to use. """ logger.info("Starting maelstrom") if infile.endswith("feather"): df = pd.read_feather(infile) df = df.set_index(df.columns[0]) else: df = pd.read_table(infile, index_col=0, comment="#") # Check for duplicates if df.index.duplicated(keep=False).any(): raise ValueError("Input file contains duplicate regions! " "Please remove them.") if not os.path.exists(outdir): os.mkdir(outdir) if methods is None: methods = Moap.list_predictors() methods = [m.lower() for m in methods] shutil.copyfile(infile, os.path.join(outdir, "input.table.txt")) # Copy the motif information pwmfile = pwmfile_location(pwmfile) if pwmfile: shutil.copy2(pwmfile, outdir) mapfile = re.sub(".p[fw]m$", ".motif2factors.txt", pwmfile) if os.path.exists(mapfile): shutil.copy2(mapfile, outdir) # Create a file with the number of motif matches if not count_table: count_table = os.path.join(outdir, "motif.count.txt.gz") if not os.path.exists(count_table): logger.info("Motif scanning (counts)") counts = scan_to_table(infile, genome, "count", pwmfile=pwmfile, ncpus=ncpus) counts.to_csv(count_table, sep="\t", compression="gzip") else: logger.info("Counts, using: %s", count_table) # Create a file with the score of the best motif match if not score_table: score_table = os.path.join(outdir, "motif.score.txt.gz") if not os.path.exists(score_table): logger.info("Motif scanning (scores)") scores = scan_to_table(infile, genome, "score", pwmfile=pwmfile, ncpus=ncpus) scores.to_csv(score_table, sep="\t", float_format="%.3f", compression="gzip") else: logger.info("Scores, using: %s", score_table) if cluster: cluster = False for method in methods: m = Moap.create(method, ncpus=ncpus) if m.ptype == "classification": cluster = True break if not cluster: logger.info("Skipping clustering, no classification methods") exps = [] clusterfile = infile if df.shape[1] != 1: # More than one column for method in Moap.list_regression_predictors(): if method in methods: m = Moap.create(method, ncpus=ncpus) exps.append([method, m.pref_table, infile]) logger.debug("Adding %s", method) if cluster: clusterfile = os.path.join(outdir, os.path.basename(infile) + ".cluster.txt") df[:] = scale(df, axis=0) names = df.columns df_changed = pd.DataFrame(index=df.index) df_changed["cluster"] = np.nan for name in names: df_changed.loc[(df[name] - df.loc[:,df.columns != name].max(1)) > 0.5, "cluster"] = name df_changed.dropna().to_csv(clusterfile, sep="\t") if df.shape[1] == 1 or cluster: for method in Moap.list_classification_predictors(): if method in methods: m = Moap.create(method, ncpus=ncpus) exps.append([method, m.pref_table, clusterfile]) if len(exps) == 0: logger.error("No method to run.") sys.exit(1) for method, scoring, fname in exps: try: if scoring == "count" and count_table: moap_with_table(fname, count_table, outdir, method, scoring, ncpus=ncpus) elif scoring == "score" and score_table: moap_with_table(fname, score_table, outdir, method, scoring, ncpus=ncpus) else: moap_with_bg(fname, genome, outdir, method, scoring, pwmfile=pwmfile, ncpus=ncpus) except Exception as e: logger.warn("Method %s with scoring %s failed", method, scoring) logger.warn(e) logger.warn("Skipping") raise dfs = {} for method, scoring,fname in exps: t = "{}.{}".format(method,scoring) fname = os.path.join(outdir, "activity.{}.{}.out.txt".format( method, scoring)) try: dfs[t] = pd.read_table(fname, index_col=0, comment="#") except: logging.warn("Activity file for {} not found!\n".format(t)) if len(methods) > 1: logger.info("Rank aggregation") df_p = df_rank_aggregation(df, dfs, exps) df_p.to_csv(os.path.join(outdir, "final.out.csv"), sep="\t") #df_p = df_p.join(m2f) # Write motif frequency table if df.shape[1] == 1: mcount = df.join(pd.read_table(count_table, index_col=0, comment="#")) m_group = mcount.groupby(df.columns[0]) freq = m_group.sum() / m_group.count() freq.to_csv(os.path.join(outdir, "motif.freq.txt"), sep="\t") if plot and len(methods) > 1: logger.info("html report") maelstrom_html_report( outdir, os.path.join(outdir, "final.out.csv"), pwmfile ) logger.info(os.path.join(outdir, "gimme.maelstrom.report.html"))
python
def run_maelstrom(infile, genome, outdir, pwmfile=None, plot=True, cluster=False, score_table=None, count_table=None, methods=None, ncpus=None): """Run maelstrom on an input table. Parameters ---------- infile : str Filename of input table. Can be either a tab-separated text file or a feather file. genome : str Genome name. Can be either the name of a FASTA-formatted file or a genomepy genome name. outdir : str Output directory for all results. pwmfile : str, optional Specify a PFM file for scanning. plot : bool, optional Create heatmaps. cluster : bool, optional If True and if the input table has more than one column, the data is clustered and the cluster activity methods are also run. Not well-tested. score_table : str, optional Filename of pre-calculated table with motif scores. count_table : str, optional Filename of pre-calculated table with motif counts. methods : list, optional Activity methods to use. By default, all are used. ncpus : int, optional If defined this specifies the number of cores to use. """ logger.info("Starting maelstrom") if infile.endswith("feather"): df = pd.read_feather(infile) df = df.set_index(df.columns[0]) else: df = pd.read_table(infile, index_col=0, comment="#") # Check for duplicates if df.index.duplicated(keep=False).any(): raise ValueError("Input file contains duplicate regions! " "Please remove them.") if not os.path.exists(outdir): os.mkdir(outdir) if methods is None: methods = Moap.list_predictors() methods = [m.lower() for m in methods] shutil.copyfile(infile, os.path.join(outdir, "input.table.txt")) # Copy the motif information pwmfile = pwmfile_location(pwmfile) if pwmfile: shutil.copy2(pwmfile, outdir) mapfile = re.sub(".p[fw]m$", ".motif2factors.txt", pwmfile) if os.path.exists(mapfile): shutil.copy2(mapfile, outdir) # Create a file with the number of motif matches if not count_table: count_table = os.path.join(outdir, "motif.count.txt.gz") if not os.path.exists(count_table): logger.info("Motif scanning (counts)") counts = scan_to_table(infile, genome, "count", pwmfile=pwmfile, ncpus=ncpus) counts.to_csv(count_table, sep="\t", compression="gzip") else: logger.info("Counts, using: %s", count_table) # Create a file with the score of the best motif match if not score_table: score_table = os.path.join(outdir, "motif.score.txt.gz") if not os.path.exists(score_table): logger.info("Motif scanning (scores)") scores = scan_to_table(infile, genome, "score", pwmfile=pwmfile, ncpus=ncpus) scores.to_csv(score_table, sep="\t", float_format="%.3f", compression="gzip") else: logger.info("Scores, using: %s", score_table) if cluster: cluster = False for method in methods: m = Moap.create(method, ncpus=ncpus) if m.ptype == "classification": cluster = True break if not cluster: logger.info("Skipping clustering, no classification methods") exps = [] clusterfile = infile if df.shape[1] != 1: # More than one column for method in Moap.list_regression_predictors(): if method in methods: m = Moap.create(method, ncpus=ncpus) exps.append([method, m.pref_table, infile]) logger.debug("Adding %s", method) if cluster: clusterfile = os.path.join(outdir, os.path.basename(infile) + ".cluster.txt") df[:] = scale(df, axis=0) names = df.columns df_changed = pd.DataFrame(index=df.index) df_changed["cluster"] = np.nan for name in names: df_changed.loc[(df[name] - df.loc[:,df.columns != name].max(1)) > 0.5, "cluster"] = name df_changed.dropna().to_csv(clusterfile, sep="\t") if df.shape[1] == 1 or cluster: for method in Moap.list_classification_predictors(): if method in methods: m = Moap.create(method, ncpus=ncpus) exps.append([method, m.pref_table, clusterfile]) if len(exps) == 0: logger.error("No method to run.") sys.exit(1) for method, scoring, fname in exps: try: if scoring == "count" and count_table: moap_with_table(fname, count_table, outdir, method, scoring, ncpus=ncpus) elif scoring == "score" and score_table: moap_with_table(fname, score_table, outdir, method, scoring, ncpus=ncpus) else: moap_with_bg(fname, genome, outdir, method, scoring, pwmfile=pwmfile, ncpus=ncpus) except Exception as e: logger.warn("Method %s with scoring %s failed", method, scoring) logger.warn(e) logger.warn("Skipping") raise dfs = {} for method, scoring,fname in exps: t = "{}.{}".format(method,scoring) fname = os.path.join(outdir, "activity.{}.{}.out.txt".format( method, scoring)) try: dfs[t] = pd.read_table(fname, index_col=0, comment="#") except: logging.warn("Activity file for {} not found!\n".format(t)) if len(methods) > 1: logger.info("Rank aggregation") df_p = df_rank_aggregation(df, dfs, exps) df_p.to_csv(os.path.join(outdir, "final.out.csv"), sep="\t") #df_p = df_p.join(m2f) # Write motif frequency table if df.shape[1] == 1: mcount = df.join(pd.read_table(count_table, index_col=0, comment="#")) m_group = mcount.groupby(df.columns[0]) freq = m_group.sum() / m_group.count() freq.to_csv(os.path.join(outdir, "motif.freq.txt"), sep="\t") if plot and len(methods) > 1: logger.info("html report") maelstrom_html_report( outdir, os.path.join(outdir, "final.out.csv"), pwmfile ) logger.info(os.path.join(outdir, "gimme.maelstrom.report.html"))
[ "def", "run_maelstrom", "(", "infile", ",", "genome", ",", "outdir", ",", "pwmfile", "=", "None", ",", "plot", "=", "True", ",", "cluster", "=", "False", ",", "score_table", "=", "None", ",", "count_table", "=", "None", ",", "methods", "=", "None", ",", "ncpus", "=", "None", ")", ":", "logger", ".", "info", "(", "\"Starting maelstrom\"", ")", "if", "infile", ".", "endswith", "(", "\"feather\"", ")", ":", "df", "=", "pd", ".", "read_feather", "(", "infile", ")", "df", "=", "df", ".", "set_index", "(", "df", ".", "columns", "[", "0", "]", ")", "else", ":", "df", "=", "pd", ".", "read_table", "(", "infile", ",", "index_col", "=", "0", ",", "comment", "=", "\"#\"", ")", "# Check for duplicates", "if", "df", ".", "index", ".", "duplicated", "(", "keep", "=", "False", ")", ".", "any", "(", ")", ":", "raise", "ValueError", "(", "\"Input file contains duplicate regions! \"", "\"Please remove them.\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "outdir", ")", ":", "os", ".", "mkdir", "(", "outdir", ")", "if", "methods", "is", "None", ":", "methods", "=", "Moap", ".", "list_predictors", "(", ")", "methods", "=", "[", "m", ".", "lower", "(", ")", "for", "m", "in", "methods", "]", "shutil", ".", "copyfile", "(", "infile", ",", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"input.table.txt\"", ")", ")", "# Copy the motif informatuon", "pwmfile", "=", "pwmfile_location", "(", "pwmfile", ")", "if", "pwmfile", ":", "shutil", ".", "copy2", "(", "pwmfile", ",", "outdir", ")", "mapfile", "=", "re", ".", "sub", "(", "\".p[fw]m$\"", ",", "\".motif2factors.txt\"", ",", "pwmfile", ")", "if", "os", ".", "path", ".", "exists", "(", "mapfile", ")", ":", "shutil", ".", "copy2", "(", "mapfile", ",", "outdir", ")", "# Create a file with the number of motif matches", "if", "not", "count_table", ":", "count_table", "=", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"motif.count.txt.gz\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "count_table", ")", ":", "logger", ".", "info", "(", "\"Motif scanning (counts)\"", ")", "counts", "=", "scan_to_table", "(", "infile", ",", "genome", ",", "\"count\"", ",", "pwmfile", "=", "pwmfile", ",", "ncpus", "=", "ncpus", ")", "counts", ".", "to_csv", "(", "count_table", ",", "sep", "=", "\"\\t\"", ",", "compression", "=", "\"gzip\"", ")", "else", ":", "logger", ".", "info", "(", "\"Counts, using: %s\"", ",", "count_table", ")", "# Create a file with the score of the best motif match", "if", "not", "score_table", ":", "score_table", "=", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"motif.score.txt.gz\"", ")", "if", "not", "os", ".", "path", ".", "exists", "(", "score_table", ")", ":", "logger", ".", "info", "(", "\"Motif scanning (scores)\"", ")", "scores", "=", "scan_to_table", "(", "infile", ",", "genome", ",", "\"score\"", ",", "pwmfile", "=", "pwmfile", ",", "ncpus", "=", "ncpus", ")", "scores", ".", "to_csv", "(", "score_table", ",", "sep", "=", "\"\\t\"", ",", "float_format", "=", "\"%.3f\"", ",", "compression", "=", "\"gzip\"", ")", "else", ":", "logger", ".", "info", "(", "\"Scores, using: %s\"", ",", "score_table", ")", "if", "cluster", ":", "cluster", "=", "False", "for", "method", "in", "methods", ":", "m", "=", "Moap", ".", "create", "(", "method", ",", "ncpus", "=", "ncpus", ")", "if", "m", ".", "ptype", "==", "\"classification\"", ":", "cluster", "=", "True", "break", "if", "not", "cluster", ":", "logger", ".", "info", "(", "\"Skipping clustering, no classification methods\"", ")", "exps", "=", "[", "]", 
"clusterfile", "=", "infile", "if", "df", ".", "shape", "[", "1", "]", "!=", "1", ":", "# More than one column", "for", "method", "in", "Moap", ".", "list_regression_predictors", "(", ")", ":", "if", "method", "in", "methods", ":", "m", "=", "Moap", ".", "create", "(", "method", ",", "ncpus", "=", "ncpus", ")", "exps", ".", "append", "(", "[", "method", ",", "m", ".", "pref_table", ",", "infile", "]", ")", "logger", ".", "debug", "(", "\"Adding %s\"", ",", "method", ")", "if", "cluster", ":", "clusterfile", "=", "os", ".", "path", ".", "join", "(", "outdir", ",", "os", ".", "path", ".", "basename", "(", "infile", ")", "+", "\".cluster.txt\"", ")", "df", "[", ":", "]", "=", "scale", "(", "df", ",", "axis", "=", "0", ")", "names", "=", "df", ".", "columns", "df_changed", "=", "pd", ".", "DataFrame", "(", "index", "=", "df", ".", "index", ")", "df_changed", "[", "\"cluster\"", "]", "=", "np", ".", "nan", "for", "name", "in", "names", ":", "df_changed", ".", "loc", "[", "(", "df", "[", "name", "]", "-", "df", ".", "loc", "[", ":", ",", "df", ".", "columns", "!=", "name", "]", ".", "max", "(", "1", ")", ")", ">", "0.5", ",", "\"cluster\"", "]", "=", "name", "df_changed", ".", "dropna", "(", ")", ".", "to_csv", "(", "clusterfile", ",", "sep", "=", "\"\\t\"", ")", "if", "df", ".", "shape", "[", "1", "]", "==", "1", "or", "cluster", ":", "for", "method", "in", "Moap", ".", "list_classification_predictors", "(", ")", ":", "if", "method", "in", "methods", ":", "m", "=", "Moap", ".", "create", "(", "method", ",", "ncpus", "=", "ncpus", ")", "exps", ".", "append", "(", "[", "method", ",", "m", ".", "pref_table", ",", "clusterfile", "]", ")", "if", "len", "(", "exps", ")", "==", "0", ":", "logger", ".", "error", "(", "\"No method to run.\"", ")", "sys", ".", "exit", "(", "1", ")", "for", "method", ",", "scoring", ",", "fname", "in", "exps", ":", "try", ":", "if", "scoring", "==", "\"count\"", "and", "count_table", ":", "moap_with_table", "(", "fname", ",", "count_table", ",", "outdir", ",", "method", ",", "scoring", ",", "ncpus", "=", "ncpus", ")", "elif", "scoring", "==", "\"score\"", "and", "score_table", ":", "moap_with_table", "(", "fname", ",", "score_table", ",", "outdir", ",", "method", ",", "scoring", ",", "ncpus", "=", "ncpus", ")", "else", ":", "moap_with_bg", "(", "fname", ",", "genome", ",", "outdir", ",", "method", ",", "scoring", ",", "pwmfile", "=", "pwmfile", ",", "ncpus", "=", "ncpus", ")", "except", "Exception", "as", "e", ":", "logger", ".", "warn", "(", "\"Method %s with scoring %s failed\"", ",", "method", ",", "scoring", ")", "logger", ".", "warn", "(", "e", ")", "logger", ".", "warn", "(", "\"Skipping\"", ")", "raise", "dfs", "=", "{", "}", "for", "method", ",", "scoring", ",", "fname", "in", "exps", ":", "t", "=", "\"{}.{}\"", ".", "format", "(", "method", ",", "scoring", ")", "fname", "=", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"activity.{}.{}.out.txt\"", ".", "format", "(", "method", ",", "scoring", ")", ")", "try", ":", "dfs", "[", "t", "]", "=", "pd", ".", "read_table", "(", "fname", ",", "index_col", "=", "0", ",", "comment", "=", "\"#\"", ")", "except", ":", "logging", ".", "warn", "(", "\"Activity file for {} not found!\\n\"", ".", "format", "(", "t", ")", ")", "if", "len", "(", "methods", ")", ">", "1", ":", "logger", ".", "info", "(", "\"Rank aggregation\"", ")", "df_p", "=", "df_rank_aggregation", "(", "df", ",", "dfs", ",", "exps", ")", "df_p", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"final.out.csv\"", ")", ",", "sep", 
"=", "\"\\t\"", ")", "#df_p = df_p.join(m2f)", "# Write motif frequency table", "if", "df", ".", "shape", "[", "1", "]", "==", "1", ":", "mcount", "=", "df", ".", "join", "(", "pd", ".", "read_table", "(", "count_table", ",", "index_col", "=", "0", ",", "comment", "=", "\"#\"", ")", ")", "m_group", "=", "mcount", ".", "groupby", "(", "df", ".", "columns", "[", "0", "]", ")", "freq", "=", "m_group", ".", "sum", "(", ")", "/", "m_group", ".", "count", "(", ")", "freq", ".", "to_csv", "(", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"motif.freq.txt\"", ")", ",", "sep", "=", "\"\\t\"", ")", "if", "plot", "and", "len", "(", "methods", ")", ">", "1", ":", "logger", ".", "info", "(", "\"html report\"", ")", "maelstrom_html_report", "(", "outdir", ",", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"final.out.csv\"", ")", ",", "pwmfile", ")", "logger", ".", "info", "(", "os", ".", "path", ".", "join", "(", "outdir", ",", "\"gimme.maelstrom.report.html\"", ")", ")" ]
Run maelstrom on an input table. Parameters ---------- infile : str Filename of input table. Can be either a tab-separated text file or a feather file. genome : str Genome name. Can be either the name of a FASTA-formatted file or a genomepy genome name. outdir : str Output directory for all results. pwmfile : str, optional Specify a PFM file for scanning. plot : bool, optional Create heatmaps. cluster : bool, optional If True and if the input table has more than one column, the data is clustered and the cluster activity methods are also run. Not well-tested. score_table : str, optional Filename of pre-calculated table with motif scores. count_table : str, optional Filename of pre-calculated table with motif counts. methods : list, optional Activity methods to use. By default, all are used. ncpus : int, optional If defined this specifies the number of cores to use.
[ "Run", "maelstrom", "on", "an", "input", "table", ".", "Parameters", "----------", "infile", ":", "str", "Filename", "of", "input", "table", ".", "Can", "be", "either", "a", "text", "-", "separated", "tab", "file", "or", "a", "feather", "file", ".", "genome", ":", "str", "Genome", "name", ".", "Can", "be", "either", "the", "name", "of", "a", "FASTA", "-", "formatted", "file", "or", "a", "genomepy", "genome", "name", ".", "outdir", ":", "str", "Output", "directory", "for", "all", "results", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/maelstrom.py#L250-L428
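A usage sketch for a complete run, assuming gimmemotifs is installed; paths and genome name are placeholders.
from gimmemotifs.maelstrom import run_maelstrom

# Input: a table with regions in the first column and one cluster label
# (or several real-valued columns) per region.
run_maelstrom("clusters.txt", "hg19", "maelstrom.out", ncpus=4)
# final.out.csv, motif.freq.txt and the HTML report end up in maelstrom.out/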
vanheeringen-lab/gimmemotifs
gimmemotifs/maelstrom.py
MaelstromResult.plot_heatmap
def plot_heatmap(self, kind="final", min_freq=0.01, threshold=2, name=True, max_len=50, aspect=1, **kwargs): """Plot clustered heatmap of predicted motif activity. Parameters ---------- kind : str, optional Which data type to use for plotting. Default is 'final', which will plot the result of the rank aggregation. Other options are 'freq' for the motif frequencies, or any of the individual activities such as 'rf.score'. min_freq : float, optional Minimum frequency of motif occurrence. threshold : float, optional Minimum activity (absolute) of the rank aggregation result. name : bool, optional Use factor names instead of motif names for plotting. max_len : int, optional Truncate the list of factors to this maximum length. aspect : int, optional Aspect ratio for tweaking the plot. kwargs : other keyword arguments All other keyword arguments are passed to sns.clustermap Returns ------- cg : ClusterGrid A seaborn ClusterGrid instance. """ filt = np.any(np.abs(self.result) >= threshold, 1) & np.any(np.abs(self.freq.T) >= min_freq, 1) idx = self.result[filt].index cmap = "RdBu_r" if kind == "final": data = self.result elif kind == "freq": data = self.freq.T cmap = "Reds" elif kind in self.activity: data = self.activity[kind] if kind in ["hypergeom.count", "mwu.score"]: cmap = "Reds" else: raise ValueError("Unknown kind") #print(data.head()) #plt.figure( m = data.loc[idx] if name: m["factors"] = [join_max(self.motifs[n].factors, max_len, ",", suffix=",(...)") for n in m.index] m = m.set_index("factors") h,w = m.shape cg = sns.clustermap(m, cmap=cmap, col_cluster=False, figsize=(2 + w * 0.5 * aspect, 0.5 * h), linewidths=1, **kwargs) cg.ax_col_dendrogram.set_visible(False) plt.setp(cg.ax_heatmap.yaxis.get_majorticklabels(), rotation=0) return cg
python
def plot_heatmap(self, kind="final", min_freq=0.01, threshold=2, name=True, max_len=50, aspect=1, **kwargs): """Plot clustered heatmap of predicted motif activity. Parameters ---------- kind : str, optional Which data type to use for plotting. Default is 'final', which will plot the result of the rank aggregation. Other options are 'freq' for the motif frequencies, or any of the individual activities such as 'rf.score'. min_freq : float, optional Minimum frequency of motif occurrence. threshold : float, optional Minimum activity (absolute) of the rank aggregation result. name : bool, optional Use factor names instead of motif names for plotting. max_len : int, optional Truncate the list of factors to this maximum length. aspect : int, optional Aspect ratio for tweaking the plot. kwargs : other keyword arguments All other keyword arguments are passed to sns.clustermap Returns ------- cg : ClusterGrid A seaborn ClusterGrid instance. """ filt = np.any(np.abs(self.result) >= threshold, 1) & np.any(np.abs(self.freq.T) >= min_freq, 1) idx = self.result[filt].index cmap = "RdBu_r" if kind == "final": data = self.result elif kind == "freq": data = self.freq.T cmap = "Reds" elif kind in self.activity: data = self.activity[kind] if kind in ["hypergeom.count", "mwu.score"]: cmap = "Reds" else: raise ValueError("Unknown kind") #print(data.head()) #plt.figure( m = data.loc[idx] if name: m["factors"] = [join_max(self.motifs[n].factors, max_len, ",", suffix=",(...)") for n in m.index] m = m.set_index("factors") h,w = m.shape cg = sns.clustermap(m, cmap=cmap, col_cluster=False, figsize=(2 + w * 0.5 * aspect, 0.5 * h), linewidths=1, **kwargs) cg.ax_col_dendrogram.set_visible(False) plt.setp(cg.ax_heatmap.yaxis.get_majorticklabels(), rotation=0) return cg
[ "def", "plot_heatmap", "(", "self", ",", "kind", "=", "\"final\"", ",", "min_freq", "=", "0.01", ",", "threshold", "=", "2", ",", "name", "=", "True", ",", "max_len", "=", "50", ",", "aspect", "=", "1", ",", "*", "*", "kwargs", ")", ":", "filt", "=", "np", ".", "any", "(", "np", ".", "abs", "(", "self", ".", "result", ")", ">=", "threshold", ",", "1", ")", "&", "np", ".", "any", "(", "np", ".", "abs", "(", "self", ".", "freq", ".", "T", ")", ">=", "min_freq", ",", "1", ")", "idx", "=", "self", ".", "result", "[", "filt", "]", ".", "index", "cmap", "=", "\"RdBu_r\"", "if", "kind", "==", "\"final\"", ":", "data", "=", "self", ".", "result", "elif", "kind", "==", "\"freq\"", ":", "data", "=", "self", ".", "freq", ".", "T", "cmap", "=", "\"Reds\"", "elif", "kind", "in", "self", ".", "activity", ":", "data", "=", "self", ".", "activity", "[", "dtype", "]", "if", "kind", "in", "[", "\"hypergeom.count\"", ",", "\"mwu.score\"", "]", ":", "cmap", "=", "\"Reds\"", "else", ":", "raise", "ValueError", "(", "\"Unknown dtype\"", ")", "#print(data.head())", "#plt.figure(", "m", "=", "data", ".", "loc", "[", "idx", "]", "if", "name", ":", "m", "[", "\"factors\"", "]", "=", "[", "join_max", "(", "self", ".", "motifs", "[", "n", "]", ".", "factors", ",", "max_len", ",", "\",\"", ",", "suffix", "=", "\",(...)\"", ")", "for", "n", "in", "m", ".", "index", "]", "m", "=", "m", ".", "set_index", "(", "\"factors\"", ")", "h", ",", "w", "=", "m", ".", "shape", "cg", "=", "sns", ".", "clustermap", "(", "m", ",", "cmap", "=", "cmap", ",", "col_cluster", "=", "False", ",", "figsize", "=", "(", "2", "+", "w", "*", "0.5", "*", "aspect", ",", "0.5", "*", "h", ")", ",", "linewidths", "=", "1", ",", "*", "*", "kwargs", ")", "cg", ".", "ax_col_dendrogram", ".", "set_visible", "(", "False", ")", "plt", ".", "setp", "(", "cg", ".", "ax_heatmap", ".", "yaxis", ".", "get_majorticklabels", "(", ")", ",", "rotation", "=", "0", ")", "return", "cg" ]
Plot clustered heatmap of predicted motif activity. Parameters ---------- kind : str, optional Which data type to use for plotting. Default is 'final', which will plot the result of the rank aggregation. Other options are 'freq' for the motif frequencies, or any of the individual activities such as 'rf.score'. min_freq : float, optional Minimum frequency of motif occurrence. threshold : float, optional Minimum activity (absolute) of the rank aggregation result. name : bool, optional Use factor names instead of motif names for plotting. max_len : int, optional Truncate the list of factors to this maximum length. aspect : int, optional Aspect ratio for tweaking the plot. kwargs : other keyword arguments All other keyword arguments are passed to sns.clustermap Returns ------- cg : ClusterGrid A seaborn ClusterGrid instance.
[ "Plot", "clustered", "heatmap", "of", "predicted", "motif", "activity", ".", "Parameters", "----------", "kind", ":", "str", "optional", "Which", "data", "type", "to", "use", "for", "plotting", ".", "Default", "is", "final", "which", "will", "plot", "the", "result", "of", "the", "rang", "aggregation", ".", "Other", "options", "are", "freq", "for", "the", "motif", "frequencies", "or", "any", "of", "the", "individual", "activities", "such", "as", "rf", ".", "score", ".", "min_freq", ":", "float", "optional", "Minimum", "frequency", "of", "motif", "occurrence", ".", "threshold", ":", "float", "optional", "Minimum", "activity", "(", "absolute", ")", "of", "the", "rank", "aggregation", "result", ".", "name", ":", "bool", "optional", "Use", "factor", "names", "instead", "of", "motif", "names", "for", "plotting", ".", "max_len", ":", "int", "optional", "Truncate", "the", "list", "of", "factors", "to", "this", "maximum", "length", ".", "aspect", ":", "int", "optional", "Aspect", "ratio", "for", "tweaking", "the", "plot", ".", "kwargs", ":", "other", "keyword", "arguments", "All", "other", "keyword", "arguments", "are", "passed", "to", "sns", ".", "clustermap" ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/maelstrom.py#L495-L558
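A usage sketch; the assumption that a MaelstromResult is constructed from a finished run's output directory is mine, not confirmed by this record.
from gimmemotifs.maelstrom import MaelstromResult

mr = MaelstromResult("maelstrom.out")  # assumed: loads a finished run
cg = mr.plot_heatmap(threshold=3, min_freq=0.05)  # stricter than the defaults
cg.savefig("maelstrom_heatmap.png")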
vanheeringen-lab/gimmemotifs
gimmemotifs/maelstrom.py
MaelstromResult.plot_scores
def plot_scores(self, motifs, name=True, max_len=50): """Create motif scores boxplot of different clusters. Motifs can be specified as either motif or factor names. The motif scores will be scaled and plotted as z-scores. Parameters ---------- motifs : iterable or str List of motif or factor names. name : bool, optional Use factor names instead of motif names for plotting. max_len : int, optional Truncate the list of factors to this maximum length. Returns ------- g : FacetGrid Returns the seaborn FacetGrid object with the plot. """ if self.input.shape[1] != 1: raise ValueError("Can't make a categorical plot with real-valued data") if type("") == type(motifs): motifs = [motifs] plot_motifs = [] for motif in motifs: if motif in self.motifs: plot_motifs.append(motif) else: for m in self.motifs.values(): if motif in m.factors: plot_motifs.append(m.id) data = self.scores[plot_motifs] data[:] = scale(data, axis=0) if name: data = data.T data["factors"] = [join_max(self.motifs[n].factors, max_len, ",", suffix=",(...)") for n in plot_motifs] data = data.set_index("factors").T data = pd.melt(self.input.join(data), id_vars=["cluster"]) data.columns = ["cluster", "motif", "z-score"] g = sns.factorplot(data=data, y="motif", x="z-score", hue="cluster", kind="box", aspect=2) return g
python
def plot_scores(self, motifs, name=True, max_len=50): """Create motif scores boxplot of different clusters. Motifs can be specified as either motif or factor names. The motif scores will be scaled and plotted as z-scores. Parameters ---------- motifs : iterable or str List of motif or factor names. name : bool, optional Use factor names instead of motif names for plotting. max_len : int, optional Truncate the list of factors to this maximum length. Returns ------- g : FacetGrid Returns the seaborn FacetGrid object with the plot. """ if self.input.shape[1] != 1: raise ValueError("Can't make a categorical plot with real-valued data") if type("") == type(motifs): motifs = [motifs] plot_motifs = [] for motif in motifs: if motif in self.motifs: plot_motifs.append(motif) else: for m in self.motifs.values(): if motif in m.factors: plot_motifs.append(m.id) data = self.scores[plot_motifs] data[:] = scale(data, axis=0) if name: data = data.T data["factors"] = [join_max(self.motifs[n].factors, max_len, ",", suffix=",(...)") for n in plot_motifs] data = data.set_index("factors").T data = pd.melt(self.input.join(data), id_vars=["cluster"]) data.columns = ["cluster", "motif", "z-score"] g = sns.factorplot(data=data, y="motif", x="z-score", hue="cluster", kind="box", aspect=2) return g
[ "def", "plot_scores", "(", "self", ",", "motifs", ",", "name", "=", "True", ",", "max_len", "=", "50", ")", ":", "if", "self", ".", "input", ".", "shape", "[", "1", "]", "!=", "1", ":", "raise", "ValueError", "(", "\"Can't make a categorical plot with real-valued data\"", ")", "if", "type", "(", "\"\"", ")", "==", "type", "(", "motifs", ")", ":", "motifs", "=", "[", "motifs", "]", "plot_motifs", "=", "[", "]", "for", "motif", "in", "motifs", ":", "if", "motif", "in", "self", ".", "motifs", ":", "plot_motifs", ".", "append", "(", "motif", ")", "else", ":", "for", "m", "in", "self", ".", "motifs", ".", "values", "(", ")", ":", "if", "motif", "in", "m", ".", "factors", ":", "plot_motifs", ".", "append", "(", "m", ".", "id", ")", "data", "=", "self", ".", "scores", "[", "plot_motifs", "]", "data", "[", ":", "]", "=", "data", ".", "scale", "(", "data", ",", "axix", "=", "0", ")", "if", "name", ":", "data", "=", "data", ".", "T", "data", "[", "\"factors\"", "]", "=", "[", "join_max", "(", "self", ".", "motifs", "[", "n", "]", ".", "factors", ",", "max_len", ",", "\",\"", ",", "suffix", "=", "\",(...)\"", ")", "for", "n", "in", "plot_motifs", "]", "data", "=", "data", ".", "set_index", "(", "\"factors\"", ")", ".", "T", "data", "=", "pd", ".", "melt", "(", "self", ".", "input", ".", "join", "(", "data", ")", ",", "id_vars", "=", "[", "\"cluster\"", "]", ")", "data", ".", "columns", "=", "[", "\"cluster\"", ",", "\"motif\"", ",", "\"z-score\"", "]", "g", "=", "sns", ".", "factorplot", "(", "data", "=", "data", ",", "y", "=", "\"motif\"", ",", "x", "=", "\"z-score\"", ",", "hue", "=", "\"cluster\"", ",", "kind", "=", "\"box\"", ",", "aspect", "=", "2", ")", "return", "g" ]
Create motif scores boxplot of different clusters. Motifs can be specified as either motif or factor names. The motif scores will be scaled and plotted as z-scores. Parameters ---------- motifs : iterable or str List of motif or factor names. name : bool, optional Use factor names instead of motif names for plotting. max_len : int, optional Truncate the list of factors to this maximum length. Returns ------- g : FacetGrid Returns the seaborn FacetGrid object with the plot.
[ "Create", "motif", "scores", "boxplot", "of", "different", "clusters", ".", "Motifs", "can", "be", "specified", "as", "either", "motif", "or", "factor", "names", ".", "The", "motif", "scores", "will", "be", "scaled", "and", "plotted", "as", "z", "-", "scores", ".", "Parameters", "----------", "motifs", ":", "iterable", "or", "str", "List", "of", "motif", "or", "factor", "names", ".", "name", ":", "bool", "optional", "Use", "factor", "names", "instead", "of", "motif", "names", "for", "plotting", ".", "max_len", ":", "int", "optional", "Truncate", "the", "list", "of", "factors", "to", "this", "maximum", "length", ".", "Returns", "-------", "g", ":", "FacetGrid", "Returns", "the", "seaborn", "FacetGrid", "object", "with", "the", "plot", "." ]
train
https://github.com/vanheeringen-lab/gimmemotifs/blob/1dc0572179e5d0c8f96958060133c1f8d92c6675/gimmemotifs/maelstrom.py#L561-L608
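Continuing from the MaelstromResult loaded above; this only works for categorical (single-column) input, per the ValueError in the code, and the factor names here are hypothetical.
g = mr.plot_scores(["CTCF", "TP53"])  # factor names resolve to matching motifs
g.savefig("motif_scores_by_cluster.png")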
biosustain/swiglpk
scripts/find_swiglpk_version.py
get_version
def get_version(package, url_pattern=URL_PATTERN): """Return version of package on pypi.python.org using json. Adapted from https://stackoverflow.com/a/34366589""" req = requests.get(url_pattern.format(package=package)) version = parse('0') if req.status_code == requests.codes.ok: # j = json.loads(req.text.encode(req.encoding)) j = req.json() releases = j.get('releases', []) for release in releases: ver = parse(release) if not ver.is_prerelease: version = max(version, ver) return version
python
def get_version(package, url_pattern=URL_PATTERN): """Return version of package on pypi.python.org using json. Adapted from https://stackoverflow.com/a/34366589""" req = requests.get(url_pattern.format(package=package)) version = parse('0') if req.status_code == requests.codes.ok: # j = json.loads(req.text.encode(req.encoding)) j = req.json() releases = j.get('releases', []) for release in releases: ver = parse(release) if not ver.is_prerelease: version = max(version, ver) return version
[ "def", "get_version", "(", "package", ",", "url_pattern", "=", "URL_PATTERN", ")", ":", "req", "=", "requests", ".", "get", "(", "url_pattern", ".", "format", "(", "package", "=", "package", ")", ")", "version", "=", "parse", "(", "'0'", ")", "if", "req", ".", "status_code", "==", "requests", ".", "codes", ".", "ok", ":", "# j = json.loads(req.text.encode(req.encoding))", "j", "=", "req", ".", "json", "(", ")", "releases", "=", "j", ".", "get", "(", "'releases'", ",", "[", "]", ")", "for", "release", "in", "releases", ":", "ver", "=", "parse", "(", "release", ")", "if", "not", "ver", ".", "is_prerelease", ":", "version", "=", "max", "(", "version", ",", "ver", ")", "return", "version" ]
Return version of package on pypi.python.org using json. Adapted from https://stackoverflow.com/a/34366589
[ "Return", "version", "of", "package", "on", "pypi", ".", "python", ".", "org", "using", "json", ".", "Adapted", "from", "https", ":", "//", "stackoverflow", ".", "com", "/", "a", "/", "34366589" ]
train
https://github.com/biosustain/swiglpk/blob/a83bc5d0bb4bf7795756ba11a33fb5cb2c501bef/scripts/find_swiglpk_version.py#L13-L25
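A runnable sketch of the same lookup done inline; the PyPI JSON endpoint is an assumption (the script's URL_PATTERN constant is not shown in this record), and packaging.version.parse is one plausible source of the `parse` used above.
import requests
from packaging.version import parse

# Assumed endpoint; the script's actual URL_PATTERN may differ.
url = "https://pypi.org/pypi/{package}/json".format(package="swiglpk")
releases = requests.get(url).json().get("releases", {})

stable = [parse(r) for r in releases if not parse(r).is_prerelease]
print(max(stable, default=parse("0")))  # newest non-prerelease version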
osantana/prettyconf
prettyconf/loaders.py
get_args
def get_args(parser): """ Converts arguments extracted from a parser to a dict, and will dismiss arguments which default to NOT_SET. :param parser: an ``argparse.ArgumentParser`` instance. :type parser: argparse.ArgumentParser :return: Dictionary with the configs found in the parsed CLI arguments. :rtype: dict """ args = vars(parser.parse_args()).items() return {key: val for key, val in args if not isinstance(val, NotSet)}
python
def get_args(parser): """ Converts arguments extracted from a parser to a dict, and will dismiss arguments which default to NOT_SET. :param parser: an ``argparse.ArgumentParser`` instance. :type parser: argparse.ArgumentParser :return: Dictionary with the configs found in the parsed CLI arguments. :rtype: dict """ args = vars(parser.parse_args()).items() return {key: val for key, val in args if not isinstance(val, NotSet)}
[ "def", "get_args", "(", "parser", ")", ":", "args", "=", "vars", "(", "parser", ".", "parse_args", "(", ")", ")", ".", "items", "(", ")", "return", "{", "key", ":", "val", "for", "key", ",", "val", "in", "args", "if", "not", "isinstance", "(", "val", ",", "NotSet", ")", "}" ]
Converts arguments extracted from a parser to a dict, and will dismiss arguments which default to NOT_SET. :param parser: an ``argparse.ArgumentParser`` instance. :type parser: argparse.ArgumentParser :return: Dictionary with the configs found in the parsed CLI arguments. :rtype: dict
[ "Converts", "arguments", "extracted", "from", "a", "parser", "to", "a", "dict", "and", "will", "dismiss", "arguments", "which", "default", "to", "NOT_SET", "." ]
train
https://github.com/osantana/prettyconf/blob/ddbbc8a592ebd7d80d9c3f87c8671523e3692a0d/prettyconf/loaders.py#L22-L33
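A self-contained sketch of the sentinel pattern; the NotSet stand-in below only mimics prettyconf's class, which this record does not show.
import argparse
import sys

class NotSet(str):
    """Stand-in sentinel: instances mean 'no CLI value was given'."""

NOT_SET = NotSet()

def get_args(parser):
    args = vars(parser.parse_args()).items()
    return {key: val for key, val in args if not isinstance(val, NotSet)}

parser = argparse.ArgumentParser()
parser.add_argument("--host", default=NOT_SET)
parser.add_argument("--port", default=NOT_SET)

sys.argv = ["prog", "--host", "example.com"]  # simulate a CLI invocation
print(get_args(parser))  # {'host': 'example.com'} -- the unset port is dismissed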
aouyar/PyMunin
pysysinfo/util.py
parse_value
def parse_value(val, parsebool=False): """Parse input string and return int, float or str depending on format. @param val: Input string. @param parsebool: If True parse yes / no, on / off as boolean. @return: Value of type int, float or str. """ try: return int(val) except ValueError: pass try: return float(val) except: pass if parsebool: if re.match('yes|on', str(val), re.IGNORECASE): return True elif re.match('no|off', str(val), re.IGNORECASE): return False return val
python
def parse_value(val, parsebool=False): """Parse input string and return int, float or str depending on format. @param val: Input string. @param parsebool: If True parse yes / no, on / off as boolean. @return: Value of type int, float or str. """ try: return int(val) except ValueError: pass try: return float(val) except: pass if parsebool: if re.match('yes|on', str(val), re.IGNORECASE): return True elif re.match('no|off', str(val), re.IGNORECASE): return False return val
[ "def", "parse_value", "(", "val", ",", "parsebool", "=", "False", ")", ":", "try", ":", "return", "int", "(", "val", ")", "except", "ValueError", ":", "pass", "try", ":", "return", "float", "(", "val", ")", "except", ":", "pass", "if", "parsebool", ":", "if", "re", ".", "match", "(", "'yes|on'", ",", "str", "(", "val", ")", ",", "re", ".", "IGNORECASE", ")", ":", "return", "True", "elif", "re", ".", "match", "(", "'no|off'", ",", "str", "(", "val", ")", ",", "re", ".", "IGNORECASE", ")", ":", "return", "False", "return", "val" ]
Parse input string and return int, float or str depending on format. @param val: Input string. @param parsebool: If True parse yes / no, on / off as boolean. @return: Value of type int, float or str.
[ "Parse", "input", "string", "and", "return", "int", "float", "or", "str", "depending", "on", "format", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/util.py#L27-L48
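Expected behavior at a glance, assuming PyMunin's pysysinfo package is importable (the module targets Python 2).
from pysysinfo.util import parse_value

print(parse_value("42"))                  # 42   (int)
print(parse_value("3.14"))                # 3.14 (float)
print(parse_value("on"))                  # 'on' (bool parsing is off by default)
print(parse_value("on", parsebool=True))  # True
print(parse_value("NO", parsebool=True))  # False (matching is case-insensitive)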
aouyar/PyMunin
pysysinfo/util.py
socket_read
def socket_read(fp): """Buffered read from socket. Reads all data available from socket. @fp: File pointer for socket. @return: String of characters read from buffer. """ response = '' oldlen = 0 newlen = 0 while True: response += fp.read(buffSize) newlen = len(response) if newlen - oldlen == 0: break else: oldlen = newlen return response
python
def socket_read(fp): """Buffered read from socket. Reads all data available from socket. @fp: File pointer for socket. @return: String of characters read from buffer. """ response = '' oldlen = 0 newlen = 0 while True: response += fp.read(buffSize) newlen = len(response) if newlen - oldlen == 0: break else: oldlen = newlen return response
[ "def", "socket_read", "(", "fp", ")", ":", "response", "=", "''", "oldlen", "=", "0", "newlen", "=", "0", "while", "True", ":", "response", "+=", "fp", ".", "read", "(", "buffSize", ")", "newlen", "=", "len", "(", "response", ")", "if", "newlen", "-", "oldlen", "==", "0", ":", "break", "else", ":", "oldlen", "=", "newlen", "return", "response" ]
Buffered read from socket. Reads all data available from socket. @fp: File pointer for socket. @return: String of characters read from buffer.
[ "Buffered", "read", "from", "socket", ".", "Reads", "all", "data", "available", "from", "socket", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/util.py#L64-L81
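A Python 3 sketch of the same read-until-quiet loop against a real socket; the HTTP/1.0 request is just a convenient way to get a peer that closes the connection, which is what makes the loop terminate.
import socket

def socket_read(fp, buff_size=4096):
    # Same loop as above: keep reading until a read adds no new data.
    response = ''
    oldlen = 0
    while True:
        response += fp.read(buff_size)
        newlen = len(response)
        if newlen == oldlen:
            break
        oldlen = newlen
    return response

with socket.create_connection(("example.com", 80), timeout=10) as sock:
    sock.sendall(b"GET / HTTP/1.0\r\nHost: example.com\r\n\r\n")
    fp = sock.makefile("r")       # file-like object, as the function expects
    print(socket_read(fp)[:200])  # start of the HTTP response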
aouyar/PyMunin
pysysinfo/util.py
exec_command
def exec_command(args, env=None): """Convenience function that executes command and returns result. @param args: Tuple of command and arguments. @param env: Dictionary of environment variables. (Environment is not modified if None.) @return: Command output. """ try: cmd = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=buffSize, env=env) except OSError, e: raise Exception("Execution of command failed.\n Command: %s\n Error: %s" % (' '.join(args), str(e))) out, err = cmd.communicate(None) if cmd.returncode != 0: raise Exception("Execution of command failed with error code: %s\n%s\n" % (cmd.returncode, err)) return out
python
def exec_command(args, env=None): """Convenience function that executes command and returns result. @param args: Tuple of command and arguments. @param env: Dictionary of environment variables. (Environment is not modified if None.) @return: Command output. """ try: cmd = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.PIPE, bufsize=buffSize, env=env) except OSError, e: raise Exception("Execution of command failed.\n Command: %s\n Error: %s" % (' '.join(args), str(e))) out, err = cmd.communicate(None) if cmd.returncode != 0: raise Exception("Execution of command failed with error code: %s\n%s\n" % (cmd.returncode, err)) return out
[ "def", "exec_command", "(", "args", ",", "env", "=", "None", ")", ":", "try", ":", "cmd", "=", "subprocess", ".", "Popen", "(", "args", ",", "stdout", "=", "subprocess", ".", "PIPE", ",", "stderr", "=", "subprocess", ".", "PIPE", ",", "bufsize", "=", "buffSize", ",", "env", "=", "env", ")", "except", "OSError", ",", "e", ":", "raise", "Exception", "(", "\"Execution of command failed.\\n\"", ",", "\" Command: %s\\n Error: %s\"", "%", "(", "' '", ".", "join", "(", "args", ")", ",", "str", "(", "e", ")", ")", ")", "out", ",", "err", "=", "cmd", ".", "communicate", "(", "None", ")", "if", "cmd", ".", "returncode", "!=", "0", ":", "raise", "Exception", "(", "\"Execution of command failed with error code: %s\\n%s\\n\"", "%", "(", "cmd", ".", "returncode", ",", "err", ")", ")", "return", "out" ]
Convenience function that executes command and returns result. @param args: Tuple of command and arguments. @param env: Dictionary of environment variables. (Environment is not modified if None.) @return: Command output.
[ "Convenience", "function", "that", "executes", "command", "and", "returns", "result", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/util.py#L84-L106
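A Python 3 equivalent sketched with subprocess.run; the original above targets Python 2, so this is an illustration rather than a drop-in patch.
import subprocess

def exec_command(args, env=None):
    """Run a command and return its stdout; raise on failure (Python 3 sketch)."""
    try:
        result = subprocess.run(args, capture_output=True, text=True, env=env)
    except OSError as e:
        raise RuntimeError("Execution of command failed.\n"
                           " Command: %s\n Error: %s" % (' '.join(args), e))
    if result.returncode != 0:
        raise RuntimeError("Execution of command failed with error code: %s\n%s"
                           % (result.returncode, result.stderr))
    return result.stdout

print(exec_command(["echo", "hello"]))  # -> 'hello\n'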
aouyar/PyMunin
pysysinfo/util.py
NestedDict.set_nested
def set_nested(self, klist, value): """D.set_nested((k1, k2,k3, ...), v) -> D[k1][k2][k3] ... = v""" keys = list(klist) if len(keys) > 0: curr_dict = self last_key = keys.pop() for key in keys: if not curr_dict.has_key(key) or not isinstance(curr_dict[key], NestedDict): curr_dict[key] = type(self)() curr_dict = curr_dict[key] curr_dict[last_key] = value
python
def set_nested(self, klist, value): """D.set_nested((k1, k2,k3, ...), v) -> D[k1][k2][k3] ... = v""" keys = list(klist) if len(keys) > 0: curr_dict = self last_key = keys.pop() for key in keys: if not curr_dict.has_key(key) or not isinstance(curr_dict[key], NestedDict): curr_dict[key] = type(self)() curr_dict = curr_dict[key] curr_dict[last_key] = value
[ "def", "set_nested", "(", "self", ",", "klist", ",", "value", ")", ":", "keys", "=", "list", "(", "klist", ")", "if", "len", "(", "keys", ")", ">", "0", ":", "curr_dict", "=", "self", "last_key", "=", "keys", ".", "pop", "(", ")", "for", "key", "in", "keys", ":", "if", "not", "curr_dict", ".", "has_key", "(", "key", ")", "or", "not", "isinstance", "(", "curr_dict", "[", "key", "]", ",", "NestedDict", ")", ":", "curr_dict", "[", "key", "]", "=", "type", "(", "self", ")", "(", ")", "curr_dict", "=", "curr_dict", "[", "key", "]", "curr_dict", "[", "last_key", "]", "=", "value" ]
D.set_nested((k1, k2, k3, ...), v) -> D[k1][k2][k3] ... = v
[ "D", ".", "set_nested", "((", "k1", "k2", "k3", "...", ")", "v", ")", "-", ">", "D", "[", "k1", "]", "[", "k2", "]", "[", "k3", "]", "...", "=", "v" ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/util.py#L156-L167
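A minimal Python 3 analog showing what set_nested does; has_key from the Python 2 original becomes a plain `in` test.
class NestedDict(dict):
    def set_nested(self, klist, value):
        """D.set_nested((k1, k2, k3, ...), v) -> D[k1][k2][k3] ... = v"""
        keys = list(klist)
        if keys:
            curr_dict = self
            last_key = keys.pop()
            for key in keys:
                # Create intermediate NestedDicts on the way down.
                if key not in curr_dict or not isinstance(curr_dict[key], NestedDict):
                    curr_dict[key] = type(self)()
                curr_dict = curr_dict[key]
            curr_dict[last_key] = value

d = NestedDict()
d.set_nested(("disk", "sda", "reads"), 1024)
print(d)  # {'disk': {'sda': {'reads': 1024}}}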
aouyar/PyMunin
pysysinfo/util.py
TableFilter.registerFilter
def registerFilter(self, column, patterns, is_regex=False, ignore_case=False): """Register filter on a column of the table. @param column: The column name. @param patterns: A single pattern or a list of patterns used for matching column values. @param is_regex: The patterns will be treated as regex if True, the column values will be tested for equality with the patterns otherwise. @param ignore_case: Case insensitive matching will be used if True. """ if isinstance(patterns, basestring): patt_list = (patterns,) elif isinstance(patterns, (tuple, list)): patt_list = list(patterns) else: raise ValueError("The patterns parameter must either be a string " "or a tuple / list of strings.") if is_regex: if ignore_case: flags = re.IGNORECASE else: flags = 0 patt_exprs = [re.compile(pattern, flags) for pattern in patt_list] else: if ignore_case: patt_exprs = [pattern.lower() for pattern in patt_list] else: patt_exprs = patt_list self._filters[column] = (patt_exprs, is_regex, ignore_case)
python
def registerFilter(self, column, patterns, is_regex=False, ignore_case=False): """Register filter on a column of the table. @param column: The column name. @param patterns: A single pattern or a list of patterns used for matching column values. @param is_regex: The patterns will be treated as regex if True, the column values will be tested for equality with the patterns otherwise. @param ignore_case: Case insensitive matching will be used if True. """ if isinstance(patterns, basestring): patt_list = (patterns,) elif isinstance(patterns, (tuple, list)): patt_list = list(patterns) else: raise ValueError("The patterns parameter must either be a string " "or a tuple / list of strings.") if is_regex: if ignore_case: flags = re.IGNORECASE else: flags = 0 patt_exprs = [re.compile(pattern, flags) for pattern in patt_list] else: if ignore_case: patt_exprs = [pattern.lower() for pattern in patt_list] else: patt_exprs = patt_list self._filters[column] = (patt_exprs, is_regex, ignore_case)
[ "def", "registerFilter", "(", "self", ",", "column", ",", "patterns", ",", "is_regex", "=", "False", ",", "ignore_case", "=", "False", ")", ":", "if", "isinstance", "(", "patterns", ",", "basestring", ")", ":", "patt_list", "=", "(", "patterns", ",", ")", "elif", "isinstance", "(", "patterns", ",", "(", "tuple", ",", "list", ")", ")", ":", "patt_list", "=", "list", "(", "patterns", ")", "else", ":", "raise", "ValueError", "(", "\"The patterns parameter must either be as string \"", "\"or a tuple / list of strings.\"", ")", "if", "is_regex", ":", "if", "ignore_case", ":", "flags", "=", "re", ".", "IGNORECASE", "else", ":", "flags", "=", "0", "patt_exprs", "=", "[", "re", ".", "compile", "(", "pattern", ",", "flags", ")", "for", "pattern", "in", "patt_list", "]", "else", ":", "if", "ignore_case", ":", "patt_exprs", "=", "[", "pattern", ".", "lower", "(", ")", "for", "pattern", "in", "patt_list", "]", "else", ":", "patt_exprs", "=", "patt_list", "self", ".", "_filters", "[", "column", "]", "=", "(", "patt_exprs", ",", "is_regex", ",", "ignore_case", ")" ]
Register filter on a column of the table. @param column: The column name. @param patterns: A single pattern or a list of patterns used for matching column values. @param is_regex: The patterns will be treated as regex if True, the column values will be tested for equality with the patterns otherwise. @param ignore_case: Case insensitive matching will be used if True.
[ "Register", "filter", "on", "a", "column", "of", "table", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/util.py#L233-L264
aouyar/PyMunin
pysysinfo/util.py
TableFilter.unregisterFilter
def unregisterFilter(self, column): """Unregister filter on a column of the table. @param column: The column header. """ if self._filters.has_key(column): del self._filters[column]
python
def unregisterFilter(self, column): """Unregister filter on a column of the table. @param column: The column header. """ if self._filters.has_key(column): del self._filters[column]
[ "def", "unregisterFilter", "(", "self", ",", "column", ")", ":", "if", "self", ".", "_filters", ".", "has_key", "(", "column", ")", ":", "del", "self", ".", "_filters", "[", "column", "]" ]
Unregister filter on a column of the table. @param column: The column header.
[ "Unregister", "filter", "on", "a", "column", "of", "the", "table", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/util.py#L266-L273
aouyar/PyMunin
pysysinfo/util.py
TableFilter.registerFilters
def registerFilters(self, **kwargs): """Register multiple filters at once. @param **kwargs: Multiple filters are registered using keyword variables. Each keyword must correspond to a field name with an optional suffix: field: Field equal to value or in list of values. field_ic: Field equal to value or in list of values, using case insensitive comparison. field_regex: Field matches regex value or matches with any regex in list of values. field_ic_regex: Field matches regex value or matches with any regex in list of values using case insensitive match. """ for (key, patterns) in kwargs.items(): if key.endswith('_regex'): col = key[:-len('_regex')] is_regex = True else: col = key is_regex = False if col.endswith('_ic'): col = col[:-len('_ic')] ignore_case = True else: ignore_case = False self.registerFilter(col, patterns, is_regex, ignore_case)
python
def registerFilters(self, **kwargs): """Register multiple filters at once. @param **kwargs: Multiple filters are registered using keyword variables. Each keyword must correspond to a field name with an optional suffix: field: Field equal to value or in list of values. field_ic: Field equal to value or in list of values, using case insensitive comparison. field_regex: Field matches regex value or matches with any regex in list of values. field_ic_regex: Field matches regex value or matches with any regex in list of values using case insensitive match. """ for (key, patterns) in kwargs.items(): if key.endswith('_regex'): col = key[:-len('_regex')] is_regex = True else: col = key is_regex = False if col.endswith('_ic'): col = col[:-len('_ic')] ignore_case = True else: ignore_case = False self.registerFilter(col, patterns, is_regex, ignore_case)
[ "def", "registerFilters", "(", "self", ",", "*", "*", "kwargs", ")", ":", "for", "(", "key", ",", "patterns", ")", "in", "kwargs", ".", "items", "(", ")", ":", "if", "key", ".", "endswith", "(", "'_regex'", ")", ":", "col", "=", "key", "[", ":", "-", "len", "(", "'_regex'", ")", "]", "is_regex", "=", "True", "else", ":", "col", "=", "key", "is_regex", "=", "False", "if", "col", ".", "endswith", "(", "'_ic'", ")", ":", "col", "=", "col", "[", ":", "-", "len", "(", "'_ic'", ")", "]", "ignore_case", "=", "True", "else", ":", "ignore_case", "=", "False", "self", ".", "registerFilter", "(", "col", ",", "patterns", ",", "is_regex", ",", "ignore_case", ")" ]
Register multiple filters at once. @param **kwargs: Multiple filters are registered using keyword variables. Each keyword must correspond to a field name with an optional suffix: field: Field equal to value or in list of values. field_ic: Field equal to value or in list of values, using case insensitive comparison. field_regex: Field matches regex value or matches with any regex in list of values. field_ic_regex: Field matches regex value or matches with any regex in list of values using case insensitive match.
[ "Register", "multiple", "filters", "at", "once", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/util.py#L275-L304
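The keyword-suffix convention above can be exercised like this; a sketch equivalent to two explicit `registerFilter` calls, again with hypothetical column names.

```python
# Hedged sketch: the suffix parsing maps each keyword onto a
# registerFilter call ('_regex' -> is_regex=True, '_ic' -> ignore_case=True).
from pysysinfo.util import TableFilter

tf = TableFilter()
tf.registerFilters(user=('root', 'daemon'),      # exact match on 'user'
                   command_ic_regex='sshd?')     # case-insensitive regex
```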
aouyar/PyMunin
pysysinfo/util.py
TableFilter.applyFilters
def applyFilters(self, headers, table): """Apply filter on ps command result. @param headers: List of column headers. @param table: Nested list of rows and columns. @return: Nested list of rows and columns filtered using registered filters. """ result = [] column_idxs = {} for column in self._filters.keys(): try: column_idxs[column] = headers.index(column) except ValueError: raise ValueError('Invalid column name %s in filter.' % column) for row in table: for (column, (patterns, is_regex, ignore_case)) in self._filters.items(): col_idx = column_idxs[column] col_val = row[col_idx] if is_regex: for pattern in patterns: if pattern.search(col_val): break else: break else: if ignore_case: col_val = col_val.lower() if col_val in patterns: pass else: break else: result.append(row) return result
python
def applyFilters(self, headers, table): """Apply filter on ps command result. @param headers: List of column headers. @param table: Nested list of rows and columns. @return: Nested list of rows and columns filtered using registered filters. """ result = [] column_idxs = {} for column in self._filters.keys(): try: column_idxs[column] = headers.index(column) except ValueError: raise ValueError('Invalid column name %s in filter.' % column) for row in table: for (column, (patterns, is_regex, ignore_case)) in self._filters.items(): col_idx = column_idxs[column] col_val = row[col_idx] if is_regex: for pattern in patterns: if pattern.search(col_val): break else: break else: if ignore_case: col_val = col_val.lower() if col_val in patterns: pass else: break else: result.append(row) return result
[ "def", "applyFilters", "(", "self", ",", "headers", ",", "table", ")", ":", "result", "=", "[", "]", "column_idxs", "=", "{", "}", "for", "column", "in", "self", ".", "_filters", ".", "keys", "(", ")", ":", "try", ":", "column_idxs", "[", "column", "]", "=", "headers", ".", "index", "(", "column", ")", "except", "ValueError", ":", "raise", "ValueError", "(", "'Invalid column name %s in filter.'", "%", "column", ")", "for", "row", "in", "table", ":", "for", "(", "column", ",", "(", "patterns", ",", "is_regex", ",", "ignore_case", ")", ")", "in", "self", ".", "_filters", ".", "items", "(", ")", ":", "col_idx", "=", "column_idxs", "[", "column", "]", "col_val", "=", "row", "[", "col_idx", "]", "if", "is_regex", ":", "for", "pattern", "in", "patterns", ":", "if", "pattern", ".", "search", "(", "col_val", ")", ":", "break", "else", ":", "break", "else", ":", "if", "ignore_case", ":", "col_val", "=", "col_val", ".", "lower", "(", ")", "if", "col_val", "in", "patterns", ":", "pass", "else", ":", "break", "else", ":", "result", ".", "append", "(", "row", ")", "return", "result" ]
Apply filter on ps command result. @param headers: List of column headers. @param table: Nested list of rows and columns. @return: Nested list of rows and columns filtered using registered filters.
[ "Apply", "filter", "on", "ps", "command", "result", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/util.py#L306-L343
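A short sketch of the filtering pass above: a row survives only if every registered column filter matches it. The headers and rows are hypothetical ps-style output; the `TableFilter` constructor is assumed to take no arguments.

```python
# Hedged sketch: only rows matching every registered filter are kept.
from pysysinfo.util import TableFilter

tf = TableFilter()
tf.registerFilter('user', ('root',))
headers = ['user', 'pid', 'command']
table = [['root',  '1',    '/sbin/init'],
         ['alice', '4242', 'sshd: alice@pts/0']]
rows = tf.applyFilters(headers, table)   # -> [['root', '1', '/sbin/init']]
```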
aouyar/PyMunin
pysysinfo/util.py
Telnet.open
def open(self, host=None, port=0, socket_file=None, timeout=socket.getdefaulttimeout()): """Connect to a host. With a host argument, it connects the instance using TCP; port number and timeout are optional, socket_file must be None. The port number defaults to the standard telnet port (23). With a socket_file argument, it connects the instance using named socket; timeout is optional and host must be None. Don't try to reopen an already connected instance. """ self.socket_file = socket_file if host is not None: if sys.version_info[:2] >= (2,6): telnetlib.Telnet.open(self, host, port, timeout) else: telnetlib.Telnet.open(self, host, port) elif socket_file is not None: self.eof = 0 self.host = host self.port = port self.timeout = timeout self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self.sock.settimeout(timeout) self.sock.connect(socket_file) else: raise TypeError("Either host or socket_file argument is required.")
python
def open(self, host=None, port=0, socket_file=None, timeout=socket.getdefaulttimeout()): """Connect to a host. With a host argument, it connects the instance using TCP; port number and timeout are optional, socket_file must be None. The port number defaults to the standard telnet port (23). With a socket_file argument, it connects the instance using named socket; timeout is optional and host must be None. Don't try to reopen an already connected instance. """ self.socket_file = socket_file if host is not None: if sys.version_info[:2] >= (2,6): telnetlib.Telnet.open(self, host, port, timeout) else: telnetlib.Telnet.open(self, host, port) elif socket_file is not None: self.eof = 0 self.host = host self.port = port self.timeout = timeout self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) self.sock.settimeout(timeout) self.sock.connect(socket_file) else: raise TypeError("Either host or socket_file argument is required.")
[ "def", "open", "(", "self", ",", "host", "=", "None", ",", "port", "=", "0", ",", "socket_file", "=", "None", ",", "timeout", "=", "socket", ".", "getdefaulttimeout", "(", ")", ")", ":", "self", ".", "socket_file", "=", "socket_file", "if", "host", "is", "not", "None", ":", "if", "sys", ".", "version_info", "[", ":", "2", "]", ">=", "(", "2", ",", "6", ")", ":", "telnetlib", ".", "Telnet", ".", "open", "(", "self", ",", "host", ",", "port", ",", "timeout", ")", "else", ":", "telnetlib", ".", "Telnet", ".", "open", "(", "self", ",", "host", ",", "port", ")", "elif", "socket_file", "is", "not", "None", ":", "self", ".", "eof", "=", "0", "self", ".", "host", "=", "host", "self", ".", "port", "=", "port", "self", ".", "timeout", "=", "timeout", "self", ".", "sock", "=", "socket", ".", "socket", "(", "socket", ".", "AF_UNIX", ",", "socket", ".", "SOCK_STREAM", ")", "self", ".", "sock", ".", "settimeout", "(", "timeout", ")", "self", ".", "sock", ".", "connect", "(", "socket_file", ")", "else", ":", "raise", "TypeError", "(", "\"Either host or socket_file argument is required.\"", ")" ]
Connect to a host. With a host argument, it connects the instance using TCP; port number and timeout are optional, socket_file must be None. The port number defaults to the standard telnet port (23). With a socket_file argument, it connects the instance using named socket; timeout is optional and host must be None. Don't try to reopen an already connected instance.
[ "Connect", "to", "a", "host", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/util.py#L366-L395
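A sketch of the two connection modes this `open` override supports; the UNIX socket path is hypothetical and the TCP target is a local telnet service.

```python
# Hedged sketch: TCP mode vs. named-socket mode (paths hypothetical).
from pysysinfo.util import Telnet

tcp_conn = Telnet()
tcp_conn.open(host='127.0.0.1', port=23, timeout=5)          # TCP mode

unix_conn = Telnet()
unix_conn.open(socket_file='/var/run/svc.sock', timeout=5)   # named socket
```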
ContextLab/quail
quail/analysis/analysis.py
analyze
def analyze(egg, subjgroup=None, listgroup=None, subjname='Subject', listname='List', analysis=None, position=0, permute=False, n_perms=1000, parallel=False, match='exact', distance='euclidean', features=None, ts=None): """ General analysis function that groups data by subject/list number and performs analysis. Parameters ---------- egg : Egg data object The data to be analyzed subjgroup : list of strings or ints String/int variables indicating how to group over subjects. Must be the length of the number of subjects subjname : string Name of the subject grouping variable listgroup : list of strings or ints String/int variables indicating how to group over list. Must be the length of the number of lists listname : string Name of the list grouping variable analysis : string This is the analysis you want to run. Can be accuracy, spc, pfr, temporal or fingerprint position : int Optional argument for pnr analysis. Defines encoding position of item to run pnr. Default is 0, and it is zero indexed permute : bool Optional argument for fingerprint/temporal cluster analyses. Determines whether to correct clustering scores by shuffling recall order for each list to create a distribution of clustering scores (for each feature). The "corrected" clustering score is the proportion of clustering scores in that random distribution that were lower than the clustering score for the observed recall sequence. Default is False. n_perms : int Optional argument for fingerprint/temporal cluster analyses. Number of permutations to run for "corrected" clustering scores. Default is 1000 ( per recall list). parallel : bool Option to use multiprocessing (this can help speed up the permutations tests in the clustering calculations) match : str (exact, best or smooth) Matching approach to compute recall matrix. If exact, the presented and recalled items must be identical (default). If best, the recalled item that is most similar to the presented items will be selected. If smooth, a weighted average of all presented items will be used, where the weights are derived from the similarity between the recalled item and each presented item. distance : str The distance function used to compare presented and recalled items. Applies only to 'best' and 'smooth' matching approaches. Can be any distance function supported by numpy.spatial.distance.cdist. Returns ---------- result : quail.FriedEgg Class instance containing the analysis results """ if analysis is None: raise ValueError('You must pass an analysis type.') if analysis not in analyses.keys(): raise ValueError('Analysis not recognized. Choose one of the following: ' 'accuracy, spc, pfr, lag-crp, fingerprint, temporal') from ..egg import FriedEgg if hasattr(egg, 'subjgroup'): if egg.subjgroup is not None: subjgroup = egg.subjgroup if hasattr(egg, 'subjname'): if egg.subjname is not None: subjname = egg.subjname if hasattr(egg, 'listgroup'): if egg.listgroup is not None: listgroup = egg.listgroup if hasattr(egg, 'listname'): if egg.listname is not None: listname = egg.listname if features is None: features = egg.feature_names opts = { 'subjgroup' : subjgroup, 'listgroup' : listgroup, 'subjname' : subjname, 'parallel' : parallel, 'match' : match, 'distance' : distance, 'features' : features, 'analysis_type' : analysis, 'analysis' : analyses[analysis] } if analysis is 'pfr': opts.update({'position' : 0}) elif analysis is 'pnr': opts.update({'position' : position}) if analysis is 'temporal': opts.update({'features' : ['temporal']}) if analysis in ['temporal', 'fingerprint']: opts.update({'permute' : permute, 'n_perms' : n_perms}) if analysis is 'lagcrp': opts.update({'ts' : ts}) return FriedEgg(data=_analyze_chunk(egg, **opts), analysis=analysis, list_length=egg.list_length, n_lists=egg.n_lists, n_subjects=egg.n_subjects, position=position)
python
def analyze(egg, subjgroup=None, listgroup=None, subjname='Subject', listname='List', analysis=None, position=0, permute=False, n_perms=1000, parallel=False, match='exact', distance='euclidean', features=None, ts=None): """ General analysis function that groups data by subject/list number and performs analysis. Parameters ---------- egg : Egg data object The data to be analyzed subjgroup : list of strings or ints String/int variables indicating how to group over subjects. Must be the length of the number of subjects subjname : string Name of the subject grouping variable listgroup : list of strings or ints String/int variables indicating how to group over list. Must be the length of the number of lists listname : string Name of the list grouping variable analysis : string This is the analysis you want to run. Can be accuracy, spc, pfr, temporal or fingerprint position : int Optional argument for pnr analysis. Defines encoding position of item to run pnr. Default is 0, and it is zero indexed permute : bool Optional argument for fingerprint/temporal cluster analyses. Determines whether to correct clustering scores by shuffling recall order for each list to create a distribution of clustering scores (for each feature). The "corrected" clustering score is the proportion of clustering scores in that random distribution that were lower than the clustering score for the observed recall sequence. Default is False. n_perms : int Optional argument for fingerprint/temporal cluster analyses. Number of permutations to run for "corrected" clustering scores. Default is 1000 ( per recall list). parallel : bool Option to use multiprocessing (this can help speed up the permutations tests in the clustering calculations) match : str (exact, best or smooth) Matching approach to compute recall matrix. If exact, the presented and recalled items must be identical (default). If best, the recalled item that is most similar to the presented items will be selected. If smooth, a weighted average of all presented items will be used, where the weights are derived from the similarity between the recalled item and each presented item. distance : str The distance function used to compare presented and recalled items. Applies only to 'best' and 'smooth' matching approaches. Can be any distance function supported by numpy.spatial.distance.cdist. Returns ---------- result : quail.FriedEgg Class instance containing the analysis results """ if analysis is None: raise ValueError('You must pass an analysis type.') if analysis not in analyses.keys(): raise ValueError('Analysis not recognized. Choose one of the following: ' 'accuracy, spc, pfr, lag-crp, fingerprint, temporal') from ..egg import FriedEgg if hasattr(egg, 'subjgroup'): if egg.subjgroup is not None: subjgroup = egg.subjgroup if hasattr(egg, 'subjname'): if egg.subjname is not None: subjname = egg.subjname if hasattr(egg, 'listgroup'): if egg.listgroup is not None: listgroup = egg.listgroup if hasattr(egg, 'listname'): if egg.listname is not None: listname = egg.listname if features is None: features = egg.feature_names opts = { 'subjgroup' : subjgroup, 'listgroup' : listgroup, 'subjname' : subjname, 'parallel' : parallel, 'match' : match, 'distance' : distance, 'features' : features, 'analysis_type' : analysis, 'analysis' : analyses[analysis] } if analysis is 'pfr': opts.update({'position' : 0}) elif analysis is 'pnr': opts.update({'position' : position}) if analysis is 'temporal': opts.update({'features' : ['temporal']}) if analysis in ['temporal', 'fingerprint']: opts.update({'permute' : permute, 'n_perms' : n_perms}) if analysis is 'lagcrp': opts.update({'ts' : ts}) return FriedEgg(data=_analyze_chunk(egg, **opts), analysis=analysis, list_length=egg.list_length, n_lists=egg.n_lists, n_subjects=egg.n_subjects, position=position)
[ "def", "analyze", "(", "egg", ",", "subjgroup", "=", "None", ",", "listgroup", "=", "None", ",", "subjname", "=", "'Subject'", ",", "listname", "=", "'List'", ",", "analysis", "=", "None", ",", "position", "=", "0", ",", "permute", "=", "False", ",", "n_perms", "=", "1000", ",", "parallel", "=", "False", ",", "match", "=", "'exact'", ",", "distance", "=", "'euclidean'", ",", "features", "=", "None", ",", "ts", "=", "None", ")", ":", "if", "analysis", "is", "None", ":", "raise", "ValueError", "(", "'You must pass an analysis type.'", ")", "if", "analysis", "not", "in", "analyses", ".", "keys", "(", ")", ":", "raise", "ValueError", "(", "'Analysis not recognized. Choose one of the following: '", "'accuracy, spc, pfr, lag-crp, fingerprint, temporal'", ")", "from", ".", ".", "egg", "import", "FriedEgg", "if", "hasattr", "(", "egg", ",", "'subjgroup'", ")", ":", "if", "egg", ".", "subjgroup", "is", "not", "None", ":", "subjgroup", "=", "egg", ".", "subjgroup", "if", "hasattr", "(", "egg", ",", "'subjname'", ")", ":", "if", "egg", ".", "subjname", "is", "not", "None", ":", "subjname", "=", "egg", ".", "subjname", "if", "hasattr", "(", "egg", ",", "'listgroup'", ")", ":", "if", "egg", ".", "listgroup", "is", "not", "None", ":", "listgroup", "=", "egg", ".", "listgroup", "if", "hasattr", "(", "egg", ",", "'listname'", ")", ":", "if", "egg", ".", "listname", "is", "not", "None", ":", "listname", "=", "egg", ".", "listname", "if", "features", "is", "None", ":", "features", "=", "egg", ".", "feature_names", "opts", "=", "{", "'subjgroup'", ":", "subjgroup", ",", "'listgroup'", ":", "listgroup", ",", "'subjname'", ":", "subjname", ",", "'parallel'", ":", "parallel", ",", "'match'", ":", "match", ",", "'distance'", ":", "distance", ",", "'features'", ":", "features", ",", "'analysis_type'", ":", "analysis", ",", "'analysis'", ":", "analyses", "[", "analysis", "]", "}", "if", "analysis", "is", "'pfr'", ":", "opts", ".", "update", "(", "{", "'position'", ":", "0", "}", ")", "elif", "analysis", "is", "'pnr'", ":", "opts", ".", "update", "(", "{", "'position'", ":", "position", "}", ")", "if", "analysis", "is", "'temporal'", ":", "opts", ".", "update", "(", "{", "'features'", ":", "[", "'temporal'", "]", "}", ")", "if", "analysis", "in", "[", "'temporal'", ",", "'fingerprint'", "]", ":", "opts", ".", "update", "(", "{", "'permute'", ":", "permute", ",", "'n_perms'", ":", "n_perms", "}", ")", "if", "analysis", "is", "'lagcrp'", ":", "opts", ".", "update", "(", "{", "'ts'", ":", "ts", "}", ")", "return", "FriedEgg", "(", "data", "=", "_analyze_chunk", "(", "egg", ",", "*", "*", "opts", ")", ",", "analysis", "=", "analysis", ",", "list_length", "=", "egg", ".", "list_length", ",", "n_lists", "=", "egg", ".", "n_lists", ",", "n_subjects", "=", "egg", ".", "n_subjects", ",", "position", "=", "position", ")" ]
General analysis function that groups data by subject/list number and performs analysis. Parameters ---------- egg : Egg data object The data to be analyzed subjgroup : list of strings or ints String/int variables indicating how to group over subjects. Must be the length of the number of subjects subjname : string Name of the subject grouping variable listgroup : list of strings or ints String/int variables indicating how to group over list. Must be the length of the number of lists listname : string Name of the list grouping variable analysis : string This is the analysis you want to run. Can be accuracy, spc, pfr, temporal or fingerprint position : int Optional argument for pnr analysis. Defines encoding position of item to run pnr. Default is 0, and it is zero indexed permute : bool Optional argument for fingerprint/temporal cluster analyses. Determines whether to correct clustering scores by shuffling recall order for each list to create a distribution of clustering scores (for each feature). The "corrected" clustering score is the proportion of clustering scores in that random distribution that were lower than the clustering score for the observed recall sequence. Default is False. n_perms : int Optional argument for fingerprint/temporal cluster analyses. Number of permutations to run for "corrected" clustering scores. Default is 1000 ( per recall list). parallel : bool Option to use multiprocessing (this can help speed up the permutations tests in the clustering calculations) match : str (exact, best or smooth) Matching approach to compute recall matrix. If exact, the presented and recalled items must be identical (default). If best, the recalled item that is most similar to the presented items will be selected. If smooth, a weighted average of all presented items will be used, where the weights are derived from the similarity between the recalled item and each presented item. distance : str The distance function used to compare presented and recalled items. Applies only to 'best' and 'smooth' matching approaches. Can be any distance function supported by numpy.spatial.distance.cdist. Returns ---------- result : quail.FriedEgg Class instance containing the analysis results
[ "General", "analysis", "function", "that", "groups", "data", "by", "subject", "/", "list", "number", "and", "performs", "analysis", "." ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/analysis/analysis.py#L31-L155
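A hedged sketch of calling this function through the quail package, assuming the bundled example-data loader and the `FriedEgg.get_data` accessor described in the quail docs.

```python
# Hedged sketch: serial-position-curve analysis on quail's example data.
# quail.load_example_data() and fegg.get_data() are assumed from the
# quail documentation; they are not shown in the record above.
import quail

egg = quail.load_example_data()
fegg = quail.analyze(egg, analysis='spc')
df = fegg.get_data()   # pandas DataFrame of per-group results
```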
ContextLab/quail
quail/analysis/analysis.py
_analyze_chunk
def _analyze_chunk(data, subjgroup=None, subjname='Subject', listgroup=None, listname='List', analysis=None, analysis_type=None, pass_features=False, features=None, parallel=False, **kwargs): """ Private function that groups data by subject/list number and performs analysis for a chunk of data. Parameters ---------- data : Egg data object The data to be analyzed subjgroup : list of strings or ints String/int variables indicating how to group over subjects. Must be the length of the number of subjects subjname : string Name of the subject grouping variable listgroup : list of strings or ints String/int variables indicating how to group over list. Must be the length of the number of lists listname : string Name of the list grouping variable analysis : function This function analyzes data and returns it. pass_features : bool Logical indicating whether the analyses uses the features field of the Egg Returns ---------- analyzed_data : Pandas DataFrame DataFrame containing the analysis results """ # perform the analysis def _analysis(c): subj, lst = c subjects = [s for s in subjdict[subj]] lists = [l for l in listdict[subj][lst]] s = data.crack(lists=lists, subjects=subjects) index = pd.MultiIndex.from_arrays([[subj],[lst]], names=[subjname, listname]) opts = dict() if analysis_type is 'fingerprint': opts.update({'columns' : features}) elif analysis_type is 'lagcrp': if kwargs['ts']: opts.update({'columns' : range(-kwargs['ts'],kwargs['ts']+1)}) else: opts.update({'columns' : range(-data.list_length,data.list_length+1)}) return pd.DataFrame([analysis(s, features=features, **kwargs)], index=index, **opts) subjgroup = subjgroup if subjgroup else data.pres.index.levels[0].values listgroup = listgroup if listgroup else data.pres.index.levels[1].values subjdict = {subj : data.pres.index.levels[0].values[subj==np.array(subjgroup)] for subj in set(subjgroup)} if all(isinstance(el, list) for el in listgroup): listdict = [{lst : data.pres.index.levels[1].values[lst==np.array(listgrpsub)] for lst in set(listgrpsub)} for listgrpsub in listgroup] else: listdict = [{lst : data.pres.index.levels[1].values[lst==np.array(listgroup)] for lst in set(listgroup)} for subj in subjdict] chunks = [(subj, lst) for subj in subjdict for lst in listdict[0]] if parallel: import multiprocessing from pathos.multiprocessing import ProcessingPool as Pool p = Pool(multiprocessing.cpu_count()) res = p.map(_analysis, chunks) else: res = [_analysis(c) for c in chunks] return pd.concat(res)
python
def _analyze_chunk(data, subjgroup=None, subjname='Subject', listgroup=None, listname='List', analysis=None, analysis_type=None, pass_features=False, features=None, parallel=False, **kwargs): """ Private function that groups data by subject/list number and performs analysis for a chunk of data. Parameters ---------- data : Egg data object The data to be analyzed subjgroup : list of strings or ints String/int variables indicating how to group over subjects. Must be the length of the number of subjects subjname : string Name of the subject grouping variable listgroup : list of strings or ints String/int variables indicating how to group over list. Must be the length of the number of lists listname : string Name of the list grouping variable analysis : function This function analyzes data and returns it. pass_features : bool Logical indicating whether the analyses uses the features field of the Egg Returns ---------- analyzed_data : Pandas DataFrame DataFrame containing the analysis results """ # perform the analysis def _analysis(c): subj, lst = c subjects = [s for s in subjdict[subj]] lists = [l for l in listdict[subj][lst]] s = data.crack(lists=lists, subjects=subjects) index = pd.MultiIndex.from_arrays([[subj],[lst]], names=[subjname, listname]) opts = dict() if analysis_type is 'fingerprint': opts.update({'columns' : features}) elif analysis_type is 'lagcrp': if kwargs['ts']: opts.update({'columns' : range(-kwargs['ts'],kwargs['ts']+1)}) else: opts.update({'columns' : range(-data.list_length,data.list_length+1)}) return pd.DataFrame([analysis(s, features=features, **kwargs)], index=index, **opts) subjgroup = subjgroup if subjgroup else data.pres.index.levels[0].values listgroup = listgroup if listgroup else data.pres.index.levels[1].values subjdict = {subj : data.pres.index.levels[0].values[subj==np.array(subjgroup)] for subj in set(subjgroup)} if all(isinstance(el, list) for el in listgroup): listdict = [{lst : data.pres.index.levels[1].values[lst==np.array(listgrpsub)] for lst in set(listgrpsub)} for listgrpsub in listgroup] else: listdict = [{lst : data.pres.index.levels[1].values[lst==np.array(listgroup)] for lst in set(listgroup)} for subj in subjdict] chunks = [(subj, lst) for subj in subjdict for lst in listdict[0]] if parallel: import multiprocessing from pathos.multiprocessing import ProcessingPool as Pool p = Pool(multiprocessing.cpu_count()) res = p.map(_analysis, chunks) else: res = [_analysis(c) for c in chunks] return pd.concat(res)
[ "def", "_analyze_chunk", "(", "data", ",", "subjgroup", "=", "None", ",", "subjname", "=", "'Subject'", ",", "listgroup", "=", "None", ",", "listname", "=", "'List'", ",", "analysis", "=", "None", ",", "analysis_type", "=", "None", ",", "pass_features", "=", "False", ",", "features", "=", "None", ",", "parallel", "=", "False", ",", "*", "*", "kwargs", ")", ":", "# perform the analysis", "def", "_analysis", "(", "c", ")", ":", "subj", ",", "lst", "=", "c", "subjects", "=", "[", "s", "for", "s", "in", "subjdict", "[", "subj", "]", "]", "lists", "=", "[", "l", "for", "l", "in", "listdict", "[", "subj", "]", "[", "lst", "]", "]", "s", "=", "data", ".", "crack", "(", "lists", "=", "lists", ",", "subjects", "=", "subjects", ")", "index", "=", "pd", ".", "MultiIndex", ".", "from_arrays", "(", "[", "[", "subj", "]", ",", "[", "lst", "]", "]", ",", "names", "=", "[", "subjname", ",", "listname", "]", ")", "opts", "=", "dict", "(", ")", "if", "analysis_type", "is", "'fingerprint'", ":", "opts", ".", "update", "(", "{", "'columns'", ":", "features", "}", ")", "elif", "analysis_type", "is", "'lagcrp'", ":", "if", "kwargs", "[", "'ts'", "]", ":", "opts", ".", "update", "(", "{", "'columns'", ":", "range", "(", "-", "kwargs", "[", "'ts'", "]", ",", "kwargs", "[", "'ts'", "]", "+", "1", ")", "}", ")", "else", ":", "opts", ".", "update", "(", "{", "'columns'", ":", "range", "(", "-", "data", ".", "list_length", ",", "data", ".", "list_length", "+", "1", ")", "}", ")", "return", "pd", ".", "DataFrame", "(", "[", "analysis", "(", "s", ",", "features", "=", "features", ",", "*", "*", "kwargs", ")", "]", ",", "index", "=", "index", ",", "*", "*", "opts", ")", "subjgroup", "=", "subjgroup", "if", "subjgroup", "else", "data", ".", "pres", ".", "index", ".", "levels", "[", "0", "]", ".", "values", "listgroup", "=", "listgroup", "if", "listgroup", "else", "data", ".", "pres", ".", "index", ".", "levels", "[", "1", "]", ".", "values", "subjdict", "=", "{", "subj", ":", "data", ".", "pres", ".", "index", ".", "levels", "[", "0", "]", ".", "values", "[", "subj", "==", "np", ".", "array", "(", "subjgroup", ")", "]", "for", "subj", "in", "set", "(", "subjgroup", ")", "}", "if", "all", "(", "isinstance", "(", "el", ",", "list", ")", "for", "el", "in", "listgroup", ")", ":", "listdict", "=", "[", "{", "lst", ":", "data", ".", "pres", ".", "index", ".", "levels", "[", "1", "]", ".", "values", "[", "lst", "==", "np", ".", "array", "(", "listgrpsub", ")", "]", "for", "lst", "in", "set", "(", "listgrpsub", ")", "}", "for", "listgrpsub", "in", "listgroup", "]", "else", ":", "listdict", "=", "[", "{", "lst", ":", "data", ".", "pres", ".", "index", ".", "levels", "[", "1", "]", ".", "values", "[", "lst", "==", "np", ".", "array", "(", "listgroup", ")", "]", "for", "lst", "in", "set", "(", "listgroup", ")", "}", "for", "subj", "in", "subjdict", "]", "chunks", "=", "[", "(", "subj", ",", "lst", ")", "for", "subj", "in", "subjdict", "for", "lst", "in", "listdict", "[", "0", "]", "]", "if", "parallel", ":", "import", "multiprocessing", "from", "pathos", ".", "multiprocessing", "import", "ProcessingPool", "as", "Pool", "p", "=", "Pool", "(", "multiprocessing", ".", "cpu_count", "(", ")", ")", "res", "=", "p", ".", "map", "(", "_analysis", ",", "chunks", ")", "else", ":", "res", "=", "[", "_analysis", "(", "c", ")", "for", "c", "in", "chunks", "]", "return", "pd", ".", "concat", "(", "res", ")" ]
Private function that groups data by subject/list number and performs analysis for a chunk of data. Parameters ---------- data : Egg data object The data to be analyzed subjgroup : list of strings or ints String/int variables indicating how to group over subjects. Must be the length of the number of subjects subjname : string Name of the subject grouping variable listgroup : list of strings or ints String/int variables indicating how to group over list. Must be the length of the number of lists listname : string Name of the list grouping variable analysis : function This function analyzes data and returns it. pass_features : bool Logical indicating whether the analyses uses the features field of the Egg Returns ---------- analyzed_data : Pandas DataFrame DataFrame containing the analysis results
[ "Private", "function", "that", "groups", "data", "by", "subject", "/", "list", "number", "and", "performs", "analysis", "for", "a", "chunk", "of", "data", "." ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/analysis/analysis.py#L157-L236
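An isolated sketch of the grouping idiom used in `_analyze_chunk` above: boolean masking maps each group label to the index values belonging to it (labels hypothetical).

```python
# Minimal sketch of the subjdict construction, outside the class.
import numpy as np

subjgroup = ['ctl', 'ctl', 'exp']
idx_values = np.array([0, 1, 2])   # stand-in for data.pres.index.levels[0].values
subjdict = {g: idx_values[np.array(subjgroup) == g] for g in set(subjgroup)}
# -> {'ctl': array([0, 1]), 'exp': array([2])}
```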
aouyar/PyMunin
pymunin/plugins/tomcatstats.py
MuninTomcatPlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" if self.hasGraph('tomcat_memory'): stats = self._tomcatInfo.getMemoryStats() self.setGraphVal('tomcat_memory', 'used', stats['total'] - stats['free']) self.setGraphVal('tomcat_memory', 'free', stats['free']) self.setGraphVal('tomcat_memory', 'max', stats['max']) for (port, stats) in self._tomcatInfo.getConnectorStats().iteritems(): thrstats = stats['threadInfo'] reqstats = stats['requestInfo'] if self.portIncluded(port): name = "tomcat_threads_%d" % port if self.hasGraph(name): self.setGraphVal(name, 'busy', thrstats['currentThreadsBusy']) self.setGraphVal(name, 'idle', thrstats['currentThreadCount'] - thrstats['currentThreadsBusy']) self.setGraphVal(name, 'max', thrstats['maxThreads']) name = "tomcat_access_%d" % port if self.hasGraph(name): self.setGraphVal(name, 'reqs', reqstats['requestCount']) name = "tomcat_error_%d" % port if self.hasGraph(name): self.setGraphVal(name, 'errors', reqstats['errorCount']) name = "tomcat_traffic_%d" % port if self.hasGraph(name): self.setGraphVal(name, 'rx', reqstats['bytesReceived']) self.setGraphVal(name, 'tx', reqstats['bytesSent'])
python
def retrieveVals(self): """Retrieve values for graphs.""" if self.hasGraph('tomcat_memory'): stats = self._tomcatInfo.getMemoryStats() self.setGraphVal('tomcat_memory', 'used', stats['total'] - stats['free']) self.setGraphVal('tomcat_memory', 'free', stats['free']) self.setGraphVal('tomcat_memory', 'max', stats['max']) for (port, stats) in self._tomcatInfo.getConnectorStats().iteritems(): thrstats = stats['threadInfo'] reqstats = stats['requestInfo'] if self.portIncluded(port): name = "tomcat_threads_%d" % port if self.hasGraph(name): self.setGraphVal(name, 'busy', thrstats['currentThreadsBusy']) self.setGraphVal(name, 'idle', thrstats['currentThreadCount'] - thrstats['currentThreadsBusy']) self.setGraphVal(name, 'max', thrstats['maxThreads']) name = "tomcat_access_%d" % port if self.hasGraph(name): self.setGraphVal(name, 'reqs', reqstats['requestCount']) name = "tomcat_error_%d" % port if self.hasGraph(name): self.setGraphVal(name, 'errors', reqstats['errorCount']) name = "tomcat_traffic_%d" % port if self.hasGraph(name): self.setGraphVal(name, 'rx', reqstats['bytesReceived']) self.setGraphVal(name, 'tx', reqstats['bytesSent'])
[ "def", "retrieveVals", "(", "self", ")", ":", "if", "self", ".", "hasGraph", "(", "'tomcat_memory'", ")", ":", "stats", "=", "self", ".", "_tomcatInfo", ".", "getMemoryStats", "(", ")", "self", ".", "setGraphVal", "(", "'tomcat_memory'", ",", "'used'", ",", "stats", "[", "'total'", "]", "-", "stats", "[", "'free'", "]", ")", "self", ".", "setGraphVal", "(", "'tomcat_memory'", ",", "'free'", ",", "stats", "[", "'free'", "]", ")", "self", ".", "setGraphVal", "(", "'tomcat_memory'", ",", "'max'", ",", "stats", "[", "'max'", "]", ")", "for", "(", "port", ",", "stats", ")", "in", "self", ".", "_tomcatInfo", ".", "getConnectorStats", "(", ")", ".", "iteritems", "(", ")", ":", "thrstats", "=", "stats", "[", "'threadInfo'", "]", "reqstats", "=", "stats", "[", "'requestInfo'", "]", "if", "self", ".", "portIncluded", "(", "port", ")", ":", "name", "=", "\"tomcat_threads_%d\"", "%", "port", "if", "self", ".", "hasGraph", "(", "name", ")", ":", "self", ".", "setGraphVal", "(", "name", ",", "'busy'", ",", "thrstats", "[", "'currentThreadsBusy'", "]", ")", "self", ".", "setGraphVal", "(", "name", ",", "'idle'", ",", "thrstats", "[", "'currentThreadCount'", "]", "-", "thrstats", "[", "'currentThreadsBusy'", "]", ")", "self", ".", "setGraphVal", "(", "name", ",", "'max'", ",", "thrstats", "[", "'maxThreads'", "]", ")", "name", "=", "\"tomcat_access_%d\"", "%", "port", "if", "self", ".", "hasGraph", "(", "name", ")", ":", "self", ".", "setGraphVal", "(", "name", ",", "'reqs'", ",", "reqstats", "[", "'requestCount'", "]", ")", "name", "=", "\"tomcat_error_%d\"", "%", "port", "if", "self", ".", "hasGraph", "(", "name", ")", ":", "self", ".", "setGraphVal", "(", "name", ",", "'errors'", ",", "reqstats", "[", "'errorCount'", "]", ")", "name", "=", "\"tomcat_traffic_%d\"", "%", "port", "if", "self", ".", "hasGraph", "(", "name", ")", ":", "self", ".", "setGraphVal", "(", "name", ",", "'rx'", ",", "reqstats", "[", "'bytesReceived'", "]", ")", "self", ".", "setGraphVal", "(", "name", ",", "'tx'", ",", "reqstats", "[", "'bytesSent'", "]", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/tomcatstats.py#L196-L225
dfm/acor
acor/acor.py
function
def function(data, maxt=None): """ Calculate the autocorrelation function for a 1D time series. Parameters ---------- data : numpy.ndarray (N,) The time series. Returns ------- rho : numpy.ndarray (N,) An autocorrelation function. """ data = np.atleast_1d(data) assert len(np.shape(data)) == 1, \ "The autocorrelation function can only by computed " \ + "on a 1D time series." if maxt is None: maxt = len(data) result = np.zeros(maxt, dtype=float) _acor.function(np.array(data, dtype=float), result) return result / result[0]
python
def function(data, maxt=None): """ Calculate the autocorrelation function for a 1D time series. Parameters ---------- data : numpy.ndarray (N,) The time series. Returns ------- rho : numpy.ndarray (N,) An autocorrelation function. """ data = np.atleast_1d(data) assert len(np.shape(data)) == 1, \ "The autocorrelation function can only by computed " \ + "on a 1D time series." if maxt is None: maxt = len(data) result = np.zeros(maxt, dtype=float) _acor.function(np.array(data, dtype=float), result) return result / result[0]
[ "def", "function", "(", "data", ",", "maxt", "=", "None", ")", ":", "data", "=", "np", ".", "atleast_1d", "(", "data", ")", "assert", "len", "(", "np", ".", "shape", "(", "data", ")", ")", "==", "1", ",", "\"The autocorrelation function can only by computed \"", "+", "\"on a 1D time series.\"", "if", "maxt", "is", "None", ":", "maxt", "=", "len", "(", "data", ")", "result", "=", "np", ".", "zeros", "(", "maxt", ",", "dtype", "=", "float", ")", "_acor", ".", "function", "(", "np", ".", "array", "(", "data", ",", "dtype", "=", "float", ")", ",", "result", ")", "return", "result", "/", "result", "[", "0", "]" ]
Calculate the autocorrelation function for a 1D time series. Parameters ---------- data : numpy.ndarray (N,) The time series. Returns ------- rho : numpy.ndarray (N,) An autocorrelation function.
[ "Calculate", "the", "autocorrelation", "function", "for", "a", "1D", "time", "series", "." ]
train
https://github.com/dfm/acor/blob/b55eb8efa7df6c73b6f3f0c9b64fa1c801e8f821/acor/acor.py#L36-L59
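A hedged sketch of computing the normalized autocorrelation of a noisy AR(1) series with the function above; it requires acor's compiled `_acor` extension, and the package-level export of `function` is assumed.

```python
# Hedged sketch: rho[0] == 1.0 by construction; for an AR(1) process
# with coefficient 0.9 the curve decays roughly as 0.9**lag.
import numpy as np
from acor import function   # package-level export assumed

x = np.empty(10000)
x[0] = 0.0
for i in range(1, len(x)):
    x[i] = 0.9 * x[i - 1] + np.random.randn()
rho = function(x, maxt=50)
```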
aouyar/PyMunin
pymunin/plugins/nginxstats.py
MuninNginxPlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" nginxInfo = NginxInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) stats = nginxInfo.getServerStats() if stats: if self.hasGraph('nginx_activeconn'): self.setGraphVal('nginx_activeconn', 'proc', stats['writing']) self.setGraphVal('nginx_activeconn', 'read', stats['reading']) self.setGraphVal('nginx_activeconn', 'wait', stats['waiting']) self.setGraphVal('nginx_activeconn', 'total', stats['connections']) if self.hasGraph('nginx_connections'): self.setGraphVal('nginx_connections', 'handled', stats['handled']) self.setGraphVal('nginx_connections', 'nothandled', stats['accepts'] - stats['handled']) if self.hasGraph('nginx_requests'): self.setGraphVal('nginx_requests', 'requests', stats['requests']) if self.hasGraph('nginx_requestsperconn'): curr_stats = (stats['handled'], stats['requests']) hist_stats = self.restoreState() if hist_stats: prev_stats = hist_stats[0] else: hist_stats = [] prev_stats = (0,0) conns = max(curr_stats[0] - prev_stats[0], 0) reqs = max(curr_stats[1] - prev_stats[1], 0) if conns > 0: self.setGraphVal('nginx_requestsperconn', 'requests', float(reqs) / float(conns)) else: self.setGraphVal('nginx_requestsperconn', 'requests', 0) hist_stats.append(curr_stats) self.saveState(hist_stats[-self._numSamples:])
python
def retrieveVals(self): """Retrieve values for graphs.""" nginxInfo = NginxInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) stats = nginxInfo.getServerStats() if stats: if self.hasGraph('nginx_activeconn'): self.setGraphVal('nginx_activeconn', 'proc', stats['writing']) self.setGraphVal('nginx_activeconn', 'read', stats['reading']) self.setGraphVal('nginx_activeconn', 'wait', stats['waiting']) self.setGraphVal('nginx_activeconn', 'total', stats['connections']) if self.hasGraph('nginx_connections'): self.setGraphVal('nginx_connections', 'handled', stats['handled']) self.setGraphVal('nginx_connections', 'nothandled', stats['accepts'] - stats['handled']) if self.hasGraph('nginx_requests'): self.setGraphVal('nginx_requests', 'requests', stats['requests']) if self.hasGraph('nginx_requestsperconn'): curr_stats = (stats['handled'], stats['requests']) hist_stats = self.restoreState() if hist_stats: prev_stats = hist_stats[0] else: hist_stats = [] prev_stats = (0,0) conns = max(curr_stats[0] - prev_stats[0], 0) reqs = max(curr_stats[1] - prev_stats[1], 0) if conns > 0: self.setGraphVal('nginx_requestsperconn', 'requests', float(reqs) / float(conns)) else: self.setGraphVal('nginx_requestsperconn', 'requests', 0) hist_stats.append(curr_stats) self.saveState(hist_stats[-self._numSamples:])
[ "def", "retrieveVals", "(", "self", ")", ":", "nginxInfo", "=", "NginxInfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_statuspath", ",", "self", ".", "_ssl", ")", "stats", "=", "nginxInfo", ".", "getServerStats", "(", ")", "if", "stats", ":", "if", "self", ".", "hasGraph", "(", "'nginx_activeconn'", ")", ":", "self", ".", "setGraphVal", "(", "'nginx_activeconn'", ",", "'proc'", ",", "stats", "[", "'writing'", "]", ")", "self", ".", "setGraphVal", "(", "'nginx_activeconn'", ",", "'read'", ",", "stats", "[", "'reading'", "]", ")", "self", ".", "setGraphVal", "(", "'nginx_activeconn'", ",", "'wait'", ",", "stats", "[", "'waiting'", "]", ")", "self", ".", "setGraphVal", "(", "'nginx_activeconn'", ",", "'total'", ",", "stats", "[", "'connections'", "]", ")", "if", "self", ".", "hasGraph", "(", "'nginx_connections'", ")", ":", "self", ".", "setGraphVal", "(", "'nginx_connections'", ",", "'handled'", ",", "stats", "[", "'handled'", "]", ")", "self", ".", "setGraphVal", "(", "'nginx_connections'", ",", "'nothandled'", ",", "stats", "[", "'accepts'", "]", "-", "stats", "[", "'handled'", "]", ")", "if", "self", ".", "hasGraph", "(", "'nginx_requests'", ")", ":", "self", ".", "setGraphVal", "(", "'nginx_requests'", ",", "'requests'", ",", "stats", "[", "'requests'", "]", ")", "if", "self", ".", "hasGraph", "(", "'nginx_requestsperconn'", ")", ":", "curr_stats", "=", "(", "stats", "[", "'handled'", "]", ",", "stats", "[", "'requests'", "]", ")", "hist_stats", "=", "self", ".", "restoreState", "(", ")", "if", "hist_stats", ":", "prev_stats", "=", "hist_stats", "[", "0", "]", "else", ":", "hist_stats", "=", "[", "]", "prev_stats", "=", "(", "0", ",", "0", ")", "conns", "=", "max", "(", "curr_stats", "[", "0", "]", "-", "prev_stats", "[", "0", "]", ",", "0", ")", "reqs", "=", "max", "(", "curr_stats", "[", "1", "]", "-", "prev_stats", "[", "1", "]", ",", "0", ")", "if", "conns", ">", "0", ":", "self", ".", "setGraphVal", "(", "'nginx_requestsperconn'", ",", "'requests'", ",", "float", "(", "reqs", ")", "/", "float", "(", "conns", ")", ")", "else", ":", "self", ".", "setGraphVal", "(", "'nginx_requestsperconn'", ",", "'requests'", ",", "0", ")", "hist_stats", ".", "append", "(", "curr_stats", ")", "self", ".", "saveState", "(", "hist_stats", "[", "-", "self", ".", "_numSamples", ":", "]", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/nginxstats.py#L151-L186
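An isolated sketch of the rate computation above: requests per handled connection between the oldest saved sample and the current one (numbers hypothetical).

```python
# Mirrors the delta logic in retrieveVals, with made-up counter values.
prev_handled, prev_requests = 1000, 2500
curr_handled, curr_requests = 1040, 2650
conns = max(curr_handled - prev_handled, 0)     # 40 new connections
reqs = max(curr_requests - prev_requests, 0)    # 150 new requests
rate = float(reqs) / float(conns) if conns > 0 else 0.0   # 3.75
```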
aouyar/PyMunin
pymunin/plugins/nginxstats.py
MuninNginxPlugin.autoconf
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ nginxInfo = NginxInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) return nginxInfo is not None
python
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ nginxInfo = NginxInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) return nginxInfo is not None
[ "def", "autoconf", "(", "self", ")", ":", "nginxInfo", "=", "NginxInfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_statuspath", ",", "self", ".", "_ssl", ")", "return", "nginxInfo", "is", "not", "None" ]
Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise.
[ "Implements", "Munin", "Plugin", "Auto", "-", "Configuration", "Option", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/nginxstats.py#L188-L197
aouyar/PyMunin
pysysinfo/phpfpm.py
PHPfpmInfo.getStats
def getStats(self): """Query and parse Web Server Status Page. """ url = "%s://%s:%d/%s" % (self._proto, self._host, self._port, self._monpath) response = util.get_url(url, self._user, self._password) stats = {} for line in response.splitlines(): mobj = re.match('([\w\s]+):\s+(\w+)$', line) if mobj: stats[mobj.group(1)] = util.parse_value(mobj.group(2)) return stats
python
def getStats(self): """Query and parse Web Server Status Page. """ url = "%s://%s:%d/%s" % (self._proto, self._host, self._port, self._monpath) response = util.get_url(url, self._user, self._password) stats = {} for line in response.splitlines(): mobj = re.match('([\w\s]+):\s+(\w+)$', line) if mobj: stats[mobj.group(1)] = util.parse_value(mobj.group(2)) return stats
[ "def", "getStats", "(", "self", ")", ":", "url", "=", "\"%s://%s:%d/%s\"", "%", "(", "self", ".", "_proto", ",", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_monpath", ")", "response", "=", "util", ".", "get_url", "(", "url", ",", "self", ".", "_user", ",", "self", ".", "_password", ")", "stats", "=", "{", "}", "for", "line", "in", "response", ".", "splitlines", "(", ")", ":", "mobj", "=", "re", ".", "match", "(", "'([\\w\\s]+):\\s+(\\w+)$'", ",", "line", ")", "if", "mobj", ":", "stats", "[", "mobj", ".", "group", "(", "1", ")", "]", "=", "util", ".", "parse_value", "(", "mobj", ".", "group", "(", "2", ")", ")", "return", "stats" ]
Query and parse Web Server Status Page.
[ "Query", "and", "parse", "Web", "Server", "Status", "Page", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/phpfpm.py#L65-L77
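An isolated sketch of the status-page parsing above, run against a sample of the plain-text output php-fpm serves at its status path; unlike the original, this keeps values as strings instead of coercing them with `util.parse_value`.

```python
# Same regex as getStats, applied to hypothetical php-fpm status output.
import re

sample = ("pool:                 www\n"
          "accepted conn:        5678\n"
          "active processes:     3\n")
stats = {}
for line in sample.splitlines():
    mobj = re.match(r'([\w\s]+):\s+(\w+)$', line)
    if mobj:
        stats[mobj.group(1)] = mobj.group(2)
# -> {'pool': 'www', 'accepted conn': '5678', 'active processes': '3'}
```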
aouyar/PyMunin
pymunin/plugins/phpapcstats.py
MuninPHPapcPlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" apcinfo = APCinfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl, self._extras) stats = apcinfo.getAllStats() if self.hasGraph('php_apc_memory') and stats: filecache = stats['cache_sys']['mem_size'] usercache = stats['cache_user']['mem_size'] total = stats['memory']['seg_size'] * stats['memory']['num_seg'] free = stats['memory']['avail_mem'] other = total - free - filecache - usercache self.setGraphVal('php_apc_memory', 'filecache', filecache) self.setGraphVal('php_apc_memory', 'usercache', usercache) self.setGraphVal('php_apc_memory', 'other', other) self.setGraphVal('php_apc_memory', 'free', free) if self.hasGraph('php_apc_items') and stats: self.setGraphVal('php_apc_items', 'filecache', stats['cache_sys']['num_entries']) self.setGraphVal('php_apc_items', 'usercache', stats['cache_user']['num_entries']) if self.hasGraph('php_apc_reqs_filecache') and stats: self.setGraphVal('php_apc_reqs_filecache', 'hits', stats['cache_sys']['num_hits']) self.setGraphVal('php_apc_reqs_filecache', 'misses', stats['cache_sys']['num_misses']) self.setGraphVal('php_apc_reqs_filecache', 'inserts', stats['cache_sys']['num_inserts']) if self.hasGraph('php_apc_reqs_usercache') and stats: self.setGraphVal('php_apc_reqs_usercache', 'hits', stats['cache_user']['num_hits']) self.setGraphVal('php_apc_reqs_usercache', 'misses', stats['cache_user']['num_misses']) self.setGraphVal('php_apc_reqs_usercache', 'inserts', stats['cache_user']['num_inserts']) if self.hasGraph('php_apc_expunge') and stats: self.setGraphVal('php_apc_expunge', 'filecache', stats['cache_sys']['expunges']) self.setGraphVal('php_apc_expunge', 'usercache', stats['cache_user']['expunges']) if self.hasGraph('php_apc_mem_util_frag'): self.setGraphVal('php_apc_mem_util_frag', 'util', stats['memory']['utilization_ratio'] * 100) self.setGraphVal('php_apc_mem_util_frag', 'frag', stats['memory']['fragmentation_ratio'] * 100) if self.hasGraph('php_apc_mem_frag_count'): self.setGraphVal('php_apc_mem_frag_count', 'num', stats['memory']['fragment_count']) if self.hasGraph('php_apc_mem_frag_avgsize'): self.setGraphVal('php_apc_mem_frag_avgsize', 'size', stats['memory']['fragment_avg_size'])
python
def retrieveVals(self): """Retrieve values for graphs.""" apcinfo = APCinfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl, self._extras) stats = apcinfo.getAllStats() if self.hasGraph('php_apc_memory') and stats: filecache = stats['cache_sys']['mem_size'] usercache = stats['cache_user']['mem_size'] total = stats['memory']['seg_size'] * stats['memory']['num_seg'] free = stats['memory']['avail_mem'] other = total - free - filecache - usercache self.setGraphVal('php_apc_memory', 'filecache', filecache) self.setGraphVal('php_apc_memory', 'usercache', usercache) self.setGraphVal('php_apc_memory', 'other', other) self.setGraphVal('php_apc_memory', 'free', free) if self.hasGraph('php_apc_items') and stats: self.setGraphVal('php_apc_items', 'filecache', stats['cache_sys']['num_entries']) self.setGraphVal('php_apc_items', 'usercache', stats['cache_user']['num_entries']) if self.hasGraph('php_apc_reqs_filecache') and stats: self.setGraphVal('php_apc_reqs_filecache', 'hits', stats['cache_sys']['num_hits']) self.setGraphVal('php_apc_reqs_filecache', 'misses', stats['cache_sys']['num_misses']) self.setGraphVal('php_apc_reqs_filecache', 'inserts', stats['cache_sys']['num_inserts']) if self.hasGraph('php_apc_reqs_usercache') and stats: self.setGraphVal('php_apc_reqs_usercache', 'hits', stats['cache_user']['num_hits']) self.setGraphVal('php_apc_reqs_usercache', 'misses', stats['cache_user']['num_misses']) self.setGraphVal('php_apc_reqs_usercache', 'inserts', stats['cache_user']['num_inserts']) if self.hasGraph('php_apc_expunge') and stats: self.setGraphVal('php_apc_expunge', 'filecache', stats['cache_sys']['expunges']) self.setGraphVal('php_apc_expunge', 'usercache', stats['cache_user']['expunges']) if self.hasGraph('php_apc_mem_util_frag'): self.setGraphVal('php_apc_mem_util_frag', 'util', stats['memory']['utilization_ratio'] * 100) self.setGraphVal('php_apc_mem_util_frag', 'frag', stats['memory']['fragmentation_ratio'] * 100) if self.hasGraph('php_apc_mem_frag_count'): self.setGraphVal('php_apc_mem_frag_count', 'num', stats['memory']['fragment_count']) if self.hasGraph('php_apc_mem_frag_avgsize'): self.setGraphVal('php_apc_mem_frag_avgsize', 'size', stats['memory']['fragment_avg_size'])
[ "def", "retrieveVals", "(", "self", ")", ":", "apcinfo", "=", "APCinfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_monpath", ",", "self", ".", "_ssl", ",", "self", ".", "_extras", ")", "stats", "=", "apcinfo", ".", "getAllStats", "(", ")", "if", "self", ".", "hasGraph", "(", "'php_apc_memory'", ")", "and", "stats", ":", "filecache", "=", "stats", "[", "'cache_sys'", "]", "[", "'mem_size'", "]", "usercache", "=", "stats", "[", "'cache_user'", "]", "[", "'mem_size'", "]", "total", "=", "stats", "[", "'memory'", "]", "[", "'seg_size'", "]", "*", "stats", "[", "'memory'", "]", "[", "'num_seg'", "]", "free", "=", "stats", "[", "'memory'", "]", "[", "'avail_mem'", "]", "other", "=", "total", "-", "free", "-", "filecache", "-", "usercache", "self", ".", "setGraphVal", "(", "'php_apc_memory'", ",", "'filecache'", ",", "filecache", ")", "self", ".", "setGraphVal", "(", "'php_apc_memory'", ",", "'usercache'", ",", "usercache", ")", "self", ".", "setGraphVal", "(", "'php_apc_memory'", ",", "'other'", ",", "other", ")", "self", ".", "setGraphVal", "(", "'php_apc_memory'", ",", "'free'", ",", "free", ")", "if", "self", ".", "hasGraph", "(", "'php_apc_items'", ")", "and", "stats", ":", "self", ".", "setGraphVal", "(", "'php_apc_items'", ",", "'filecache'", ",", "stats", "[", "'cache_sys'", "]", "[", "'num_entries'", "]", ")", "self", ".", "setGraphVal", "(", "'php_apc_items'", ",", "'usercache'", ",", "stats", "[", "'cache_user'", "]", "[", "'num_entries'", "]", ")", "if", "self", ".", "hasGraph", "(", "'php_apc_reqs_filecache'", ")", "and", "stats", ":", "self", ".", "setGraphVal", "(", "'php_apc_reqs_filecache'", ",", "'hits'", ",", "stats", "[", "'cache_sys'", "]", "[", "'num_hits'", "]", ")", "self", ".", "setGraphVal", "(", "'php_apc_reqs_filecache'", ",", "'misses'", ",", "stats", "[", "'cache_sys'", "]", "[", "'num_misses'", "]", ")", "self", ".", "setGraphVal", "(", "'php_apc_reqs_filecache'", ",", "'inserts'", ",", "stats", "[", "'cache_sys'", "]", "[", "'num_inserts'", "]", ")", "if", "self", ".", "hasGraph", "(", "'php_apc_reqs_usercache'", ")", "and", "stats", ":", "self", ".", "setGraphVal", "(", "'php_apc_reqs_usercache'", ",", "'hits'", ",", "stats", "[", "'cache_user'", "]", "[", "'num_hits'", "]", ")", "self", ".", "setGraphVal", "(", "'php_apc_reqs_usercache'", ",", "'misses'", ",", "stats", "[", "'cache_user'", "]", "[", "'num_misses'", "]", ")", "self", ".", "setGraphVal", "(", "'php_apc_reqs_usercache'", ",", "'inserts'", ",", "stats", "[", "'cache_user'", "]", "[", "'num_inserts'", "]", ")", "if", "self", ".", "hasGraph", "(", "'php_apc_expunge'", ")", "and", "stats", ":", "self", ".", "setGraphVal", "(", "'php_apc_expunge'", ",", "'filecache'", ",", "stats", "[", "'cache_sys'", "]", "[", "'expunges'", "]", ")", "self", ".", "setGraphVal", "(", "'php_apc_expunge'", ",", "'usercache'", ",", "stats", "[", "'cache_user'", "]", "[", "'expunges'", "]", ")", "if", "self", ".", "hasGraph", "(", "'php_apc_mem_util_frag'", ")", ":", "self", ".", "setGraphVal", "(", "'php_apc_mem_util_frag'", ",", "'util'", ",", "stats", "[", "'memory'", "]", "[", "'utilization_ratio'", "]", "*", "100", ")", "self", ".", "setGraphVal", "(", "'php_apc_mem_util_frag'", ",", "'frag'", ",", "stats", "[", "'memory'", "]", "[", "'fragmentation_ratio'", "]", "*", "100", ")", "if", "self", ".", "hasGraph", "(", "'php_apc_mem_frag_count'", ")", ":", "self", ".", "setGraphVal", "(", "'php_apc_mem_frag_count'", ",", "'num'", 
",", "stats", "[", "'memory'", "]", "[", "'fragment_count'", "]", ")", "if", "self", ".", "hasGraph", "(", "'php_apc_mem_frag_avgsize'", ")", ":", "self", ".", "setGraphVal", "(", "'php_apc_mem_frag_avgsize'", ",", "'size'", ",", "stats", "[", "'memory'", "]", "[", "'fragment_avg_size'", "]", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/phpapcstats.py#L196-L246
aouyar/PyMunin
pymunin/plugins/phpapcstats.py
MuninPHPapcPlugin.autoconf
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ apcinfo = APCinfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl) return apcinfo is not None
python
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ apcinfo = APCinfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl) return apcinfo is not None
[ "def", "autoconf", "(", "self", ")", ":", "apcinfo", "=", "APCinfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_monpath", ",", "self", ".", "_ssl", ")", "return", "apcinfo", "is", "not", "None" ]
Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise.
[ "Implements", "Munin", "Plugin", "Auto", "-", "Configuration", "Option", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/phpapcstats.py#L248-L256
aouyar/PyMunin
pysysinfo/varnish.py
VarnishInfo.getStats
def getStats(self): """Runs varnishstats command to get stats from Varnish Cache. @return: Dictionary of stats. """ info_dict = {} args = [varnishstatCmd, '-1'] if self._instance is not None: args.extend(['-n', self._instance]) output = util.exec_command(args) if self._descDict is None: self._descDict = {} for line in output.splitlines(): mobj = re.match('(\S+)\s+(\d+)\s+(\d+\.\d+|\.)\s+(\S.*\S)\s*$', line) if mobj: fname = mobj.group(1).replace('.', '_') info_dict[fname] = util.parse_value(mobj.group(2)) self._descDict[fname] = mobj.group(4) return info_dict
python
def getStats(self): """Runs varnishstats command to get stats from Varnish Cache. @return: Dictionary of stats. """ info_dict = {} args = [varnishstatCmd, '-1'] if self._instance is not None: args.extend(['-n', self._instance]) output = util.exec_command(args) if self._descDict is None: self._descDict = {} for line in output.splitlines(): mobj = re.match('(\S+)\s+(\d+)\s+(\d+\.\d+|\.)\s+(\S.*\S)\s*$', line) if mobj: fname = mobj.group(1).replace('.', '_') info_dict[fname] = util.parse_value(mobj.group(2)) self._descDict[fname] = mobj.group(4) return info_dict
[ "def", "getStats", "(", "self", ")", ":", "info_dict", "=", "{", "}", "args", "=", "[", "varnishstatCmd", ",", "'-1'", "]", "if", "self", ".", "_instance", "is", "not", "None", ":", "args", ".", "extend", "(", "[", "'-n'", ",", "self", ".", "_instance", "]", ")", "output", "=", "util", ".", "exec_command", "(", "args", ")", "if", "self", ".", "_descDict", "is", "None", ":", "self", ".", "_descDict", "=", "{", "}", "for", "line", "in", "output", ".", "splitlines", "(", ")", ":", "mobj", "=", "re", ".", "match", "(", "'(\\S+)\\s+(\\d+)\\s+(\\d+\\.\\d+|\\.)\\s+(\\S.*\\S)\\s*$'", ",", "line", ")", "if", "mobj", ":", "fname", "=", "mobj", ".", "group", "(", "1", ")", ".", "replace", "(", "'.'", ",", "'_'", ")", "info_dict", "[", "fname", "]", "=", "util", ".", "parse_value", "(", "mobj", ".", "group", "(", "2", ")", ")", "self", ".", "_descDict", "[", "fname", "]", "=", "mobj", ".", "group", "(", "4", ")", "return", "info_dict" ]
Runs the varnishstat command to get stats from Varnish Cache. @return: Dictionary of stats.
[ "Runs", "varnishstats", "command", "to", "get", "stats", "from", "Varnish", "Cache", ".", "@return", ":", "Dictionary", "of", "stats", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/varnish.py#L39-L59
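The regular expression in getStats() does the parsing work. The sample line below is made up in the 'varnishstat -1' column layout (counter name, value, per-second rate, description) to show how a dotted counter name is normalized into a field name; it is a sketch, not captured varnishstat output:

import re

line = 'MAIN.cache_hit              51234         4.27 Cache hits'  # invented sample line
mobj = re.match(r'(\S+)\s+(\d+)\s+(\d+\.\d+|\.)\s+(\S.*\S)\s*$', line)
fname = mobj.group(1).replace('.', '_')   # normalize dotted counter names, as above
print(fname, int(mobj.group(2)), mobj.group(4))
# MAIN_cache_hit 51234 Cache hits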
aouyar/PyMunin
pysysinfo/varnish.py
VarnishInfo.getDesc
def getDesc(self, entry): """Returns description for stat entry. @param entry: Entry name. @return: Description for entry. """ if len(self._descDict) == 0: self.getStats() return self._descDict.get(entry)
python
def getDesc(self, entry): """Returns description for stat entry. @param entry: Entry name. @return: Description for entry. """ if len(self._descDict) == 0: self.getStats() return self._descDict.get(entry)
[ "def", "getDesc", "(", "self", ",", "entry", ")", ":", "if", "len", "(", "self", ".", "_descDict", ")", "==", "0", ":", "self", ".", "getStats", "(", ")", "return", "self", ".", "_descDict", ".", "get", "(", "entry", ")" ]
Returns description for stat entry. @param entry: Entry name. @return: Description for entry.
[ "Returns", "description", "for", "stat", "entry", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/varnish.py#L71-L80
aouyar/PyMunin
pymunin/plugins/phpopcstats.py
MuninPHPOPCPlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" opcinfo = OPCinfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl) stats = opcinfo.getAllStats() if self.hasGraph('php_opc_memory') and stats: mem = stats['memory_usage'] keys = ('used_memory', 'wasted_memory', 'free_memory') map(lambda k:self.setGraphVal('php_opc_memory',k,mem[k]), keys) if self.hasGraph('php_opc_opcache_statistics') and stats: st = stats['opcache_statistics'] self.setGraphVal('php_opc_opcache_statistics', 'hits', st['hits']) self.setGraphVal('php_opc_opcache_statistics', 'misses', st['misses']) if self.hasGraph('php_opc_opcache_hitrate') and stats: st = stats['opcache_statistics'] self.setGraphVal('php_opc_opcache_hitrate', 'opcache_hit_rate', st['opcache_hit_rate']) if self.hasGraph('php_opc_key_status') and stats: st = stats['opcache_statistics'] wasted = st['num_cached_keys'] - st['num_cached_scripts'] free = st['max_cached_keys'] - st['num_cached_keys'] self.setGraphVal('php_opc_key_status', 'num_cached_scripts', st['num_cached_scripts']) self.setGraphVal('php_opc_key_status', 'num_wasted_keys', wasted) self.setGraphVal('php_opc_key_status', 'num_free_keys', free)
python
def retrieveVals(self): """Retrieve values for graphs.""" opcinfo = OPCinfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl) stats = opcinfo.getAllStats() if self.hasGraph('php_opc_memory') and stats: mem = stats['memory_usage'] keys = ('used_memory', 'wasted_memory', 'free_memory') map(lambda k:self.setGraphVal('php_opc_memory',k,mem[k]), keys) if self.hasGraph('php_opc_opcache_statistics') and stats: st = stats['opcache_statistics'] self.setGraphVal('php_opc_opcache_statistics', 'hits', st['hits']) self.setGraphVal('php_opc_opcache_statistics', 'misses', st['misses']) if self.hasGraph('php_opc_opcache_hitrate') and stats: st = stats['opcache_statistics'] self.setGraphVal('php_opc_opcache_hitrate', 'opcache_hit_rate', st['opcache_hit_rate']) if self.hasGraph('php_opc_key_status') and stats: st = stats['opcache_statistics'] wasted = st['num_cached_keys'] - st['num_cached_scripts'] free = st['max_cached_keys'] - st['num_cached_keys'] self.setGraphVal('php_opc_key_status', 'num_cached_scripts', st['num_cached_scripts']) self.setGraphVal('php_opc_key_status', 'num_wasted_keys', wasted) self.setGraphVal('php_opc_key_status', 'num_free_keys', free)
[ "def", "retrieveVals", "(", "self", ")", ":", "opcinfo", "=", "OPCinfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_monpath", ",", "self", ".", "_ssl", ")", "stats", "=", "opcinfo", ".", "getAllStats", "(", ")", "if", "self", ".", "hasGraph", "(", "'php_opc_memory'", ")", "and", "stats", ":", "mem", "=", "stats", "[", "'memory_usage'", "]", "keys", "=", "(", "'used_memory'", ",", "'wasted_memory'", ",", "'free_memory'", ")", "map", "(", "lambda", "k", ":", "self", ".", "setGraphVal", "(", "'php_opc_memory'", ",", "k", ",", "mem", "[", "k", "]", ")", ",", "keys", ")", "if", "self", ".", "hasGraph", "(", "'php_opc_opcache_statistics'", ")", "and", "stats", ":", "st", "=", "stats", "[", "'opcache_statistics'", "]", "self", ".", "setGraphVal", "(", "'php_opc_opcache_statistics'", ",", "'hits'", ",", "st", "[", "'hits'", "]", ")", "self", ".", "setGraphVal", "(", "'php_opc_opcache_statistics'", ",", "'misses'", ",", "st", "[", "'misses'", "]", ")", "if", "self", ".", "hasGraph", "(", "'php_opc_opcache_hitrate'", ")", "and", "stats", ":", "st", "=", "stats", "[", "'opcache_statistics'", "]", "self", ".", "setGraphVal", "(", "'php_opc_opcache_hitrate'", ",", "'opcache_hit_rate'", ",", "st", "[", "'opcache_hit_rate'", "]", ")", "if", "self", ".", "hasGraph", "(", "'php_opc_key_status'", ")", "and", "stats", ":", "st", "=", "stats", "[", "'opcache_statistics'", "]", "wasted", "=", "st", "[", "'num_cached_keys'", "]", "-", "st", "[", "'num_cached_scripts'", "]", "free", "=", "st", "[", "'max_cached_keys'", "]", "-", "st", "[", "'num_cached_keys'", "]", "self", ".", "setGraphVal", "(", "'php_opc_key_status'", ",", "'num_cached_scripts'", ",", "st", "[", "'num_cached_scripts'", "]", ")", "self", ".", "setGraphVal", "(", "'php_opc_key_status'", ",", "'num_wasted_keys'", ",", "wasted", ")", "self", ".", "setGraphVal", "(", "'php_opc_key_status'", ",", "'num_free_keys'", ",", "free", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/phpopcstats.py#L148-L177
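One portability note on the retrieveVals() above: the map() over a side-effecting lambda only executes eagerly under Python 2. On Python 3, map() returns a lazy iterator, so that line would silently set nothing. A standalone demonstration of the pitfall (no PyMunin required):

calls = []
map(calls.append, [1, 2, 3])          # Python 3: a lazy iterator that is never consumed
print(calls)                          # [] on Python 3, [1, 2, 3] on Python 2
list(map(calls.append, [1, 2, 3]))    # forcing the iterator runs the side effects
print(calls)                          # [1, 2, 3] on Python 3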
aouyar/PyMunin
pymunin/plugins/phpopcstats.py
MuninPHPOPCPlugin.autoconf
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ opcinfo = OPCinfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl) return opcinfo is not None
python
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ opcinfo = OPCinfo(self._host, self._port, self._user, self._password, self._monpath, self._ssl) return opcinfo is not None
[ "def", "autoconf", "(", "self", ")", ":", "opcinfo", "=", "OPCinfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_monpath", ",", "self", ".", "_ssl", ")", "return", "opcinfo", "is", "not", "None" ]
Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise.
[ "Implements", "Munin", "Plugin", "Auto", "-", "Configuration", "Option", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/phpopcstats.py#L179-L187
ContextLab/quail
quail/analysis/clustering.py
fingerprint_helper
def fingerprint_helper(egg, permute=False, n_perms=1000, match='exact', distance='euclidean', features=None): """ Computes clustering along a set of feature dimensions Parameters ---------- egg : quail.Egg Data to analyze dist_funcs : dict Dictionary of distance functions for feature clustering analyses Returns ---------- probabilities : Numpy array Each number represents clustering along a different feature dimension """ if features is None: features = egg.dist_funcs.keys() inds = egg.pres.index.tolist() slices = [egg.crack(subjects=[i], lists=[j]) for i, j in inds] weights = _get_weights(slices, features, distdict, permute, n_perms, match, distance) return np.nanmean(weights, axis=0)
python
def fingerprint_helper(egg, permute=False, n_perms=1000, match='exact', distance='euclidean', features=None): """ Computes clustering along a set of feature dimensions Parameters ---------- egg : quail.Egg Data to analyze dist_funcs : dict Dictionary of distance functions for feature clustering analyses Returns ---------- probabilities : Numpy array Each number represents clustering along a different feature dimension """ if features is None: features = egg.dist_funcs.keys() inds = egg.pres.index.tolist() slices = [egg.crack(subjects=[i], lists=[j]) for i, j in inds] weights = _get_weights(slices, features, distdict, permute, n_perms, match, distance) return np.nanmean(weights, axis=0)
[ "def", "fingerprint_helper", "(", "egg", ",", "permute", "=", "False", ",", "n_perms", "=", "1000", ",", "match", "=", "'exact'", ",", "distance", "=", "'euclidean'", ",", "features", "=", "None", ")", ":", "if", "features", "is", "None", ":", "features", "=", "egg", ".", "dist_funcs", ".", "keys", "(", ")", "inds", "=", "egg", ".", "pres", ".", "index", ".", "tolist", "(", ")", "slices", "=", "[", "egg", ".", "crack", "(", "subjects", "=", "[", "i", "]", ",", "lists", "=", "[", "j", "]", ")", "for", "i", ",", "j", "in", "inds", "]", "weights", "=", "_get_weights", "(", "slices", ",", "features", ",", "distdict", ",", "permute", ",", "n_perms", ",", "match", ",", "distance", ")", "return", "np", ".", "nanmean", "(", "weights", ",", "axis", "=", "0", ")" ]
Computes clustering along a set of feature dimensions Parameters ---------- egg : quail.Egg Data to analyze dist_funcs : dict Dictionary of distance functions for feature clustering analyses Returns ---------- probabilities : Numpy array Each number represents clustering along a different feature dimension
[ "Computes", "clustering", "along", "a", "set", "of", "feature", "dimensions" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/analysis/clustering.py#L9-L37
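For context, fingerprint_helper() is normally reached through quail's top-level API rather than called directly (distdict, referenced inside it, is presumably a module-level distance registry). A hedged usage sketch, assuming quail is installed and that load_example_data() and analyze() behave as quail's documentation describes:

import quail  # assumption: quail's documented public entry points

egg = quail.load_example_data()                   # bundled demo Egg
fp = quail.analyze(egg, analysis='fingerprint')   # routes into the helper above
print(fp)  # one clustering score per feature dimension, higher = more clustered recall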
ContextLab/quail
quail/analysis/clustering.py
compute_feature_weights
def compute_feature_weights(pres_list, rec_list, feature_list, distances): """ Compute clustering scores along a set of feature dimensions Parameters ---------- pres_list : list list of presented words rec_list : list list of recalled words feature_list : list list of feature dicts for presented words distances : dict dict of distance matrices for each feature Returns ---------- weights : list list of clustering scores for each feature dimension """ # initialize the weights object for just this list weights = {} for feature in feature_list[0]: weights[feature] = [] # return default list if there is not enough data to compute the fingerprint if len(rec_list) <= 2: print('Not enough recalls to compute fingerprint, returning default' 'fingerprint.. (everything is .5)') for feature in feature_list[0]: weights[feature] = .5 return [weights[key] for key in weights] # initialize past word list past_words = [] past_idxs = [] # loop over words for i in range(len(rec_list)-1): # grab current word c = rec_list[i] # grab the next word n = rec_list[i + 1] # if both recalled words are in the encoding list and haven't been recalled before if (c in pres_list and n in pres_list) and (c not in past_words and n not in past_words): # for each feature for feature in feature_list[0]: # get the distance vector for the current word dists = distances[feature][pres_list.index(c),:] # distance between current and next word cdist = dists[pres_list.index(n)] # filter dists removing the words that have already been recalled dists_filt = np.array([dist for idx, dist in enumerate(dists) if idx not in past_idxs]) # get indices avg_rank = np.mean(np.where(np.sort(dists_filt)[::-1] == cdist)[0]+1) # compute the weight weights[feature].append(avg_rank / len(dists_filt)) # keep track of what has been recalled already past_idxs.append(pres_list.index(c)) past_words.append(c) # average over the cluster scores for a particular dimension for feature in weights: with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) weights[feature] = np.nanmean(weights[feature]) return [weights[key] for key in weights]
python
def compute_feature_weights(pres_list, rec_list, feature_list, distances): """ Compute clustering scores along a set of feature dimensions Parameters ---------- pres_list : list list of presented words rec_list : list list of recalled words feature_list : list list of feature dicts for presented words distances : dict dict of distance matrices for each feature Returns ---------- weights : list list of clustering scores for each feature dimension """ # initialize the weights object for just this list weights = {} for feature in feature_list[0]: weights[feature] = [] # return default list if there is not enough data to compute the fingerprint if len(rec_list) <= 2: print('Not enough recalls to compute fingerprint, returning default' 'fingerprint.. (everything is .5)') for feature in feature_list[0]: weights[feature] = .5 return [weights[key] for key in weights] # initialize past word list past_words = [] past_idxs = [] # loop over words for i in range(len(rec_list)-1): # grab current word c = rec_list[i] # grab the next word n = rec_list[i + 1] # if both recalled words are in the encoding list and haven't been recalled before if (c in pres_list and n in pres_list) and (c not in past_words and n not in past_words): # for each feature for feature in feature_list[0]: # get the distance vector for the current word dists = distances[feature][pres_list.index(c),:] # distance between current and next word cdist = dists[pres_list.index(n)] # filter dists removing the words that have already been recalled dists_filt = np.array([dist for idx, dist in enumerate(dists) if idx not in past_idxs]) # get indices avg_rank = np.mean(np.where(np.sort(dists_filt)[::-1] == cdist)[0]+1) # compute the weight weights[feature].append(avg_rank / len(dists_filt)) # keep track of what has been recalled already past_idxs.append(pres_list.index(c)) past_words.append(c) # average over the cluster scores for a particular dimension for feature in weights: with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) weights[feature] = np.nanmean(weights[feature]) return [weights[key] for key in weights]
[ "def", "compute_feature_weights", "(", "pres_list", ",", "rec_list", ",", "feature_list", ",", "distances", ")", ":", "# initialize the weights object for just this list", "weights", "=", "{", "}", "for", "feature", "in", "feature_list", "[", "0", "]", ":", "weights", "[", "feature", "]", "=", "[", "]", "# return default list if there is not enough data to compute the fingerprint", "if", "len", "(", "rec_list", ")", "<=", "2", ":", "print", "(", "'Not enough recalls to compute fingerprint, returning default'", "'fingerprint.. (everything is .5)'", ")", "for", "feature", "in", "feature_list", "[", "0", "]", ":", "weights", "[", "feature", "]", "=", ".5", "return", "[", "weights", "[", "key", "]", "for", "key", "in", "weights", "]", "# initialize past word list", "past_words", "=", "[", "]", "past_idxs", "=", "[", "]", "# loop over words", "for", "i", "in", "range", "(", "len", "(", "rec_list", ")", "-", "1", ")", ":", "# grab current word", "c", "=", "rec_list", "[", "i", "]", "# grab the next word", "n", "=", "rec_list", "[", "i", "+", "1", "]", "# if both recalled words are in the encoding list and haven't been recalled before", "if", "(", "c", "in", "pres_list", "and", "n", "in", "pres_list", ")", "and", "(", "c", "not", "in", "past_words", "and", "n", "not", "in", "past_words", ")", ":", "# for each feature", "for", "feature", "in", "feature_list", "[", "0", "]", ":", "# get the distance vector for the current word", "dists", "=", "distances", "[", "feature", "]", "[", "pres_list", ".", "index", "(", "c", ")", ",", ":", "]", "# distance between current and next word", "cdist", "=", "dists", "[", "pres_list", ".", "index", "(", "n", ")", "]", "# filter dists removing the words that have already been recalled", "dists_filt", "=", "np", ".", "array", "(", "[", "dist", "for", "idx", ",", "dist", "in", "enumerate", "(", "dists", ")", "if", "idx", "not", "in", "past_idxs", "]", ")", "# get indices", "avg_rank", "=", "np", ".", "mean", "(", "np", ".", "where", "(", "np", ".", "sort", "(", "dists_filt", ")", "[", ":", ":", "-", "1", "]", "==", "cdist", ")", "[", "0", "]", "+", "1", ")", "# compute the weight", "weights", "[", "feature", "]", ".", "append", "(", "avg_rank", "/", "len", "(", "dists_filt", ")", ")", "# keep track of what has been recalled already", "past_idxs", ".", "append", "(", "pres_list", ".", "index", "(", "c", ")", ")", "past_words", ".", "append", "(", "c", ")", "# average over the cluster scores for a particular dimension", "for", "feature", "in", "weights", ":", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ",", "category", "=", "RuntimeWarning", ")", "weights", "[", "feature", "]", "=", "np", ".", "nanmean", "(", "weights", "[", "feature", "]", ")", "return", "[", "weights", "[", "key", "]", "for", "key", "in", "weights", "]" ]
Compute clustering scores along a set of feature dimensions Parameters ---------- pres_list : list list of presented words rec_list : list list of recalled words feature_list : list list of feature dicts for presented words distances : dict dict of distance matrices for each feature Returns ---------- weights : list list of clustering scores for each feature dimension
[ "Compute", "clustering", "scores", "along", "a", "set", "of", "feature", "dimensions" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/analysis/clustering.py#L137-L218
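The core of compute_feature_weights() is the rank-and-normalize step: each recall transition is scored by where the next item's distance falls among the still-available items, largest distance first. A self-contained toy with one feature dimension and invented distances:

import numpy as np

dists = np.array([0.0, 0.2, 0.9, 0.5])   # invented distances from the just-recalled word
cdist = dists[1]                          # 0.2: distance to the word recalled next

# Rank cdist among the available words, sorted largest-first, exactly as above:
avg_rank = np.mean(np.where(np.sort(dists)[::-1] == cdist)[0] + 1)   # rank 3 of 4
print(avg_rank / len(dists))              # 0.75 -> a fairly "clustered" transition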
ContextLab/quail
quail/analysis/lagcrp.py
lagcrp_helper
def lagcrp_helper(egg, match='exact', distance='euclidean', ts=None, features=None): """ Computes probabilities for each transition distance (probability that a word recalled will be a given distance--in presentation order--from the previous recalled word). Parameters ---------- egg : quail.Egg Data to analyze match : str (exact, best or smooth) Matching approach to compute recall matrix. If exact, the presented and recalled items must be identical (default). If best, the recalled item that is most similar to the presented items will be selected. If smooth, a weighted average of all presented items will be used, where the weights are derived from the similarity between the recalled item and each presented item. distance : str The distance function used to compare presented and recalled items. Applies only to 'best' and 'smooth' matching approaches. Can be any distance function supported by numpy.spatial.distance.cdist. Returns ---------- prec : numpy array each float is the probability of transition distance (distnaces indexed by position, from -(n-1) to (n-1), excluding zero """ def lagcrp(rec, lstlen): """Computes lag-crp for a given recall list""" def check_pair(a, b): if (a>0 and b>0) and (a!=b): return True else: return False def compute_actual(rec, lstlen): arr=pd.Series(data=np.zeros((lstlen)*2), index=list(range(-lstlen,0))+list(range(1,lstlen+1))) recalled=[] for trial in range(0,len(rec)-1): a=rec[trial] b=rec[trial+1] if check_pair(a, b) and (a not in recalled) and (b not in recalled): arr[b-a]+=1 recalled.append(a) return arr def compute_possible(rec, lstlen): arr=pd.Series(data=np.zeros((lstlen)*2), index=list(range(-lstlen,0))+list(range(1,lstlen+1))) recalled=[] for trial in rec: if np.isnan(trial): pass else: lbound=int(1-trial) ubound=int(lstlen-trial) chances=list(range(lbound,0))+list(range(1,ubound+1)) for each in recalled: if each-trial in chances: chances.remove(each-trial) arr[chances]+=1 recalled.append(trial) return arr actual = compute_actual(rec, lstlen) possible = compute_possible(rec, lstlen) crp = [0.0 if j == 0 else i / j for i, j in zip(actual, possible)] crp.insert(int(len(crp) / 2), np.nan) return crp def nlagcrp(distmat, ts=None): def lagcrp_model(s): idx = list(range(0, -s, -1)) return np.array([list(range(i, i+s)) for i in idx]) # remove nan columns distmat = distmat[:,~np.all(np.isnan(distmat), axis=0)].T model = lagcrp_model(distmat.shape[1]) lagcrp = np.zeros(ts * 2) for rdx in range(len(distmat)-1): item = distmat[rdx, :] next_item = distmat[rdx+1, :] if not np.isnan(item).any() and not np.isnan(next_item).any(): outer = np.outer(item, next_item) lagcrp += np.array(list(map(lambda lag: np.mean(outer[model==lag]), range(-ts, ts)))) lagcrp /= ts lagcrp = list(lagcrp) lagcrp.insert(int(len(lagcrp) / 2), np.nan) return np.array(lagcrp) def _format(p, r): p = np.matrix([np.array(i) for i in p]) if p.shape[0]==1: p=p.T r = map(lambda x: [np.nan]*p.shape[1] if check_nan(x) else x, r) r = np.matrix([np.array(i) for i in r]) if r.shape[0]==1: r=r.T return p, r opts = dict(match=match, distance=distance, features=features) if match is 'exact': opts.update({'features' : 'item'}) recmat = recall_matrix(egg, **opts) if not ts: ts = egg.pres.shape[1] if match in ['exact', 'best']: lagcrp = [lagcrp(lst, egg.list_length) for lst in recmat] elif match is 'smooth': lagcrp = np.atleast_2d(np.mean([nlagcrp(r, ts=ts) for r in recmat], 0)) else: raise ValueError('Match must be set to exact, best or smooth.') return np.nanmean(lagcrp, axis=0)
python
def lagcrp_helper(egg, match='exact', distance='euclidean', ts=None, features=None): """ Computes probabilities for each transition distance (probability that a word recalled will be a given distance--in presentation order--from the previous recalled word). Parameters ---------- egg : quail.Egg Data to analyze match : str (exact, best or smooth) Matching approach to compute recall matrix. If exact, the presented and recalled items must be identical (default). If best, the recalled item that is most similar to the presented items will be selected. If smooth, a weighted average of all presented items will be used, where the weights are derived from the similarity between the recalled item and each presented item. distance : str The distance function used to compare presented and recalled items. Applies only to 'best' and 'smooth' matching approaches. Can be any distance function supported by numpy.spatial.distance.cdist. Returns ---------- prec : numpy array each float is the probability of transition distance (distnaces indexed by position, from -(n-1) to (n-1), excluding zero """ def lagcrp(rec, lstlen): """Computes lag-crp for a given recall list""" def check_pair(a, b): if (a>0 and b>0) and (a!=b): return True else: return False def compute_actual(rec, lstlen): arr=pd.Series(data=np.zeros((lstlen)*2), index=list(range(-lstlen,0))+list(range(1,lstlen+1))) recalled=[] for trial in range(0,len(rec)-1): a=rec[trial] b=rec[trial+1] if check_pair(a, b) and (a not in recalled) and (b not in recalled): arr[b-a]+=1 recalled.append(a) return arr def compute_possible(rec, lstlen): arr=pd.Series(data=np.zeros((lstlen)*2), index=list(range(-lstlen,0))+list(range(1,lstlen+1))) recalled=[] for trial in rec: if np.isnan(trial): pass else: lbound=int(1-trial) ubound=int(lstlen-trial) chances=list(range(lbound,0))+list(range(1,ubound+1)) for each in recalled: if each-trial in chances: chances.remove(each-trial) arr[chances]+=1 recalled.append(trial) return arr actual = compute_actual(rec, lstlen) possible = compute_possible(rec, lstlen) crp = [0.0 if j == 0 else i / j for i, j in zip(actual, possible)] crp.insert(int(len(crp) / 2), np.nan) return crp def nlagcrp(distmat, ts=None): def lagcrp_model(s): idx = list(range(0, -s, -1)) return np.array([list(range(i, i+s)) for i in idx]) # remove nan columns distmat = distmat[:,~np.all(np.isnan(distmat), axis=0)].T model = lagcrp_model(distmat.shape[1]) lagcrp = np.zeros(ts * 2) for rdx in range(len(distmat)-1): item = distmat[rdx, :] next_item = distmat[rdx+1, :] if not np.isnan(item).any() and not np.isnan(next_item).any(): outer = np.outer(item, next_item) lagcrp += np.array(list(map(lambda lag: np.mean(outer[model==lag]), range(-ts, ts)))) lagcrp /= ts lagcrp = list(lagcrp) lagcrp.insert(int(len(lagcrp) / 2), np.nan) return np.array(lagcrp) def _format(p, r): p = np.matrix([np.array(i) for i in p]) if p.shape[0]==1: p=p.T r = map(lambda x: [np.nan]*p.shape[1] if check_nan(x) else x, r) r = np.matrix([np.array(i) for i in r]) if r.shape[0]==1: r=r.T return p, r opts = dict(match=match, distance=distance, features=features) if match is 'exact': opts.update({'features' : 'item'}) recmat = recall_matrix(egg, **opts) if not ts: ts = egg.pres.shape[1] if match in ['exact', 'best']: lagcrp = [lagcrp(lst, egg.list_length) for lst in recmat] elif match is 'smooth': lagcrp = np.atleast_2d(np.mean([nlagcrp(r, ts=ts) for r in recmat], 0)) else: raise ValueError('Match must be set to exact, best or smooth.') return np.nanmean(lagcrp, axis=0)
[ "def", "lagcrp_helper", "(", "egg", ",", "match", "=", "'exact'", ",", "distance", "=", "'euclidean'", ",", "ts", "=", "None", ",", "features", "=", "None", ")", ":", "def", "lagcrp", "(", "rec", ",", "lstlen", ")", ":", "\"\"\"Computes lag-crp for a given recall list\"\"\"", "def", "check_pair", "(", "a", ",", "b", ")", ":", "if", "(", "a", ">", "0", "and", "b", ">", "0", ")", "and", "(", "a", "!=", "b", ")", ":", "return", "True", "else", ":", "return", "False", "def", "compute_actual", "(", "rec", ",", "lstlen", ")", ":", "arr", "=", "pd", ".", "Series", "(", "data", "=", "np", ".", "zeros", "(", "(", "lstlen", ")", "*", "2", ")", ",", "index", "=", "list", "(", "range", "(", "-", "lstlen", ",", "0", ")", ")", "+", "list", "(", "range", "(", "1", ",", "lstlen", "+", "1", ")", ")", ")", "recalled", "=", "[", "]", "for", "trial", "in", "range", "(", "0", ",", "len", "(", "rec", ")", "-", "1", ")", ":", "a", "=", "rec", "[", "trial", "]", "b", "=", "rec", "[", "trial", "+", "1", "]", "if", "check_pair", "(", "a", ",", "b", ")", "and", "(", "a", "not", "in", "recalled", ")", "and", "(", "b", "not", "in", "recalled", ")", ":", "arr", "[", "b", "-", "a", "]", "+=", "1", "recalled", ".", "append", "(", "a", ")", "return", "arr", "def", "compute_possible", "(", "rec", ",", "lstlen", ")", ":", "arr", "=", "pd", ".", "Series", "(", "data", "=", "np", ".", "zeros", "(", "(", "lstlen", ")", "*", "2", ")", ",", "index", "=", "list", "(", "range", "(", "-", "lstlen", ",", "0", ")", ")", "+", "list", "(", "range", "(", "1", ",", "lstlen", "+", "1", ")", ")", ")", "recalled", "=", "[", "]", "for", "trial", "in", "rec", ":", "if", "np", ".", "isnan", "(", "trial", ")", ":", "pass", "else", ":", "lbound", "=", "int", "(", "1", "-", "trial", ")", "ubound", "=", "int", "(", "lstlen", "-", "trial", ")", "chances", "=", "list", "(", "range", "(", "lbound", ",", "0", ")", ")", "+", "list", "(", "range", "(", "1", ",", "ubound", "+", "1", ")", ")", "for", "each", "in", "recalled", ":", "if", "each", "-", "trial", "in", "chances", ":", "chances", ".", "remove", "(", "each", "-", "trial", ")", "arr", "[", "chances", "]", "+=", "1", "recalled", ".", "append", "(", "trial", ")", "return", "arr", "actual", "=", "compute_actual", "(", "rec", ",", "lstlen", ")", "possible", "=", "compute_possible", "(", "rec", ",", "lstlen", ")", "crp", "=", "[", "0.0", "if", "j", "==", "0", "else", "i", "/", "j", "for", "i", ",", "j", "in", "zip", "(", "actual", ",", "possible", ")", "]", "crp", ".", "insert", "(", "int", "(", "len", "(", "crp", ")", "/", "2", ")", ",", "np", ".", "nan", ")", "return", "crp", "def", "nlagcrp", "(", "distmat", ",", "ts", "=", "None", ")", ":", "def", "lagcrp_model", "(", "s", ")", ":", "idx", "=", "list", "(", "range", "(", "0", ",", "-", "s", ",", "-", "1", ")", ")", "return", "np", ".", "array", "(", "[", "list", "(", "range", "(", "i", ",", "i", "+", "s", ")", ")", "for", "i", "in", "idx", "]", ")", "# remove nan columns", "distmat", "=", "distmat", "[", ":", ",", "~", "np", ".", "all", "(", "np", ".", "isnan", "(", "distmat", ")", ",", "axis", "=", "0", ")", "]", ".", "T", "model", "=", "lagcrp_model", "(", "distmat", ".", "shape", "[", "1", "]", ")", "lagcrp", "=", "np", ".", "zeros", "(", "ts", "*", "2", ")", "for", "rdx", "in", "range", "(", "len", "(", "distmat", ")", "-", "1", ")", ":", "item", "=", "distmat", "[", "rdx", ",", ":", "]", "next_item", "=", "distmat", "[", "rdx", "+", "1", ",", ":", "]", "if", "not", "np", ".", "isnan", "(", "item", ")", ".", "any", "(", ")", "and", "not", "np", ".", "isnan", "(", "next_item", ")", ".", "any", "(", ")", ":", "outer", "=", "np", ".", "outer", "(", "item", ",", "next_item", ")", "lagcrp", "+=", "np", ".", "array", "(", "list", "(", "map", "(", "lambda", "lag", ":", "np", ".", "mean", "(", "outer", "[", "model", "==", "lag", "]", ")", ",", "range", "(", "-", "ts", ",", "ts", ")", ")", ")", ")", "lagcrp", "/=", "ts", "lagcrp", "=", "list", "(", "lagcrp", ")", "lagcrp", ".", "insert", "(", "int", "(", "len", "(", "lagcrp", ")", "/", "2", ")", ",", "np", ".", "nan", ")", "return", "np", ".", "array", "(", "lagcrp", ")", "def", "_format", "(", "p", ",", "r", ")", ":", "p", "=", "np", ".", "matrix", "(", "[", "np", ".", "array", "(", "i", ")", "for", "i", "in", "p", "]", ")", "if", "p", ".", "shape", "[", "0", "]", "==", "1", ":", "p", "=", "p", ".", "T", "r", "=", "map", "(", "lambda", "x", ":", "[", "np", ".", "nan", "]", "*", "p", ".", "shape", "[", "1", "]", "if", "check_nan", "(", "x", ")", "else", "x", ",", "r", ")", "r", "=", "np", ".", "matrix", "(", "[", "np", ".", "array", "(", "i", ")", "for", "i", "in", "r", "]", ")", "if", "r", ".", "shape", "[", "0", "]", "==", "1", ":", "r", "=", "r", ".", "T", "return", "p", ",", "r", "opts", "=", "dict", "(", "match", "=", "match", ",", "distance", "=", "distance", ",", "features", "=", "features", ")", "if", "match", "is", "'exact'", ":", "opts", ".", "update", "(", "{", "'features'", ":", "'item'", "}", ")", "recmat", "=", "recall_matrix", "(", "egg", ",", "*", "*", "opts", ")", "if", "not", "ts", ":", "ts", "=", "egg", ".", "pres", ".", "shape", "[", "1", "]", "if", "match", "in", "[", "'exact'", ",", "'best'", "]", ":", "lagcrp", "=", "[", "lagcrp", "(", "lst", ",", "egg", ".", "list_length", ")", "for", "lst", "in", "recmat", "]", "elif", "match", "is", "'smooth'", ":", "lagcrp", "=", "np", ".", "atleast_2d", "(", "np", ".", "mean", "(", "[", "nlagcrp", "(", "r", ",", "ts", "=", "ts", ")", "for", "r", "in", "recmat", "]", ",", "0", ")", ")", "else", ":", "raise", "ValueError", "(", "'Match must be set to exact, best or smooth.'", ")", "return", "np", ".", "nanmean", "(", "lagcrp", ",", "axis", "=", "0", ")" ]
Computes probabilities for each transition distance (probability that a word recalled will be a given distance--in presentation order--from the previous recalled word). Parameters ---------- egg : quail.Egg Data to analyze match : str (exact, best or smooth) Matching approach to compute recall matrix. If exact, the presented and recalled items must be identical (default). If best, the recalled item that is most similar to the presented items will be selected. If smooth, a weighted average of all presented items will be used, where the weights are derived from the similarity between the recalled item and each presented item. distance : str The distance function used to compare presented and recalled items. Applies only to 'best' and 'smooth' matching approaches. Can be any distance function supported by scipy.spatial.distance.cdist. Returns ---------- prec : numpy array each float is the probability of transition distance (distances indexed by position, from -(n-1) to (n-1), excluding zero)
[ "Computes", "probabilities", "for", "each", "transition", "distance", "(", "probability", "that", "a", "word", "recalled", "will", "be", "a", "given", "distance", "--", "in", "presentation", "order", "--", "from", "the", "previous", "recalled", "word", ")", "." ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/analysis/lagcrp.py#L7-L129
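Two notes on lagcrp_helper(). First, `match is 'exact'` compares strings by identity and only works through CPython's interning of short literals; `match == 'exact'` is the safe spelling. Second, the actual/possible bookkeeping is easier to see on a toy list; the sketch below mirrors the inner logic with invented data (it is not quail's API):

import numpy as np
import pandas as pd

rec, lstlen = [2, 3, 1], 4                       # recall order over a 4-item list
lags = list(range(-lstlen, 0)) + list(range(1, lstlen + 1))
actual = pd.Series(0.0, index=lags)
possible = pd.Series(0.0, index=lags)

seen = []
for a, b in zip(rec, rec[1:]):                   # observed transition lags
    if a not in seen and b not in seen:
        actual[b - a] += 1
    seen.append(a)

seen = []
for r in rec:                                    # lags still available at each recall
    chances = [l for l in range(1 - r, lstlen - r + 1)
               if l != 0 and (l + r) not in seen]
    possible[chances] += 1
    seen.append(r)

print((actual / possible.replace(0, np.nan)).dropna())   # CRP(+1)=0.5, CRP(-2)=1.0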
aouyar/PyMunin
pymunin/plugins/diskiostats.py
MuninDiskIOplugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" if self._diskList: self._fetchDevAll('disk', self._diskList, self._info.getDiskStats) if self._mdList: self._fetchDevAll('md', self._mdList, self._info.getMDstats) if self._partList: self._fetchDevAll('part', self._partList, self._info.getPartitionStats) if self._lvList: self._fetchDevAll('lv', self._lvList, self._info.getLVstats) self._fetchDevAll('fs', self._fsList, self._info.getFilesystemStats)
python
def retrieveVals(self): """Retrieve values for graphs.""" if self._diskList: self._fetchDevAll('disk', self._diskList, self._info.getDiskStats) if self._mdList: self._fetchDevAll('md', self._mdList, self._info.getMDstats) if self._partList: self._fetchDevAll('part', self._partList, self._info.getPartitionStats) if self._lvList: self._fetchDevAll('lv', self._lvList, self._info.getLVstats) self._fetchDevAll('fs', self._fsList, self._info.getFilesystemStats)
[ "def", "retrieveVals", "(", "self", ")", ":", "if", "self", ".", "_diskList", ":", "self", ".", "_fetchDevAll", "(", "'disk'", ",", "self", ".", "_diskList", ",", "self", ".", "_info", ".", "getDiskStats", ")", "if", "self", ".", "_mdList", ":", "self", ".", "_fetchDevAll", "(", "'md'", ",", "self", ".", "_mdList", ",", "self", ".", "_info", ".", "getMDstats", ")", "if", "self", ".", "_partList", ":", "self", ".", "_fetchDevAll", "(", "'part'", ",", "self", ".", "_partList", ",", "self", ".", "_info", ".", "getPartitionStats", ")", "if", "self", ".", "_lvList", ":", "self", ".", "_fetchDevAll", "(", "'lv'", ",", "self", ".", "_lvList", ",", "self", ".", "_info", ".", "getLVstats", ")", "self", ".", "_fetchDevAll", "(", "'fs'", ",", "self", ".", "_fsList", ",", "self", ".", "_info", ".", "getFilesystemStats", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/diskiostats.py#L125-L140
aouyar/PyMunin
pymunin/plugins/diskiostats.py
MuninDiskIOplugin._configDevRequests
def _configDevRequests(self, namestr, titlestr, devlist): """Generate configuration for I/O Request stats. @param namestr: Field name component indicating device type. @param titlestr: Title component indicating device type. @param devlist: List of devices. """ name = 'diskio_%s_requests' % namestr if self.graphEnabled(name): graph = MuninGraph('Disk I/O - %s - Requests' % titlestr, self._category, info='Disk I/O - %s Throughput, Read / write requests per second.' % titlestr, args='--base 1000 --lower-limit 0', vlabel='reqs/sec read (-) / write (+)', printf='%6.1lf', autoFixNames = True) for dev in devlist: graph.addField(dev + '_read', fixLabel(dev, maxLabelLenGraphDual, repl = '..', truncend=False, delim = self._labelDelim.get(namestr)), draw='LINE2', type='DERIVE', min=0, graph=False) graph.addField(dev + '_write', fixLabel(dev, maxLabelLenGraphDual, repl = '..', truncend=False, delim = self._labelDelim.get(namestr)), draw='LINE2', type='DERIVE', min=0, negative=(dev + '_read'),info=dev) self.appendGraph(name, graph)
python
def _configDevRequests(self, namestr, titlestr, devlist): """Generate configuration for I/O Request stats. @param namestr: Field name component indicating device type. @param titlestr: Title component indicating device type. @param devlist: List of devices. """ name = 'diskio_%s_requests' % namestr if self.graphEnabled(name): graph = MuninGraph('Disk I/O - %s - Requests' % titlestr, self._category, info='Disk I/O - %s Throughput, Read / write requests per second.' % titlestr, args='--base 1000 --lower-limit 0', vlabel='reqs/sec read (-) / write (+)', printf='%6.1lf', autoFixNames = True) for dev in devlist: graph.addField(dev + '_read', fixLabel(dev, maxLabelLenGraphDual, repl = '..', truncend=False, delim = self._labelDelim.get(namestr)), draw='LINE2', type='DERIVE', min=0, graph=False) graph.addField(dev + '_write', fixLabel(dev, maxLabelLenGraphDual, repl = '..', truncend=False, delim = self._labelDelim.get(namestr)), draw='LINE2', type='DERIVE', min=0, negative=(dev + '_read'),info=dev) self.appendGraph(name, graph)
[ "def", "_configDevRequests", "(", "self", ",", "namestr", ",", "titlestr", ",", "devlist", ")", ":", "name", "=", "'diskio_%s_requests'", "%", "namestr", "if", "self", ".", "graphEnabled", "(", "name", ")", ":", "graph", "=", "MuninGraph", "(", "'Disk I/O - %s - Requests'", "%", "titlestr", ",", "self", ".", "_category", ",", "info", "=", "'Disk I/O - %s Throughput, Read / write requests per second.'", "%", "titlestr", ",", "args", "=", "'--base 1000 --lower-limit 0'", ",", "vlabel", "=", "'reqs/sec read (-) / write (+)'", ",", "printf", "=", "'%6.1lf'", ",", "autoFixNames", "=", "True", ")", "for", "dev", "in", "devlist", ":", "graph", ".", "addField", "(", "dev", "+", "'_read'", ",", "fixLabel", "(", "dev", ",", "maxLabelLenGraphDual", ",", "repl", "=", "'..'", ",", "truncend", "=", "False", ",", "delim", "=", "self", ".", "_labelDelim", ".", "get", "(", "namestr", ")", ")", ",", "draw", "=", "'LINE2'", ",", "type", "=", "'DERIVE'", ",", "min", "=", "0", ",", "graph", "=", "False", ")", "graph", ".", "addField", "(", "dev", "+", "'_write'", ",", "fixLabel", "(", "dev", ",", "maxLabelLenGraphDual", ",", "repl", "=", "'..'", ",", "truncend", "=", "False", ",", "delim", "=", "self", ".", "_labelDelim", ".", "get", "(", "namestr", ")", ")", ",", "draw", "=", "'LINE2'", ",", "type", "=", "'DERIVE'", ",", "min", "=", "0", ",", "negative", "=", "(", "dev", "+", "'_read'", ")", ",", "info", "=", "dev", ")", "self", ".", "appendGraph", "(", "name", ",", "graph", ")" ]
Generate configuration for I/O Request stats. @param namestr: Field name component indicating device type. @param titlestr: Title component indicating device type. @param devlist: List of devices.
[ "Generate", "configuration", "for", "I", "/", "O", "Request", "stats", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/diskiostats.py#L142-L170
aouyar/PyMunin
pymunin/plugins/diskiostats.py
MuninDiskIOplugin._configDevActive
def _configDevActive(self, namestr, titlestr, devlist): """Generate configuration for I/O Queue Length. @param namestr: Field name component indicating device type. @param titlestr: Title component indicating device type. @param devlist: List of devices. """ name = 'diskio_%s_active' % namestr if self.graphEnabled(name): graph = MuninGraph('Disk I/O - %s - Queue Length' % titlestr, self._category, info='Disk I/O - Number of I/O Operations in Progress for every %s.' % titlestr, args='--base 1000 --lower-limit 0', printf='%6.1lf', autoFixNames = True) for dev in devlist: graph.addField(dev, fixLabel(dev, maxLabelLenGraphSimple, repl = '..', truncend=False, delim = self._labelDelim.get(namestr)), draw='AREASTACK', type='GAUGE', info=dev) self.appendGraph(name, graph)
python
def _configDevActive(self, namestr, titlestr, devlist): """Generate configuration for I/O Queue Length. @param namestr: Field name component indicating device type. @param titlestr: Title component indicating device type. @param devlist: List of devices. """ name = 'diskio_%s_active' % namestr if self.graphEnabled(name): graph = MuninGraph('Disk I/O - %s - Queue Length' % titlestr, self._category, info='Disk I/O - Number of I/O Operations in Progress for every %s.' % titlestr, args='--base 1000 --lower-limit 0', printf='%6.1lf', autoFixNames = True) for dev in devlist: graph.addField(dev, fixLabel(dev, maxLabelLenGraphSimple, repl = '..', truncend=False, delim = self._labelDelim.get(namestr)), draw='AREASTACK', type='GAUGE', info=dev) self.appendGraph(name, graph)
[ "def", "_configDevActive", "(", "self", ",", "namestr", ",", "titlestr", ",", "devlist", ")", ":", "name", "=", "'diskio_%s_active'", "%", "namestr", "if", "self", ".", "graphEnabled", "(", "name", ")", ":", "graph", "=", "MuninGraph", "(", "'Disk I/O - %s - Queue Length'", "%", "titlestr", ",", "self", ".", "_category", ",", "info", "=", "'Disk I/O - Number of I/O Operations in Progress for every %s.'", "%", "titlestr", ",", "args", "=", "'--base 1000 --lower-limit 0'", ",", "printf", "=", "'%6.1lf'", ",", "autoFixNames", "=", "True", ")", "for", "dev", "in", "devlist", ":", "graph", ".", "addField", "(", "dev", ",", "fixLabel", "(", "dev", ",", "maxLabelLenGraphSimple", ",", "repl", "=", "'..'", ",", "truncend", "=", "False", ",", "delim", "=", "self", ".", "_labelDelim", ".", "get", "(", "namestr", ")", ")", ",", "draw", "=", "'AREASTACK'", ",", "type", "=", "'GAUGE'", ",", "info", "=", "dev", ")", "self", ".", "appendGraph", "(", "name", ",", "graph", ")" ]
Generate configuration for I/O Queue Length. @param namestr: Field name component indicating device type. @param titlestr: Title component indicating device type. @param devlist: List of devices.
[ "Generate", "configuration", "for", "I", "/", "O", "Queue", "Length", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/diskiostats.py#L202-L224
aouyar/PyMunin
pymunin/plugins/diskiostats.py
MuninDiskIOplugin._fetchDevAll
def _fetchDevAll(self, namestr, devlist, statsfunc): """Initialize I/O stats for devices. @param namestr: Field name component indicating device type. @param devlist: List of devices. @param statsfunc: Function for retrieving stats for device. """ for dev in devlist: stats = statsfunc(dev) name = 'diskio_%s_requests' % namestr if self.hasGraph(name): self.setGraphVal(name, dev + '_read', stats['rios']) self.setGraphVal(name, dev + '_write', stats['wios']) name = 'diskio_%s_bytes' % namestr if self.hasGraph(name): self.setGraphVal(name, dev + '_read', stats['rbytes']) self.setGraphVal(name, dev + '_write', stats['wbytes']) name = 'diskio_%s_active' % namestr if self.hasGraph(name): self.setGraphVal(name, dev, stats['ios_active'])
python
def _fetchDevAll(self, namestr, devlist, statsfunc): """Initialize I/O stats for devices. @param namestr: Field name component indicating device type. @param devlist: List of devices. @param statsfunc: Function for retrieving stats for device. """ for dev in devlist: stats = statsfunc(dev) name = 'diskio_%s_requests' % namestr if self.hasGraph(name): self.setGraphVal(name, dev + '_read', stats['rios']) self.setGraphVal(name, dev + '_write', stats['wios']) name = 'diskio_%s_bytes' % namestr if self.hasGraph(name): self.setGraphVal(name, dev + '_read', stats['rbytes']) self.setGraphVal(name, dev + '_write', stats['wbytes']) name = 'diskio_%s_active' % namestr if self.hasGraph(name): self.setGraphVal(name, dev, stats['ios_active'])
[ "def", "_fetchDevAll", "(", "self", ",", "namestr", ",", "devlist", ",", "statsfunc", ")", ":", "for", "dev", "in", "devlist", ":", "stats", "=", "statsfunc", "(", "dev", ")", "name", "=", "'diskio_%s_requests'", "%", "namestr", "if", "self", ".", "hasGraph", "(", "name", ")", ":", "self", ".", "setGraphVal", "(", "name", ",", "dev", "+", "'_read'", ",", "stats", "[", "'rios'", "]", ")", "self", ".", "setGraphVal", "(", "name", ",", "dev", "+", "'_write'", ",", "stats", "[", "'wios'", "]", ")", "name", "=", "'diskio_%s_bytes'", "%", "namestr", "if", "self", ".", "hasGraph", "(", "name", ")", ":", "self", ".", "setGraphVal", "(", "name", ",", "dev", "+", "'_read'", ",", "stats", "[", "'rbytes'", "]", ")", "self", ".", "setGraphVal", "(", "name", ",", "dev", "+", "'_write'", ",", "stats", "[", "'wbytes'", "]", ")", "name", "=", "'diskio_%s_active'", "%", "namestr", "if", "self", ".", "hasGraph", "(", "name", ")", ":", "self", ".", "setGraphVal", "(", "name", ",", "dev", ",", "stats", "[", "'ios_active'", "]", ")" ]
Initialize I/O stats for devices. @param namestr: Field name component indicating device type. @param devlist: List of devices. @param statsfunc: Function for retrieving stats for device.
[ "Initialize", "I", "/", "O", "stats", "for", "devices", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/diskiostats.py#L226-L246
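The statsfunc callables passed to _fetchDevAll() are expected to return per-device dicts with the key set read above (rios, wios, rbytes, wbytes, ios_active). An invented example of that shape, with made-up counter values:

stats = {'rios': 18250, 'wios': 9120,        # cumulative read / write requests
         'rbytes': 512 * 1024 ** 2,          # cumulative bytes read
         'wbytes': 128 * 1024 ** 2,          # cumulative bytes written
         'ios_active': 2}                    # I/O operations currently in flight
print(stats['rbytes'] // 1024 ** 2, 'MiB read so far')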
aouyar/PyMunin
pymunin/plugins/redisstats.py
RedisPlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" for graph_name in self.getGraphList(): for field_name in self.getGraphFieldList(graph_name): self.setGraphVal(graph_name, field_name, self._stats.get(field_name))
python
def retrieveVals(self): """Retrieve values for graphs.""" for graph_name in self.getGraphList(): for field_name in self.getGraphFieldList(graph_name): self.setGraphVal(graph_name, field_name, self._stats.get(field_name))
[ "def", "retrieveVals", "(", "self", ")", ":", "for", "graph_name", "in", "self", ".", "getGraphList", "(", ")", ":", "for", "field_name", "in", "self", ".", "getGraphFieldList", "(", "graph_name", ")", ":", "self", ".", "setGraphVal", "(", "graph_name", ",", "field_name", ",", "self", ".", "_stats", ".", "get", "(", "field_name", ")", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/redisstats.py#L296-L300
aouyar/PyMunin
pymunin/plugins/sysstats.py
MuninSysStatsPlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" if self.hasGraph('sys_loadavg'): self._loadstats = self._sysinfo.getLoadAvg() if self._loadstats: self.setGraphVal('sys_loadavg', 'load15min', self._loadstats[2]) self.setGraphVal('sys_loadavg', 'load5min', self._loadstats[1]) self.setGraphVal('sys_loadavg', 'load1min', self._loadstats[0]) if self._cpustats and self.hasGraph('sys_cpu_util'): for field in self.getGraphFieldList('sys_cpu_util'): self.setGraphVal('sys_cpu_util', field, int(self._cpustats[field] * 1000)) if self._memstats: if self.hasGraph('sys_mem_util'): for field in self.getGraphFieldList('sys_mem_util'): self.setGraphVal('sys_mem_util', field, self._memstats[field]) if self.hasGraph('sys_mem_avail'): for field in self.getGraphFieldList('sys_mem_avail'): self.setGraphVal('sys_mem_avail', field, self._memstats[field]) if self.hasGraph('sys_mem_huge'): for field in ['Rsvd', 'Surp', 'Free']: fkey = 'HugePages_' + field if self._memstats.has_key(fkey): self.setGraphVal('sys_mem_huge', field, self._memstats[fkey] * self._memstats['Hugepagesize']) if self.hasGraph('sys_processes'): if self._procstats is None: self._procstats = self._sysinfo.getProcessStats() if self._procstats: self.setGraphVal('sys_processes', 'running', self._procstats['procs_running']) self.setGraphVal('sys_processes', 'blocked', self._procstats['procs_blocked']) if self.hasGraph('sys_forks'): if self._procstats is None: self._procstats = self._sysinfo.getProcessStats() if self._procstats: self.setGraphVal('sys_forks', 'forks', self._procstats['processes']) if self.hasGraph('sys_intr_ctxt'): if self._procstats is None: self._procstats = self._sysinfo.getProcessStats() if self._procstats: for field in self.getGraphFieldList('sys_intr_ctxt'): self.setGraphVal('sys_intr_ctxt', field, self._procstats[field]) if self.hasGraph('sys_vm_paging'): if self._vmstats is None: self._vmstats = self._sysinfo.getVMstats() if self._vmstats: self.setGraphVal('sys_vm_paging', 'in', self._vmstats['pgpgin']) self.setGraphVal('sys_vm_paging', 'out', self._vmstats['pgpgout']) if self.hasGraph('sys_vm_swapping'): if self._vmstats is None: self._vmstats = self._sysinfo.getVMstats() if self._vmstats: self.setGraphVal('sys_vm_swapping', 'in', self._vmstats['pswpin']) self.setGraphVal('sys_vm_swapping', 'out', self._vmstats['pswpout'])
python
def retrieveVals(self): """Retrieve values for graphs.""" if self.hasGraph('sys_loadavg'): self._loadstats = self._sysinfo.getLoadAvg() if self._loadstats: self.setGraphVal('sys_loadavg', 'load15min', self._loadstats[2]) self.setGraphVal('sys_loadavg', 'load5min', self._loadstats[1]) self.setGraphVal('sys_loadavg', 'load1min', self._loadstats[0]) if self._cpustats and self.hasGraph('sys_cpu_util'): for field in self.getGraphFieldList('sys_cpu_util'): self.setGraphVal('sys_cpu_util', field, int(self._cpustats[field] * 1000)) if self._memstats: if self.hasGraph('sys_mem_util'): for field in self.getGraphFieldList('sys_mem_util'): self.setGraphVal('sys_mem_util', field, self._memstats[field]) if self.hasGraph('sys_mem_avail'): for field in self.getGraphFieldList('sys_mem_avail'): self.setGraphVal('sys_mem_avail', field, self._memstats[field]) if self.hasGraph('sys_mem_huge'): for field in ['Rsvd', 'Surp', 'Free']: fkey = 'HugePages_' + field if self._memstats.has_key(fkey): self.setGraphVal('sys_mem_huge', field, self._memstats[fkey] * self._memstats['Hugepagesize']) if self.hasGraph('sys_processes'): if self._procstats is None: self._procstats = self._sysinfo.getProcessStats() if self._procstats: self.setGraphVal('sys_processes', 'running', self._procstats['procs_running']) self.setGraphVal('sys_processes', 'blocked', self._procstats['procs_blocked']) if self.hasGraph('sys_forks'): if self._procstats is None: self._procstats = self._sysinfo.getProcessStats() if self._procstats: self.setGraphVal('sys_forks', 'forks', self._procstats['processes']) if self.hasGraph('sys_intr_ctxt'): if self._procstats is None: self._procstats = self._sysinfo.getProcessStats() if self._procstats: for field in self.getGraphFieldList('sys_intr_ctxt'): self.setGraphVal('sys_intr_ctxt', field, self._procstats[field]) if self.hasGraph('sys_vm_paging'): if self._vmstats is None: self._vmstats = self._sysinfo.getVMstats() if self._vmstats: self.setGraphVal('sys_vm_paging', 'in', self._vmstats['pgpgin']) self.setGraphVal('sys_vm_paging', 'out', self._vmstats['pgpgout']) if self.hasGraph('sys_vm_swapping'): if self._vmstats is None: self._vmstats = self._sysinfo.getVMstats() if self._vmstats: self.setGraphVal('sys_vm_swapping', 'in', self._vmstats['pswpin']) self.setGraphVal('sys_vm_swapping', 'out', self._vmstats['pswpout'])
[ "def", "retrieveVals", "(", "self", ")", ":", "if", "self", ".", "hasGraph", "(", "'sys_loadavg'", ")", ":", "self", ".", "_loadstats", "=", "self", ".", "_sysinfo", ".", "getLoadAvg", "(", ")", "if", "self", ".", "_loadstats", ":", "self", ".", "setGraphVal", "(", "'sys_loadavg'", ",", "'load15min'", ",", "self", ".", "_loadstats", "[", "2", "]", ")", "self", ".", "setGraphVal", "(", "'sys_loadavg'", ",", "'load5min'", ",", "self", ".", "_loadstats", "[", "1", "]", ")", "self", ".", "setGraphVal", "(", "'sys_loadavg'", ",", "'load1min'", ",", "self", ".", "_loadstats", "[", "0", "]", ")", "if", "self", ".", "_cpustats", "and", "self", ".", "hasGraph", "(", "'sys_cpu_util'", ")", ":", "for", "field", "in", "self", ".", "getGraphFieldList", "(", "'sys_cpu_util'", ")", ":", "self", ".", "setGraphVal", "(", "'sys_cpu_util'", ",", "field", ",", "int", "(", "self", ".", "_cpustats", "[", "field", "]", "*", "1000", ")", ")", "if", "self", ".", "_memstats", ":", "if", "self", ".", "hasGraph", "(", "'sys_mem_util'", ")", ":", "for", "field", "in", "self", ".", "getGraphFieldList", "(", "'sys_mem_util'", ")", ":", "self", ".", "setGraphVal", "(", "'sys_mem_util'", ",", "field", ",", "self", ".", "_memstats", "[", "field", "]", ")", "if", "self", ".", "hasGraph", "(", "'sys_mem_avail'", ")", ":", "for", "field", "in", "self", ".", "getGraphFieldList", "(", "'sys_mem_avail'", ")", ":", "self", ".", "setGraphVal", "(", "'sys_mem_avail'", ",", "field", ",", "self", ".", "_memstats", "[", "field", "]", ")", "if", "self", ".", "hasGraph", "(", "'sys_mem_huge'", ")", ":", "for", "field", "in", "[", "'Rsvd'", ",", "'Surp'", ",", "'Free'", "]", ":", "fkey", "=", "'HugePages_'", "+", "field", "if", "self", ".", "_memstats", ".", "has_key", "(", "fkey", ")", ":", "self", ".", "setGraphVal", "(", "'sys_mem_huge'", ",", "field", ",", "self", ".", "_memstats", "[", "fkey", "]", "*", "self", ".", "_memstats", "[", "'Hugepagesize'", "]", ")", "if", "self", ".", "hasGraph", "(", "'sys_processes'", ")", ":", "if", "self", ".", "_procstats", "is", "None", ":", "self", ".", "_procstats", "=", "self", ".", "_sysinfo", ".", "getProcessStats", "(", ")", "if", "self", ".", "_procstats", ":", "self", ".", "setGraphVal", "(", "'sys_processes'", ",", "'running'", ",", "self", ".", "_procstats", "[", "'procs_running'", "]", ")", "self", ".", "setGraphVal", "(", "'sys_processes'", ",", "'blocked'", ",", "self", ".", "_procstats", "[", "'procs_blocked'", "]", ")", "if", "self", ".", "hasGraph", "(", "'sys_forks'", ")", ":", "if", "self", ".", "_procstats", "is", "None", ":", "self", ".", "_procstats", "=", "self", ".", "_sysinfo", ".", "getProcessStats", "(", ")", "if", "self", ".", "_procstats", ":", "self", ".", "setGraphVal", "(", "'sys_forks'", ",", "'forks'", ",", "self", ".", "_procstats", "[", "'processes'", "]", ")", "if", "self", ".", "hasGraph", "(", "'sys_intr_ctxt'", ")", ":", "if", "self", ".", "_procstats", "is", "None", ":", "self", ".", "_procstats", "=", "self", ".", "_sysinfo", ".", "getProcessStats", "(", ")", "if", "self", ".", "_procstats", ":", "for", "field", "in", "self", ".", "getGraphFieldList", "(", "'sys_intr_ctxt'", ")", ":", "self", ".", "setGraphVal", "(", "'sys_intr_ctxt'", ",", "field", ",", "self", ".", "_procstats", "[", "field", "]", ")", "if", "self", ".", "hasGraph", "(", "'sys_vm_paging'", ")", ":", "if", "self", ".", "_vmstats", "is", "None", ":", "self", ".", "_vmstats", "=", "self", ".", "_sysinfo", ".", "getVMstats", "(", ")", "if", "self", ".", "_vmstats", ":", "self", ".", "setGraphVal", "(", "'sys_vm_paging'", ",", "'in'", ",", "self", ".", "_vmstats", "[", "'pgpgin'", "]", ")", "self", ".", "setGraphVal", "(", "'sys_vm_paging'", ",", "'out'", ",", "self", ".", "_vmstats", "[", "'pgpgout'", "]", ")", "if", "self", ".", "hasGraph", "(", "'sys_vm_swapping'", ")", ":", "if", "self", ".", "_vmstats", "is", "None", ":", "self", ".", "_vmstats", "=", "self", ".", "_sysinfo", ".", "getVMstats", "(", ")", "if", "self", ".", "_vmstats", ":", "self", ".", "setGraphVal", "(", "'sys_vm_swapping'", ",", "'in'", ",", "self", ".", "_vmstats", "[", "'pswpin'", "]", ")", "self", ".", "setGraphVal", "(", "'sys_vm_swapping'", ",", "'out'", ",", "self", ".", "_vmstats", "[", "'pswpout'", "]", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/sysstats.py#L211-L274
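The has_key() call in the hugepages branch above pins this plugin to Python 2: dict.has_key() was removed in Python 3, while the in operator works in both. A tiny illustration with invented /proc/meminfo-style values:

memstats = {'HugePages_Free': 12, 'Hugepagesize': 2048}   # invented values
fkey = 'HugePages_Free'
if fkey in memstats:                  # portable spelling of memstats.has_key(fkey)
    print(memstats[fkey] * memstats['Hugepagesize'])      # 24576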
mattseymour/python-env
dotenv/__init__.py
get
def get(key, default=None): """ Searches os.environ. If a key is found try evaluating its type else; return the string. returns: k->value (type as defined by ast.literal_eval) """ try: # Attempt to evaluate into python literal return ast.literal_eval(os.environ.get(key.upper(), default)) except (ValueError, SyntaxError): return os.environ.get(key.upper(), default)
python
def get(key, default=None): """ Searches os.environ. If a key is found try evaluating its type else; return the string. returns: k->value (type as defined by ast.literal_eval) """ try: # Attempt to evaluate into python literal return ast.literal_eval(os.environ.get(key.upper(), default)) except (ValueError, SyntaxError): return os.environ.get(key.upper(), default)
[ "def", "get", "(", "key", ",", "default", "=", "None", ")", ":", "try", ":", "# Attempt to evaluate into python literal", "return", "ast", ".", "literal_eval", "(", "os", ".", "environ", ".", "get", "(", "key", ".", "upper", "(", ")", ",", "default", ")", ")", "except", "(", "ValueError", ",", "SyntaxError", ")", ":", "return", "os", ".", "environ", ".", "get", "(", "key", ".", "upper", "(", ")", ",", "default", ")" ]
Searches os.environ. If a key is found, try evaluating its type; otherwise return the string. returns: k->value (type as defined by ast.literal_eval)
[ "Searches", "os", ".", "environ", ".", "If", "a", "key", "is", "found", "try", "evaluating", "its", "type", "else", ";", "return", "the", "string", "." ]
train
https://github.com/mattseymour/python-env/blob/5ac09b1685fbba75c174c79cb40287aa49d0f208/dotenv/__init__.py#L17-L28
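A usage sketch for get() (illustrative; assumes the package is imported as dotenv and the variables are already set). Two details worth seeing concretely: key.upper() means the caller's key is case-insensitive but only uppercase environment names are found, and ast.literal_eval is what coerces '8000' to an int and 'True' to a bool, falling back to the raw string when evaluation fails:

    import os
    import dotenv

    os.environ['PORT'] = '8000'
    os.environ['DEBUG'] = 'True'
    os.environ['APP_NAME'] = 'myapp'

    dotenv.get('port')      # -> 8000 (int, via ast.literal_eval)
    dotenv.get('debug')     # -> True (bool)
    dotenv.get('app_name')  # -> 'myapp' (literal_eval raises, raw string returned)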
mattseymour/python-env
dotenv/__init__.py
save
def save(filepath=None, **kwargs): """ Saves a list of keyword arguments as environment variables to a file. If no filepath given will default to the default `.env` file. """ if filepath is None: filepath = os.path.join('.env') with open(filepath, 'wb') as file_handle: file_handle.writelines( '{0}={1}\n'.format(key.upper(), val) for key, val in kwargs.items() )
python
def save(filepath=None, **kwargs): """ Saves a list of keyword arguments as environment variables to a file. If no filepath given will default to the default `.env` file. """ if filepath is None: filepath = os.path.join('.env') with open(filepath, 'wb') as file_handle: file_handle.writelines( '{0}={1}\n'.format(key.upper(), val) for key, val in kwargs.items() )
[ "def", "save", "(", "filepath", "=", "None", ",", "*", "*", "kwargs", ")", ":", "if", "filepath", "is", "None", ":", "filepath", "=", "os", ".", "path", ".", "join", "(", "'.env'", ")", "with", "open", "(", "filepath", ",", "'wb'", ")", "as", "file_handle", ":", "file_handle", ".", "writelines", "(", "'{0}={1}\\n'", ".", "format", "(", "key", ".", "upper", "(", ")", ",", "val", ")", "for", "key", ",", "val", "in", "kwargs", ".", "items", "(", ")", ")" ]
Saves a list of keyword arguments as environment variables to a file. If no filepath given will default to the default `.env` file.
[ "Saves", "a", "list", "of", "keyword", "arguments", "as", "environment", "variables", "to", "a", "file", ".", "If", "no", "filepath", "given", "will", "default", "to", "the", "default", ".", "env", "file", "." ]
train
https://github.com/mattseymour/python-env/blob/5ac09b1685fbba75c174c79cb40287aa49d0f208/dotenv/__init__.py#L31-L43
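One caveat worth flagging: the file is opened in binary mode ('wb') while writelines is fed str objects, which works on Python 2 but raises TypeError on Python 3. A hedged Python 3 sketch of the same behavior, differing only in the file mode:

    def save(filepath=None, **kwargs):
        # Text mode so str lines are accepted on Python 3.
        if filepath is None:
            filepath = '.env'
        with open(filepath, 'w') as file_handle:
            file_handle.writelines(
                '{0}={1}\n'.format(key.upper(), val)
                for key, val in kwargs.items()
            )

    save(debug=True, port=8000)   # writes DEBUG=True and PORT=8000 to ./.env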
mattseymour/python-env
dotenv/__init__.py
load
def load(filepath=None): """ Reads a .env file into os.environ. For a set filepath, open the file and read contents into os.environ. If filepath is not set then look in current dir for a .env file. """ if filepath and os.path.exists(filepath): pass else: if not os.path.exists('.env'): return False filepath = os.path.join('.env') for key, value in _get_line_(filepath): # set the key, value in the python environment vars dictionary # does not make modifications system wide. os.environ.setdefault(key, str(value)) return True
python
def load(filepath=None): """ Reads a .env file into os.environ. For a set filepath, open the file and read contents into os.environ. If filepath is not set then look in current dir for a .env file. """ if filepath and os.path.exists(filepath): pass else: if not os.path.exists('.env'): return False filepath = os.path.join('.env') for key, value in _get_line_(filepath): # set the key, value in the python environment vars dictionary # does not make modifications system wide. os.environ.setdefault(key, str(value)) return True
[ "def", "load", "(", "filepath", "=", "None", ")", ":", "if", "filepath", "and", "os", ".", "path", ".", "exists", "(", "filepath", ")", ":", "pass", "else", ":", "if", "not", "os", ".", "path", ".", "exists", "(", "'.env'", ")", ":", "return", "False", "filepath", "=", "os", ".", "path", ".", "join", "(", "'.env'", ")", "for", "key", ",", "value", "in", "_get_line_", "(", "filepath", ")", ":", "# set the key, value in the python environment vars dictionary", "# does not make modifications system wide.", "os", ".", "environ", ".", "setdefault", "(", "key", ",", "str", "(", "value", ")", ")", "return", "True" ]
Reads a .env file into os.environ. For a set filepath, open the file and read contents into os.environ. If filepath is not set then look in current dir for a .env file.
[ "Reads", "a", ".", "env", "file", "into", "os", ".", "environ", "." ]
train
https://github.com/mattseymour/python-env/blob/5ac09b1685fbba75c174c79cb40287aa49d0f208/dotenv/__init__.py#L46-L64
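Because load() uses os.environ.setdefault and str(value), variables already present in the real environment always win over the .env file, and every value is stored back as a string (os.environ can only hold strings). Usage sketch, assuming ./.env contains the line PORT=8000:

    import os
    import dotenv

    if dotenv.load():              # falls back to ./.env when no path is given
        print(os.environ['PORT'])  # '8000' -- stored as a string, not an int
    else:
        print('no .env file found')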
mattseymour/python-env
dotenv/__init__.py
_get_line_
def _get_line_(filepath): """ Gets each line from the file and parses the data. Attempt to translate the value into a python type if possible (falls back to string). """ for line in open(filepath): line = line.strip() # allows for comments in the file if line.startswith('#') or '=' not in line: continue # split on the first =, allows for subsequent `=` in strings key, value = line.split('=', 1) key = key.strip().upper() value = value.strip() if not (key and value): continue try: # evaluate the string before adding into environment # resolves any hanging (') characters value = ast.literal_eval(value) except (ValueError, SyntaxError): pass #return line yield (key, value)
python
def _get_line_(filepath): """ Gets each line from the file and parses the data. Attempt to translate the value into a python type if possible (falls back to string). """ for line in open(filepath): line = line.strip() # allows for comments in the file if line.startswith('#') or '=' not in line: continue # split on the first =, allows for subsequent `=` in strings key, value = line.split('=', 1) key = key.strip().upper() value = value.strip() if not (key and value): continue try: # evaluate the string before adding into environment # resolves any hanging (') characters value = ast.literal_eval(value) except (ValueError, SyntaxError): pass #return line yield (key, value)
[ "def", "_get_line_", "(", "filepath", ")", ":", "for", "line", "in", "open", "(", "filepath", ")", ":", "line", "=", "line", ".", "strip", "(", ")", "# allows for comments in the file", "if", "line", ".", "startswith", "(", "'#'", ")", "or", "'='", "not", "in", "line", ":", "continue", "# split on the first =, allows for subsiquent `=` in strings", "key", ",", "value", "=", "line", ".", "split", "(", "'='", ",", "1", ")", "key", "=", "key", ".", "strip", "(", ")", ".", "upper", "(", ")", "value", "=", "value", ".", "strip", "(", ")", "if", "not", "(", "key", "and", "value", ")", ":", "continue", "try", ":", "# evaluate the string before adding into environment", "# resolves any hanging (') characters", "value", "=", "ast", ".", "literal_eval", "(", "value", ")", "except", "(", "ValueError", ",", "SyntaxError", ")", ":", "pass", "#return line", "yield", "(", "key", ",", "value", ")" ]
Gets each line from the file and parses the data. Attempt to translate the value into a python type if possible (falls back to string).
[ "Gets", "each", "line", "from", "the", "file", "and", "parse", "the", "data", ".", "Attempt", "to", "translate", "the", "value", "into", "a", "python", "type", "is", "possible", "(", "falls", "back", "to", "string", ")", "." ]
train
https://github.com/mattseymour/python-env/blob/5ac09b1685fbba75c174c79cb40287aa49d0f208/dotenv/__init__.py#L67-L94
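The parser's contract in short: comment lines and lines without '=' are skipped, the split happens only on the first '=', keys are uppercased, empty keys or values are dropped, and literal_eval resolves quoting where it can. An illustrative trace over a few input lines:

    # line in file                 -> yielded pair
    # "# database settings"        -> skipped (comment)
    # "GREETING='hello'"           -> ('GREETING', 'hello')  quotes resolved by literal_eval
    # "url=http://x?a=1&b=2"       -> ('URL', 'http://x?a=1&b=2')  split on first '=' only
    # "EMPTY="                     -> skipped (falsy value)
    for key, value in _get_line_('.env'):
        print(key, repr(value))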
aouyar/PyMunin
pysysinfo/apache.py
ApacheInfo.initStats
def initStats(self): """Query and parse Apache Web Server Status Page.""" url = "%s://%s:%d/%s?auto" % (self._proto, self._host, self._port, self._statuspath) response = util.get_url(url, self._user, self._password) self._statusDict = {} for line in response.splitlines(): mobj = re.match('(\S.*\S)\s*:\s*(\S+)\s*$', line) if mobj: self._statusDict[mobj.group(1)] = util.parse_value(mobj.group(2)) if self._statusDict.has_key('Scoreboard'): self._statusDict['MaxWorkers'] = len(self._statusDict['Scoreboard'])
python
def initStats(self): """Query and parse Apache Web Server Status Page.""" url = "%s://%s:%d/%s?auto" % (self._proto, self._host, self._port, self._statuspath) response = util.get_url(url, self._user, self._password) self._statusDict = {} for line in response.splitlines(): mobj = re.match('(\S.*\S)\s*:\s*(\S+)\s*$', line) if mobj: self._statusDict[mobj.group(1)] = util.parse_value(mobj.group(2)) if self._statusDict.has_key('Scoreboard'): self._statusDict['MaxWorkers'] = len(self._statusDict['Scoreboard'])
[ "def", "initStats", "(", "self", ")", ":", "url", "=", "\"%s://%s:%d/%s?auto\"", "%", "(", "self", ".", "_proto", ",", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_statuspath", ")", "response", "=", "util", ".", "get_url", "(", "url", ",", "self", ".", "_user", ",", "self", ".", "_password", ")", "self", ".", "_statusDict", "=", "{", "}", "for", "line", "in", "response", ".", "splitlines", "(", ")", ":", "mobj", "=", "re", ".", "match", "(", "'(\\S.*\\S)\\s*:\\s*(\\S+)\\s*$'", ",", "line", ")", "if", "mobj", ":", "self", ".", "_statusDict", "[", "mobj", ".", "group", "(", "1", ")", "]", "=", "util", ".", "parse_value", "(", "mobj", ".", "group", "(", "2", ")", ")", "if", "self", ".", "_statusDict", ".", "has_key", "(", "'Scoreboard'", ")", ":", "self", ".", "_statusDict", "[", "'MaxWorkers'", "]", "=", "len", "(", "self", ".", "_statusDict", "[", "'Scoreboard'", "]", ")" ]
Query and parse Apache Web Server Status Page.
[ "Query", "and", "parse", "Apache", "Web", "Server", "Status", "Page", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pysysinfo/apache.py#L68-L79
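The regex r'(\S.*\S)\s*:\s*(\S+)\s*$' captures a possibly multi-word key and a single-token value from each line of mod_status '?auto' output, and MaxWorkers is derived from the length of the scoreboard string. The has_key() call pins this to Python 2; 'Scoreboard' in self._statusDict is the modern spelling. A self-contained sketch of the same parse against a canned response, skipping PyMunin's util helpers (so values stay strings here, where util.parse_value would coerce them):

    import re

    lines = ['Total Accesses: 845', 'Total kBytes: 1024',
             'BusyWorkers: 3', 'Scoreboard: _W__K_']
    status = {}
    for line in lines:
        mobj = re.match(r'(\S.*\S)\s*:\s*(\S+)\s*$', line)
        if mobj:
            status[mobj.group(1)] = mobj.group(2)
    if 'Scoreboard' in status:                   # Python 3 spelling of has_key()
        status['MaxWorkers'] = len(status['Scoreboard'])
    # status['Total Accesses'] == '845'; status['MaxWorkers'] == 6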
ContextLab/quail
quail/egg.py
Egg.get_pres_features
def get_pres_features(self, features=None): """ Returns a df of features for presented items """ if features is None: features = self.dist_funcs.keys() elif not isinstance(features, list): features = [features] return self.pres.applymap(lambda x: {k:v for k,v in x.items() if k in features} if x is not None else None)
python
def get_pres_features(self, features=None): """ Returns a df of features for presented items """ if features is None: features = self.dist_funcs.keys() elif not isinstance(features, list): features = [features] return self.pres.applymap(lambda x: {k:v for k,v in x.items() if k in features} if x is not None else None)
[ "def", "get_pres_features", "(", "self", ",", "features", "=", "None", ")", ":", "if", "features", "is", "None", ":", "features", "=", "self", ".", "dist_funcs", ".", "keys", "(", ")", "elif", "not", "isinstance", "(", "features", ",", "list", ")", ":", "features", "=", "[", "features", "]", "return", "self", ".", "pres", ".", "applymap", "(", "lambda", "x", ":", "{", "k", ":", "v", "for", "k", ",", "v", "in", "x", ".", "items", "(", ")", "if", "k", "in", "features", "}", "if", "x", "is", "not", "None", "else", "None", ")" ]
Returns a df of features for presented items
[ "Returns", "a", "df", "of", "features", "for", "presented", "items" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/egg.py#L220-L228
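The mechanics here: each DataFrame cell holds a feature dict, and applymap rewrites every cell to keep only the requested keys, passing None cells through. A toy sketch (applymap is assumed available; newer pandas spells it DataFrame.map):

    import pandas as pd

    pres = pd.DataFrame([[{'item': 'cat', 'size': 3, 'color': 'red'}, None]])
    features = ['size']
    pres.applymap(lambda x: {k: v for k, v in x.items() if k in features}
                  if x is not None else None)
    # first cell -> {'size': 3}; the None cell is passed through unchanged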
ContextLab/quail
quail/egg.py
Egg.get_rec_features
def get_rec_features(self, features=None): """ Returns a df of features for recalled items """ if features is None: features = self.dist_funcs.keys() elif not isinstance(features, list): features = [features] return self.rec.applymap(lambda x: {k:v for k,v in x.items() if k != 'item'} if x is not None else None)
python
def get_rec_features(self, features=None): """ Returns a df of features for recalled items """ if features is None: features = self.dist_funcs.keys() elif not isinstance(features, list): features = [features] return self.rec.applymap(lambda x: {k:v for k,v in x.items() if k != 'item'} if x is not None else None)
[ "def", "get_rec_features", "(", "self", ",", "features", "=", "None", ")", ":", "if", "features", "is", "None", ":", "features", "=", "self", ".", "dist_funcs", ".", "keys", "(", ")", "elif", "not", "isinstance", "(", "features", ",", "list", ")", ":", "features", "=", "[", "features", "]", "return", "self", ".", "rec", ".", "applymap", "(", "lambda", "x", ":", "{", "k", ":", "v", "for", "k", ",", "v", "in", "x", ".", "items", "(", ")", "if", "k", "!=", "'item'", "}", "if", "x", "is", "not", "None", "else", "None", ")" ]
Returns a df of features for recalled items
[ "Returns", "a", "df", "of", "features", "for", "recalled", "items" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/egg.py#L236-L244
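Note the asymmetry with get_pres_features: the features argument is normalized here but never used in the lambda, which filters k != 'item' instead, so recalled cells keep every feature except 'item' regardless of what was requested. The two filters side by side, on a hypothetical cell:

    cell = {'item': 'cat', 'size': 3, 'color': 'red'}
    {k: v for k, v in cell.items() if k != 'item'}      # rec path: {'size': 3, 'color': 'red'}
    {k: v for k, v in cell.items() if k in ['size']}    # pres path: {'size': 3}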
ContextLab/quail
quail/egg.py
Egg.info
def info(self): """ Print info about the data egg """ print('Number of subjects: ' + str(self.n_subjects)) print('Number of lists per subject: ' + str(self.n_lists)) print('Number of words per list: ' + str(self.list_length)) print('Date created: ' + str(self.date_created)) print('Meta data: ' + str(self.meta))
python
def info(self): """ Print info about the data egg """ print('Number of subjects: ' + str(self.n_subjects)) print('Number of lists per subject: ' + str(self.n_lists)) print('Number of words per list: ' + str(self.list_length)) print('Date created: ' + str(self.date_created)) print('Meta data: ' + str(self.meta))
[ "def", "info", "(", "self", ")", ":", "print", "(", "'Number of subjects: '", "+", "str", "(", "self", ".", "n_subjects", ")", ")", "print", "(", "'Number of lists per subject: '", "+", "str", "(", "self", ".", "n_lists", ")", ")", "print", "(", "'Number of words per list: '", "+", "str", "(", "self", ".", "list_length", ")", ")", "print", "(", "'Date created: '", "+", "str", "(", "self", ".", "date_created", ")", ")", "print", "(", "'Meta data: '", "+", "str", "(", "self", ".", "meta", ")", ")" ]
Print info about the data egg
[ "Print", "info", "about", "the", "data", "egg" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/egg.py#L247-L255
ContextLab/quail
quail/egg.py
Egg.save
def save(self, fname, compression='blosc'): """ Save method for the Egg object The data will be saved as a 'egg' file, which is a dictionary containing the elements of a Egg saved in the hd5 format using `deepdish`. Parameters ---------- fname : str A name for the file. If the file extension (.egg) is not specified, it will be appended. compression : str The kind of compression to use. See the deepdish documentation for options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save """ # put egg vars into a dict egg = { 'pres' : df2list(self.pres), 'rec' : df2list(self.rec), 'dist_funcs' : self.dist_funcs, 'subjgroup' : self.subjgroup, 'subjname' : self.subjname, 'listgroup' : self.listgroup, 'listname' : self.listname, 'date_created' : self.date_created, 'meta' : self.meta } # if extension wasn't included, add it if fname[-4:]!='.egg': fname+='.egg' # save with warnings.catch_warnings(): warnings.simplefilter("ignore") dd.io.save(fname, egg, compression=compression)
python
def save(self, fname, compression='blosc'): """ Save method for the Egg object The data will be saved as a 'egg' file, which is a dictionary containing the elements of a Egg saved in the hd5 format using `deepdish`. Parameters ---------- fname : str A name for the file. If the file extension (.egg) is not specified, it will be appended. compression : str The kind of compression to use. See the deepdish documentation for options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save """ # put egg vars into a dict egg = { 'pres' : df2list(self.pres), 'rec' : df2list(self.rec), 'dist_funcs' : self.dist_funcs, 'subjgroup' : self.subjgroup, 'subjname' : self.subjname, 'listgroup' : self.listgroup, 'listname' : self.listname, 'date_created' : self.date_created, 'meta' : self.meta } # if extension wasn't included, add it if fname[-4:]!='.egg': fname+='.egg' # save with warnings.catch_warnings(): warnings.simplefilter("ignore") dd.io.save(fname, egg, compression=compression)
[ "def", "save", "(", "self", ",", "fname", ",", "compression", "=", "'blosc'", ")", ":", "# put egg vars into a dict", "egg", "=", "{", "'pres'", ":", "df2list", "(", "self", ".", "pres", ")", ",", "'rec'", ":", "df2list", "(", "self", ".", "rec", ")", ",", "'dist_funcs'", ":", "self", ".", "dist_funcs", ",", "'subjgroup'", ":", "self", ".", "subjgroup", ",", "'subjname'", ":", "self", ".", "subjname", ",", "'listgroup'", ":", "self", ".", "listgroup", ",", "'listname'", ":", "self", ".", "listname", ",", "'date_created'", ":", "self", ".", "date_created", ",", "'meta'", ":", "self", ".", "meta", "}", "# if extension wasn't included, add it", "if", "fname", "[", "-", "4", ":", "]", "!=", "'.egg'", ":", "fname", "+=", "'.egg'", "# save", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "dd", ".", "io", ".", "save", "(", "fname", ",", "egg", ",", "compression", "=", "compression", ")" ]
Save method for the Egg object The data will be saved as a 'egg' file, which is a dictionary containing the elements of a Egg saved in the hd5 format using `deepdish`. Parameters ---------- fname : str A name for the file. If the file extension (.egg) is not specified, it will be appended. compression : str The kind of compression to use. See the deepdish documentation for options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save
[ "Save", "method", "for", "the", "Egg", "object" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/egg.py#L257-L298
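A usage sketch, given an Egg instance named egg (hypothetical variable). The suffix check works here because '.egg' is exactly four characters, so fname[-4:] can actually equal it; contrast this with FriedEgg.save below:

    egg.save('experiment1')        # writes experiment1.egg
    egg.save('experiment1.egg')    # suffix already present, not doubled
    egg.save('experiment1', compression='zlib')   # any compression deepdish accepts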
ContextLab/quail
quail/egg.py
FriedEgg.save
def save(self, fname, compression='blosc'): """ Save method for the FriedEgg object The data will be saved as a 'fegg' file, which is a dictionary containing the elements of a FriedEgg saved in the hd5 format using `deepdish`. Parameters ---------- fname : str A name for the file. If the file extension (.fegg) is not specified, it will be appended. compression : str The kind of compression to use. See the deepdish documentation for options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save """ egg = { 'data' : self.data, 'analysis' : self.analysis, 'list_length' : self.list_length, 'n_lists' : self.n_lists, 'n_subjects' : self.n_subjects, 'position' : self.position, 'date_created' : self.date_created, 'meta' : self.meta } if fname[-4:]!='.fegg': fname+='.fegg' with warnings.catch_warnings(): warnings.simplefilter("ignore") dd.io.save(fname, egg, compression=compression)
python
def save(self, fname, compression='blosc'): """ Save method for the FriedEgg object The data will be saved as a 'fegg' file, which is a dictionary containing the elements of a FriedEgg saved in the hd5 format using `deepdish`. Parameters ---------- fname : str A name for the file. If the file extension (.fegg) is not specified, it will be appended. compression : str The kind of compression to use. See the deepdish documentation for options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save """ egg = { 'data' : self.data, 'analysis' : self.analysis, 'list_length' : self.list_length, 'n_lists' : self.n_lists, 'n_subjects' : self.n_subjects, 'position' : self.position, 'date_created' : self.date_created, 'meta' : self.meta } if fname[-4:]!='.fegg': fname+='.fegg' with warnings.catch_warnings(): warnings.simplefilter("ignore") dd.io.save(fname, egg, compression=compression)
[ "def", "save", "(", "self", ",", "fname", ",", "compression", "=", "'blosc'", ")", ":", "egg", "=", "{", "'data'", ":", "self", ".", "data", ",", "'analysis'", ":", "self", ".", "analysis", ",", "'list_length'", ":", "self", ".", "list_length", ",", "'n_lists'", ":", "self", ".", "n_lists", ",", "'n_subjects'", ":", "self", ".", "n_subjects", ",", "'position'", ":", "self", ".", "position", ",", "'date_created'", ":", "self", ".", "date_created", ",", "'meta'", ":", "self", ".", "meta", "}", "if", "fname", "[", "-", "4", ":", "]", "!=", "'.fegg'", ":", "fname", "+=", "'.fegg'", "with", "warnings", ".", "catch_warnings", "(", ")", ":", "warnings", ".", "simplefilter", "(", "\"ignore\"", ")", "dd", ".", "io", ".", "save", "(", "fname", ",", "egg", ",", "compression", "=", "compression", ")" ]
Save method for the FriedEgg object The data will be saved as a 'fegg' file, which is a dictionary containing the elements of a FriedEgg saved in the hd5 format using `deepdish`. Parameters ---------- fname : str A name for the file. If the file extension (.fegg) is not specified, it will be appended. compression : str The kind of compression to use. See the deepdish documentation for options: http://deepdish.readthedocs.io/en/latest/api_io.html#deepdish.io.save
[ "Save", "method", "for", "the", "FriedEgg", "object" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/egg.py#L381-L418
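Here the suffix check is off by one: fname[-4:] is four characters but '.fegg' is five, so the comparison can never be equal and the suffix is always appended ('run.fegg' becomes 'run.fegg.fegg'). A minimal corrected sketch:

    if not fname.endswith('.fegg'):   # or, keeping the slicing style: fname[-5:] != '.fegg'
        fname += '.fegg'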
ContextLab/quail
quail/analysis/pnr.py
pnr_helper
def pnr_helper(egg, position, match='exact', distance='euclidean', features=None): """ Computes probability of a word being recalled nth (in the appropriate recall list), given its presentation position. Note: zero indexed Parameters ---------- egg : quail.Egg Data to analyze position : int Position of item to be analyzed match : str (exact, best or smooth) Matching approach to compute recall matrix. If exact, the presented and recalled items must be identical (default). If best, the recalled item that is most similar to the presented items will be selected. If smooth, a weighted average of all presented items will be used, where the weights are derived from the similarity between the recalled item and each presented item. distance : str The distance function used to compare presented and recalled items. Applies only to 'best' and 'smooth' matching approaches. Can be any distance function supported by scipy.spatial.distance.cdist. Returns ---------- prob_recalled : numpy array each number represents the probability of nth recall for a word presented in given position/index """ def pnr(lst, position): return [1 if pos==lst[position] else 0 for pos in range(1,egg.list_length+1)] opts = dict(match=match, distance=distance, features=features) if match is 'exact': opts.update({'features' : 'item'}) recmat = recall_matrix(egg, **opts) if match in ['exact', 'best']: result = [pnr(lst, position) for lst in recmat] elif match is 'smooth': result = np.atleast_2d(recmat[:, :, 0]) else: raise ValueError('Match must be set to exact, best or smooth.') return np.nanmean(result, axis=0)
python
def pnr_helper(egg, position, match='exact', distance='euclidean', features=None): """ Computes probability of a word being recalled nth (in the appropriate recall list), given its presentation position. Note: zero indexed Parameters ---------- egg : quail.Egg Data to analyze position : int Position of item to be analyzed match : str (exact, best or smooth) Matching approach to compute recall matrix. If exact, the presented and recalled items must be identical (default). If best, the recalled item that is most similar to the presented items will be selected. If smooth, a weighted average of all presented items will be used, where the weights are derived from the similarity between the recalled item and each presented item. distance : str The distance function used to compare presented and recalled items. Applies only to 'best' and 'smooth' matching approaches. Can be any distance function supported by scipy.spatial.distance.cdist. Returns ---------- prob_recalled : numpy array each number represents the probability of nth recall for a word presented in given position/index """ def pnr(lst, position): return [1 if pos==lst[position] else 0 for pos in range(1,egg.list_length+1)] opts = dict(match=match, distance=distance, features=features) if match is 'exact': opts.update({'features' : 'item'}) recmat = recall_matrix(egg, **opts) if match in ['exact', 'best']: result = [pnr(lst, position) for lst in recmat] elif match is 'smooth': result = np.atleast_2d(recmat[:, :, 0]) else: raise ValueError('Match must be set to exact, best or smooth.') return np.nanmean(result, axis=0)
[ "def", "pnr_helper", "(", "egg", ",", "position", ",", "match", "=", "'exact'", ",", "distance", "=", "'euclidean'", ",", "features", "=", "None", ")", ":", "def", "pnr", "(", "lst", ",", "position", ")", ":", "return", "[", "1", "if", "pos", "==", "lst", "[", "position", "]", "else", "0", "for", "pos", "in", "range", "(", "1", ",", "egg", ".", "list_length", "+", "1", ")", "]", "opts", "=", "dict", "(", "match", "=", "match", ",", "distance", "=", "distance", ",", "features", "=", "features", ")", "if", "match", "is", "'exact'", ":", "opts", ".", "update", "(", "{", "'features'", ":", "'item'", "}", ")", "recmat", "=", "recall_matrix", "(", "egg", ",", "*", "*", "opts", ")", "if", "match", "in", "[", "'exact'", ",", "'best'", "]", ":", "result", "=", "[", "pnr", "(", "lst", ",", "position", ")", "for", "lst", "in", "recmat", "]", "elif", "match", "is", "'smooth'", ":", "result", "=", "np", ".", "atleast_2d", "(", "recmat", "[", ":", ",", ":", ",", "0", "]", ")", "else", ":", "raise", "ValueError", "(", "'Match must be set to exact, best or smooth.'", ")", "return", "np", ".", "nanmean", "(", "result", ",", "axis", "=", "0", ")" ]
Computes probability of a word being recalled nth (in the appropriate recall list), given its presentation position. Note: zero indexed Parameters ---------- egg : quail.Egg Data to analyze position : int Position of item to be analyzed match : str (exact, best or smooth) Matching approach to compute recall matrix. If exact, the presented and recalled items must be identical (default). If best, the recalled item that is most similar to the presented items will be selected. If smooth, a weighted average of all presented items will be used, where the weights are derived from the similarity between the recalled item and each presented item. distance : str The distance function used to compare presented and recalled items. Applies only to 'best' and 'smooth' matching approaches. Can be any distance function supported by scipy.spatial.distance.cdist. Returns ---------- prob_recalled : numpy array each number represents the probability of nth recall for a word presented in given position/index
[ "Computes", "probability", "of", "a", "word", "being", "recalled", "nth", "(", "in", "the", "appropriate", "recall", "list", ")", "given", "its", "presentation", "position", ".", "Note", ":", "zero", "indexed" ]
train
https://github.com/ContextLab/quail/blob/71dd53c792dd915dc84879d8237e3582dd68b7a4/quail/analysis/pnr.py#L4-L52
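Two notes, plus a numeric sketch of the core computation. First, the "match is 'exact'" comparisons rely on CPython interning identical string literals; match == 'exact' is the robust spelling. Second, the inner pnr() one-hot encodes which presentation position was recalled nth, and averaging those rows over lists gives the probability curve. A self-contained illustration with a fixed list length instead of an Egg:

    import numpy as np

    list_length = 4
    def pnr(lst, position):
        return [1 if pos == lst[position] else 0
                for pos in range(1, list_length + 1)]

    # rows of recmat: recall order, as 1-indexed presentation positions
    recmat = [[3, 1, 2, 4],
              [1, 2, 3, 4]]
    rows = [pnr(lst, 0) for lst in recmat]   # probability of *first* recall
    np.nanmean(rows, axis=0)                 # -> array([0.5, 0. , 0.5, 0. ])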
aouyar/PyMunin
pymunin/plugins/apachestats.py
MuninApachePlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" apacheInfo = ApacheInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) stats = apacheInfo.getServerStats() if self.hasGraph('apache_access'): self.setGraphVal('apache_access', 'reqs', stats['Total Accesses']) if self.hasGraph('apache_bytes'): self.setGraphVal('apache_bytes', 'bytes', stats['Total kBytes'] * 1000) if self.hasGraph('apache_workers'): self.setGraphVal('apache_workers', 'busy', stats['BusyWorkers']) self.setGraphVal('apache_workers', 'idle', stats['IdleWorkers']) self.setGraphVal('apache_workers', 'max', stats['MaxWorkers'])
python
def retrieveVals(self): """Retrieve values for graphs.""" apacheInfo = ApacheInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) stats = apacheInfo.getServerStats() if self.hasGraph('apache_access'): self.setGraphVal('apache_access', 'reqs', stats['Total Accesses']) if self.hasGraph('apache_bytes'): self.setGraphVal('apache_bytes', 'bytes', stats['Total kBytes'] * 1000) if self.hasGraph('apache_workers'): self.setGraphVal('apache_workers', 'busy', stats['BusyWorkers']) self.setGraphVal('apache_workers', 'idle', stats['IdleWorkers']) self.setGraphVal('apache_workers', 'max', stats['MaxWorkers'])
[ "def", "retrieveVals", "(", "self", ")", ":", "apacheInfo", "=", "ApacheInfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_statuspath", ",", "self", ".", "_ssl", ")", "stats", "=", "apacheInfo", ".", "getServerStats", "(", ")", "if", "self", ".", "hasGraph", "(", "'apache_access'", ")", ":", "self", ".", "setGraphVal", "(", "'apache_access'", ",", "'reqs'", ",", "stats", "[", "'Total Accesses'", "]", ")", "if", "self", ".", "hasGraph", "(", "'apache_bytes'", ")", ":", "self", ".", "setGraphVal", "(", "'apache_bytes'", ",", "'bytes'", ",", "stats", "[", "'Total kBytes'", "]", "*", "1000", ")", "if", "self", ".", "hasGraph", "(", "'apache_workers'", ")", ":", "self", ".", "setGraphVal", "(", "'apache_workers'", ",", "'busy'", ",", "stats", "[", "'BusyWorkers'", "]", ")", "self", ".", "setGraphVal", "(", "'apache_workers'", ",", "'idle'", ",", "stats", "[", "'IdleWorkers'", "]", ")", "self", ".", "setGraphVal", "(", "'apache_workers'", ",", "'max'", ",", "stats", "[", "'MaxWorkers'", "]", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/apachestats.py#L124-L138
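One unit detail worth calling out: mod_status reports cumulative traffic as 'Total kBytes', and the plugin converts with a factor of 1000 rather than 1024, i.e. it treats the counter as decimal kilobytes. A toy illustration with made-up numbers:

    stats = {'Total Accesses': 845, 'Total kBytes': 1024,
             'BusyWorkers': 3, 'IdleWorkers': 7, 'MaxWorkers': 10}
    bytes_total = stats['Total kBytes'] * 1000   # 1024000 bytes reported to Munin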
aouyar/PyMunin
pymunin/plugins/apachestats.py
MuninApachePlugin.autoconf
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ apacheInfo = ApacheInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) return apacheInfo is not None
python
def autoconf(self): """Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise. """ apacheInfo = ApacheInfo(self._host, self._port, self._user, self._password, self._statuspath, self._ssl) return apacheInfo is not None
[ "def", "autoconf", "(", "self", ")", ":", "apacheInfo", "=", "ApacheInfo", "(", "self", ".", "_host", ",", "self", ".", "_port", ",", "self", ".", "_user", ",", "self", ".", "_password", ",", "self", ".", "_statuspath", ",", "self", ".", "_ssl", ")", "return", "apacheInfo", "is", "not", "None" ]
Implements Munin Plugin Auto-Configuration Option. @return: True if plugin can be auto-configured, False otherwise.
[ "Implements", "Munin", "Plugin", "Auto", "-", "Configuration", "Option", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/apachestats.py#L140-L149
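A small observation on the return value: a constructor that completes returns an object, never None, so apacheInfo is not None is always True when execution reaches it. Presumably a connection failure surfaces as an exception raised while fetching the status page rather than as a False return, making the comparison effectively vestigial. In effect:

    info = ApacheInfo(host, port, user, password, statuspath, ssl)  # raises on failure
    return info is not None   # an instantiated object is never None, so always True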
aouyar/PyMunin
pymunin/plugins/ntphostoffsets.py
MuninNTPhostOffsetsPlugin.retrieveVals
def retrieveVals(self): """Retrieve values for graphs.""" ntpinfo = NTPinfo() ntpstats = ntpinfo.getHostOffsets(self._remoteHosts) if ntpstats: for host in self._remoteHosts: hostkey = re.sub('\.', '_', host) hoststats = ntpstats.get(host) if hoststats: if self.hasGraph('ntp_host_stratums'): self.setGraphVal('ntp_host_stratums', hostkey, hoststats.get('stratum')) if self.hasGraph('ntp_host_offsets'): self.setGraphVal('ntp_host_offsets', hostkey, hoststats.get('offset')) if self.hasGraph('ntp_host_delays'): self.setGraphVal('ntp_host_delays', hostkey, hoststats.get('delay'))
python
def retrieveVals(self): """Retrieve values for graphs.""" ntpinfo = NTPinfo() ntpstats = ntpinfo.getHostOffsets(self._remoteHosts) if ntpstats: for host in self._remoteHosts: hostkey = re.sub('\.', '_', host) hoststats = ntpstats.get(host) if hoststats: if self.hasGraph('ntp_host_stratums'): self.setGraphVal('ntp_host_stratums', hostkey, hoststats.get('stratum')) if self.hasGraph('ntp_host_offsets'): self.setGraphVal('ntp_host_offsets', hostkey, hoststats.get('offset')) if self.hasGraph('ntp_host_delays'): self.setGraphVal('ntp_host_delays', hostkey, hoststats.get('delay'))
[ "def", "retrieveVals", "(", "self", ")", ":", "ntpinfo", "=", "NTPinfo", "(", ")", "ntpstats", "=", "ntpinfo", ".", "getHostOffsets", "(", "self", ".", "_remoteHosts", ")", "if", "ntpstats", ":", "for", "host", "in", "self", ".", "_remoteHosts", ":", "hostkey", "=", "re", ".", "sub", "(", "'\\.'", ",", "'_'", ",", "host", ")", "hoststats", "=", "ntpstats", ".", "get", "(", "host", ")", "if", "hoststats", ":", "if", "self", ".", "hasGraph", "(", "'ntp_host_stratums'", ")", ":", "self", ".", "setGraphVal", "(", "'ntp_host_stratums'", ",", "hostkey", ",", "hoststats", ".", "get", "(", "'stratum'", ")", ")", "if", "self", ".", "hasGraph", "(", "'ntp_host_offsets'", ")", ":", "self", ".", "setGraphVal", "(", "'ntp_host_offsets'", ",", "hostkey", ",", "hoststats", ".", "get", "(", "'offset'", ")", ")", "if", "self", ".", "hasGraph", "(", "'ntp_host_delays'", ")", ":", "self", ".", "setGraphVal", "(", "'ntp_host_delays'", ",", "hostkey", ",", "hoststats", ".", "get", "(", "'delay'", ")", ")" ]
Retrieve values for graphs.
[ "Retrieve", "values", "for", "graphs", "." ]
train
https://github.com/aouyar/PyMunin/blob/4f58a64b6b37c85a84cc7e1e07aafaa0321b249d/pymunin/plugins/ntphostoffsets.py#L119-L136
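Hostnames are sanitized with re.sub('\.', '_', host) since dots are not valid in Munin field names; a quick illustration of the mapping (hypothetical hosts):

    import re
    hosts = ['0.pool.ntp.org', 'time.google.com']
    [re.sub(r'\.', '_', h) for h in hosts]
    # -> ['0_pool_ntp_org', 'time_google_com']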