| _id (string) | title (string) | partition (string) | text (string) | language (string) | meta_information (dict) |
|---|---|---|---|---|---|
q13800
|
_read_bam
|
train
|
def _read_bam(bam_fn, precursors):
"""
read bam file and perform realignment of hits
"""
mode = "r" if bam_fn.endswith("sam") else "rb"
handle = pysam.Samfile(bam_fn, mode)
reads = defaultdict(realign)
for line in handle:
chrom = handle.getrname(line.reference_id)
# print("%s %s %s %s" % (line.query_name, line.reference_start, line.query_sequence, chrom))
query_name = line.query_name
if query_name not in reads:
reads[query_name].sequence = line.query_sequence
iso = isomir()
iso.align = line
iso.start = line.reference_start
iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], line.reference_start)
reads[query_name].set_precursor(chrom, iso)
reads = _clean_hits(reads)
return reads
|
python
|
{
"resource": ""
}
|
q13801
|
_collapse_fastq
|
train
|
def _collapse_fastq(in_fn):
"""
collapse reads into unique sequences
"""
args = argparse.Namespace()
args.fastq = in_fn
args.minimum = 1
args.out = op.dirname(in_fn)
return collapse_fastq(args)
|
python
|
{
"resource": ""
}
|
q13802
|
_read_pyMatch
|
train
|
def _read_pyMatch(fn, precursors):
"""
read pyMatch file and perform realignment of hits
"""
with open(fn) as handle:
reads = defaultdict(realign)
for line in handle:
query_name, seq, chrom, reference_start, end, mism, add = line.split()
reference_start = int(reference_start)
# chrom = handle.getrname(cols[1])
# print("%s %s %s %s" % (line.query_name, line.reference_start, line.query_sequence, chrom))
if query_name not in reads:
reads[query_name].sequence = seq
iso = isomir()
iso.align = line
iso.start = reference_start
iso.subs, iso.add = _realign(reads[query_name].sequence, precursors[chrom], reference_start)
logger.debug("%s %s %s %s %s" % (query_name, reference_start, chrom, iso.subs, iso.add))
if len(iso.subs) > 1:
continue
reads[query_name].set_precursor(chrom, iso)
reads = _clean_hits(reads)
return reads
|
python
|
{
"resource": ""
}
|
q13803
|
_parse_mut
|
train
|
def _parse_mut(subs):
"""
Parse mutation tag from miraligner output
"""
if subs!="0":
subs = [[subs.replace(subs[-2:], ""),subs[-2], subs[-1]]]
return subs
|
python
|
{
"resource": ""
}
|
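A quick sketch of how the mutation-tag parser above behaves. The tag layout (position digits followed by the reference and read nucleotides, e.g. "25TC") is inferred from the slicing in _parse_mut rather than documented here, so the example tags are assumptions.

```python
# Example tags are hypothetical; "0" means no mutation reported.
def _parse_mut(subs):
    if subs != "0":
        subs = [[subs.replace(subs[-2:], ""), subs[-2], subs[-1]]]
    return subs

print(_parse_mut("0"))     # '0' passes through unchanged
print(_parse_mut("25TC"))  # [['25', 'T', 'C']]: position 25, reference T, read C
```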
q13804
|
_read_miraligner
|
train
|
def _read_miraligner(fn):
"""Read ouput of miraligner and create compatible output."""
reads = defaultdict(realign)
with open(fn) as in_handle:
in_handle.next()
for line in in_handle:
cols = line.strip().split("\t")
iso = isomir()
query_name, seq = cols[1], cols[0]
chrom, reference_start = cols[-2], cols[3]
iso.mirna = cols[3]
subs, add, iso.t5, iso.t3 = cols[6:10]
if query_name not in reads:
reads[query_name].sequence = seq
iso.align = line
iso.start = reference_start
iso.subs, iso.add = _parse_mut(subs), add
logger.debug("%s %s %s %s %s" % (query_name, reference_start, chrom, iso.subs, iso.add))
reads[query_name].set_precursor(chrom, iso)
return reads
|
python
|
{
"resource": ""
}
|
q13805
|
_cmd_miraligner
|
train
|
def _cmd_miraligner(fn, out_file, species, hairpin, out):
"""
Run miraligner for miRNA annotation
"""
tool = _get_miraligner()
path_db = op.dirname(op.abspath(hairpin))
cmd = "{tool} -freq -i {fn} -o {out_file} -s {species} -db {path_db} -sub 1 -trim 3 -add 3"
if not file_exists(out_file):
logger.info("Running miraligner with %s" % fn)
do.run(cmd.format(**locals()), "miraligner with %s" % fn)
shutil.move(out_file + ".mirna", out_file)
return out_file
|
python
|
{
"resource": ""
}
|
q13806
|
_mirtop
|
train
|
def _mirtop(out_files, hairpin, gff3, species, out):
"""
Convert miraligner to mirtop format
"""
args = argparse.Namespace()
args.hairpin = hairpin
args.sps = species
args.gtf = gff3
args.add_extra = True
args.files = out_files
args.format = "seqbuster"
args.out_format = "gff"
args.out = out
reader(args)
|
python
|
{
"resource": ""
}
|
q13807
|
_merge
|
train
|
def _merge(dts):
"""
merge multiple samples in one matrix
"""
df = pd.concat(dts)
ma = df.pivot(index='isomir', columns='sample', values='counts')
ma_mirna = ma
ma = ma.fillna(0)
ma_mirna['mirna'] = [m.split(":")[0] for m in ma.index.values]
ma_mirna = ma_mirna.groupby(['mirna']).sum()
ma_mirna = ma_mirna.fillna(0)
return ma, ma_mirna
|
python
|
{
"resource": ""
}
|
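To make the pivot/group-by step in _merge easier to follow, here is a small sketch with toy counts. The column names (isomir, sample, counts) come from the function itself; the "mirna:isomir" naming of the index is an assumption based on the split(":").

```python
import pandas as pd

dts = [
    pd.DataFrame({"isomir": ["mir-21:ref", "mir-21:iso1"], "sample": "s1", "counts": [10, 2]}),
    pd.DataFrame({"isomir": ["mir-21:ref"], "sample": "s2", "counts": [7]}),
]
df = pd.concat(dts)
ma = df.pivot(index="isomir", columns="sample", values="counts").fillna(0)
ma_mirna = ma.copy()
ma_mirna["mirna"] = [m.split(":")[0] for m in ma.index.values]
print(ma)                               # isomir x sample count matrix
print(ma_mirna.groupby("mirna").sum())  # collapsed to one row per miRNA
```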
q13808
|
_create_counts
|
train
|
def _create_counts(out_dts, out_dir):
"""Summarize results into single files."""
ma, ma_mirna = _merge(out_dts)
out_ma = op.join(out_dir, "counts.tsv")
out_ma_mirna = op.join(out_dir, "counts_mirna.tsv")
ma.to_csv(out_ma, sep="\t")
ma_mirna.to_csv(out_ma_mirna, sep="\t")
return out_ma_mirna, out_ma
|
python
|
{
"resource": ""
}
|
q13809
|
miraligner
|
train
|
def miraligner(args):
"""
Realign BAM hits to miRBase to get better accuracy and annotation
"""
hairpin, mirna = _download_mirbase(args)
precursors = _read_precursor(args.hairpin, args.sps)
matures = _read_mature(args.mirna, args.sps)
gtf = _read_gtf(args.gtf)
out_dts = []
out_files = []
for bam_fn in args.files:
sample = op.splitext(op.basename(bam_fn))[0]
logger.info("Reading %s" % bam_fn)
if bam_fn.endswith("bam") or bam_fn.endswith("sam"):
bam_fn = _sam_to_bam(bam_fn)
bam_sort_by_n = op.splitext(bam_fn)[0] + "_sort"
pysam.sort("-n", bam_fn, bam_sort_by_n)
reads = _read_bam(bam_sort_by_n + ".bam", precursors)
elif bam_fn.endswith("fasta") or bam_fn.endswith("fa") or \
bam_fn.endswith("fastq"):
if args.collapse:
bam_fn = _collapse_fastq(bam_fn)
out_file = op.join(args.out, sample + ".premirna")
bam_fn = _filter_seqs(bam_fn)
if args.miraligner:
_cmd_miraligner(bam_fn, out_file, args.sps, args.hairpin, args.out)
reads = _read_miraligner(out_file)
out_files.append(out_file)
else:
raise ValueError("Format not recognized.")
if args.miraligner:
_mirtop(out_files, args.hairpin, args.gtf, args.sps, args.out)
if not args.miraligner:
reads = _annotate(reads, matures, precursors)
out_file = op.join(args.out, sample + ".mirna")
out_file, dt, dt_pre = _tab_output(reads, out_file, sample)
try:
vcf_file = op.join(args.out, sample + ".vcf")
if not file_exists(vcf_file):
# if True:
create_vcf(dt_pre, matures, gtf, vcf_file)
try:
import vcf
vcf.Reader(filename=vcf_file)
except Exception as e:
logger.warning(e.__doc__)
logger.warning(e.message)
except Exception as e:
# traceback.print_exc()
logger.warning(e.__doc__)
logger.warning(e.message)
if isinstance(dt, pd.DataFrame):
out_dts.append(dt)
if out_dts:
_create_counts(out_dts, args.out)
else:
print("No files analyzed!")
|
python
|
{
"resource": ""
}
|
q13810
|
chdir
|
train
|
def chdir(new_dir):
"""
stolen from bcbio.
Context manager to temporarily change to a new directory.
http://lucentbeing.com/blog/context-managers-and-the-with-statement-in-python/
"""
cur_dir = os.getcwd()
_mkdir(new_dir)
os.chdir(new_dir)
try:
yield
finally:
os.chdir(cur_dir)
|
python
|
{
"resource": ""
}
|
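chdir above is written as a generator; in the full module it is presumably decorated with @contextlib.contextmanager (assumed here, along with the simplified _mkdir stand-in), which makes it usable like this:

```python
import contextlib
import os

def _mkdir(path):
    # simplified stand-in for the project's _mkdir helper (assumption)
    if not os.path.exists(path):
        os.makedirs(path)
    return path

@contextlib.contextmanager
def chdir(new_dir):
    cur_dir = os.getcwd()
    _mkdir(new_dir)
    os.chdir(new_dir)
    try:
        yield
    finally:
        os.chdir(cur_dir)

with chdir("scratch_dir"):
    print(os.getcwd())   # now inside scratch_dir
print(os.getcwd())       # back in the original directory
```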
q13811
|
_get_flavor
|
train
|
def _get_flavor():
"""
Download flavor from github
"""
target = op.join("seqcluster", "flavor")
url = "https://github.com/lpantano/seqcluster.git"
if not os.path.exists(target):
# shutil.rmtree("seqcluster")
subprocess.check_call(["git", "clone","-b", "flavor", "--single-branch", url])
return op.abspath(target)
|
python
|
{
"resource": ""
}
|
q13812
|
_install
|
train
|
def _install(path, args):
"""
small helper for installation in case outside bcbio
"""
try:
from bcbio import install as bcb
except:
raise ImportError("It needs bcbio to do the quick installation.")
path_flavor = _get_flavor()
s = {"fabricrc_overrides": {"system_install": path,
"local_install": os.path.join(path, "local_install"),
"use_sudo": "false",
"edition": "minimal"}}
s = {"flavor": path_flavor,
# "target": "[brew, conda]",
"vm_provider": "novm",
"hostname": "localhost",
"fabricrc_overrides": {"edition": "minimal",
"use_sudo": "false",
"keep_isolated": "true",
"conda_cmd": bcb._get_conda_bin(),
"distribution": "__auto__",
"dist_name": "__auto__"}}
s["actions"] = ["install_biolinux"]
s["fabricrc_overrides"]["system_install"] = path
s["fabricrc_overrides"]["local_install"] = os.path.join(path, "local_install")
cbl = bcb.get_cloudbiolinux(bcb.REMOTES)
sys.path.insert(0, cbl["dir"])
cbl_deploy = __import__("cloudbio.deploy", fromlist=["deploy"])
cbl_deploy.deploy(s)
|
python
|
{
"resource": ""
}
|
q13813
|
predictions
|
train
|
def predictions(args):
"""
Create predictions of clusters
"""
logger.info(args)
logger.info("reading sequeces")
out_file = os.path.abspath(os.path.splitext(args.json)[0] + "_prediction.json")
data = load_data(args.json)
out_dir = os.path.abspath(safe_dirs(os.path.join(args.out, "predictions")))
logger.info("make predictions")
data = is_tRNA(data, out_dir, args)
if args.coral:
logger.info("make CoRaL predictions")
run_coral(data, out_dir, args)
write_data(data[0], out_file)
logger.info("Done")
|
python
|
{
"resource": ""
}
|
q13814
|
sort_precursor
|
train
|
def sort_precursor(c, loci):
"""
Sort loci according to number of sequences mapped there.
"""
# Original Py 2.7 code
#data_loci = map(lambda (x): [x, loci[x].chr, int(loci[x].start), int(loci[x].end), loci[x].strand, len(c.loci2seq[x])], c.loci2seq.keys())
# 2to3 suggested Py 3 rewrite
data_loci = [[x, loci[x].chr, int(loci[x].start), int(loci[x].end), loci[x].strand, len(c.loci2seq[x])] for x in list(c.loci2seq.keys())]
data_loci = sorted(data_loci, key=itemgetter(5), reverse=True)
return data_loci
|
python
|
{
"resource": ""
}
|
q13815
|
best_precursor
|
train
|
def best_precursor(clus, loci):
"""
Select best precursor assuming a size of around 100 nt
"""
data_loci = sort_precursor(clus, loci)
current_size = data_loci[0][5]
best = 0
for item, locus in enumerate(data_loci):
if locus[3] - locus[2] > 70:
if locus[5] > current_size * 0.8:
best = item
break
best_loci = data_loci[best]
del data_loci[best]
data_loci.insert(0, best_loci)
return data_loci
|
python
|
{
"resource": ""
}
|
q13816
|
_open_file
|
train
|
def _open_file(in_file):
"""From bcbio code"""
_, ext = os.path.splitext(in_file)
if ext == ".gz":
return gzip.open(in_file, 'rb')
if ext in [".fastq", ".fq"]:
return open(in_file, 'r')
# default to just opening it
return open(in_file, "r")
|
python
|
{
"resource": ""
}
|
q13817
|
select_snps
|
train
|
def select_snps(mirna, snp, out):
"""
Use bedtools to intersect coordinates
"""
with open(out, 'w') as out_handle:
print(_create_header(mirna, snp, out), file=out_handle, end="")
snp_in_mirna = pybedtools.BedTool(snp).intersect(pybedtools.BedTool(mirna), wo=True)
for single in snp_in_mirna:
if single[10] == "miRNA" and len(single[3]) + len(single[4]) == 2:
line = []
rel_p = _lift_positions(single)
line.append(_get_mirna_name(single[16]))
line.append(str(rel_p))
line.append(single[2])
line.append(_complement(single[3], single[14]))
line.append(_complement(single[4], single[14]))
line.append(single[5])
line.append(single[6])
line.append(single[7])
print("\t".join(line), file=out_handle, end="")
return out
|
python
|
{
"resource": ""
}
|
q13818
|
up_threshold
|
train
|
def up_threshold(x, s, p):
"""function to decide if similarity is
below cutoff"""
if 1.0 * x/s >= p:
return True
elif stat.binom_test(x, s, p) > 0.01:
return True
return False
|
python
|
{
"resource": ""
}
|
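A worked example of the up_threshold rule above: accept when the observed fraction x/s reaches p, or when a binomial test cannot reject the rate p at the 0.01 level. It assumes `stat` is scipy.stats; the wrapper below is only there because binom_test was removed in recent SciPy releases in favor of binomtest.

```python
from scipy import stats as stat

def _binom_pvalue(x, s, p):
    # binom_test was removed in SciPy 1.12; fall back to binomtest there.
    if hasattr(stat, "binom_test"):
        return stat.binom_test(x, s, p)
    return stat.binomtest(x, s, p).pvalue

def up_threshold(x, s, p):
    if 1.0 * x / s >= p:
        return True
    elif _binom_pvalue(x, s, p) > 0.01:
        return True
    return False

print(up_threshold(8, 10, 0.7))   # True: 0.8 >= 0.7
print(up_threshold(5, 10, 0.7))   # True: small sample, binomial p-value > 0.01
print(up_threshold(5, 100, 0.7))  # False: strong evidence the rate is below 0.7
```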
q13819
|
_scan
|
train
|
def _scan(positions):
"""get the region inside the vector with more expression"""
scores = []
for start in range(0, len(positions) - 17, 5):
end = start + 17
scores.append(_enrichment(positions[start:end], positions[:start], positions[end:]))
return scores
|
python
|
{
"resource": ""
}
|
q13820
|
_check_args
|
train
|
def _check_args(args):
"""
check arguments before starting analysis.
"""
logger.info("Checking parameters and files")
args.dir_out = args.out
args.samplename = "pro"
global decision_cluster
global similar
if not os.path.isdir(args.out):
logger.warning("the output folder doens't exists")
os.mkdirs(args.out)
if args.bed and args.gtf:
logger.error("cannot provide -b and -g at the same time")
raise SyntaxError
if args.debug:
logger.info("DEBUG messages will be showed in file.")
if args.bed:
args.list_files = args.bed
args.type_ann = "bed"
if args.gtf:
args.list_files = args.gtf
args.type_ann = "gtf"
logger.info("Output dir will be: %s" % args.dir_out)
if not all([file_exists(args.ffile), file_exists(args.afile)]):
logger.error("I/O error: Seqs.ma or Seqs.bam. ")
raise IOError("Seqs.ma or/and Seqs.bam doesn't exists.")
if hasattr(args, 'list_files'):
beds = args.list_files.split(",")
for filebed in beds:
if not file_exists(filebed):
logger.error("I/O error: {0}".format(filebed))
raise IOError("%s annotation files doesn't exist" % filebed)
param.decision_cluster = args.method
if args.similar:
param.similar = float(args.similar)
if args.min_seqs:
param.min_seqs = int(args.min_seqs)
return args
|
python
|
{
"resource": ""
}
|
q13821
|
_total_counts
|
train
|
def _total_counts(seqs, seqL, aligned=False):
"""
Counts total seqs after each step
"""
total = Counter()
if isinstance(seqs, list):
if not aligned:
l = len([total.update(seqL[s].freq) for s in seqs])
else:
l = len([total.update(seqL[s].freq) for s in seqs if seqL[s].align > 0])
elif isinstance(seqs, dict):
[total.update(seqs[s].get_freq(seqL)) for s in seqs]
l = sum(len(seqs[s].idmembers) for s in seqs)
return total, l
|
python
|
{
"resource": ""
}
|
q13822
|
_get_annotation
|
train
|
def _get_annotation(c, loci):
"""get annotation of transcriptional units"""
data_ann_temp = {}
data_ann = []
counts = Counter()
for lid in c.loci2seq:
# original Py 2.7 code
#for dbi in loci[lid].db_ann.keys():
# data_ann_temp[dbi] = {dbi: map(lambda (x): loci[lid].db_ann[dbi].ann[x].name, loci[lid].db_ann[dbi].ann.keys())}
# suggestion by 2to3
for dbi in list(loci[lid].db_ann.keys()):
data_ann_temp[dbi] = {dbi: [loci[lid].db_ann[dbi].ann[x].name for x in list(loci[lid].db_ann[dbi].ann.keys())]}
logger.debug("_json_: data_ann_temp %s %s" % (dbi, data_ann_temp[dbi]))
counts[dbi] += 1
# original Py 2.7 code
#data_ann = data_ann + map(lambda (x): data_ann_temp[x], data_ann_temp.keys())
# suggestion by 2to3
data_ann = data_ann + [data_ann_temp[x] for x in list(data_ann_temp.keys())]
logger.debug("_json_: data_ann %s" % data_ann)
counts = {k: v for k, v in counts.iteritems()}
total_loci = sum([counts[db] for db in counts])
valid_ann = [k for k, v in counts.iteritems() if up_threshold(v, total_loci, 0.7)]
return data_ann, valid_ann
|
python
|
{
"resource": ""
}
|
q13823
|
_sum_by_samples
|
train
|
def _sum_by_samples(seqs_freq, samples_order):
"""
Sum sequences of a metacluster by samples.
"""
n = len(seqs_freq[seqs_freq.keys()[0]].freq.keys())
y = np.array([0] * n)
for s in seqs_freq:
x = seqs_freq[s].freq
exp = [seqs_freq[s].freq[sam] for sam in samples_order]
y = list(np.array(exp) + y)
return y
|
python
|
{
"resource": ""
}
|
q13824
|
_clean_alignment
|
train
|
def _clean_alignment(args):
"""
Prepare alignment for cluster detection.
"""
logger.info("Clean bam file with highly repetitive reads with low counts. sum(counts)/n_hits > 1%")
bam_file, seq_obj = clean_bam_file(args.afile, args.mask)
logger.info("Using %s file" % bam_file)
detect_complexity(bam_file, args.ref, args.out)
return bam_file, seq_obj
|
python
|
{
"resource": ""
}
|
q13825
|
_create_clusters
|
train
|
def _create_clusters(seqL, bam_file, args):
"""
Cluster sequences and
create metaclusters with multi-mappers.
"""
clus_obj = []
cluster_file = op.join(args.out, "cluster.bed")
if not os.path.exists(op.join(args.out, 'list_obj.pk')):
if not file_exists(cluster_file):
logger.info("Parsing aligned file")
logger.info("Merging sequences")
bedtools = os.path.join(os.path.dirname(sys.executable), "bedtools")
bedtools = bedtools if os.path.exists(bedtools) else "bedtools"
parse_cmd = "awk '{i=i+1;print $1\"\\t\"$2\"\\t\"$3\"\\t\"$4\"\\t\"i\"\\t\"$6}'"
cmd = "{bedtools} bamtobed -i {bam_file} | {parse_cmd} | {bedtools} cluster -s -d 20 -i - > {cluster_file}"
do.run(cmd.format(**locals()))
c = pybedtools.BedTool(cluster_file)
logger.info("Creating clusters")
clus_obj = detect_clusters(c, seqL, args.min_seqs, args.non_un_gl)
with open(op.join(args.out, 'list_obj.pk'), 'wb') as output:
pickle.dump(clus_obj, output, pickle.HIGHEST_PROTOCOL)
else:
logger.info("Loading previous clusters")
with open(op.join(args.out, 'list_obj.pk'), 'rb') as input:
clus_obj = pickle.load(input)
# bedfile = pybedtools.BedTool(generate_position_bed(clus_obj), from_string=True)
# seqs_2_loci = bedfile.intersect(pybedtools.BedTool(aligned_bed, from_string=True), wo=True, s=True)
# seqs_2_position = add_seqs_position_to_loci(seqs_2_loci, seqL)
logger.info("%s clusters found" % (len(clus_obj.clusid)))
return clus_obj
|
python
|
{
"resource": ""
}
|
q13826
|
_cleaning
|
train
|
def _cleaning(clusL, path):
"""
Load saved cluster and jump to next step
"""
backup = op.join(path, "list_obj_red.pk")
if not op.exists(backup):
clus_obj = reduceloci(clusL, path)
with open(backup, 'wb') as output:
pickle.dump(clus_obj, output, pickle.HIGHEST_PROTOCOL)
return clus_obj
else:
logger.info("Loading previous reduced clusters")
with open(backup, 'rb') as in_handle:
clus_obj = pickle.load(in_handle)
return clus_obj
|
python
|
{
"resource": ""
}
|
q13827
|
explore
|
train
|
def explore(args):
"""Create mapping of sequences of two clusters
"""
logger.info("reading sequeces")
data = load_data(args.json)
logger.info("get sequences from json")
#get_sequences_from_cluster()
c1, c2 = args.names.split(",")
seqs, names = get_sequences_from_cluster(c1, c2, data[0])
loci = get_precursors_from_cluster(c1, c2, data[0])
logger.info("map all sequences to all loci")
print("%s" % (loci))
map_to_precursors(seqs, names, loci, os.path.join(args.out, "map.tsv"), args)
#map_sequences_w_bowtie(sequences, precursors)
logger.info("plot sequences on loci")
#get_matrix_position()
#plot_sequences()
logger.info("Done")
|
python
|
{
"resource": ""
}
|
q13828
|
prepare
|
train
|
def prepare(args):
"""
Read all seq.fa files and create a matrix and unique fasta files.
The information is
:param args: options parsed from command line
:param con: logging messages going to console
:param log: logging messages going to console and file
:returns: files - matrix and fasta files that should be used with
an aligner (such as bowtie) before running `seqcluster cluster`
"""
try:
f = open(args.config, 'r')
seq_out = open(op.join(args.out, "seqs.fastq"), 'w')
ma_out = open(op.join(args.out, "seqs.ma"), 'w')
except IOError as e:
traceback.print_exc()
raise IOError("Can not create output files: %s, %s or read %s" % (op.join(args.out, "seqs.ma"), op.join(args.out, "seqs.fastq"), args.config))
logger.info("Reading sequeces")
seq_l, sample_l = _read_fastq_files(f, args)
logger.info("Creating matrix with unique sequences")
logger.info("Filtering: min counts %s, min size %s, max size %s, min shared %s" % (args.minc, args.minl, args.maxl, args.min_shared))
_create_matrix_uniq_seq(sample_l, seq_l, ma_out, seq_out, args.min_shared)
logger.info("Finish preprocessing. Get a sorted BAM file of seqs.fa and run seqcluster cluster.")
|
python
|
{
"resource": ""
}
|
q13829
|
_create_matrix_uniq_seq
|
train
|
def _create_matrix_uniq_seq(sample_l, seq_l, maout, out, min_shared):
""" create matrix counts for each different sequence in all the fasta files
:param sample_l: :code:`list_s` is the output of :code:`_read_fasta_files`
:param seq_l: :code:`seq_s` is the output of :code:`_read_fasta_files`
:param maout: is a file handler to write the matrix count information
:param out: is a file handle to write the fasta file with unique sequences
:returns: Null
"""
skip = 0
if int(min_shared) > len(sample_l):
min_shared = len(sample_l)
maout.write("id\tseq")
for g in sample_l:
maout.write("\t%s" % g)
for s in seq_l.keys():
seen = sum([1 for g in seq_l[s].group if seq_l[s].group[g] > 0])
if seen < int(min_shared):
skip += 1
continue
maout.write("\nseq_%s\t%s" % (seq_l[s].idx, seq_l[s].seq))
for g in sample_l:
if g in seq_l[s].group:
maout.write("\t%s" % seq_l[s].group[g])
else:
maout.write("\t0")
qual = "".join(seq_l[s].quality)
out.write("@seq_%s\n%s\n+\n%s\n" % (seq_l[s].idx, seq_l[s].seq, qual))
out.close()
maout.close()
logger.info("Total skipped due to --min-shared parameter (%s) : %s" % (min_shared, skip))
|
python
|
{
"resource": ""
}
|
q13830
|
run_coral
|
train
|
def run_coral(clus_obj, out_dir, args):
"""
Run some CoRaL modules to predict small RNA function
"""
if not args.bed:
raise ValueError("This module needs the bed file output from cluster subcmd.")
workdir = op.abspath(op.join(args.out, 'coral'))
safe_dirs(workdir)
bam_in = op.abspath(args.bam)
bed_in = op.abspath(args.bed)
reference = op.abspath(args.ref)
with chdir(workdir):
bam_clean = coral.prepare_bam(bam_in, bed_in)
out_dir = op.join(workdir, "regions")
safe_dirs(out_dir)
prefix = "seqcluster"
loci_file = coral.detect_regions(bam_clean, bed_in, out_dir, prefix)
coral.create_features(bam_clean, loci_file, reference, out_dir)
|
python
|
{
"resource": ""
}
|
q13831
|
is_tRNA
|
train
|
def is_tRNA(clus_obj, out_dir, args):
"""
Iterates through cluster precursors to predict sRNA types
"""
ref = os.path.abspath(args.reference)
utils.safe_dirs(out_dir)
for nc in clus_obj[0]:
c = clus_obj[0][nc]
loci = c['loci']
out_fa = "cluster_" + nc
if loci[0][3] - loci[0][2] < 500:
with make_temp_directory() as tmpdir:
os.chdir(tmpdir)
get_loci_fasta({loci[0][0]: [loci[0][0:5]]}, out_fa, ref)
summary_file, str_file = _run_tRNA_scan(out_fa)
if "predictions" not in c:
c['predictions'] = {}
c['predictions']['tRNA'] = _read_tRNA_scan(summary_file)
score = _read_tRNA_scan(summary_file)
logger.debug(score)
shutil.move(summary_file, op.join(out_dir, summary_file))
shutil.move(str_file, op.join(out_dir, str_file))
else:
c['errors'].add("precursor too long")
clus_obj[0][nc] = c
return clus_obj
|
python
|
{
"resource": ""
}
|
q13832
|
_read_tRNA_scan
|
train
|
def _read_tRNA_scan(summary_file):
"""
Parse output from tRNA_Scan
"""
score = 0
if os.path.getsize(summary_file) == 0:
return 0
with open(summary_file) as in_handle:
# header = in_handle.next().strip().split()
for line in in_handle:
if not line.startswith("--"):
pre = line.strip().split()
score = pre[-1]
return score
|
python
|
{
"resource": ""
}
|
q13833
|
_run_tRNA_scan
|
train
|
def _run_tRNA_scan(fasta_file):
"""
Run tRNA-scan-SE to predict tRNA
"""
out_file = fasta_file + "_trnascan"
se_file = fasta_file + "_second_str"
cmd = "tRNAscan-SE -q -o {out_file} -f {se_file} {fasta_file}"
run(cmd.format(**locals()))
return out_file, se_file
|
python
|
{
"resource": ""
}
|
q13834
|
_parse_mut
|
train
|
def _parse_mut(mut):
"""
Parse mutation field to get position and nts.
"""
multiplier = 1
if mut.startswith("-"):
mut = mut[1:]
multiplier = -1
nt = mut.strip('0123456789')
pos = int(mut[:-2]) * multiplier
return nt, pos
|
python
|
{
"resource": ""
}
|
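This second _parse_mut expects a signed position followed by two nucleotides (e.g. "3TC" or "-3TC"); that format is inferred from the strip/slice logic, so the inputs below are assumptions used only to illustrate the return value.

```python
def _parse_mut(mut):
    multiplier = 1
    if mut.startswith("-"):
        mut = mut[1:]
        multiplier = -1
    nt = mut.strip('0123456789')
    pos = int(mut[:-2]) * multiplier
    return nt, pos

print(_parse_mut("3TC"))   # ('TC', 3)
print(_parse_mut("-3TC"))  # ('TC', -3)
```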
q13835
|
_get_reference_position
|
train
|
def _get_reference_position(isomir):
"""
Liftover from isomir to reference mature
"""
mut = isomir.split(":")[1]
if mut == "0":
return mut
nt, pos = _parse_mut(mut)
trim5 = isomir.split(":")[-2]
off = -1 * len(trim5)
if trim5.islower():
off = len(trim5)
if trim5 == "NA" or trim5 == "0":
off = 0
# print(isomir)
# print([mut, pos, off, nt])
return "%s%s" % (pos + off, nt)
|
python
|
{
"resource": ""
}
|
q13836
|
_get_pct
|
train
|
def _get_pct(isomirs, mirna):
"""
Get pct of variants with respect to the reference,
using reads and different sequences
"""
pass_pos = []
for isomir in isomirs.iterrows():
mir = isomir[1]["chrom"]
mut = isomir[1]["sv"]
mut_counts = isomir[1]["counts"]
total = mirna.loc[mir, "counts"] * 1.0 - mut_counts
mut_diff = isomir[1]["diff"]
ratio = mut_counts / total
if mut_counts > 10 and ratio > 0.4 and mut != "0" and mut_diff > 1:
isomir[1]["ratio"] = ratio
pass_pos.append(isomir[1])
return pass_pos
|
python
|
{
"resource": ""
}
|
q13837
|
_print_header
|
train
|
def _print_header(data):
"""
Create vcf header to make
a valid vcf.
"""
print("##fileformat=VCFv4.2", file=STDOUT, end="")
print("##source=seqbuster2.3", file=STDOUT, end="")
print("##reference=mirbase", file=STDOUT, end="")
for pos in data:
print("##contig=<ID=%s>" % pos["chrom"], file=STDOUT, end="")
print('##INFO=<ID=ID,Number=1,Type=String,Description="miRNA name">', file=STDOUT, end="")
print('##FORMAT=<ID=GT,Number=1,Type=Integer,Description="Genotype">', file=STDOUT, end="")
print('##FORMAT=<ID=NR,Number=A,Type=Integer,Description="Total reads supporting the variant">', file=STDOUT, end="")
print('##FORMAT=<ID=NS,Number=A,Type=Float,Description="Total number of different sequences supporting the variant">', file=STDOUT, end="")
print("#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\tSAMP001", file=STDOUT, end="")
|
python
|
{
"resource": ""
}
|
q13838
|
print_vcf
|
train
|
def print_vcf(data):
"""Print vcf line following rules."""
id_name = "."
qual = "."
chrom = data['chrom']
pos = data['pre_pos']
nt_ref = data['nt'][1]
nt_snp = data['nt'][0]
flt = "PASS"
info = "ID=%s" % data['mature']
frmt = "GT:NR:NS"
gntp = "%s:%s:%s" % (_genotype(data), data["counts"], data["diff"])
print("\t".join(map(str, [chrom, pos, id_name, nt_ref, nt_snp, qual, flt, info, frmt, gntp])), file=STDOUT, end="")
|
python
|
{
"resource": ""
}
|
q13839
|
liftover
|
train
|
def liftover(pass_pos, matures):
"""Make position at precursor scale"""
fixed_pos = []
_print_header(pass_pos)
for pos in pass_pos:
mir = pos["mature"]
db_pos = matures[pos["chrom"]]
mut = _parse_mut(pos["sv"])
print([db_pos[mir], mut, pos["sv"]])
pos['pre_pos'] = db_pos[mir][0] + mut[1] - 1
pos['nt'] = list(mut[0])
fixed_pos.append(pos)
print_vcf(pos)
return fixed_pos
|
python
|
{
"resource": ""
}
|
q13840
|
create_vcf
|
train
|
def create_vcf(isomirs, matures, gtf, vcf_file=None):
"""
Create vcf file of changes for all samples.
PASS will be ones with > 3 isomiRs supporting the position
and > 30% of reads, otherwise LOW
"""
global STDOUT
isomirs['sv'] = [_get_reference_position(m) for m in isomirs["isomir"]]
mirna = isomirs.groupby(['chrom']).sum()
sv = isomirs.groupby(['chrom', 'mature', 'sv'], as_index=False).sum()
sv["diff"] = isomirs.groupby(['chrom', 'mature', 'sv'], as_index=False).size().reset_index().loc[:,0]
pass_pos = _get_pct(sv, mirna)
if vcf_file:
with open(vcf_file, 'w') as out_handle:
STDOUT = out_handle
pass_pos = liftover(pass_pos, matures)
if gtf:
vcf_genome_file = vcf_file.replace(".vcf", "_genome.vcf")
with open(vcf_genome_file, 'w') as out_handle:
STDOUT = out_handle
pass_pos = liftover_to_genome(pass_pos, gtf)
|
python
|
{
"resource": ""
}
|
q13841
|
liftover_to_genome
|
train
|
def liftover_to_genome(pass_pos, gtf):
"""Liftover from precursor to genome"""
fixed_pos = []
for pos in pass_pos:
if pos["chrom"] not in gtf:
continue
db_pos = gtf[pos["chrom"]][0]
mut = _parse_mut(pos["sv"])
print([db_pos, pos])
if db_pos[3] == "+":
pos['pre_pos'] = db_pos[1] + pos["pre_pos"] + 1
else:
pos['pre_pos'] = db_pos[2] - (pos["pre_pos"] - 1)
pos['chrom'] = db_pos[0]
pos['nt'] = list(mut[0])
fixed_pos.append(pos)
_print_header(fixed_pos)
for pos in fixed_pos:
print_vcf(pos)
|
python
|
{
"resource": ""
}
|
q13842
|
_get_seqs_from_cluster
|
train
|
def _get_seqs_from_cluster(seqs, seen):
"""
Returns the sequences that are already part of the cluster
:param seqs: list of sequences ids
:param clus_id: dict of sequences ids that are part of a cluster
:returns:
* :code:`already_in` list of cluster ids that contained some of the sequences
* :code:`not_in` list of sequences that don't belong to any cluster yet
"""
already_in = set()
not_in = []
already_in = map(seen.get, seqs)
# if isinstance(already_in, list):
already_in = filter(None, already_in)
not_in = set(seqs) - set(seen.keys())
# for s in seqs:
# if s in seen:
# already_in.add(seen[s])
# else:
# not_in.append(s)
return list(set(already_in)), list(not_in)
|
python
|
{
"resource": ""
}
|
q13843
|
_write_cluster
|
train
|
def _write_cluster(metacluster, cluster, loci, idx, path):
"""
For complex meta-clusters, write all the loci for further debug
"""
out_file = op.join(path, 'log', str(idx) + '.bed')
with utils.safe_run(out_file):
with open(out_file, 'w') as out_handle:
for idc in metacluster:
for idl in cluster[idc].loci2seq:
pos = loci[idl].list()
print("\t".join(pos[:4] + [str(len(cluster[idc].loci2seq[idl]))] + [pos[-1]]), file=out_handle, end="")
|
python
|
{
"resource": ""
}
|
q13844
|
_iter_loci
|
train
|
def _iter_loci(meta, clusters, s2p, filtered, n_cluster):
"""
Go through all locus and decide if they are part
of the same TU or not.
:param idx: int cluster id
:param s2p: dict with [loci].coverage[start] = # of sequences there
:param filtered: dict with clusters object
:param n_cluster: int cluster id
:return:
* filtered: dict of cluster objects
* n_cluster: int cluster id
"""
global CONFLICT
loci = dict(zip(meta, [clusters[idc] for idc in meta]))
n_loci = len(meta)
n_loci_prev = n_loci + 1
cicle = 0
# [logger.note("BEFORE %s %s %s" % (c.id, idl, len(c.loci2seq[idl]))) for idl in c.loci2seq]
internal_cluster = {}
if n_loci == 1:
n_cluster += 1
filtered[n_cluster] = clusters[meta[0]]
filtered[n_cluster].update(id=n_cluster)
filtered[n_cluster].set_freq(s2p[1])
while n_loci < n_loci_prev and n_loci != 1:
n_loci_prev = n_loci
cicle += 1
if (cicle % 1) == 0:
logger.debug("_iter_loci:number of cicle: %s with n_loci %s" % (cicle, n_loci))
loci_similarity = _calculate_similarity(loci)
internal_cluster = _merge_similar(loci, loci_similarity)
n_loci = len(internal_cluster)
loci = internal_cluster
logger.debug("_iter_loci: n_loci %s" % n_loci)
if n_loci > 1:
n_internal_cluster = sorted(internal_cluster.keys(), reverse=True)[0]
CONFLICT += 1
internal_cluster = _solve_conflict(internal_cluster, s2p, n_internal_cluster)
internal_cluster = _clean_cluster(internal_cluster)
for idc in internal_cluster:
n_cluster += 1
logger.debug("_iter_loci: add to filtered %s" % n_cluster)
filtered[n_cluster] = internal_cluster[idc]
filtered[n_cluster].id = n_cluster
filtered[n_cluster].update(id=n_cluster)
filtered[n_cluster].set_freq(s2p[1])
logger.debug("_iter_loci: filtered %s" % filtered.keys())
# for new_c in internal_cluster.values():
# [logger.note("%s %s %s %s" % (meta, new_c.id, idl, len(new_c.loci2seq[idl]))) for idl in new_c.loci2seq]
return filtered, n_cluster
|
python
|
{
"resource": ""
}
|
q13845
|
_convert_to_clusters
|
train
|
def _convert_to_clusters(c):
"""Return 1 cluster per loci"""
new_dict = {}
n_cluster = 0
logger.debug("_convert_to_cluster: loci %s" % c.loci2seq.keys())
for idl in c.loci2seq:
n_cluster += 1
new_c = cluster(n_cluster)
#new_c.id_prev = c.id
new_c.loci2seq[idl] = c.loci2seq[idl]
new_dict[n_cluster] = new_c
logger.debug("_convert_to_cluster: new ids %s" % new_dict.keys())
return new_dict
|
python
|
{
"resource": ""
}
|
q13846
|
_calculate_similarity
|
train
|
def _calculate_similarity(c):
"""Get a similarity matrix of % of shared sequence
:param c: cluster object
:return ma: similarity matrix
"""
ma = {}
for idc in c:
set1 = _get_seqs(c[idc])
[ma.update({(idc, idc2): _common(set1, _get_seqs(c[idc2]), idc, idc2)}) for idc2 in c if idc != idc2 and (idc2, idc) not in ma]
# logger.debug("_calculate_similarity_ %s" % ma)
return ma
|
python
|
{
"resource": ""
}
|
q13847
|
_get_seqs
|
train
|
def _get_seqs(list_idl):
"""get all sequences in a cluster knowing loci"""
seqs = set()
for idl in list_idl.loci2seq:
# logger.debug("_get_seqs_: loci %s" % idl)
[seqs.add(s) for s in list_idl.loci2seq[idl]]
# logger.debug("_get_seqs_: %s" % len(seqs))
return seqs
|
python
|
{
"resource": ""
}
|
q13848
|
_common
|
train
|
def _common(s1, s2, i1, i2):
"""calculate the common % percentage of sequences"""
c = len(set(s1).intersection(s2))
t = min(len(s1), len(s2))
pct = 1.0 * c / t * t
is_gt = up_threshold(pct, t * 1.0, parameters.similar)
logger.debug("_common: pct %s of clusters:%s %s = %s" % (1.0 * c / t, i1, i2, is_gt))
if pct < parameters.similar and is_gt and pct > 0:
pct = parameters.similar
return pct / t
|
python
|
{
"resource": ""
}
|
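The core of _common above is a shared-sequence fraction: the number of sequence ids two loci have in common over the size of the smaller locus, which is then compared against parameters.similar via up_threshold. A stripped-down numeric sketch (the cluster objects and the parameters module are omitted):

```python
s1 = {"seq_1", "seq_2", "seq_3", "seq_4"}
s2 = {"seq_3", "seq_4", "seq_5"}
c = len(s1.intersection(s2))     # 2 shared sequences
t = min(len(s1), len(s2))        # size of the smaller locus: 3
print(c, round(1.0 * c / t, 2))  # 2 0.67 -> compared against parameters.similar
```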
q13849
|
_is_consistent
|
train
|
def _is_consistent(pairs, common, clus_seen, loci_similarity):
"""
Check if loci shared that match sequences with all
clusters seen until now.
"""
all_true1 = all([all([common and loci_similarity[(p, c)] > parameters.similar for p in pairs if (p, c) in loci_similarity]) for c in clus_seen])
all_true2 = all([all([common and loci_similarity[(c, p)] > parameters.similar for p in pairs if (c, p) in loci_similarity]) for c in clus_seen])
return all_true1 * all_true2
|
python
|
{
"resource": ""
}
|
q13850
|
_merge_similar
|
train
|
def _merge_similar(loci, loci_similarity):
"""
Internal function to reduce loci complexity
:param loci: class cluster
:param locilen_sorted: list of loci sorted by size
:return
c: updated class cluster
"""
n_cluster = 0
internal_cluster = {}
clus_seen = {}
loci_sorted = sorted(loci_similarity.iteritems(), key=operator.itemgetter(1), reverse=True)
for pairs, sim in loci_sorted:
common = sim > parameters.similar
n_cluster += 1
logger.debug("_merge_similar:try new cluster %s" % n_cluster)
new_c = cluster(n_cluster)
p_seen, p_unseen = [], []
size = min(len(_get_seqs(loci[pairs[0]])), len(_get_seqs(loci[pairs[1]])))
if common:
consistent = _is_consistent(pairs, common, clus_seen, loci_similarity)
logger.debug("_merge_similar: clusters seen: %s" % clus_seen)
logger.debug("_merge_similar: id %s common %s|%s total %s consistent %s" % (pairs, sim, common, size, consistent))
if not consistent:
continue
if pairs[0] in clus_seen:
p_seen.append(pairs[0])
p_unseen.append(pairs[1])
if pairs[1] in clus_seen:
p_seen.append(pairs[1])
p_unseen.append(pairs[0])
if len(p_seen) == 0:
new_c = _merge_cluster(loci[pairs[0]], new_c)
new_c = _merge_cluster(loci[pairs[1]], new_c)
[clus_seen.update({p: n_cluster}) for p in pairs]
internal_cluster[n_cluster] = new_c
if len(p_seen) == 1:
idc_seen = clus_seen[p_seen[0]]
internal_cluster[idc_seen] = _merge_cluster(loci[p_unseen[0]], internal_cluster[idc_seen])
clus_seen[p_unseen[0]] = idc_seen
else:
logger.debug("_merge_similar: id %s %s are different" % pairs)
continue
internal_cluster.update(_add_unseen(loci, clus_seen, n_cluster))
logger.debug("_merge_similar: total clus %s" %
len(internal_cluster.keys()))
return internal_cluster
|
python
|
{
"resource": ""
}
|
q13851
|
_merge_cluster
|
train
|
def _merge_cluster(old, new):
"""merge one cluster to another"""
logger.debug("_merge_cluster: %s to %s" % (old.id, new.id))
logger.debug("_merge_cluster: add idls %s" % old.loci2seq.keys())
for idl in old.loci2seq:
# if idl in new.loci2seq:
# new.loci2seq[idl] = list(set(new.loci2seq[idl] + old.loci2seq[idl]))
# new.loci2seq[idl] = old.loci2seq[idl]
new.add_id_member(old.loci2seq[idl], idl)
return new
|
python
|
{
"resource": ""
}
|
q13852
|
_solve_conflict
|
train
|
def _solve_conflict(list_c, s2p, n_cluster):
"""
Make sure sequences are counted once.
Resolve by most-voted or exclusion
:params list_c: dict of objects cluster
:param s2p: dict of [loci].coverage = # num of seqs
:param n_cluster: number of clusters
return dict: new set of clusters
"""
logger.debug("_solve_conflict: count once")
if parameters.decision_cluster == "bayes":
return decide_by_bayes(list_c, s2p)
loci_similarity = _calculate_similarity(list_c)
loci_similarity = sorted(loci_similarity.iteritems(), key=operator.itemgetter(1), reverse=True)
common = sum([score for p, score in loci_similarity])
while common > 0:
n_cluster += 1
logger.debug("_solve_conflict: ma %s" % loci_similarity)
pairs = loci_similarity[0][0]
score = loci_similarity[0][1]
logger.debug("_solve_conflict: common %s, new %s" % (score, n_cluster))
if parameters.decision_cluster.startswith("most-voted"):
list_c = _split_cluster_by_most_vote(list_c, pairs)
else:
list_c = _split_cluster(list_c, pairs, n_cluster)
list_c = {k: v for k, v in list_c.iteritems() if len(v.loci2seq) > 0}
loci_similarity = _calculate_similarity(list_c)
loci_similarity = sorted(loci_similarity.iteritems(), key=operator.itemgetter(1), reverse=True)
#logger.note("%s %s" % (pairs, loci_similarity[0][1]))
common = sum([score for p, score in loci_similarity])
logger.debug("_solve_conflict: solved clusters %s" % len(list_c.keys()))
return list_c
|
python
|
{
"resource": ""
}
|
q13853
|
_split_cluster
|
train
|
def _split_cluster(c, p, n):
"""split cluster by exclusion"""
old = c[p[0]]
new = c[p[1]]
new_c = cluster(n)
common = set(_get_seqs(old)).intersection(_get_seqs(new))
for idl in old.loci2seq:
in_common = list(set(common).intersection(old.loci2seq[idl]))
if len(in_common) > 0:
logger.debug("_split_cluster: in_common %s with pair 1" % (len(in_common)))
new_c.add_id_member(in_common, idl)
old.loci2seq[idl] = list(set(old.loci2seq[idl]) - set(common))
logger.debug("_split_cluster: len old %s with pair 1" % (len(old.loci2seq)))
for idl in new.loci2seq:
in_common = list(set(common).intersection(new.loci2seq[idl]))
if len(in_common) > 0:
logger.debug("_split_cluster: in_common %s with pair 2" % (len(in_common)))
new_c.add_id_member(in_common, idl)
new.loci2seq[idl] = list(set(new.loci2seq[idl]) - set(common))
logger.debug("_split_cluster: len old %s with pair 2" % (len(new.loci2seq)))
old.update()
new.update()
old.loci2seq = {k: v for k, v in old.loci2seq.iteritems() if len(v) > 0}
new.loci2seq = {k: v for k, v in new.loci2seq.iteritems() if len(v) > 0}
c[n] = new
c[p[0]] = old
c[p[1]] = new
return c
|
python
|
{
"resource": ""
}
|
q13854
|
_split_cluster_by_most_vote
|
train
|
def _split_cluster_by_most_vote(c, p):
"""split cluster by most-vote strategy"""
old, new = c[p[0]], c[p[1]]
old_size = _get_seqs(old)
new_size = _get_seqs(new)
logger.debug("_most_vote: size of %s with %s - %s with %s" % (old.id, len(old_size), new.id, len(new_size)))
if len(old_size) > len(new_size):
keep, remove = old, new
else:
keep, remove = new, old
common = list(set(old_size).intersection(new_size))
logger.debug("_most_vote: keep %s remove %s with common %s" % (keep.id, remove.id, len(common)))
for idl in remove.loci2seq:
if len(common) > 0:
remove.loci2seq[idl] = list(set(remove.loci2seq[idl]) - set(common))
keep.loci2seq = {k: v for k, v in keep.loci2seq.iteritems() if len(v) > 0}
remove.loci2seq = {k: v for k, v in remove.loci2seq.iteritems() if len(v) > 0}
keep.update()
remove.update()
c[keep.id] = keep
c[remove.id] = remove
return c
|
python
|
{
"resource": ""
}
|
q13855
|
_clean_cluster
|
train
|
def _clean_cluster(list_c):
"""
Remove cluster with less than 10 sequences and
loci with size smaller than 60%
"""
global REMOVED
init = len(list_c)
list_c = {k: v for k, v in list_c.iteritems() if len(_get_seqs(v)) > parameters.min_seqs}
logger.debug("_clean_cluster: number of clusters %s " % len(list_c.keys()))
list_c = {k: _select_loci(v) for k, v in list_c.iteritems()}
end = len(list_c)
REMOVED += init - end
return list_c
|
python
|
{
"resource": ""
}
|
q13856
|
_select_loci
|
train
|
def _select_loci(c):
"""Select only loci with most abundant sequences"""
loci_len = {k: len(v) for k, v in c.loci2seq.iteritems()}
logger.debug("_select_loci: number of loci %s" % len(c.loci2seq.keys()))
loci_len_sort = sorted(loci_len.iteritems(), key=operator.itemgetter(1), reverse=True)
max_size = loci_len_sort[0][1]
logger.debug("_select_loci: max size %s" % max_size)
loci_clean = {locus: c.loci2seq[locus] for locus, size in loci_len_sort if size > 0.8 * max_size}
c.loci2seq = loci_clean
removed = list(set(c.idmembers.keys()) - set(_get_seqs(c)))
c.add_id_member(removed, loci_len_sort[0][0])
logger.debug("_select_loci: number of loci %s after cleaning" % len(c.loci2seq.keys()))
return c
|
python
|
{
"resource": ""
}
|
q13857
|
_solve_loci_deprecated
|
train
|
def _solve_loci_deprecated(c, locilen_sorted, seen_seqs, filtered, maxseq, n_cluster):
"""internal function to reduce loci complexity
The function will read all the loci in a cluster of
sequences and will determine if all loci are part
of the same transcriptional unit (TU) by the most-voted locus
or by exclusion of common sequences that are the
minority of two loci.
:param c: class cluster
:param locilen_sorted: list of loci sorted by size
:param seen_seqs: list of seen sequences
:param filtered: final TU list
:param maxseq: bigger locus
"param n_cluster: integer with index of different TU"
:return
c: updated class cluster
seen_seqs: updated list of sequences
filtered: updated dict of TUs
n_cluster: updated int with current index of TUs
"""
first_run = 0
seen_seqs = list()
n_cluster += 1
logger.debug("_solve_loci:new cluster %s" % n_cluster)
new_c = cluster(n_cluster)
for idl, lenl in locilen_sorted:
locus_seqs = c.loci2seq[idl]
if first_run == 0:
seen_seqs = locus_seqs
first_run = 1
first_idl = idl
intersect = list(set(seen_seqs).intersection(locus_seqs))
common = 0
if intersect:
common = len(intersect)*1.0/min(len(seen_seqs), len(locus_seqs))
logger.debug("_sole_loci:id %s idl %s len %s max %s seen %s inter %s common %s " % (c.id, idl, lenl, maxseq, len(seen_seqs), len(intersect), common))
if common*1.0 >= 0.6:
if lenl*1.0 >= 0.6*maxseq:
c, new_c, seen_seqs = _merge_loci_in_cluster(c, new_c, idl, seen_seqs)
else:
c, new_c, seen_seqs = _merge_with_first_loci(c, new_c, first_idl, idl, seen_seqs)
else:
c = _remove_seqs_from_loci(c, idl, seen_seqs)
filtered[n_cluster] = new_c
return c, seen_seqs, filtered, n_cluster
|
python
|
{
"resource": ""
}
|
q13858
|
_get_description
|
train
|
def _get_description(string):
"""
Parse annotation to get nice description
"""
ann = set()
if not string:
return "This cluster is inter-genic."
for item in string:
for db in item:
ann = ann.union(set(item[db]))
return "annotated as: %s ..." % ",".join(list(ann)[:3])
|
python
|
{
"resource": ""
}
|
q13859
|
_set_format
|
train
|
def _set_format(profile):
"""
Prepare dict to list of y values with same x
"""
x = set()
for sample in profile:
x = x.union(set(profile[sample].keys()))
if not x:
return ''
end, start = max(x), min(x)
x = range(start, end, 4)
scaled_profile = defaultdict(list)
for pos in x:
for sample in profile:
y = _get_closer(profile[sample], pos)
if y:
scaled_profile[sample].append(profile[sample][y])
else:
scaled_profile[sample].append(0)
return {'x': list(x), 'y': scaled_profile, 'names': scaled_profile.keys()}
|
python
|
{
"resource": ""
}
|
q13860
|
_insert_data
|
train
|
def _insert_data(con, data):
"""
insert line for each cluster
"""
with con:
cur = con.cursor()
cur.execute("DROP TABLE IF EXISTS clusters;")
cur.execute("CREATE TABLE clusters(Id INT, Description TEXT, Locus TEXT, Annotation TEXT, Sequences TEXT, Profile TXT, Precursor TXT)")
for c in data[0]:
locus = json.dumps(data[0][c]['loci'])
annotation = json.dumps(data[0][c]['ann'])
description = _get_description(data[0][c]['ann'])
sequences = json.dumps(_get_sequences(data[0][c]))
keys = data[0][c]['freq'][0].values()[0].keys()
profile = "Not available."
if 'profile' in data[0][c]:
profile = json.dumps(_set_format(data[0][c]['profile']))
precursor = json.dumps(data[0][c].get('precursor'))
cur.execute("INSERT INTO clusters VALUES(%s, '%s', '%s', '%s', '%s', '%s', '%s')" % (c, description, locus, annotation, sequences, profile, precursor))
|
python
|
{
"resource": ""
}
|
q13861
|
parse_align_file
|
train
|
def parse_align_file(file_in):
"""
Parse sam files with aligned sequences
"""
loc_id = 1
bedfile_clusters = ""
bamfile = pybedtools.BedTool(file_in)
bed = pybedtools.BedTool.bam_to_bed(bamfile)
for c, start, end, name, q, strand in bed:
loc_id += 1
bedfile_clusters += "%s\t%s\t%s\t%s\t%s\t%s\n" % \
(c, start, end, name, loc_id, strand)
return bedfile_clusters
|
python
|
{
"resource": ""
}
|
q13862
|
parse_ma_file
|
train
|
def parse_ma_file(seq_obj, in_file):
"""
read seqs.ma file and create dict with
sequence object
"""
name = ""
index = 1
total = defaultdict(int)
with open(in_file) as handle_in:
line = handle_in.readline().strip()
cols = line.split("\t")
samples = cols[2:]
for line in handle_in:
line = line.strip()
cols = line.split("\t")
name = int(cols[0].replace("seq_", ""))
seq = cols[1]
exp = {}
for i in range(len(samples)):
exp[samples[i]] = int(cols[i+2])
total[samples[i]] += int(cols[i+2])
index = index+1
if name in seq_obj:
seq_obj[name].set_freq(exp)
seq_obj[name].set_seq(seq)
# new_s = sequence(seq, exp, index)
# seq_l[name] = new_s
seq_obj = _normalize_seqs(seq_obj, total)
return seq_obj, total, index
|
python
|
{
"resource": ""
}
|
q13863
|
_position_in_feature
|
train
|
def _position_in_feature(pos_a, pos_b):
"""return distance to 3' and 5' end of the feature"""
strd = "-"
if pos_a[2] in pos_b[2]:
strd = "+"
if pos_a[2] in "+" and pos_b[2] in "+":
lento5 = pos_a[0] - pos_b[1] + 1
lento3 = pos_a[1] - pos_b[1] + 1
if pos_a[2] in "+" and pos_b[2] in "-":
lento5 = pos_a[1] - pos_b[0] + 1
lento3 = pos_a[0] - pos_b[1] + 1
if pos_a[2] in "-" and pos_b[2] in "+":
lento5 = pos_a[0] - pos_b[1] + 1
lento3 = pos_a[1] - pos_b[0] + 1
if pos_a[2] in "-" and pos_b[2] in "-":
lento3 = pos_a[0] - pos_b[0] + 1
lento5 = pos_a[1] - pos_b[1] + 1
else:
lento5 = pos_a[0] - pos_b[0] + 1
lento3 = pos_a[1] - pos_b[1] + 1
return lento5, lento3, strd
|
python
|
{
"resource": ""
}
|
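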
q13864
|
anncluster
|
train
|
def anncluster(c, clus_obj, db, type_ann, feature_id="name"):
"""intersect transcription position with annotation files"""
id_sa, id_ea, id_id, id_idl, id_sta = 1, 2, 3, 4, 5
if type_ann == "bed":
id_sb = 7
id_eb = 8
id_stb = 11
id_tag = 9
ida = 0
clus_id = clus_obj.clus
loci_id = clus_obj.loci
db = os.path.splitext(db)[0]
logger.debug("Type:%s\n" % type_ann)
for cols in c.features():
if type_ann == "gtf":
cb, sb, eb, stb, db, tag = read_gtf_line(cols[6:], feature_id)
else:
sb = int(cols[id_sb])
eb = int(cols[id_eb])
stb = cols[id_stb]
tag = cols[id_tag]
id = int(cols[id_id])
idl = int(cols[id_idl])
if (id in clus_id):
clus = clus_id[id]
sa = int(cols[id_sa])
ea = int(cols[id_ea])
ida += 1
lento5, lento3, strd = _position_in_feature([sa, ea, cols[id_sta]], [sb, eb, stb])
if db in loci_id[idl].db_ann:
ann = annotation(db, tag, strd, lento5, lento3)
tdb = loci_id[idl].db_ann[db]
tdb.add_db_ann(ida, ann)
loci_id[idl].add_db(db, tdb)
else:
ann = annotation(db, tag, strd, lento5, lento3)
tdb = dbannotation(1)
tdb.add_db_ann(ida, ann)
loci_id[idl].add_db(db, tdb)
clus_id[id] = clus
clus_obj.clus = clus_id
clus_obj.loci = loci_id
return clus_obj
|
python
|
{
"resource": ""
}
|
q13865
|
detect_complexity
|
train
|
def detect_complexity(bam_in, genome, out):
"""
genome coverage of small RNA
"""
if not genome:
logger.info("No genome given. skipping.")
return None
out_file = op.join(out, op.basename(bam_in) + "_cov.tsv")
if file_exists(out_file):
return None
fai = genome + ".fai"
cov = pybedtools.BedTool(bam_in).genome_coverage(g=fai, max=1)
cov.saveas(out_file)
total = 0
for region in cov:
if region[0] == "genome" and int(region[1]) != 0:
total += float(region[4])
logger.info("Total genome with sequences: %s " % total)
|
python
|
{
"resource": ""
}
|
q13866
|
detect_clusters
|
train
|
def detect_clusters(c, current_seq, MIN_SEQ, non_un_gl=False):
"""
Parse the merge file of sequences position to create clusters that will have all
sequences that shared any position on the genome
:param c: file from bedtools with merge sequence positions
:param current_seq: list of sequences
:param MIN_SEQ: int cutoff to keep the cluster or not. 10 as default
:return: object with information about:
* cluster
* dict with sequences (as keys) and cluster_id (as value)
* sequences
* loci
"""
current_loci = {}
current_clus = {}
# sequence2clusters = [set()] * (max(current_seq.keys()) + 2)
sequence2clusters = defaultdict(set)
lindex = 0
eindex = 0
previous_id = 0
for line in c.features():
c, start, end, name, score, strand, c_id = line
name = int(name.replace('seq_', ''))
pos = int(start) if strand == "+" else int(end)
if name not in current_seq:
continue
if c.find('Un_gl') > -1 and non_un_gl:
continue
if c_id != previous_id:
if previous_id > 0:
if len(current_clus[eindex].idmembers) < MIN_SEQ:
for s in current_clus[eindex].idmembers:
sequence2clusters[s] = sequence2clusters[s] - set([eindex])
del current_clus[eindex]
logger.debug("detect_cluster: %s %s %s" % (c_id, previous_id, name))
lindex += 1
eindex += 1
current_clus[eindex] = cluster(eindex)
newpos = position(lindex, c, start, end, strand)
current_loci[lindex] = newpos
# update locus, sequences in each line
current_loci[lindex].end = int(end)
current_loci[lindex].coverage[pos] += 1
size = range(pos, pos + current_seq[name].len)
current_loci[lindex].counts.update(dict(zip(size, [current_seq[name].total()] * current_seq[name].len)))
current_clus[eindex].idmembers[name] = 1
current_clus[eindex].add_id_member([name], lindex)
current_seq[name].add_pos(lindex, pos)
# current_seq[name].align = 1
previous_id = c_id
sequence2clusters[name].add(eindex)
logger.info("%s Clusters read" % eindex)
# merge cluster with shared sequences
metacluster_obj, cluster_id = _find_metaclusters(current_clus, sequence2clusters, current_seq, MIN_SEQ)
return cluster_info_obj(current_clus, metacluster_obj, current_loci, current_seq)
|
python
|
{
"resource": ""
}
|
q13867
|
peak_calling
|
train
|
def peak_calling(clus_obj):
"""
Run peak calling inside each cluster
"""
new_cluster = {}
for cid in clus_obj.clus:
cluster = clus_obj.clus[cid]
cluster.update()
logger.debug("peak calling for %s" % cid)
bigger = cluster.locimaxid
if bigger in clus_obj.loci:
s, e = min(clus_obj.loci[bigger].counts.keys()), max(clus_obj.loci[bigger].counts.keys())
scale = s
if clus_obj.loci[bigger].strand == "-":
scale = e
logger.debug("bigger %s at %s-%s" % (bigger, s, e))
dt = np.array([0] * (abs(e - s) + 12))
for pos in clus_obj.loci[bigger].counts:
ss = abs(int(pos) - scale) + 5
dt[ss] += clus_obj.loci[bigger].counts[pos]
x = np.array(range(0, len(dt)))
logger.debug("x %s and y %s" % (x, dt))
# tab = pd.DataFrame({'x': x, 'y': dt})
# tab.to_csv( str(cid) + "peaks.csv", mode='w', header=False, index=False)
if len(x) > 35 + 12:
peaks = list(np.array(pysen.pysenMMean(x, dt)) - 5)
logger.debug(peaks)
else:
peaks = ['short']
cluster.peaks = peaks
new_cluster[cid] = cluster
clus_obj.clus = new_cluster
return clus_obj
|
python
|
{
"resource": ""
}
|
q13868
|
simulate
|
train
|
def simulate(args):
"""Main function that manage simulatin of small RNAs"""
if args.fasta:
name = None
seq = ""
reads = dict()
with open(args.fasta) as in_handle:
for line in in_handle:
if line.startswith(">"):
if name:
reads.update(_generate_reads(seq, name))
seq = ""
name = line[1:-1]
else:
seq += line.strip()
reads.update(_generate_reads(seq, name))
_write_reads(reads, args.out)
|
python
|
{
"resource": ""
}
|
q13869
|
_generate_reads
|
train
|
def _generate_reads(seq, name):
"""Main function that create reads from precursors"""
reads = dict()
if len(seq) < 130 and len(seq) > 70:
reads.update(_mature(seq[:40], 0, name))
reads.update(_mature(seq[-40:], len(seq) - 40, name))
reads.update(_noise(seq, name))
reads.update(_noise(seq, name, 25))
return reads
|
python
|
{
"resource": ""
}
|
q13870
|
_write_reads
|
train
|
def _write_reads(reads, prefix):
"""
Write fasta file, ma file and real position
"""
out_ma = prefix + ".ma"
out_fasta = prefix + ".fasta"
out_real = prefix + ".txt"
with open(out_ma, 'w') as ma_handle:
print("id\tseq\tsample", file=ma_handle, end="")
with open(out_fasta, 'w') as fa_handle:
with open(out_real, 'w') as read_handle:
for idx, r in enumerate(reads):
info = r.split("_")
print("seq_%s\t%s\t%s" % (idx, reads[r][0], reads[r][1]), file=ma_handle, end="")
print(">seq_%s\n%s" % (idx, reads[r][0]), file=fa_handle, end="")
print("%s\t%s\t%s\t%s\t%s\t%s\t%s" % (idx, r, reads[r][0], reads[r][1], info[1], info[2], info[3]), file=read_handle, end="")
|
python
|
{
"resource": ""
}
|
q13871
|
stats
|
train
|
def stats(args):
"""Create stats from the analysis
"""
logger.info("Reading sequeces")
data = parse_ma_file(args.ma)
logger.info("Get sequences from sam")
is_align = _read_sam(args.sam)
is_json, is_db = _read_json(args.json)
res = _summarise_sam(data, is_align, is_json, is_db)
_write_suma(res, os.path.join(args.out, "stats_align.dat"))
logger.info("Done")
|
python
|
{
"resource": ""
}
|
q13872
|
_read_json
|
train
|
def _read_json(fn_json):
"""read json information"""
is_json = set()
is_db = {}
with open(fn_json) as handle:
data = json.load(handle)
# original Py 2.7 code
#for item in data[0].values():
# seqs_name = map(lambda (x): x.keys(), item['seqs'])
# rewrite by 2to3
for item in list(data[0].values()):
seqs_name = [list(x.keys()) for x in item['seqs']]
db_name = item['valid'] if "valid" in item else None
[is_json.add(name[0]) for name in seqs_name]
if db_name:
[is_db.update({name[0]: ",".join(db_name)}) for name in seqs_name]
return is_json, is_db
|
python
|
{
"resource": ""
}
|
q13873
|
_do_run
|
train
|
def _do_run(cmd, checks, log_stdout=False):
"""Perform running and check results, raising errors for issues.
"""
cmd, shell_arg, executable_arg = _normalize_cmd_args(cmd)
s = subprocess.Popen(cmd, shell=shell_arg, executable=executable_arg,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT, close_fds=True)
debug_stdout = collections.deque(maxlen=100)
while 1:
line = s.stdout.readline()
if line:
debug_stdout.append(line)
if log_stdout:
logger.debug(line.rstrip())
else:
logger.debug(line.rstrip())
exitcode = s.poll()
if exitcode is not None:
for line in s.stdout:
debug_stdout.append(line)
if exitcode is not None and exitcode != 0:
error_msg = " ".join(cmd) if not isinstance(cmd, basestring) else cmd
error_msg += "\n"
error_msg += "".join(debug_stdout)
s.communicate()
s.stdout.close()
raise subprocess.CalledProcessError(exitcode, error_msg)
else:
break
s.communicate()
s.stdout.close()
# Check for problems not identified by shell return codes
if checks:
for check in checks:
if not check():
raise IOError("External command failed")
|
python
|
{
"resource": ""
}
|
q13874
|
_normalize_seqs
|
train
|
def _normalize_seqs(s, t):
"""Normalize to RPM"""
for ids in s:
obj = s[ids]
[obj.norm_freq.update({sample: 1.0 * obj.freq[sample] / (t[sample]+1) * 1000000}) for sample in obj.norm_freq]
s[ids] = obj
return s
|
python
|
{
"resource": ""
}
|
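The normalization in _normalize_seqs is reads-per-million with a +1 on the library total: norm = freq / (total + 1) * 1e6. A toy example with made-up counts:

```python
raw_freq = {"sampleA": 50, "sampleB": 5}
totals = {"sampleA": 1000000, "sampleB": 100000}
norm = {s: 1.0 * raw_freq[s] / (totals[s] + 1) * 1e6 for s in raw_freq}
print(norm)  # both ~50 RPM, so counts become comparable across library sizes
```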
q13875
|
prepare_bam
|
train
|
def prepare_bam(bam_in, precursors):
"""
Clean BAM file to keep only position inside the bigger cluster
"""
# use pybedtools to keep valid positions
# intersect option with -b bigger_cluster_loci
a = pybedtools.BedTool(bam_in)
b = pybedtools.BedTool(precursors)
c = a.intersect(b, u=True)
out_file = utils.splitext_plus(op.basename(bam_in))[0] + "_clean.bam"
c.saveas(out_file)
return op.abspath(out_file)
|
python
|
{
"resource": ""
}
|
q13876
|
_reorder_columns
|
train
|
def _reorder_columns(bed_file):
"""
Reorder columns to be compatible with CoRaL
"""
new_bed = utils.splitext_plus(bed_file)[0] + '_order.bed'
with open(bed_file) as in_handle:
with open(new_bed, 'w') as out_handle:
for line in in_handle:
cols = line.strip().split("\t")
cols[3] = _select_anno(cols[3]) + "_" + cols[4]
cols[4] = "0"
print("\t".join(cols), file=out_handle, end="")
return new_bed
|
python
|
{
"resource": ""
}
|
q13877
|
detect_regions
|
train
|
def detect_regions(bam_in, bed_file, out_dir, prefix):
"""
Detect regions using first CoRaL module
"""
bed_file = _reorder_columns(bed_file)
counts_reads_cmd = ("coverageBed -s -counts -b {bam_in} "
"-a {bed_file} | sort -k4,4 "
"> {out_dir}/loci.cov")
# with tx_tmpdir() as temp_dir:
with utils.chdir(out_dir):
run(counts_reads_cmd.format(min_trimmed_read_len=min_trimmed_read_len, max_trimmed_read_len=max_trimmed_read_len, **locals()), "Run counts_reads")
loci_file = _fix_score_column(op.join(out_dir, "loci.cov"))
return loci_file
|
python
|
{
"resource": ""
}
|
q13878
|
_reads_per_position
|
train
|
def _reads_per_position(bam_in, loci_file, out_dir):
"""
Create input for compute entropy
"""
data = Counter()
a = pybedtools.BedTool(bam_in)
b = pybedtools.BedTool(loci_file)
c = a.intersect(b, s=True, bed=True, wo=True)
for line in c:
end = int(line[1]) + 1 + int(line[2]) if line[5] == "+" else int(line[1]) + 1
start = int(line[1]) + 1 if line[5] == "+" else int(line[1]) + 1 + int(line[2])
side5 = "%s\t5p\t%s" % (line[15], start)
side3 = "%s\t3p\t%s" % (line[15], end)
data[side5] += 1
data[side3] += 1
counts_reads = op.join(out_dir, 'locus_readpos.counts')
with open(counts_reads, 'w') as out_handle:
for k in data:
print(k, file=out_handle, end="")
return counts_reads
|
python
|
{
"resource": ""
}
|
q13879
|
create_features
|
train
|
def create_features(bam_in, loci_file, reference, out_dir):
"""
Use feature extraction module from CoRaL
"""
lenvec_plus = op.join(out_dir, 'genomic_lenvec.plus')
lenvec_minus = op.join(out_dir, 'genomic_lenvec.minus')
compute_genomic_cmd = ("compute_genomic_lenvectors "
"{bam_in} {lenvec_plus} "
"{lenvec_minus} "
"{min_len} "
"{max_len} ")
index_genomic_cmd = ("index_genomic_lenvectors "
"{lenvec} ")
genomic_lenvec = op.join(out_dir, 'genomic_lenvec')
feat_len_file = op.join(out_dir, 'feat_lengths.txt')
compute_locus_cmd = ("compute_locus_lenvectors "
"{loci_file} "
"{genomic_lenvec} "
"{min_len} "
"{max_len} "
"> {feat_len_file}")
cov_S_file = op.join(out_dir, 'loci.cov_anti')
coverage_anti_cmd = ("coverageBed -S -counts -b "
"{bam_in} -a {loci_file} "
"> {cov_S_file}")
feat_posentropy = op.join(out_dir, 'feat_posentropy.txt')
entropy_cmd = ("compute_locus_entropy.rb "
"{counts_reads} "
"> {feat_posentropy}")
with utils.chdir(out_dir):
run(compute_genomic_cmd.format(min_len=min_trimmed_read_len, max_len=max_trimmed_read_len, **locals()), "Run compute_genomic")
run(index_genomic_cmd.format(lenvec=lenvec_plus), "Run index in plus")
run(index_genomic_cmd.format(lenvec=lenvec_minus), "Run index in minus")
run(compute_locus_cmd.format(min_len=min_trimmed_read_len, max_len=max_trimmed_read_len, **locals()), "Run compute locus")
run(coverage_anti_cmd.format(**locals()), "Run coverage antisense")
feat_antisense = _order_antisense_column(cov_S_file, min_trimmed_read_len)
counts_reads = _reads_per_position(bam_in, loci_file, out_dir)
run(entropy_cmd.format(**locals()), "Run entropy")
rnafold = calculate_structure(loci_file, reference)
|
python
|
{
"resource": ""
}
|
q13880
|
report
|
train
|
def report(args):
"""
Create report in html format
"""
logger.info("reading sequeces")
data = load_data(args.json)
logger.info("create profile")
data = make_profile(data, os.path.join(args.out, "profiles"), args)
logger.info("create database")
make_database(data, "seqcluster.db", args.out)
logger.info("Done. Download https://github.com/lpantano/seqclusterViz/archive/master.zip to browse the output.")
|
python
|
{
"resource": ""
}
|
q13881
|
_summarize_peaks
|
train
|
def _summarize_peaks(peaks):
"""
    merge peak positions that are closer than 10 bp
"""
previous = peaks[0]
new_peaks = [previous]
for pos in peaks:
if pos > previous + 10:
            new_peaks.append(pos)
previous = pos
return new_peaks
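
# --- usage sketch (not part of the original module) ---
# Positions are assumed to be sorted; peaks closer than 10 bp to the last
# kept peak are merged into it.
if __name__ == "__main__":
    assert _summarize_peaks([100, 105, 109, 125, 300]) == [100, 125, 300]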
|
python
|
{
"resource": ""
}
|
q13882
|
find_mature
|
train
|
def find_mature(x, y, win=10):
"""
    Window approach to find hills in the expression profile
"""
previous = min(y)
peaks = []
    # scan the expression profile in windows of `win` positions
    intervals = range(0, len(y), win)
for pos in intervals:
if y[pos] > previous * 10:
previous = y[pos]
            peaks.append(pos)
    peaks = _summarize_peaks(peaks)
    return peaks
|
python
|
{
"resource": ""
}
|
q13883
|
collapse
|
train
|
def collapse(in_file):
"""collapse identical sequences and keep Q"""
keep = Counter()
with open_fastq(in_file) as handle:
for line in handle:
if line.startswith("@"):
if line.find("UMI") > -1:
logger.info("Find UMI tags in read names, collapsing by UMI.")
return collapse_umi(in_file)
seq = handle.next().strip()
handle.next()
qual = handle.next().strip()
if seq in keep:
keep[seq].update(qual)
else:
keep[seq] = quality(qual)
logger.info("Sequences loaded: %s" % len(keep))
return keep
|
python
|
{
"resource": ""
}
|
q13884
|
collapse_umi
|
train
|
def collapse_umi(in_file):
"""collapse reads using UMI tags"""
keep = defaultdict(dict)
with open_fastq(in_file) as handle:
for line in handle:
if line.startswith("@"):
m = re.search('UMI_([ATGC]*)', line.strip())
umis = m.group(0)
seq = handle.next().strip()
handle.next()
qual = handle.next().strip()
if (umis, seq) in keep:
keep[(umis, seq)][1].update(qual)
keep[(umis, seq)][0].update(seq)
else:
keep[(umis, seq)] = [umi(seq), quality(qual)]
logger.info("Sequences loaded: %s" % len(keep))
return keep
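
# --- usage sketch (not part of the original module) ---
# The UMI is expected inside the read name (e.g. "@read1_UMI_ACGTAC");
# note that group(0) keeps the "UMI_" prefix, so keys are "UMI_<tag>".
# Assumes the module imports `re`, as the function above does.
if __name__ == "__main__":
    m = re.search('UMI_([ATGC]*)', "@read1_UMI_ACGTAC")
    assert m.group(0) == "UMI_ACGTAC"
    assert m.group(1) == "ACGTAC"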
|
python
|
{
"resource": ""
}
|
q13885
|
open_fastq
|
train
|
def open_fastq(in_file):
""" open a fastq file, using gzip if it is gzipped
from bcbio package
"""
_, ext = os.path.splitext(in_file)
if ext == ".gz":
return gzip.open(in_file, 'rb')
if ext in [".fastq", ".fq", ".fasta", ".fa"]:
return open(in_file, 'r')
    raise ValueError("File needs to be fastq|fasta|fq|fa [.gz]")
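
# --- usage sketch (not part of the original module) ---
# The branch is chosen purely by extension: ".gz" files are opened with
# gzip, plain fastq/fasta as text. "reads.fastq" is a placeholder path.
if __name__ == "__main__":
    with open_fastq("reads.fastq") as handle:
        header = handle.readline()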
|
python
|
{
"resource": ""
}
|
q13886
|
collapse_fastq
|
train
|
def collapse_fastq(args):
"""collapse fasq files after adapter trimming
"""
try:
umi_fn = args.fastq
if _is_umi(args.fastq):
umis = collapse(args.fastq)
umi_fn = os.path.join(args.out, splitext_plus(os.path.basename(args.fastq))[0] + "_umi_trimmed.fastq")
write_output(umi_fn, umis, args.minimum)
seqs = collapse(umi_fn)
out_file = splitext_plus(os.path.basename(args.fastq))[0] + "_trimmed.fastq"
except IOError as e:
logger.error("I/O error({0}): {1}".format(e.errno, e.strerror))
raise "Can not read file"
out_file = os.path.join(args.out, out_file)
write_output(out_file, seqs, args.minimum)
return out_file
|
python
|
{
"resource": ""
}
|
q13887
|
filter_doctree_for_slides
|
train
|
def filter_doctree_for_slides(doctree):
"""Given a doctree, remove all non-slide related elements from it."""
current = 0
num_children = len(doctree.children)
while current < num_children:
child = doctree.children[current]
child.replace_self(
child.traverse(no_autoslides_filter)
)
if len(doctree.children) == num_children:
# nothing removed, increment current
current += 1
else:
# a node was removed; retain current and update length
num_children = len(doctree.children)
|
python
|
{
"resource": ""
}
|
q13888
|
TransformNextSlides._make_title_node
|
train
|
def _make_title_node(self, node, increment=True):
"""Generate a new title node for ``node``.
``node`` is a ``nextslide`` node. The title will use the node's
parent's title, or the title specified as an argument.
"""
parent_title_node = node.parent.next_node(nodes.title)
nextslide_info = getattr(
parent_title_node, 'nextslide_info',
(parent_title_node.deepcopy().children, 1),
)
nextslide_info = (
nextslide_info[0],
nextslide_info[1] + 1,
)
if node.args:
textnodes, messages = node.state.inline_text(
node.args[0],
1,
)
new_title = nodes.title(node.args[0], '', *textnodes)
else:
title_nodes = nextslide_info[0][:]
if 'increment' in node.attributes:
title_nodes.append(
nodes.Text(' (%s)' % nextslide_info[1])
)
new_title = nodes.title(
'', '',
*title_nodes
)
new_title.nextslide_info = nextslide_info
return new_title
|
python
|
{
"resource": ""
}
|
q13889
|
slideconf.apply
|
train
|
def apply(self, builder):
"""Apply the Slide Configuration to a Builder."""
if 'theme' in self.attributes:
builder.apply_theme(
self.attributes['theme'],
builder.theme_options,
)
|
python
|
{
"resource": ""
}
|
q13890
|
slideconf.get_conf
|
train
|
def get_conf(cls, builder, doctree=None):
"""Return a dictionary of slide configuration for this doctree."""
# set up the default conf
result = {
'theme': builder.config.slide_theme,
'autoslides': builder.config.autoslides,
'slide_classes': [],
}
# now look for a slideconf node in the doctree and update the conf
if doctree:
conf_node = cls.get(doctree)
if conf_node:
result.update(conf_node.attributes)
return result
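
# --- usage sketch (not part of the original module) ---
# Only `builder.config` is consulted when no doctree is given, so a tiny
# hypothetical stand-in is enough to see the defaults; `slideconf` is the
# containing node class.
if __name__ == "__main__":
    class _FakeConfig(object):
        slide_theme = "slides"
        autoslides = True
    class _FakeBuilder(object):
        config = _FakeConfig()
    conf = slideconf.get_conf(_FakeBuilder())
    assert conf == {"theme": "slides", "autoslides": True, "slide_classes": []}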
|
python
|
{
"resource": ""
}
|
q13891
|
__fix_context
|
train
|
def __fix_context(context):
"""Return a new context dict based on original context.
The new context will be a copy of the original, and some mutable
members (such as script and css files) will also be copied to
prevent polluting shared context.
"""
COPY_LISTS = ('script_files', 'css_files',)
for attr in COPY_LISTS:
if attr in context:
context[attr] = context[attr][:]
return context
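
# --- usage sketch (not part of the original module) ---
# Callers pass a shallow copy of the shared context; after fixing, the
# script/css lists can be mutated without leaking into the original.
if __name__ == "__main__":
    shared = {"script_files": ["a.js"], "css_files": ["a.css"], "title": "x"}
    local = __fix_context(dict(shared))
    local["script_files"].append("slides.js")
    assert shared["script_files"] == ["a.js"]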
|
python
|
{
"resource": ""
}
|
q13892
|
SlideData.get_slide_context
|
train
|
def get_slide_context(self):
"""Return the context dict for rendering this slide."""
return {
'title': self.title,
'level': self.level,
'content': self.content,
'classes': self.classes,
'slide_classes': self._filter_classes(exclude='content-'),
'content_classes': self._filter_classes(include='content-'),
'slide_number': self.slide_number,
'config': self._translator.builder.config,
'id': self.id,
}
|
python
|
{
"resource": ""
}
|
q13893
|
BaseSlideTranslator._add_slide_number
|
train
|
def _add_slide_number(self, slide_no):
"""Add the slide number to the output if enabled."""
if self.builder.config.slide_numbers:
self.body.append(
'\n<div class="slide-no">%s</div>\n' % (slide_no,),
)
|
python
|
{
"resource": ""
}
|
q13894
|
BaseSlideTranslator._add_slide_footer
|
train
|
def _add_slide_footer(self, slide_no):
"""Add the slide footer to the output if enabled."""
if self.builder.config.slide_footer:
self.body.append(
'\n<div class="slide-footer">%s</div>\n' % (
self.builder.config.slide_footer,
),
)
|
python
|
{
"resource": ""
}
|
q13895
|
inspect_config
|
train
|
def inspect_config(app):
"""Inspect the Sphinx configuration and update for slide-linking.
If links from HTML to slides are enabled, make sure the sidebar
configuration includes the template and add the necessary theme
directory as a loader so the sidebar template can be located.
If the sidebar configuration already includes ``slidelink.html``
(in any key), the configuration will not be changed. If the
configuration is not specified, we'll attempt to emulate what
Sphinx does by default.
"""
# avoid import cycles :/
from hieroglyph import writer
# only reconfigure Sphinx if we're generating HTML
if app.builder.name not in HTML_BUILDERS:
return
if app.config.slide_link_html_to_slides:
# add the slide theme dir as a Loader
app.builder.templates.loaders.append(
SphinxFileSystemLoader(
os.path.join(
os.path.dirname(__file__), 'themes', 'slides',
)
)
)
# add the "show slides" sidebar template
if not app.config.html_sidebars:
# no sidebars explicitly defined, mimic the old style
# behavior + slide links
app.config.html_sidebars = {
'**': [
'localtoc.html',
'relations.html',
'sourcelink.html',
SLIDELINK_TEMPLATE,
'searchbox.html',
],
}
else:
# sidebars defined, add the template if needed
included = False
            for glob, templates in app.config.html_sidebars.items():
if SLIDELINK_TEMPLATE in templates:
included = True
break
if not included:
# the slidelink template was not included; append it
# to the list of sidebars for all templates
app.config.html_sidebars.setdefault('**', []).append(
SLIDELINK_TEMPLATE,
)
if app.config.slide_link_html_sections_to_slides:
# fix up the HTML Translator
if sphinx.version_info >= (1, 6, 0):
override_translator = type(
'SlideLinkTranslator',
(app.builder.get_translator_class(), object),
{
'depart_title': writer.depart_title,
},
)
app.set_translator(app.builder, override_translator)
else:
app.builder.translator_class = type(
'SlideLinkTranslator',
(app.builder.translator_class, object),
{
'depart_title': writer.depart_title,
},
)
|
python
|
{
"resource": ""
}
|
q13896
|
add_link
|
train
|
def add_link(app, pagename, templatename, context, doctree):
"""Add the slides link to the HTML context."""
# we can only show the slidelink if we can resolve the filename
context['show_slidelink'] = (
app.config.slide_link_html_to_slides and
hasattr(app.builder, 'get_outfilename')
)
if context['show_slidelink']:
context['slide_path'] = slide_path(app.builder, pagename)
|
python
|
{
"resource": ""
}
|
q13897
|
AbstractSlideBuilder.apply_theme
|
train
|
def apply_theme(self, themename, themeoptions):
"""Apply a new theme to the document.
This will store the existing theme configuration and apply a new one.
"""
# push the existing values onto the Stack
self._theme_stack.append(
(self.theme, self.theme_options)
)
theme_factory = HTMLThemeFactory(self.app)
theme_factory.load_additional_themes(self.get_builtin_theme_dirs() + self.config.slide_theme_path)
self.theme = theme_factory.create(themename)
self.theme_options = themeoptions.copy()
self.templates.init(self, self.theme)
self.templates.environment.filters['json'] = json.dumps
if self.theme not in self._additional_themes:
self._additional_themes.append(self.theme)
|
python
|
{
"resource": ""
}
|
q13898
|
AbstractSlideBuilder.post_process_images
|
train
|
def post_process_images(self, doctree):
"""Pick the best candidate for all image URIs."""
super(AbstractSlideBuilder, self).post_process_images(doctree)
# figure out where this doctree is in relation to the srcdir
relative_base = (
['..'] *
doctree.attributes.get('source')[len(self.srcdir) + 1:].count('/')
)
for node in doctree.traverse(nodes.image):
if node.get('candidates') is None:
node['candidates'] = ('*',)
# fix up images with absolute paths
if node['uri'].startswith(self.outdir):
node['uri'] = '/'.join(
relative_base + [
node['uri'][len(self.outdir) + 1:]
]
)
|
python
|
{
"resource": ""
}
|
q13899
|
parse_metadata
|
train
|
def parse_metadata(section):
"""Given the first part of a slide, returns metadata associated with it."""
metadata = {}
metadata_lines = section.split('\n')
for line in metadata_lines:
colon_index = line.find(':')
if colon_index != -1:
key = line[:colon_index].strip()
val = line[colon_index + 1:].strip()
metadata[key] = val
return metadata
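
# --- usage sketch (not part of the original module) ---
# Lines without a colon are ignored; keys and values are stripped.
if __name__ == "__main__":
    meta = parse_metadata("title: My Talk\nauthor: Jane Doe\nno colon here")
    assert meta == {"title": "My Talk", "author": "Jane Doe"}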
|
python
|
{
"resource": ""
}
|