_id stringlengths 2 7 | title stringlengths 1 88 | partition stringclasses 3 values | text stringlengths 75 19.8k | language stringclasses 1 value | meta_information dict |
|---|---|---|---|---|---|
def hicup_stats_table(self):
    """Add the core HiCUP statistics to the General Statistics table."""
    # Shared settings for read-count columns and for percentage columns.
    read_count_col = {
        'min': 0,
        'scale': 'PuRd',
        'modify': lambda x: x * config.read_count_multiplier,
        'shared_key': 'read_count',
    }
    pct_col = {
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'YlGn',
    }
    headers = OrderedDict()
    headers['Percentage_Ditags_Passed_Through_HiCUP'] = dict(
        pct_col,
        title='% Passed',
        description='Percentage Di-Tags Passed Through HiCUP')
    headers['Deduplication_Read_Pairs_Uniques'] = dict(
        read_count_col,
        title='{} Unique'.format(config.read_count_prefix),
        description='Unique Di-Tags ({})'.format(config.read_count_desc))
    # HiCUP reports the unique percentage; display it as % duplicates.
    headers['Percentage_Uniques'] = dict(
        pct_col,
        title='% Duplicates',
        description='Percent Duplicate Di-Tags',
        scale='YlGn-rev',
        modify=lambda x: 100 - x)
    headers['Valid_Pairs'] = dict(
        read_count_col,
        title='{} Valid'.format(config.read_count_prefix),
        description='Valid Pairs ({})'.format(config.read_count_desc))
    headers['Percentage_Valid'] = dict(
        pct_col,
        title='% Valid',
        description='Percent Valid Pairs')
    headers['Paired_Read_1'] = dict(
        read_count_col,
        title='{} Pairs Aligned'.format(config.read_count_prefix),
        description='Paired Alignments ({})'.format(config.read_count_desc))
    headers['Percentage_Mapped'] = dict(
        pct_col,
        title='% Aligned',
        description='Percentage of Paired Alignments')
    self.general_stats_addcols(self.hicup_data, headers, 'HiCUP')
def hicup_truncating_chart(self):
    """Generate the HiCUP truncated reads bar plot."""
    # Plot categories, in display order.
    keys = OrderedDict()
    keys['Not_Truncated_Reads'] = {'color': '#2f7ed8', 'name': 'Not Truncated'}
    keys['Truncated_Read'] = {'color': '#0d233a', 'name': 'Truncated'}
    # One bar per sample per read: "<sample> Read 1" / "<sample> Read 2".
    data = {}
    for s_name, d in self.hicup_data.items():
        for read in ('1', '2'):
            data['{} Read {}'.format(s_name, read)] = {
                'Not_Truncated_Reads': d['Not_Truncated_Reads_' + read],
                'Truncated_Read': d['Truncated_Read_' + read],
            }
    # Config for the plot
    pconfig = {
        'id': 'hicup_truncated_reads_plot',
        'title': 'HiCUP: Truncated Reads',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads',
    }
    return bargraph.plot(data, keys, pconfig)
def hicup_alignment_chart(self):
    """Generate the HiCUP aligned reads bar plot."""
    # Plot categories, in display order.
    keys = OrderedDict()
    keys['Unique_Alignments_Read'] = {'color': '#2f7ed8', 'name': 'Unique Alignments'}
    keys['Multiple_Alignments_Read'] = {'color': '#492970', 'name': 'Multiple Alignments'}
    keys['Failed_To_Align_Read'] = {'color': '#0d233a', 'name': 'Failed To Align'}
    keys['Too_Short_To_Map_Read'] = {'color': '#f28f43', 'name': 'Too short to map'}
    # One bar per sample per read; the data keys are "<category>_<read>".
    data = {}
    for s_name, d in self.hicup_data.items():
        for read in ('1', '2'):
            data['{} Read {}'.format(s_name, read)] = {
                cat: d['{}_{}'.format(cat, read)] for cat in keys
            }
    # Config for the plot
    pconfig = {
        'id': 'hicup_mapping_stats_plot',
        'title': 'HiCUP: Mapping Statistics',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads',
    }
    return bargraph.plot(data, keys, pconfig)
def hicup_filtering_chart(self):
    """Generate the HiCUP filtering bar plot."""
    # Category order and colours for the stacked bars.
    keys = OrderedDict([
        ('Valid_Pairs', {'color': '#2f7ed8', 'name': 'Valid Pairs'}),
        ('Same_Fragment_Internal', {'color': '#0d233a', 'name': 'Same Fragment - Internal'}),
        ('Same_Circularised', {'color': '#910000', 'name': 'Same Fragment - Circularised'}),
        ('Same_Dangling_Ends', {'color': '#8bbc21', 'name': 'Same Fragment - Dangling Ends'}),
        ('Re_Ligation', {'color': '#1aadce', 'name': 'Re-ligation'}),
        ('Contiguous_Sequence', {'color': '#f28f43', 'name': 'Contiguous Sequence'}),
        ('Wrong_Size', {'color': '#492970', 'name': 'Wrong Size'}),
    ])
    # Config for the plot
    pconfig = {
        'id': 'hicup_filtering_plot',
        'title': 'HiCUP: Filtering Statistics',
        'ylab': '# Read Pairs',
        'cpswitch_counts_label': 'Number of Read Pairs',
        'cpswitch_c_active': False,
    }
    return bargraph.plot(self.hicup_data, keys, pconfig)
def parse_reports(self):
    """Find Qualimap BamQC reports and parse their data.

    Populates the per-file-type result dicts on self and builds the
    report sections / general-stats headers. Returns the number of
    samples parsed (0 if nothing was found).
    """
    # General stats - genome_results.txt
    self.qualimap_bamqc_genome_results = dict()
    for f in self.find_log_files('qualimap/bamqc/genome_results'):
        parse_genome_results(self, f)
    self.qualimap_bamqc_genome_results = self.ignore_samples(self.qualimap_bamqc_genome_results)
    # Coverage - coverage_histogram.txt
    self.qualimap_bamqc_coverage_hist = dict()
    for f in self.find_log_files('qualimap/bamqc/coverage', filehandles=True):
        parse_coverage(self, f)
    self.qualimap_bamqc_coverage_hist = self.ignore_samples(self.qualimap_bamqc_coverage_hist)
    # Insert size - insert_size_histogram.txt
    self.qualimap_bamqc_insert_size_hist = dict()
    for f in self.find_log_files('qualimap/bamqc/insert_size', filehandles=True):
        parse_insert_size(self, f)
    self.qualimap_bamqc_insert_size_hist = self.ignore_samples(self.qualimap_bamqc_insert_size_hist)
    # GC distribution - mapped_reads_gc-content_distribution.txt
    self.qualimap_bamqc_gc_content_dist = dict()
    self.qualimap_bamqc_gc_by_species = dict()  # {'HUMAN': data_dict, 'MOUSE': data_dict}
    for f in self.find_log_files('qualimap/bamqc/gc_dist', filehandles=True):
        parse_gc_dist(self, f)
    self.qualimap_bamqc_gc_by_species = self.ignore_samples(self.qualimap_bamqc_gc_by_species)
    # A sample may appear in only some of the file types, so take the max.
    num_parsed = max(
        len(self.qualimap_bamqc_genome_results),
        len(self.qualimap_bamqc_coverage_hist),
        len(self.qualimap_bamqc_insert_size_hist),
        len(self.qualimap_bamqc_gc_content_dist)
    )
    # Go no further if nothing found
    if num_parsed == 0:
        return 0
    # Coverage thresholds for the general stats table: user-configurable via
    # config.qualimap_config['general_stats_coverage'], falling back to the
    # defaults below if the option is missing or not a non-empty list.
    try:
        covs = config.qualimap_config['general_stats_coverage']
        assert type(covs) == list
        assert len(covs) > 0
        covs = [str(i) for i in covs]
        log.debug("Custom Qualimap thresholds: {}".format(", ".join([i for i in covs])))
    except (AttributeError, TypeError, AssertionError):
        covs = [1, 5, 10, 30, 50]
        covs = [str(i) for i in covs]
        log.debug("Using default Qualimap thresholds: {}".format(", ".join([i for i in covs])))
    self.covs = covs
    # Make the plots for the report
    report_sections(self)
    # Set up the general stats table
    general_stats_headers(self)
    # Return the number of reports we found
    return num_parsed
def parse_genome_results(self, f):
    """Parse the contents of the Qualimap BamQC genome_results.txt file.

    Stores the parsed values in self.qualimap_bamqc_genome_results keyed
    by the cleaned sample name, and adds a few core metrics to the
    general stats table. Returns None if no BAM filename is found.
    """
    # One regex per metric of interest, keyed by the name we store it under.
    regexes = {
        'bam_file': r"bam file = (.+)",
        'total_reads': r"number of reads = ([\d,]+)",
        'mapped_reads': r"number of mapped reads = ([\d,]+)",
        'mapped_bases': r"number of mapped bases = ([\d,]+)",
        'sequenced_bases': r"number of sequenced bases = ([\d,]+)",
        'mean_insert_size': r"mean insert size = ([\d,\.]+)",
        'median_insert_size': r"median insert size = ([\d,\.]+)",
        'mean_mapping_quality': r"mean mapping quality = ([\d,\.]+)",
        'general_error_rate': r"general error rate = ([\d,\.]+)",
    }
    d = dict()
    for k, r in regexes.items():
        r_search = re.search(r, f['f'], re.MULTILINE)
        if r_search:
            try:
                # Strip thousands separators before converting to float.
                d[k] = float(r_search.group(1).replace(',',''))
            except ValueError:
                # Non-numeric values (e.g. the BAM file path) kept as strings.
                d[k] = r_search.group(1)
    # Check we have an input filename
    if 'bam_file' not in d:
        log.debug("Couldn't find an input filename in genome_results file {}".format(f['fn']))
        return None
    # Get a nice sample name
    s_name = self.clean_s_name(d['bam_file'], f['root'])
    # Add to general stats table & calculate a nice % aligned
    try:
        self.general_stats_data[s_name]['total_reads'] = d['total_reads']
        self.general_stats_data[s_name]['mapped_reads'] = d['mapped_reads']
        d['percentage_aligned'] = (d['mapped_reads'] / d['total_reads'])*100
        self.general_stats_data[s_name]['percentage_aligned'] = d['percentage_aligned']
        # Scaled by 100 - assumes Qualimap reports the error rate as a
        # fraction, not a percentage. TODO confirm against Qualimap output.
        self.general_stats_data[s_name]['general_error_rate'] = d['general_error_rate']*100
    except KeyError:
        # One of the expected metrics was missing from the file - skip quietly.
        pass
    # Save results
    if s_name in self.qualimap_bamqc_genome_results:
        log.debug("Duplicate genome results sample name found! Overwriting: {}".format(s_name))
    self.qualimap_bamqc_genome_results[s_name] = d
    self.add_data_source(f, s_name=s_name, section='genome_results')
def parse_coverage(self, f):
    """Parse the contents of the Qualimap BamQC coverage histogram file.

    Stores {coverage: count} in self.qualimap_bamqc_coverage_hist and
    adds the median coverage to the general stats table. Returns None
    if nothing could be parsed.
    """
    # Get the sample name from the parent parent directory
    # Typical path: <sample name>/raw_data_qualimapReport/coverage_histogram.txt
    s_name = self.get_s_name(f)
    d = dict()
    for l in f['f']:
        if l.startswith('#'):
            # Skip comment / header lines
            continue
        coverage, count = l.split(None, 1)
        coverage = int(round(float(coverage)))
        count = float(count)
        d[coverage] = count
    if len(d) == 0:
        log.debug("Couldn't parse contents of coverage histogram file {}".format(f['fn']))
        return None
    # Find median without importing anything to do it for us.
    # NOTE(review): assumes the file rows (and hence dict insertion order)
    # are sorted by ascending coverage - TODO confirm Qualimap's ordering.
    num_counts = sum(d.values())
    cum_counts = 0
    median_coverage = None
    for thiscov, thiscount in d.items():
        cum_counts += thiscount
        if cum_counts >= num_counts/2:
            median_coverage = thiscov
            break
    self.general_stats_data[s_name]['median_coverage'] = median_coverage
    # Save results
    if s_name in self.qualimap_bamqc_coverage_hist:
        log.debug("Duplicate coverage histogram sample name found! Overwriting: {}".format(s_name))
    self.qualimap_bamqc_coverage_hist[s_name] = d
    self.add_data_source(f, s_name=s_name, section='coverage_histogram')
def parse_insert_size(self, f):
    """Parse the contents of the Qualimap BamQC insert size histogram file.

    Stores {insert_size: count_in_millions} in
    self.qualimap_bamqc_insert_size_hist and adds the median insert
    size to the general stats table.
    """
    # Get the sample name from the parent parent directory
    # Typical path: <sample name>/raw_data_qualimapReport/insert_size_histogram.txt
    s_name = self.get_s_name(f)
    d = dict()
    zero_insertsize = 0
    for l in f['f']:
        if l.startswith('#'):
            # Skip comment / header lines
            continue
        insertsize, count = l.split(None, 1)
        insertsize = int(round(float(insertsize)))
        # Counts are scaled to millions of reads for plotting.
        count = float(count) / 1000000
        if(insertsize == 0):
            # Zero insert sizes are excluded from the histogram.
            zero_insertsize = count
        else:
            d[insertsize] = count
    # Find median without importing anything to do it for us.
    # NOTE(review): assumes rows (and hence dict insertion order) are sorted
    # by ascending insert size - TODO confirm Qualimap's ordering.
    num_counts = sum(d.values())
    cum_counts = 0
    median_insert_size = None
    for thisins, thiscount in d.items():
        cum_counts += thiscount
        if cum_counts >= num_counts/2:
            median_insert_size = thisins
            break
    # Add the median insert size to the general stats table
    self.general_stats_data[s_name]['median_insert_size'] = median_insert_size
    # Save results
    if s_name in self.qualimap_bamqc_insert_size_hist:
        log.debug("Duplicate insert size histogram sample name found! Overwriting: {}".format(s_name))
    self.qualimap_bamqc_insert_size_hist[s_name] = d
    self.add_data_source(f, s_name=s_name, section='insert_size_histogram')
def parse_gc_dist(self, f):
    """Parse the Qualimap BamQC mapped reads GC-content distribution file.

    Stores the per-sample GC distribution, an optional reference-genome
    distribution (third column), and the average GC in the general
    stats table.
    """
    # Get the sample name from the parent parent directory
    # Typical path: <sample name>/raw_data_qualimapReport/mapped_reads_gc-content_distribution.txt
    s_name = self.get_s_name(f)
    d = dict()
    reference_species = None
    reference_d = dict()
    avg_gc = 0
    for l in f['f']:
        if l.startswith('#'):
            # Header line; a third column title names the reference species.
            sections = l.strip("\n").split("\t", 3)
            if len(sections) > 2:
                reference_species = sections[2]
            continue
        sections = l.strip("\n").split("\t", 3)
        gc = int(round(float(sections[0])))
        content = float(sections[1])
        # Weighted sum towards the mean GC. NOTE(review): this assumes the
        # content column is a fraction summing to ~1 across all GC bins -
        # TODO confirm against Qualimap's output format.
        avg_gc += gc * content
        d[gc] = content
        if len(sections) > 2:
            # Optional third column: the reference genome's GC distribution.
            reference_content = float(sections[2])
            reference_d[gc] = reference_content
    # Add average GC to the general stats table
    self.general_stats_data[s_name]['avg_gc'] = avg_gc
    # Save results
    if s_name in self.qualimap_bamqc_gc_content_dist:
        log.debug("Duplicate Mapped Reads GC content distribution sample name found! Overwriting: {}".format(s_name))
    self.qualimap_bamqc_gc_content_dist[s_name] = d
    # Only keep one reference distribution per species.
    if reference_species and reference_species not in self.qualimap_bamqc_gc_by_species:
        self.qualimap_bamqc_gc_by_species[reference_species] = reference_d
    self.add_data_source(f, s_name=s_name, section='mapped_gc_distribution')
def flexbar_barplot(self):
    """Add a bar graph section showing the Flexbar read-processing categories."""
    # Category order and colours for the stacked bars.
    cats = OrderedDict()
    cats['remaining_reads'] = {'color': '#437bb1', 'name': 'Remaining reads'}
    cats['skipped_due_to_uncalled_bases'] = {'color': '#e63491', 'name': 'Skipped due to uncalled bases'}
    cats['short_prior_to_adapter_removal'] = {'color': '#b1084c', 'name': 'Short prior to adapter removal'}
    cats['finally_skipped_short_reads'] = {'color': '#7f0000', 'name': 'Finally skipped short reads'}
    # Config for the plot
    plot_conf = {
        'id': 'flexbar_plot',
        'title': 'Flexbar: Processed Reads',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads',
        'hide_zero_cats': False,
    }
    self.add_section(plot=bargraph.plot(self.flexbar_data, cats, plot_conf))
def parse_reports(self):
    """Find RSeQC read_GC reports and parse their data.

    Builds self.read_gc (counts per GC%) and self.read_gc_pct
    (percentages per GC%), adds a line-graph report section, and
    returns the number of samples found.
    """
    # Set up vars
    self.read_gc = dict()
    self.read_gc_pct = dict()
    # Go through files and parse data
    for f in self.find_log_files('rseqc/read_gc'):
        if f['f'].startswith('GC% read_count'):
            gc = list()
            counts = list()
            for l in f['f'].splitlines():
                s = l.split()
                try:
                    gc.append(float(s[0]))
                    counts.append(float(s[1]))
                except (IndexError, ValueError):
                    # Fix: only swallow the expected parse failures (header
                    # or malformed rows) - the previous bare `except` also
                    # caught KeyboardInterrupt / SystemExit.
                    pass
            if len(gc) > 0:
                # Index order that sorts the GC values ascending.
                sorted_gc_keys = sorted(range(len(gc)), key=lambda k: gc[k])
                total = sum(counts)
                if f['s_name'] in self.read_gc:
                    log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
                self.add_data_source(f, section='read_GC')
                self.read_gc[f['s_name']] = OrderedDict()
                self.read_gc_pct[f['s_name']] = OrderedDict()
                for i in sorted_gc_keys:
                    self.read_gc[f['s_name']][gc[i]] = counts[i]
                    self.read_gc_pct[f['s_name']][gc[i]] = (counts[i]/total)*100
    # Filter to strip out ignored sample names
    self.read_gc = self.ignore_samples(self.read_gc)
    if len(self.read_gc) > 0:
        # Add line graph to section
        pconfig = {
            'id': 'rseqc_read_gc_plot',
            'title': 'RSeQC: Read GC Content',
            'ylab': 'Number of Reads',
            'xlab': "GC content (%)",
            'xmin': 0,
            'xmax': 100,
            'tt_label': "<strong>{point.x}% GC</strong>: {point.y:.2f}",
            'data_labels': [
                {'name': 'Counts', 'ylab': 'Number of Reads'},
                {'name': 'Percentages', 'ylab': 'Percentage of Reads'}
            ]
        }
        self.add_section (
            name = 'Read GC Content',
            anchor = 'rseqc-read_gc',
            description = '<a href="http://rseqc.sourceforge.net/#read-gc-py" target="_blank">read_GC</a>' \
                " calculates a histogram of read GC content.</p>",
            plot = linegraph.plot([self.read_gc, self.read_gc_pct], pconfig)
        )
    # Return number of samples found
    return len(self.read_gc)
def collect_data(parent_module):
    """Find Picard VariantCallingMetrics reports and parse their data.

    Returns a dict mapping sample name -> OrderedDict of metric -> value,
    parsed from the '## METRICS CLASS' table in each report.
    """
    data = dict()
    for file_meta in parent_module.find_log_files('picard/variant_calling_metrics', filehandles=True):
        s_name = None
        for header, value in table_in(file_meta['f'], pre_header_string='## METRICS CLASS'):
            if header == 'SAMPLE_ALIAS':
                # A new row starts: (re)initialise this sample's dict.
                s_name = value
                if s_name in data:
                    log.debug("Duplicate sample name found in {}! Overwriting: {}".format(file_meta['fn'], s_name))
                data[s_name] = OrderedDict()
            else:
                # NOTE(review): assumes SAMPLE_ALIAS is the first column of
                # each row - any value seen before it would hit data[None]
                # and raise KeyError. Verify against Picard's column order.
                data[s_name][header] = value
    return data
def table_in(filehandle, pre_header_string):
    """Generator yielding (header, value) pairs from a table whose header
    row is the first line after a line starting with ``pre_header_string``.
    """
    # Three-phase scan: searching -> header -> rows.
    SEARCHING, HEADER, ROWS = 0, 1, 2
    state = SEARCHING
    headers = list()
    for line in stripped(filehandle):
        if state == SEARCHING:
            if line.startswith(pre_header_string):
                state = HEADER
        elif state == HEADER:
            headers = line.split("\t")
            state = ROWS
        else:
            values = line.split("\t")
            # Skip blank rows.
            if values != ['']:
                for pair in zip(headers, values):
                    yield pair
def derive_data(data):
    """Derive total / known / novel variant counts for every sample in-place.

    Adds 'total_called_variants', 'total_called_variants_known' and
    'total_called_variants_novel' keys to each sample's metrics dict.
    """
    called_keys = ('TOTAL_SNPS', 'TOTAL_COMPLEX_INDELS',
                   'TOTAL_MULTIALLELIC_SNPS', 'TOTAL_INDELS')
    known_keys = ('NUM_IN_DB_SNP', 'NUM_IN_DB_SNP_COMPLEX_INDELS',
                  'NUM_IN_DB_SNP_MULTIALLELIC')
    for values in data.values():
        # Everything that was called, regardless of novelty.
        total_called = sum(int(values[k]) for k in called_keys)
        values['total_called_variants'] = total_called
        # Known variants; known indels are not reported directly, so use
        # total indels minus the novel ones.
        known = sum(int(values[k]) for k in known_keys)
        known += int(values['TOTAL_INDELS']) - int(values['NOVEL_INDELS'])
        values['total_called_variants_known'] = known
        # Whatever is not known must be novel.
        values['total_called_variants_novel'] = total_called - known
def compare_variants_label_plot(data):
    """Return HTML for the known-vs-novel called variants bar plot."""
    # Two stacked categories per sample: known and novel counts.
    cats = OrderedDict([
        ('total_called_variants_known', {'name': 'Known Variants'}),
        ('total_called_variants_novel', {'name': 'Novel Variants'}),
    ])
    plot_conf = {
        'id': 'picard_variantCallingMetrics_variant_label',
        'title': 'Picard: Variants Called',
        'ylab': 'Counts of Variants',
    }
    return bargraph.plot(data, cats=cats, pconfig=plot_conf)
def quast_general_stats_table(self):
    """Add the key QUAST metrics (N50, total assembly length) to the
    General Statistics table at the top of the report."""
    n50_col = {
        'title': 'N50 ({})'.format(self.contig_length_suffix),
        'description': 'N50 is the contig length such that using longer or equal length contigs produces half (50%) of the bases of the assembly (kilo base pairs)',
        'min': 0,
        'suffix': self.contig_length_suffix,
        'scale': 'RdYlGn',
        'modify': lambda x: x * self.contig_length_multiplier,
    }
    length_col = {
        'title': 'Length ({})'.format(self.total_length_suffix),
        'description': 'The total number of bases in the assembly (mega base pairs).',
        'min': 0,
        'suffix': self.total_length_suffix,
        'scale': 'YlGn',
        'modify': lambda x: x * self.total_length_multiplier,
    }
    headers = OrderedDict([('N50', n50_col), ('Total length', length_col)])
    self.general_stats_addcols(self.quast_data, headers)
def quast_contigs_barplot(self):
    """Make a bar plot showing the number of contigs per assembly,
    binned by contig length.

    QUAST reports cumulative counts ('# contigs (>= N bp)'); these are
    converted into non-overlapping length bins so they stack correctly.
    """
    data = dict()
    categories = []
    # Fix: raw string for the regex - '\(' and '\d' in a plain string are
    # invalid escape sequences (DeprecationWarning). Also compiled once
    # instead of being re-parsed for every table key.
    contig_re = re.compile(r'# contigs \(>= (\d+) bp\)')
    for s_name, d in self.quast_data.items():
        # Cumulative contig counts keyed by length threshold.
        nums_by_t = dict()
        for k, v in d.items():
            m = contig_re.match(k)
            if m and v != '-':
                nums_by_t[int(m.groups()[0])] = int(v)
        thresholds = sorted(nums_by_t.keys(), reverse=True)
        p = dict()
        cats = []
        for i, t in enumerate(thresholds):
            if i == 0:
                # Largest threshold: open-ended top bin.
                c = '>= ' + str(t) + ' bp'
                cats.append(c)
                p[c] = nums_by_t[t]
            else:
                # Subtract the next-larger cumulative count to get this bin.
                c = str(t) + '-' + str(thresholds[i - 1]) + ' bp'
                cats.append(c)
                p[c] = nums_by_t[t] - nums_by_t[thresholds[i - 1]]
        if not categories:
            categories = cats
        data[s_name] = p
    pconfig = {
        'id': 'quast_num_contigs',
        'title': 'QUAST: Number of Contigs',
        'ylab': '# Contigs',
        'yDecimals': False
    }
    return bargraph.plot(data, categories, pconfig)
def quast_predicted_genes_barplot(self):
    """
    Make a bar plot showing the number and length of predicted genes
    for each assembly
    """
    # Prep the data
    # extract the ranges given to quast with "--gene-thresholds"
    prefix = '# predicted genes (>= '
    suffix = ' bp)'
    # All gene-length thresholds seen across every sample, ascending.
    all_thresholds = sorted(list(set([
        int(key[len(prefix):-len(suffix)])
        for _, d in self.quast_data.items()
        for key in d.keys()
        if key.startswith(prefix)
    ])))
    data = {}
    ourpat = '>= {}{} bp'
    theirpat = prefix+"{}"+suffix
    for s_name, d in self.quast_data.items():
        # NOTE(review): this recomputes the thresholds over *all* samples
        # (it iterates self.quast_data, not d), so it always equals
        # all_thresholds. Verify whether per-sample thresholds were meant.
        thresholds = sorted(list(set([
            int(key[len(prefix):-len(suffix)])
            for _, x in self.quast_data.items()
            for key in x.keys()
            if key.startswith(prefix)
        ])))
        if len(thresholds)<2: continue
        p = dict()
        try:
            # QUAST reports cumulative counts; convert to per-bin counts by
            # subtracting the next threshold's count. The top threshold is
            # an open-ended bin.
            p = { ourpat.format(thresholds[-1],""): d[theirpat.format(thresholds[-1])] }
            for low,high in zip(thresholds[:-1], thresholds[1:]):
                p[ourpat.format(low,-high)] = d[theirpat.format(low)] - d[theirpat.format(high)]
            # Sanity check: the bins should add back up to the '>= 0 bp' total.
            assert sum(p.values()) == d[theirpat.format(0)]
        except AssertionError:
            log.warning("Predicted gene counts didn't add up properly for \"{}\"".format(s_name))
        except KeyError:
            log.warning("Not all predicted gene thresholds available for \"{}\"".format(s_name))
        data[s_name] = p
    # Category labels, e.g. '>= 0-300 bp', ..., '>= 3000 bp' for the last.
    cats = [ ourpat.format(low,-high if high else "")
             for low,high in zip(all_thresholds, all_thresholds[1:]+[None]) ]
    if len(cats) > 0:
        return bargraph.plot(data, cats, {'id': 'quast_predicted_genes', 'title': 'QUAST: Number of predicted genes', 'ylab': 'Number of predicted genes'})
    else:
        return None
def clipandmerge_general_stats_table(self):
    """Add the percentage of merged reads from the ClipAndMerge report to
    the General Statistics table at the top of the report."""
    headers = OrderedDict()
    headers['percentage'] = dict(
        title='% Merged',
        description='Percentage of reads merged',
        min=0,
        max=100,
        suffix='%',
        scale='Greens',
        format='{:,.2f}',
    )
    self.general_stats_addcols(self.clipandmerge_data, headers)
def plot_basic_hist(samples, file_type, **plot_args):
    """ Create line graph plot for basic histogram data for 'file_type'.
    The 'samples' parameter could be from the bbmap mod_data dictionary:
    samples = bbmap.MultiqcModule.mod_data[file_type]
    """
    # Total of the first data column across all samples and all x values.
    sumy = sum([int(samples[sample]['data'][x][0])
                for sample in samples
                for x in samples[sample]['data']])
    # Trim the x axis to the range covering 99.9% of all observations so a
    # long, sparse tail doesn't squash the interesting part of the plot.
    cutoff = sumy * 0.999
    all_x = set()
    for item in sorted(chain(*[samples[sample]['data'].items()
                               for sample in samples])):
        all_x.add(item[0])
        cutoff -= item[1][0]
        if cutoff < 0:
            xmax = item[0]
            break
    else:
        # Never hit the cutoff: keep the full x range.
        xmax = max(all_x)
    # Fill in zeroes where a sample has no entry for an x value.
    data = {
        sample: {
            x: samples[sample]['data'][x][0] if x in samples[sample]['data'] else 0
            for x in all_x
        }
        for sample in samples
    }
    plot_params = {
        'id': 'bbmap-' + file_type + '_plot',
        'title': 'BBTools: ' + plot_args['plot_title'],
        'xmax': xmax
    }
    # Caller-supplied plot params override the defaults above.
    plot_params.update(plot_args['plot_params'])
    plot = linegraph.plot(
        data,
        plot_params
    )
    return plot
def parse_gatk_base_recalibrator(self):
    """Find GATK BaseRecalibrator logs and parse their data.

    Populates self.gatk_base_recalibrator, a nested dict keyed by recal
    table type -> report table name -> sample name, and returns the
    number of reports found.
    """
    # Map each GATKReport table header line to a short internal name.
    report_table_headers = {
        '#:GATKTable:Arguments:Recalibration argument collection values used in this run': 'arguments',
        '#:GATKTable:Quantized:Quality quantization map': 'quality_quantization_map',
        '#:GATKTable:RecalTable0:': 'recal_table_0',
        '#:GATKTable:RecalTable1:': 'recal_table_1',
        '#:GATKTable:RecalTable2:': 'recal_table_2',
    }
    # Sample names already seen, tracked per recal table type for dedup.
    samples_kept = {rt_type: set() for rt_type in recal_table_type}
    self.gatk_base_recalibrator = {recal_type:
                                   {table_name: {}
                                    for table_name
                                    in report_table_headers.values()}
                                   for recal_type in recal_table_type}
    for f in self.find_log_files('gatk/base_recalibrator', filehandles=True):
        parsed_data = self.parse_report(f['f'].readlines(), report_table_headers)
        # Classify this report (e.g. pre- vs post-recalibration table).
        rt_type = determine_recal_table_type(parsed_data)
        if len(parsed_data) > 0:
            if f['s_name'] in samples_kept[rt_type]:
                log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
            else:
                samples_kept[rt_type].add(f['s_name'])
            self.add_data_source(f, section='base_recalibrator')
            for table_name, sample_tables in parsed_data.items():
                self.gatk_base_recalibrator[rt_type][table_name][
                    f['s_name']] = sample_tables
    # Filter to strip out ignored sample names
    for rt_type in recal_table_type:
        for table_name, sample_tables in self.gatk_base_recalibrator[rt_type].items():
            self.gatk_base_recalibrator[rt_type][table_name] = self.ignore_samples(
                sample_tables)
    n_reports_found = sum([len(samples_kept[rt_type]) for rt_type in recal_table_type])
    if n_reports_found > 0:
        log.info("Found {} BaseRecalibrator reports".format(n_reports_found))
        self.add_quality_score_vs_no_of_observations_section()
    return n_reports_found
def add_quality_score_vs_no_of_observations_section(self):
    """Add a report section with the quality score vs number of
    observations line plot (one Count and one Percent dataset per
    recalibration table type)."""
    sample_data = []
    data_labels = []
    for rt_type_name, rt_type in recal_table_type._asdict().items():
        sample_tables = self.gatk_base_recalibrator[rt_type]['quality_quantization_map']
        if len(sample_tables) == 0:
            continue
        # Dataset 1: absolute counts per observed quality score.
        sample_data.append({
            sample: {int(x): int(y) for x, y in zip(table['QualityScore'], table['Count'])}
            for sample, table in sample_tables.items()
        })
        # Per-sample totals, used to normalise the percentage dataset.
        sample_y_sums = {
            sample: sum(int(y) for y in table['Count'])
            for sample, table
            in sample_tables.items()
        }
        # Dataset 2: proportion of each sample's observations per score.
        sample_data.append({
            sample: {
                int(x): float(y) / sample_y_sums[sample]
                for x, y in zip(table['QualityScore'], table['Count'])
            }
            for sample, table in sample_tables.items()
        })
        # Highest proportion anywhere, used as the y-axis max for percents.
        flat_proportions = [float(y) / sample_y_sums[sample]
                            for sample, table in sample_tables.items()
                            for y in table['Count']]
        prop_ymax = max(flat_proportions)
        # One Count and one Percent label per recal table type, matching
        # the two datasets appended above.
        data_labels.append({'name': "{} Count".format(rt_type_name.capitalize().replace('_', '-')),
                            'ylab': 'Count'})
        data_labels.append({'ymax': prop_ymax,
                            'name': "{} Percent".format(rt_type_name.capitalize().replace('_', '-')),
                            'ylab': 'Percent'})
    plot = linegraph.plot(
        sample_data,
        pconfig={
            'title': "Observed Quality Score Counts",
            'id': 'gatk-base-recalibrator-quality-score-vs-number-of-observations',
            'xlab': 'Observed Quality Score',
            'ylab': 'Count',
            'xDecimals': False,
            'data_labels': data_labels,
        })
    # Reported vs empirical quality scores
    self.add_section(
        name='Observed Quality Scores',
        description=(
            'This plot shows the distribution of base quality scores in each sample before and '
            'after base quality score recalibration (BQSR). Applying BQSR should broaden the '
            'distribution of base quality scores.'
        ),
        helptext=(
            'For more information see '
            '[the Broad\'s description of BQSR]'
            '(https://gatkforums.broadinstitute.org/gatk/discussion/44/base-quality-score-recalibration-bqsr)'
            '.'
        ),
        plot=plot,
    )
def parse_bbt(self, fh):
    """Parse BioBloom Tools output into a nested dict.

    The first line holds tab-separated column headers; every following
    line is one organism's row. Returns an OrderedDict mapping the
    first column's value to {header: float(value)}.
    """
    parsed_data = OrderedDict()
    headers = None
    for line in fh:
        fields = line.split("\t")
        if headers is None:
            # First line of the file: remember the column names.
            headers = fields
            continue
        row = dict()
        for idx, colname in enumerate(headers[1:]):
            row[colname] = float(fields[idx + 1])
        parsed_data[fields[0]] = row
    return parsed_data
def parse_fqscreen(self, f):
    """Parse the FastQ Screen output into a 3D dict.

    Returns a dict keyed by organism -> {'percentages': ..., 'counts': ...}
    plus 'total_reads' and 'No hits' entries, or None if nothing parsed.
    """
    parsed_data = OrderedDict()
    reads_processed = None
    nohits_pct = None
    # One row per organism; columns alternate count / percentage. Compiled
    # once instead of re-parsed for every line.
    row_re = re.compile(r"^(\S+)\s+(\d+)\s+(\d+)\s+([\d\.]+)\s+(\d+)\s+([\d\.]+)\s+(\d+)\s+([\d\.]+)\s+(\d+)\s+([\d\.]+)\s+(\d+)\s+([\d\.]+)$")
    for l in f['f']:
        if l.startswith('%Hit_no_genomes:') or l.startswith('%Hit_no_libraries:'):
            nohits_pct = float(l.split(':', 1)[1])
            parsed_data['No hits'] = {'percentages': {'one_hit_one_library': nohits_pct }}
        else:
            fqs = row_re.search(l)
            if fqs:
                org = fqs.group(1)
                parsed_data[org] = {'percentages':{}, 'counts':{}}
                reads_processed = int(fqs.group(2))
                parsed_data[org]['counts']['reads_processed'] = int(fqs.group(2))
                parsed_data[org]['counts']['unmapped'] = int(fqs.group(3))
                parsed_data[org]['percentages']['unmapped'] = float(fqs.group(4))
                parsed_data[org]['counts']['one_hit_one_library'] = int(fqs.group(5))
                parsed_data[org]['percentages']['one_hit_one_library'] = float(fqs.group(6))
                parsed_data[org]['counts']['multiple_hits_one_library'] = int(fqs.group(7))
                parsed_data[org]['percentages']['multiple_hits_one_library'] = float(fqs.group(8))
                parsed_data[org]['counts']['one_hit_multiple_libraries'] = int(fqs.group(9))
                parsed_data[org]['percentages']['one_hit_multiple_libraries'] = float(fqs.group(10))
                parsed_data[org]['counts']['multiple_hits_multiple_libraries'] = int(fqs.group(11))
                parsed_data[org]['percentages']['multiple_hits_multiple_libraries'] = float(fqs.group(12))
                # Can't use #Reads in subset as varies. #Reads_processed should be same for all orgs in a sample
                parsed_data['total_reads'] = int(fqs.group(2))
    if len(parsed_data) == 0:
        return None
    # Calculate no hits counts.
    # Fix: compare against None explicitly - a no-hits percentage of 0.0
    # (or 0 reads processed) is valid data, but previously tested falsy and
    # wrongly fell through to the warning branch.
    if reads_processed is not None and nohits_pct is not None:
        parsed_data['No hits']['counts'] = {'one_hit_one_library': int((nohits_pct/100.0) * float(reads_processed)) }
    else:
        # Fix: log.warn is a deprecated alias of log.warning.
        log.warning("Couldn't find number of reads with no hits for '{}'".format(f['s_name']))
    self.num_orgs = max(len(parsed_data), self.num_orgs)
    return parsed_data
def fqscreen_plot (self):
    """ Makes a fancy custom plot which replicates the plot seen in the main
    FastQ Screen program. Not useful if lots of samples as gets too wide. """
    categories = list()
    getCats = True
    data = list()
    # Series types in stacking order; 'col' colours match FastQ Screen's
    # own plot.
    p_types = OrderedDict()
    p_types['multiple_hits_multiple_libraries'] = {'col': '#7f0000', 'name': 'Multiple Hits, Multiple Genomes' }
    p_types['one_hit_multiple_libraries'] = {'col': '#ff0000', 'name': 'One Hit, Multiple Genomes' }
    p_types['multiple_hits_one_library'] = {'col': '#00007f', 'name': 'Multiple Hits, One Genome' }
    p_types['one_hit_one_library'] = {'col': '#0000ff', 'name': 'One Hit, One Genome' }
    for k, t in p_types.items():
        first = True
        for s in sorted(self.fq_screen_data.keys()):
            thisdata = list()
            # Only collect the category (organism) labels on the first pass.
            if len(categories) > 0:
                getCats = False
            for org in sorted(self.fq_screen_data[s]):
                if org == 'total_reads':
                    continue
                try:
                    thisdata.append(self.fq_screen_data[s][org]['percentages'][k])
                except KeyError:
                    # Organism has no value for this category - pad with null.
                    thisdata.append(None)
                if getCats:
                    categories.append(org)
            td = {
                'name': t['name'],
                'stack': s,
                'data': thisdata,
                'color': t['col']
            }
            if first:
                first = False
            else:
                # Link follow-up series to the first of each type -
                # presumably so each category appears once in the
                # Highcharts legend (see Highcharts series.linkedTo).
                td['linkedTo'] = ':previous'
            data.append(td)
    # Inline Highcharts column chart; the data and category arrays are
    # injected as JSON via .format() below (hence the doubled braces).
    html = '<div id="fq_screen_plot" class="hc-plot"></div> \n\
<script type="text/javascript"> \n\
fq_screen_data = {};\n\
fq_screen_categories = {};\n\
$(function () {{ \n\
$("#fq_screen_plot").highcharts({{ \n\
chart: {{ type: "column", backgroundColor: null }}, \n\
title: {{ text: "FastQ Screen Results" }}, \n\
xAxis: {{ categories: fq_screen_categories }}, \n\
yAxis: {{ \n\
max: 100, \n\
min: 0, \n\
title: {{ text: "Percentage Aligned" }} \n\
}}, \n\
tooltip: {{ \n\
formatter: function () {{ \n\
return "<b>" + this.series.stackKey.replace("column","") + " - " + this.x + "</b><br/>" + \n\
this.series.name + ": " + this.y + "%<br/>" + \n\
"Total Alignment: " + this.point.stackTotal + "%"; \n\
}}, \n\
}}, \n\
plotOptions: {{ \n\
column: {{ \n\
pointPadding: 0, \n\
groupPadding: 0.02, \n\
stacking: "normal" }} \n\
}}, \n\
series: fq_screen_data \n\
}}); \n\
}}); \n\
</script>'.format(json.dumps(data), json.dumps(categories))
    return html
def parse_minionqc_report(self, s_name, f):
    '''
    Parses minionqc's 'summary.yaml' report file for results.

    Stores the "All reads" stats in self.minionqc_data and the single
    quality-filtered ("Q>=x") section in self.qfilt_data, both keyed by
    sample name. The raw parsed YAML is kept in self.minionqc_raw_data.
    '''
    try:
        # Parsing as OrderedDict is slightly messier with YAML
        # http://stackoverflow.com/a/21048064/713980
        def dict_constructor(loader, node):
            return OrderedDict(loader.construct_pairs(node))
        yaml.add_constructor(yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG, dict_constructor)
        # NOTE(review): the constructor above is registered on the default
        # Loader, but safe_load uses SafeLoader - so it likely has no
        # effect here. TODO confirm.
        summary_dict = yaml.safe_load(f)
    except Exception as e:
        # NOTE(review): 'f' here is the file *contents*, so this logs the
        # whole file rather than its name - verify that is intended.
        log.error("Error parsing MinIONQC input file: {}".format(f))
        return
    # Deep copy so the flattening below doesn't mutate the raw copy.
    self.minionqc_raw_data[s_name] = copy.deepcopy(summary_dict)
    # get q value threshold used for reads
    q_threshold = None
    for k in summary_dict.keys():
        if k.startswith('Q>='):
            q_threshold = k
    data_dict = {}
    data_dict['all'] = summary_dict['All reads']  # all reads
    data_dict['q_filt'] = summary_dict[q_threshold]  # quality filtered reads
    for q_key in ['all', 'q_filt']:
        # Flatten the nested 'reads' / 'gigabases' sub-dicts into
        # top-level '<section> <stat>' keys.
        for key_1 in ['reads', 'gigabases']:
            for key_2 in data_dict[q_key][key_1]:
                new_key = '{} {}'.format(key_1, key_2)
                data_dict[q_key][new_key] = data_dict[q_key][key_1][key_2]
            data_dict[q_key].pop(key_1)  # removes key after flattening
    self.minionqc_data[s_name] = data_dict['all']  # stats for all reads
    self.qfilt_data[s_name] = data_dict['q_filt']  # stats for q-filtered reads
    self.q_threshold_list.add(q_threshold)
def headers_to_use(self):
    '''
    Define the column configuration used in the MultiQC stats tables.

    Returns an OrderedDict mapping MinIONQC metric names to their table
    header settings (title, description, formatting, colour scale).
    '''
    headers = OrderedDict([
        ('total.reads', {
            'title': 'Total reads',
            'description': 'Total number of reads',
            'format': '{:,.0f}',
            'scale': 'Greys',
        }),
        ('total.gigabases', {
            'title': 'Total bases (GB)',
            'description': 'Total bases',
            'format': '{:,.2f}',
            'scale': 'Blues',
        }),
        ('N50.length', {
            'title': 'Reads N50',
            'description': 'Minimum read length needed to cover 50% of all reads',
            'format': '{:,.0f}',
            'scale': 'Purples',
        }),
        ('mean.q', {
            'title': 'Mean Q score',
            'description': 'Mean quality of reads',
            'min': 0,
            'max': 15,
            'format': '{:,.1f}',
            'hidden': True,
            'scale': 'Greens',
        }),
        ('median.q', {
            'title': 'Median Q score',
            'description': 'Median quality of reads',
            'min': 0,
            'max': 15,
            'format': '{:,.1f}',
            'scale': 'Greens',
        }),
        ('mean.length', {
            'title': 'Mean length (bp)',
            'description': 'Mean read length',
            'format': '{:,.0f}',
            'hidden': True,
            'scale': 'Blues',
        }),
        ('median.length', {
            'title': 'Median length (bp)',
            'description': 'Median read length',
            'format': '{:,.0f}',
            'scale': 'Blues',
        }),
    ])
    # Give every column a deterministic row ID derived from its title,
    # to avoid duplicates between tables
    for settings in headers.values():
        slug = re.sub('[^0-9a-zA-Z]+', '_', settings['title'])
        settings['rid'] = "rid_{}".format(slug)
    return headers
"resource": ""
} |
def table_qALL(self):
    """ Add a report section with a table of MinIONQC stats for all reads. """
    table_config = {
        'namespace': 'MinIONQC',
        'id': 'minionqc-stats-qAll-table',
        'table_title': 'MinIONQC Stats: All reads',
    }
    stats_table = table.plot(self.minionqc_data, self.headers_to_use(), table_config)
    self.add_section(
        name='Stats: All reads',
        anchor='minionqc-stats-qAll',
        description='MinIONQC statistics for all reads',
        plot=stats_table,
    )
"resource": ""
} |
def table_qfiltered(self):
    """ Add a report section with a table of stats for quality-filtered reads.

    If reports with more than one quality threshold were parsed, a warning
    box is added to the section and a warning is logged.
    """
    thresholds = ', '.join(list(self.q_threshold_list))
    # Bug fix: corrected user-facing typo 'Quailty' -> 'Quality'
    description = 'MinIONQC statistics for quality filtered reads. ' + \
        'Quality threshold used: {}.'.format(thresholds)
    if len(self.q_threshold_list) > 1:
        description += '''
        <div class="alert alert-warning">
          <span class="glyphicon glyphicon-warning-sign"></span>
          <strong>Warning!</strong> More than one quality thresholds were present.
        </div>
        '''
        log.warning('More than one quality thresholds were present. Thresholds: {}.'.format(thresholds))
    self.add_section(
        name='Stats: Quality filtered reads',
        anchor='minionqc-stats-qFilt',
        description=description,
        plot=table.plot(
            self.qfilt_data,
            self.headers_to_use(),
            {
                'namespace': 'MinIONQC',
                'id': 'minionqc-stats-qFilt-table',
                'table_title': 'MinIONQC Stats: Quality filtered reads',
            }
        )
    )
"resource": ""
} |
def parse_samtools_rmdup(self):
    """ Find Samtools rmdup logs, parse duplicate counts, and add a bar
    plot plus a general statistics column.

    :returns: number of samples found
    """
    self.samtools_rmdup = dict()
    # Example log line:
    # [bam_rmdupse_core] 26602816 / 103563641 = 0.2569 in library ' '
    # Bug fix: raw string literal - the old plain string contained invalid
    # escape sequences (\[ \d), deprecated since Python 3.6.
    dups_regex = re.compile(r"\[bam_rmdups?e?_core\] (\d+) / (\d+) = (\d+\.\d+) in library '(.*)'")
    for f in self.find_log_files('samtools/rmdup', filehandles=True):
        s_name = f['s_name']
        for l in f['f']:
            match = dups_regex.search(l)
            if match:
                # Prefer the library name from the log, if present
                library_name = match.group(4).strip()
                if library_name != '':
                    s_name = library_name
                if s_name in self.samtools_rmdup:
                    log.debug("Duplicate sample name found in {}! Overwriting: {}".format(f['fn'], s_name))
                self.add_data_source(f, s_name)
                self.samtools_rmdup[s_name] = dict()
                self.samtools_rmdup[s_name]['n_dups'] = int(match.group(1))
                self.samtools_rmdup[s_name]['n_tot'] = int(match.group(2))
                self.samtools_rmdup[s_name]['n_unique'] = int(match.group(2)) - int(match.group(1))
                self.samtools_rmdup[s_name]['pct_dups'] = float(match.group(3)) * 100

    # Filter to strip out ignored sample names
    self.samtools_rmdup = self.ignore_samples(self.samtools_rmdup)

    if len(self.samtools_rmdup) > 0:
        # Write parsed report data to a file
        self.write_data_file(self.samtools_rmdup, 'multiqc_samtools_rmdup')

        # Make a bar plot showing duplicates
        keys = OrderedDict()
        keys['n_unique'] = {'name': 'Non-duplicated reads'}
        keys['n_dups'] = {'name': 'Duplicated reads'}
        pconfig = {
            'id': 'samtools_rmdup_plot',
            'title': 'Samtools rmdup: Duplicate alignments',
            'ylab': 'Number of reads',
            'yDecimals': False
        }
        self.add_section(
            name='Duplicates removed',
            anchor='samtools-rmdup',
            plot=bargraph.plot(self.samtools_rmdup, keys, pconfig)
        )

        # Add a column to the General Stats table
        stats_headers = OrderedDict()
        stats_headers['pct_dups'] = {
            'title': '% Dups',
            'description': 'Percent of duplicate alignments',
            'min': 0,
            'max': 100,
            'suffix': '%',
            'scale': 'OrRd'
        }
        self.general_stats_addcols(self.samtools_rmdup, stats_headers, 'Samtools rmdup')
    return len(self.samtools_rmdup)
"resource": ""
} |
def parse_summary(self, contents):
    """Parse a disambiguate summary file into per-sample read counts.

    :param contents: full text of the summary file (header row + one
        tab-separated row per sample)
    :returns: dict mapping sample name -> counts for 'species_a',
        'species_b' and 'ambiguous' reads
    """
    data = {}
    rows = contents.strip().split('\n')
    # First row is the header - skip it
    for row in rows[1:]:
        fields = row.strip().split('\t')
        counts = {
            'species_a': int(fields[1]),
            'species_b': int(fields[2]),
            'ambiguous': int(fields[3]),
        }
        data[fields[0]] = counts
    return data
"resource": ""
} |
def add_stats_table(self):
    """Add the percentage of reads mapping to species a to the general
    statistics table.

    Bug fix: samples whose counts sum to zero previously raised
    ZeroDivisionError; they now report 0.0 for every category.
    """
    percentages = {}
    for sample, counts in self.data.items():
        total = sum(counts.values())
        if total == 0:
            # Empty sample - avoid dividing by zero
            percentages[sample] = {k: 0.0 for k in counts}
        else:
            percentages[sample] = {k: (v / total) * 100
                                   for k, v in counts.items()}
    headers = {
        'species_a': {
            'title': '% Species a',
            'description': 'Percentage of reads mapping to species a',
            'max': 100,
            'min': 0,
            'suffix': '%',
            'scale': 'YlGn'
        }
    }
    self.general_stats_addcols(percentages, headers)
"resource": ""
} |
def add_stats_plot(self):
    """Add a bar graph of disambiguated alignment counts to the report."""
    categories = OrderedDict([
        ('species_a', {'color': '#437bb1', 'name': 'Species a'}),
        ('species_b', {'color': '#b1084c', 'name': 'Species b'}),
        ('ambiguous', {'color': '#333333', 'name': 'Ambiguous'}),
    ])
    plot_config = {
        'id': "disambiguated_alignments",
        'title': "Disambiguate: Alignment Counts",
        'cpswitch_counts_label': "# Reads",
        'ylab': "# Reads",
    }
    self.add_section(plot=bargraph.plot(self.data, categories, plot_config))
"resource": ""
} |
def parse_htseq_report(self, f):
    """ Parse an HTSeq Count log file.

    :param f: MultiQC file dict; ``f['f']`` iterates over lines
    :returns: dict of special-category counts (without the '__' prefix)
        plus 'assigned', 'total_count' and 'percent_assigned', or None
        if nothing could be parsed.
    """
    keys = ['__no_feature', '__ambiguous', '__too_low_aQual', '__not_aligned', '__alignment_not_unique']
    parsed_data = dict()
    assigned_counts = 0
    for l in f['f']:
        s = l.split("\t")
        if s[0] in keys:
            # Strip the leading '__' from the special counter names
            parsed_data[s[0][2:]] = int(s[-1])
        else:
            try:
                assigned_counts += int(s[-1])
            except (ValueError, IndexError):
                pass
    if len(parsed_data) > 0:
        parsed_data['assigned'] = assigned_counts
        parsed_data['total_count'] = sum([v for v in parsed_data.values()])
        # Bug fix: guard against ZeroDivisionError when all counts are zero
        if parsed_data['total_count'] > 0:
            parsed_data['percent_assigned'] = (float(parsed_data['assigned']) / float(parsed_data['total_count'])) * 100.0
        else:
            parsed_data['percent_assigned'] = 0.0
        return parsed_data
    return None
"resource": ""
} |
def htseq_stats_table(self):
    """ Add the key HTSeq Count stats to the general statistics table at
    the top of the report. """
    headers = OrderedDict([
        ('percent_assigned', {
            'title': '% Assigned',
            'description': '% Assigned reads',
            'max': 100,
            'min': 0,
            'suffix': '%',
            'scale': 'RdYlGn',
        }),
        ('assigned', {
            'title': '{} Assigned'.format(config.read_count_prefix),
            'description': 'Assigned Reads ({})'.format(config.read_count_desc),
            'min': 0,
            'scale': 'PuBu',
            'modify': lambda x: float(x) * config.read_count_multiplier,
            'shared_key': 'read_count',
        }),
    ])
    self.general_stats_addcols(self.htseq_data, headers)
"resource": ""
} |
def htseq_counts_chart(self):
    """ Make the HTSeq Count assignment rates bar plot. """
    cats = OrderedDict()
    cats['assigned'] = {'name': 'Assigned'}
    cats['ambiguous'] = {'name': 'Ambiguous'}
    cats['alignment_not_unique'] = {'name': 'Alignment Not Unique'}
    cats['no_feature'] = {'name': 'No Feature'}
    cats['too_low_aQual'] = {'name': 'Too Low aQual'}
    cats['not_aligned'] = {'name': 'Not Aligned'}
    # Renamed from 'config' to 'pconfig': the old name shadowed the
    # module-level MultiQC config import used by sibling methods.
    pconfig = {
        'id': 'htseq_assignment_plot',
        'title': 'HTSeq: Count Assignments',
        'ylab': '# Reads',
        'hide_zero_cats': False,
        'cpswitch_counts_label': 'Number of Reads'
    }
    return bargraph.plot(self.htseq_data, cats, pconfig)
"resource": ""
} |
def parse_selfsm(self, f):
    """ Parse a VerifyBAMID *.selfSM file.

    :param f: MultiQC file dict (``f['f']`` is the file contents,
        ``f['root']`` the directory it was found in)
    :returns: dict keyed by cleaned sample name; each value maps the
        selfSM column headers to their values (floats where possible).

    Side effect: sets self.hide_chip_columns = False if any CHIP* column
    contains a value other than "NA", so those columns are displayed.
    """
    parsed_data = dict()
    headers = None
    for l in f['f'].splitlines():
        s = l.split("\t")
        if headers is None:
            # First row of the file contains the column headers
            headers = s
        else:
            # First column is the sample name
            s_name = self.clean_s_name(s[0], f['root'])
            parsed_data[s_name] = {}
            for i, v in enumerate(s):
                if i == 0:
                    continue
                # Bug fix: substring test for CHIP columns. The old code
                # used `"CHIP" in [headers[i]]`, which is an *exact*
                # comparison and never matches headers such as CHIPMIX.
                if "CHIP" in headers[i] and v != "NA":
                    self.hide_chip_columns = False
                # Store numeric values as floats, everything else verbatim
                try:
                    parsed_data[s_name][headers[i]] = float(v)
                except ValueError:
                    parsed_data[s_name][headers[i]] = v
    return parsed_data
"resource": ""
} |
def hisat2_general_stats_table(self):
    """ Add the HISAT2 overall alignment rate to the general statistics
    table at the top of the report. """
    headers = OrderedDict([
        ('overall_alignment_rate', {
            'title': '% Aligned',
            'description': 'overall alignment rate',
            'max': 100,
            'min': 0,
            'suffix': '%',
            'scale': 'YlGn',
        }),
    ])
    self.general_stats_addcols(self.hisat2_data, headers)
"resource": ""
} |
q28238 | BaseMultiqcModule.add_section | train | def add_section(self, name=None, anchor=None, description='', comment='', helptext='', plot='', content='', autoformat=True, autoformat_type='markdown'):
        """ Add a section to the module report output.

        :param name: human-readable section title
        :param anchor: unique HTML id for the section (generated from the
            name or a section counter if None)
        :param description: short text shown under the title
        :param comment: user comment (may be overridden from user config)
        :param helptext: longer help text for the section
        :param plot: HTML of a plot to show in this section
        :param content: free-form HTML content
        :param autoformat: if True, dedent the text fields (and render
            them if autoformat_type is 'markdown')
        :param autoformat_type: format of the text fields; 'markdown'
            renders them to HTML
        """
        # Default anchor: derive from the section name if given,
        # otherwise number the section within this module
        if anchor is None:
            if name is not None:
                nid = name.lower().strip().replace(' ','-')
                anchor = '{}-{}'.format(self.anchor, nid)
            else:
                sl = len(self.sections) + 1
                anchor = '{}-section-{}'.format(self.anchor, sl)
        # Skip if user has a config to remove this module section
        if anchor in config.remove_sections:
            logger.debug("Skipping section '{}' because specified in user config".format(anchor))
            return
        # Sanitise anchor ID and check for duplicates
        anchor = report.save_htmlid(anchor)
        # See if we have a user comment in the config
        if anchor in config.section_comments:
            comment = config.section_comments[anchor]
        # Format the content: dedent each text field, then optionally
        # render it as Markdown
        if autoformat:
            if len(description) > 0:
                description = textwrap.dedent(description)
                if autoformat_type == 'markdown':
                    description = markdown.markdown(description)
            if len(comment) > 0:
                comment = textwrap.dedent(comment)
                if autoformat_type == 'markdown':
                    comment = markdown.markdown(comment)
            if len(helptext) > 0:
                helptext = textwrap.dedent(helptext)
                if autoformat_type == 'markdown':
                    helptext = markdown.markdown(helptext)
        # Strip excess whitespace
        description = description.strip()
        comment = comment.strip()
        helptext = helptext.strip()
        self.sections.append({
            'name': name,
            'anchor': anchor,
            'description': description,
            'comment': comment,
            'helptext': helptext,
            'plot': plot,
            'content': content,
            # Only render the section if it has something to show
            'print_section': any([ n is not None and len(n) > 0 for n in [description, comment, helptext, plot, content] ])
        })
"resource": ""
} |
def ignore_samples(self, data):
    """ Return a copy of `data` with any samples matching the user's
    `sample_names_ignore` (glob) or `sample_names_ignore_re` (regex)
    config patterns removed. Non-dict inputs are returned unchanged. """
    try:
        # Preserve the input container type (OrderedDict is a dict
        # subclass, so check it first)
        if isinstance(data, OrderedDict):
            kept = OrderedDict()
        elif isinstance(data, dict):
            kept = dict()
        else:
            return data
        for s_name, value in data.items():
            matched = (
                any(fnmatch.fnmatch(s_name, pat) for pat in config.sample_names_ignore)
                or any(re.match(pat, s_name) for pat in config.sample_names_ignore_re)
            )
            if not matched:
                kept[s_name] = value
        return kept
    except (TypeError, AttributeError):
        # Data wasn't iterable in the expected way - leave it alone
        return data
"resource": ""
} |
def parse_single_report(f):
    """ Parse the output of ``samtools idxstats``.

    :param f: full text of an idxstats report (chrom, length, mapped,
        unmapped - tab separated, one line per sequence)
    :returns: OrderedDict mapping chromosome name -> number of mapped reads
    """
    mapped_per_chrom = OrderedDict()
    for line in f.splitlines():
        fields = line.split("\t")
        try:
            mapped_per_chrom[fields[0]] = int(fields[2])
        except (IndexError, ValueError):
            # Skip malformed / non-data lines
            continue
    return mapped_per_chrom
"resource": ""
} |
def parse_logs(self, f):
    """Parse a HiCExplorer hicBuildMatrix QC log.

    Each non-empty, tab-separated line becomes one entry mapping the
    (normalised, capitalised) row label to a list of its values,
    converted to float where possible.
    """
    data = {}
    for l in f.splitlines():
        # catch empty lines
        if len(l) == 0:
            continue
        s = l.split("\t")
        data_ = []
        for i in s[1:]:
            if len(i) == 0:
                continue
            # Bug fix: str.replace() returns a new string - the original
            # code discarded the result, so values formatted like
            # '(0.5)' or '1,000' could never be parsed as numbers.
            cleaned = i.replace('(', '').replace(')', '').replace(',', '')
            try:
                data_.append(float(cleaned))
            except ValueError:
                # Descriptive text, eg. "Of pairs used:" - keep verbatim
                data_.append(i)
        if len(data_) == 0:
            continue
        # Normalise verbose labels before capitalising
        if s[0].startswith('short range'):
            s[0] = 'short range'
        elif s[0].startswith('same fragment'):
            s[0] = 'same fragment'
        data[s[0].capitalize()] = data_
    return data
"resource": ""
} |
q28242 | MultiqcModule.hicexplorer_basic_statistics | train | def hicexplorer_basic_statistics(self):
        """Create the general statistics for HiCExplorer.

        Builds one general-stats row per parsed hicBuildMatrix log,
        keyed by the log's 'File' field.
        """
        data = {}
        for file in self.mod_data:
            # Older hicBuildMatrix logs report 'Max rest. site distance';
            # newer ones use 'Max library insert size' - probe for which
            # key is present in this log.
            max_distance_key = 'Max rest. site distance'
            total_pairs = self.mod_data[file]['Pairs considered'][0]
            try:
                self.mod_data[file][max_distance_key][0]
            except KeyError:
                max_distance_key = 'Max library insert size'
            data_ = {
                'Pairs considered': self.mod_data[file]['Pairs considered'][0],
                # Stored as fractions here; converted to % by the column
                # 'modify' functions below
                'Pairs used': self.mod_data[file]['Pairs used'][0] / total_pairs,
                'Mapped': self.mod_data[file]['One mate unmapped'][0] / total_pairs,
                'Min rest. site distance': self.mod_data[file]['Min rest. site distance'][0],
                max_distance_key: self.mod_data[file][max_distance_key][0],
            }
            data[self.mod_data[file]['File'][0]] = data_
        headers = OrderedDict()
        headers['Pairs considered'] = {
            'title': '{} Pairs'.format(config.read_count_prefix),
            'description': 'Total number of read pairs ({})'.format(config.read_count_desc),
            'shared_key': 'read_count'
        }
        headers['Pairs used'] = {
            'title': '% Used pairs',
            'max': 100,
            'min': 0,
            'modify': lambda x: x * 100,
            'suffix': '%'
        }
        # 'Mapped' holds the *unmapped* fraction, so invert before
        # converting to a percentage
        headers['Mapped'] = {
            'title': '% Mapped',
            'max': 100,
            'min': 0,
            'modify': lambda x: (1 - x) * 100,
            'scale': 'RdYlGn',
            'suffix': '%'
        }
        headers['Min rest. site distance'] = {
            'title': 'Min RE dist',
            'description': 'Minimum restriction site distance (bp)',
            'format': '{:.0f}',
            'suffix': ' bp'
        }
        headers[max_distance_key] = {
            'title': 'Max RE dist',
            'description': max_distance_key + ' (bp)',
            'format': '{:.0f}',
            'suffix': ' bp'
        }
        self.general_stats_addcols(data, headers)
"resource": ""
} |
def hicexplorer_create_plot(self, pKeyList, pTitle, pId):
    """Create a bar plot of the given hicBuildMatrix metrics.

    :param pKeyList: list of metric names to plot (one colour each,
        taken in order from self.colors)
    :param pTitle: plot title
    :param pId: unique id fragment for the plot element
    :returns: the bargraph plot HTML
    """
    keys = OrderedDict()
    for i, key_ in enumerate(pKeyList):
        keys[key_] = {'color': self.colors[i]}
    data = {}
    for data_ in self.mod_data:
        sample_key = '{}'.format(self.mod_data[data_]['File'][0])
        data[sample_key] = {}
        for key_ in pKeyList:
            data[sample_key][key_] = self.mod_data[data_][key_][0]
    # Renamed from 'config' to 'pconfig': the old name shadowed the
    # module-level MultiQC config import.
    pconfig = {
        'id': 'hicexplorer_' + pId + '_plot',
        'title': pTitle,
        'ylab': 'Number of Reads',
        'cpswitch_counts_label': 'Number of Reads'
    }
    return bargraph.plot(data, keys, pconfig)
"resource": ""
} |
def parse_clusterflow_logs(self, f):
    """ Parse a Cluster Flow log file.

    Collects every command that was run (lines starting '###CFCMD'),
    grouped by the pipeline ID derived from the 'Module' and 'Job ID'
    header lines, into self.clusterflow_commands.
    """
    module = None
    job_id = None
    pipeline_id = None
    for line in f['f']:
        # The module name and job ID together give us the pipeline ID
        mod_match = re.match(r'Module:\s+(.+)$', line)
        if mod_match:
            module = mod_match.group(1)
        job_match = re.match(r'Job ID:\s+(.+)$', line)
        if job_match:
            job_id = job_match.group(1)
            if module is not None:
                pipe_match = re.match(r"(cf_.+)_" + re.escape(module) + r"_\d+$", job_id)
                if pipe_match:
                    pipeline_id = pipe_match.group(1)
        # Collect the commands that have been run
        if line.startswith('###CFCMD'):
            if pipeline_id is None:
                pipeline_id = 'unknown'
            self.clusterflow_commands.setdefault(pipeline_id, list())
            self.clusterflow_commands[pipeline_id].append(line[8:])
"resource": ""
} |
q28245 | MultiqcModule.clusterflow_commands_table | train | def clusterflow_commands_table (self):
        """ Make a table of the Cluster Flow commands.

        For each pipeline, commands are tokenised on whitespace and
        grouped by tool name; tokens that differ between invocations of
        the same tool are replaced with a '[variable]' placeholder.
        """
        # I wrote this when I was tired. Sorry if it's incomprehensible.
        desc = '''Every Cluster Flow run will have many different commands.
            MultiQC splits these by whitespace, collects by the tool name
            and shows the first command found. Any terms not found in <em>all</em> subsequent
            calls are replaced with <code>[variable]</code>
            <em>(typically input and ouput filenames)</em>. Each column is for one Cluster Flow run.'''
        # Loop through pipelines
        tool_cmds = OrderedDict()
        headers = dict()
        for pipeline_id, commands in self.clusterflow_commands.items():
            headers[pipeline_id] = {'scale': False}
            self.var_html = '<span style="background-color:#dedede; color:#999;">[variable]</span>'
            # Group the tokenised commands by their (guessed) tool name
            tool_cmd_parts = OrderedDict()
            for cmd in commands:
                s = cmd.split()
                tool = self._guess_cmd_name(s)
                if tool not in tool_cmd_parts.keys():
                    tool_cmd_parts[tool] = list()
                tool_cmd_parts[tool].append(s)
            # Build a consensus command per tool, blanking variable tokens
            for tool, cmds in tool_cmd_parts.items():
                cons_cmd = self._replace_variable_chunks(cmds)
                # Try again with first two blocks if all variable
                variable_count = cons_cmd.count(self.var_html)
                if variable_count == len(cmds[0]) - 1 and len(cmds[0]) > 2:
                    # Nearly everything was variable - re-group by the
                    # second token (sub-command) and build a consensus for
                    # each sub-command separately
                    for subcmd in set([x[1] for x in cmds]):
                        sub_cons_cmd = self._replace_variable_chunks([cmd for cmd in cmds if cmd[1] == subcmd])
                        tool = "{} {}".format(tool, subcmd)
                        if tool not in tool_cmds:
                            tool_cmds[tool] = dict()
                        tool_cmds[tool][pipeline_id] = '<code style="white-space:nowrap;">{}</code>'.format(" ".join(sub_cons_cmd) )
                else:
                    if tool not in tool_cmds:
                        tool_cmds[tool] = dict()
                    tool_cmds[tool][pipeline_id] = '<code style="white-space:nowrap;">{}</code>'.format(" ".join(cons_cmd) )
        table_config = {
            'namespace': 'Cluster Flow',
            'id': 'clusterflow-commands-table',
            'table_title': 'Cluster Flow Commands',
            'col1_header': 'Tool',
            'sortRows': False,
            'no_beeswarm': True
        }
        self.add_section (
            name = 'Commands',
            anchor = 'clusterflow-commands',
            description = desc,
            plot = table.plot(tool_cmds, headers, table_config)
        )
"resource": ""
} |
q28246 | MultiqcModule._replace_variable_chunks | train | def _replace_variable_chunks(self, cmds):
""" List through a list of command chunks. Return a single list
with any variable bits blanked out. """
cons_cmd = None
while cons_cmd is None:
for cmd in cmds:
if cons_cmd is None:
cons_cmd = cmd[:]
else:
for idx, s in enumerate(cons_cmd):
if s not in cmd:
cons_cmd[idx] = self.var_html
return cons_cmd | python | {
"resource": ""
} |
q28247 | MultiqcModule._guess_cmd_name | train | def _guess_cmd_name(self, cmd):
""" Manually guess some known command names, where we can
do a better job than the automatic parsing. """
# zcat to bowtie
if cmd[0] == 'zcat' and 'bowtie' in cmd:
return 'bowtie'
# samtools
if cmd[0] == 'samtools':
return ' '.join(cmd[0:2])
# java (eg. picard)
if cmd[0] == 'java':
jars = [s for s in cmd if '.jar' in s]
return os.path.basename(jars[0].replace('.jar', ''))
return cmd[0] | python | {
"resource": ""
} |
q28248 | MultiqcModule.clusterflow_pipelines_section | train | def clusterflow_pipelines_section(self):
        """ Generate HTML for section about pipelines, generated from
        information parsed from run files.

        Aggregates per-run-file data by pipeline ID and renders it as a
        table section in the report.
        """
        data = dict()
        pids_guessed = ''
        for f,d in self.clusterflow_runfiles.items():
            pid = d.get('pipeline_id', 'unknown')
            # Mark guessed pipeline IDs with an asterisk and explain it
            # in the section description
            if d.get('pipeline_id_guess', False) is True:
                pid += '*'
                pids_guessed = ' Project IDs with an asterisk may be inaccurate.'
            # Count the number of files going into the first module
            num_starting_files = 0
            for step_name, files in d.get('files',{}).items():
                if step_name.startswith('start'):
                    num_starting_files += len(files)
            # Reformat the date so that column sorting works nicely
            if 'pipeline_start_dateparts' in d:
                dt = d['pipeline_start_dateparts']
                d['pipeline_start'] = '{}-{:02d}-{:02d} {:02d}:{:02d}'.format(dt['year'], dt['month'], dt['day'], dt['hour'], dt['minute'])
            # First run file seen for a pipeline provides its row; later
            # run files only add to the starting-file count
            if pid not in data:
                data[pid] = d
                data[pid]['num_starting_files'] = int(num_starting_files)
            else:
                data[pid]['num_starting_files'] += int(num_starting_files)
        headers = OrderedDict()
        headers['pipeline_name'] = {'title': 'Pipeline Name'}
        headers['pipeline_start'] = {'title': 'Date Started', 'description': 'Date and time that pipeline was started (YYYY-MM-DD HH:SS)'}
        headers['genome'] = {'title': 'Genome ID', 'description': 'ID of reference genome used'}
        headers['num_starting_files'] = {'title': '# Starting Files', 'format': '{:,.0f}', 'description': 'Number of input files at start of pipeline run.'}
        table_config = {
            'namespace': 'Cluster Flow',
            'id': 'clusterflow-pipelines-table',
            'table_title': 'Cluster Flow Pipelines',
            'col1_header': 'Pipeline ID',
            'no_beeswarm': True,
            'save_file': True
        }
        self.add_section (
            name = 'Pipelines',
            anchor = 'clusterflow-pipelines',
            description = 'Information about pipelines is parsed from <code>*.run</code> files. {}'.format(pids_guessed),
            plot = table.plot(data, headers, table_config),
            content = self.clusterflow_pipelines_printout()
        )
"resource": ""
} |
def sortmerna_detailed_barplot(self):
    """ Add a bar plot of the per-database rRNA hit counts from SortMeRNA. """
    # Collect every per-database count metric across all samples,
    # skipping the summary fields and the percentage variants
    excluded = ["total", "rRNA", "non_rRNA"]
    metrics = set()
    for counts in self.sortmerna.values():
        for key in counts:
            if key not in excluded and "_pct" not in key:
                metrics.add(key)
    keys = OrderedDict()
    for key in metrics:
        keys[key] = {'name': key.replace("_count", "")}
    # Config for the plot
    pconfig = {
        'id': 'sortmerna-detailed-plot',
        'title': 'SortMeRNA: Hit Counts',
        'ylab': 'Reads',
    }
    self.add_section(plot=bargraph.plot(self.sortmerna, keys, pconfig))
"resource": ""
} |
def parse_reports(self):
    """ Find RSeQC inner_distance frequency reports, parse their data and
    add a line plot (counts and percentages) to the report.

    :returns: number of samples found
    """
    # Set up vars
    self.inner_distance = dict()
    self.inner_distance_pct = dict()

    # Go through files and parse data
    for f in self.find_log_files('rseqc/inner_distance'):
        if f['s_name'] in self.inner_distance:
            log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
        self.add_data_source(f, section='inner_distance')
        # Saving to a temporary variable for SE checking later
        parsed_data = OrderedDict()
        for l in f['f'].splitlines():
            s = l.split()
            try:
                # Lines are: bin start, bin end, count - index by bin midpoint
                avg_pos = (float(s[0]) + float(s[1])) / 2.0
                parsed_data[avg_pos] = float(s[2])
            except (ValueError, IndexError):
                # Bug fix: was a bare 'except:', which also swallowed
                # KeyboardInterrupt / SystemExit.
                # Don't bother running through whole file if wrong
                break
        # Only add if we actually found something i.e. it was PE data
        if len(parsed_data) > 0:
            self.inner_distance[f['s_name']] = parsed_data

    # Filter to strip out ignored sample names
    self.inner_distance = self.ignore_samples(self.inner_distance)

    if len(self.inner_distance) > 0:
        # Make a normalised percentage version of the data
        for s_name in self.inner_distance:
            self.inner_distance_pct[s_name] = OrderedDict()
            total = sum(self.inner_distance[s_name].values())
            for k, v in self.inner_distance[s_name].items():
                self.inner_distance_pct[s_name][k] = (v / total) * 100

        # Add line graph to section
        pconfig = {
            'id': 'rseqc_inner_distance_plot',
            'title': 'RSeQC: Inner Distance',
            'ylab': 'Counts',
            'xlab': "Inner Distance (bp)",
            'tt_label': "<strong>{point.x} bp</strong>: {point.y:.2f}",
            'data_labels': [
                {'name': 'Counts', 'ylab': 'Counts'},
                {'name': 'Percentages', 'ylab': 'Percentage'}
            ]
        }
        self.add_section(
            name='Inner Distance',
            anchor='rseqc-inner_distance',
            description='<a href="http://rseqc.sourceforge.net/#inner-distance-py" target="_blank">Inner Distance</a>'
                " calculates the inner distance"
                " (or insert size) between two paired RNA reads."
                " Note that this can be negative if fragments overlap.",
            plot=linegraph.plot([self.inner_distance, self.inner_distance_pct], pconfig)
        )

    # Return number of samples found
    return len(self.inner_distance)
"resource": ""
} |
def lane_stats_table(self):
    """ Return a table with overview stats for each bcl2fastq lane for a
    single flow cell. """
    headers = OrderedDict([
        ('total_yield', {
            'title': '{} Total Yield'.format(config.base_count_prefix),
            'description': 'Number of bases ({})'.format(config.base_count_desc),
            'scale': 'Greens',
            'shared_key': 'base_count',
        }),
        ('total', {
            'title': '{} Total Clusters'.format(config.read_count_prefix),
            'description': 'Total number of clusters for this lane ({})'.format(config.read_count_desc),
            'scale': 'Blues',
            'shared_key': 'read_count',
        }),
        ('percent_Q30', {
            'title': '% bases ≥ Q30',
            'description': 'Percentage of bases with greater than or equal to Q30 quality score',
            'suffix': '%',
            'max': 100,
            'min': 0,
            'scale': 'RdYlGn',
        }),
        ('mean_qscore', {
            'title': 'Mean Quality',
            # Bug fix: corrected user-facing typo 'qualty' -> 'quality'
            'description': 'Average phred quality score',
            'min': 0,
            'scale': 'Spectral',
        }),
        ('percent_perfectIndex', {
            'title': '% Perfect Index',
            'description': 'Percent of reads with perfect index (0 mismatches)',
            'max': 100,
            'min': 0,
            'scale': 'RdYlGn',
            'suffix': '%',
        }),
    ])
    table_config = {
        'namespace': 'bcl2fastq',
        'id': 'bcl2fastq-lane-stats-table',
        'table_title': 'bcl2fastq Lane Statistics',
        'col1_header': 'Run ID - Lane',
        'no_beeswarm': True,
    }
    return table.plot(self.bcl2fastq_bylane, headers, table_config)
"resource": ""
} |
def get_bar_data_from_undetermined(self, flowcells):
    """ Get plot data for the most frequent undetermined barcodes.

    :param flowcells: dict of lane id -> lane data; each lane may carry
        an 'unknown_barcodes' mapping of barcode -> count
    :returns: OrderedDict of {barcode: {lane: count}} keeping only the
        20 most frequent barcodes overall (and at most the first 20
        barcodes per lane)
    """
    bar_data = defaultdict(dict)
    for lane_id, lane in flowcells.items():
        try:
            top_barcodes = islice(lane['unknown_barcodes'].items(), 20)
            for barcode, count in top_barcodes:
                bar_data[barcode][lane_id] = count
        except AttributeError:
            # unknown_barcodes wasn't a dict (eg. no barcodes recorded)
            pass
    # Sort barcodes by their total count across lanes (descending)
    ranked = sorted(bar_data.items(), key=lambda kv: sum(kv[1].values()), reverse=True)
    # Keep only the overall top 20
    return OrderedDict(islice(ranked, 20))
"resource": ""
} |
def kallisto_general_stats_table(self):
    """ Add the key Kallisto stats to the general statistics table at the
    top of the report. """
    headers = OrderedDict([
        ('fragment_length', {
            'title': 'Frag Length',
            'description': 'Estimated average fragment length',
            'min': 0,
            'suffix': 'bp',
            'scale': 'RdYlGn',
        }),
        ('percent_aligned', {
            'title': '% Aligned',
            'description': '% processed reads that were pseudoaligned',
            'max': 100,
            'min': 0,
            'suffix': '%',
            'scale': 'YlGn',
        }),
        ('pseudoaligned_reads', {
            'title': '{} Aligned'.format(config.read_count_prefix),
            'description': 'Pseudoaligned reads ({})'.format(config.read_count_desc),
            'min': 0,
            'scale': 'PuRd',
            'modify': lambda x: x * config.read_count_multiplier,
            'shared_key': 'read_count',
        }),
    ])
    self.general_stats_addcols(self.kallisto_data, headers)
"resource": ""
} |
def parse_hicpro_stats(self, f, rsection):
    """ Parse a HiC-Pro stats file, storing the values under a sample
    name derived from the parent directory name.

    :param f: MultiQC file dict
    :param rsection: report section name to record as the data source
    """
    s_name = self.clean_s_name(os.path.basename(f['root']), os.path.dirname(f['root']))
    self.hicpro_data.setdefault(s_name, {})
    self.add_data_source(f, s_name, section=rsection)
    for line in f['f'].splitlines():
        if line.startswith('#'):
            continue  # skip comment lines
        fields = line.split("\t")
        if fields[0] in self.hicpro_data[s_name]:
            log.debug("Duplicated keys found! Overwriting: {}".format(fields[0]))
        self.hicpro_data[s_name][fields[0]] = int(fields[1])
"resource": ""
} |
def hicpro_mapping_chart(self):
    """ Generate the HiC-Pro aligned reads plot (one subplot per mate). """
    # Specify the order of the different possible categories
    keys = OrderedDict()
    keys['Full_Alignments_Read'] = {'color': '#005ce6', 'name': 'Full reads Alignments'}
    keys['Trimmed_Alignments_Read'] = {'color': '#3385ff', 'name': 'Trimmed reads Alignments'}
    keys['Failed_To_Align_Read'] = {'color': '#a9a2a2', 'name': 'Failed To Align'}
    # One dataset per mate (R1 / R2)
    data = [{}, {}]
    for s_name in self.hicpro_data:
        for r in [1, 2]:
            data[r - 1]['{} [R{}]'.format(s_name, r)] = {
                'Full_Alignments_Read': self.hicpro_data[s_name]['global_R{}'.format(r)],
                'Trimmed_Alignments_Read': self.hicpro_data[s_name]['local_R{}'.format(r)],
                'Failed_To_Align_Read': int(self.hicpro_data[s_name]['total_R{}'.format(r)]) - int(self.hicpro_data[s_name]['mapped_R{}'.format(r)])
            }
    # Config for the plot
    # Bug fix: the dict previously defined 'ylab' twice; the first entry
    # was silently overridden and has been removed.
    config = {
        'id': 'hicpro_mapping_stats_plot',
        'title': 'HiC-Pro: Mapping Statistics',
        'ylab': '# Reads: Read 1',
        'data_labels': [
            {'name': 'Read 1', 'ylab': '# Reads: Read 1'},
            {'name': 'Read 2', 'ylab': '# Reads: Read 2'}
        ]
    }
    return bargraph.plot(data, [keys, keys], config)
"resource": ""
} |
def hicpro_pairing_chart(self):
    """ Generate the read-pairing statistics bar plot. """
    # Specify the order of the different possible categories
    keys = OrderedDict()
    keys['Unique_paired_alignments'] = {'color': '#005ce6', 'name': 'Uniquely Aligned'}
    keys['Low_qual_pairs'] = {'color': '#b97b35', 'name': 'Low Quality'}
    keys['Pairs_with_singleton'] = {'color': '#ff9933', 'name': 'Singleton'}
    keys['Multiple_pairs_alignments'] = {'color': '#e67300', 'name': 'Multi Aligned'}
    # Bug fix: key was misspelled 'Unmapped_airs', which never matches
    # the 'Unmapped_pairs' field reported in HiC-Pro .mpairstat files,
    # so this category always showed empty.
    keys['Unmapped_pairs'] = {'color': '#a9a2a2', 'name': 'Failed To Align'}
    # Config for the plot
    config = {
        'id': 'hicpro_pairing_stats_plot',
        'title': 'HiC-Pro: Pairing Statistics',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads'
    }
    return bargraph.plot(self.hicpro_data, keys, config)
"resource": ""
} |
def hicpro_filtering_chart(self):
    """ Generate the HiC-Pro filtering statistics plot. """
    # Category order and colours for the stacked bars
    keys = OrderedDict([
        ('Valid_interaction_pairs_FF', {'color': '#ccddff', 'name': 'Valid Pairs FF'}),
        ('Valid_interaction_pairs_RR', {'color': '#6699ff', 'name': 'Valid Pairs RR'}),
        ('Valid_interaction_pairs_RF', {'color': '#0055ff', 'name': 'Valid Pairs RF'}),
        ('Valid_interaction_pairs_FR', {'color': '#003399', 'name': 'Valid Pairs FR'}),
        ('Self_Cycle_pairs', {'color': '#ffad99', 'name': 'Same Fragment - Self-Circle'}),
        ('Dangling_end_pairs', {'color': '#ff5c33', 'name': 'Same Fragment - Dangling Ends'}),
        ('Religation_pairs', {'color': '#cc2900', 'name': 'Re-ligation'}),
        ('Filtered_pairs', {'color': '#661400', 'name': 'Filtered pairs'}),
        ('Dumped_pairs', {'color': '#330a00', 'name': 'Dumped pairs'}),
    ])
    plot_config = {
        'id': 'hicpro_filtering_plot',
        'title': 'HiC-Pro: Filtering Statistics',
        'ylab': '# Read Pairs',
        'cpswitch_counts_label': 'Number of Read Pairs',
    }
    return bargraph.plot(self.hicpro_data, keys, plot_config)
} |
def hicpro_contact_chart (self):
    """Bar graph of unique (cis/trans) and duplicate Hi-C contacts."""
    categories = [
        ('cis_shortRange', '#0039e6', 'Unique: cis <= 20Kbp'),
        ('cis_longRange', '#809fff', 'Unique: cis > 20Kbp'),
        ('trans_interaction', '#009933', 'Unique: trans'),
        ('duplicates', '#a9a2a2', 'Duplicate read pairs'),
    ]
    # Display order of the categories matters, so build an OrderedDict
    keys = OrderedDict()
    for stat_key, colour, label in categories:
        keys[stat_key] = {'color': colour, 'name': label}
    plot_config = {
        'id': 'hicpro_contact_plot',
        'title': 'HiC-Pro: Contact Statistics',
        'ylab': '# Pairs',
        'cpswitch_counts_label': 'Number of Pairs'
    }
    return bargraph.plot(self.hicpro_data, keys, plot_config)
"resource": ""
} |
def hicpro_capture_chart (self):
    """Bar graph of capture Hi-C statistics.

    Returns False when no sample reports any capture-related keys
    (i.e. the capture protocol was not used), otherwise the bargraph.
    """
    keys = OrderedDict()
    keys['valid_pairs_on_target_cap_cap'] = { 'color': '#0039e6', 'name': 'Capture-Capture interactions' }
    keys['valid_pairs_on_target_cap_rep'] = { 'color': '#809fff', 'name': 'Capture-Reporter interactions' }
    keys['valid_pairs_off_target'] = { 'color': '#cccccc', 'name': 'Off-target valid pairs' }
    # Count how many capture keys are present across all samples.
    # (Previously this was `sum([1 if ... else 0])` over a one-element
    # list per key - same result, needlessly indirect.)
    num_samples = sum(
        1
        for s_name in self.hicpro_data
        for k in keys
        if k in self.hicpro_data[s_name]
    )
    if num_samples == 0:
        # No capture info available - skip this section
        return False
    # Config for the plot
    config = {
        'id': 'hicpro_cap_plot',
        'title': 'HiC-Pro: Capture Statistics',
        'ylab': '# Pairs',
        'cpswitch_counts_label': 'Number of Pairs'
    }
    return bargraph.plot(self.hicpro_data, keys, config)
"resource": ""
} |
def parse_plotCorrelation(self):
    """Locate and parse plotCorrelation output files.

    Populates self.deeptools_plotCorrelationData and, when any data was
    found, adds a correlation heatmap section to the report. Returns the
    number of parsed samples, or None if no usable matrix rows remain.
    """
    self.deeptools_plotCorrelationData = dict()
    for f in self.find_log_files('deeptools/plotCorrelationData', filehandles=False):
        parsed_data, samples = self.parsePlotCorrelationData(f)
        # NOTE: 'samples' from the last parsed file is reused below as
        # the heatmap axis labels (matches the original behaviour).
        for sample, values in parsed_data.items():
            if sample in self.deeptools_plotCorrelationData:
                log.warning("Replacing duplicate sample {}.".format(sample))
            self.deeptools_plotCorrelationData[sample] = values
        if len(parsed_data) > 0:
            self.add_data_source(f, section='plotCorrelation')
    if len(self.deeptools_plotCorrelationData) > 0:
        pconfig = {
            'id': 'deeptools_correlation_plot',
            'title': 'deeptools: Correlation Plot',
        }
        # Assemble the matrix rows in the sample order of the last file
        matrix = []
        for s_name in samples:
            try:
                matrix.append(self.deeptools_plotCorrelationData[s_name])
            except KeyError:
                pass
        if len(matrix) == 0:
            log.debug('No valid data for correlation plot')
            return None
        self.add_section(
            name="Correlation heatmap",
            anchor="deeptools_correlation",
            description="Pairwise correlations of samples based on distribution of sequence reads",
            plot=heatmap.plot(matrix, samples, samples, pconfig)
        )
    return len(self.deeptools_plotCorrelationData)
"resource": ""
} |
def parse_featurecounts_report (self, f):
    """Parse a featureCounts summary file.

    The file is a tab-separated matrix: a 'Status' header row naming the
    input files, then one row per assignment category with integer
    counts per file. Per-sample results (plus a computed Total and
    percent_assigned) are stored in self.featurecounts_data.
    """
    file_names = list()
    parsed_data = dict()
    for line in f['f'].splitlines():
        fields = line.split("\t")
        if len(fields) < 2:
            continue
        if fields[0] == 'Status':
            # Header row - remaining columns are the input file names
            file_names.extend(fields[1:])
        else:
            k = fields[0]
            # Remember the category order across all parsed files
            if k not in self.featurecounts_keys:
                self.featurecounts_keys.append(k)
            counts = []
            for val in fields[1:]:
                try:
                    counts.append(int(val))
                except ValueError:
                    pass
            if len(counts) > 0:
                parsed_data[k] = counts
    # Sanity check - format and parsing above are quite generic, so make
    # sure this really was a featureCounts summary
    if 'Assigned' not in parsed_data.keys():
        return None
    for idx, f_name in enumerate(file_names):
        # Clean up sample name
        s_name = self.clean_s_name(f_name, f['root'])
        # Reorganise the parsed matrix column for this sample and
        # accumulate the total count
        data = dict()
        data['Total'] = 0
        for k in parsed_data:
            data[k] = parsed_data[k][idx]
            data['Total'] += parsed_data[k][idx]
        # Calculate the percent aligned if we can
        try:
            data['percent_assigned'] = (float(data['Assigned'])/float(data['Total'])) * 100.0
        except (KeyError, ZeroDivisionError):
            pass
        # Add to the main dictionary
        if len(data) > 1:
            if s_name in self.featurecounts_data:
                log.debug("Duplicate sample name found! Overwriting: {}".format(s_name))
            self.add_data_source(f, s_name)
            self.featurecounts_data[s_name] = data
"resource": ""
} |
def featurecounts_stats_table(self):
    """Add featureCounts assignment summary columns to the general
    statistics table at the top of the report."""
    headers = OrderedDict([
        ('percent_assigned', {
            'title': '% Assigned',
            'description': '% Assigned reads',
            'max': 100,
            'min': 0,
            'suffix': '%',
            'scale': 'RdYlGn'
        }),
        ('Assigned', {
            'title': '{} Assigned'.format(config.read_count_prefix),
            'description': 'Assigned reads ({})'.format(config.read_count_desc),
            'min': 0,
            'scale': 'PuBu',
            'modify': lambda x: float(x) * config.read_count_multiplier,
            'shared_key': 'read_count'
        }),
    ])
    self.general_stats_addcols(self.featurecounts_data, headers)
"resource": ""
} |
def featureCounts_chart (self):
    """Bar graph of featureCounts read assignment categories."""
    plot_config = {
        'id': 'featureCounts_assignment_plot',
        'title': 'featureCounts: Assignments',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads'
    }
    # Categories were collected in order while parsing the reports
    return bargraph.plot(self.featurecounts_data, self.featurecounts_keys, plot_config)
"resource": ""
} |
def smooth_line_data(data, numpoints, sumcounts=True):
    """
    Take an x-y dataset and use binning to smooth it down to at most
    (approximately) ``numpoints`` datapoints per sample.

    data: dict of {s_name: {x: y}}
    numpoints: maximum number of points to keep per sample
    sumcounts: if True each bin's value is the sum of its members,
        otherwise the mean.

    Bug fixes over the previous version: the datapoint that triggered a
    full bin was silently discarded, and any trailing partial bin was
    dropped entirely - both are now included in the output.
    """
    smoothed = {}
    for s_name, d in data.items():
        # No smoothing needed if there are already few enough points
        if len(d) <= numpoints:
            smoothed[s_name] = d
            continue
        smoothed[s_name] = OrderedDict()
        binsize = len(d) / numpoints
        if binsize < 1:
            binsize = 1
        binvals = []
        last_x = None
        for x in sorted(d):
            binvals.append(d[x])
            last_x = x
            if len(binvals) >= binsize:
                # Bin is full - emit it at the current x position
                if sumcounts is True:
                    v = sum(binvals)
                else:
                    v = sum(binvals) / len(binvals)
                smoothed[s_name][x] = v
                binvals = []
        if binvals:
            # Flush the trailing partial bin at the last x seen
            if sumcounts is True:
                v = sum(binvals)
            else:
                v = sum(binvals) / len(binvals)
            smoothed[s_name][last_x] = v
    return smoothed
"resource": ""
} |
def parse_reports(self):
    """Find Picard ValidateSamFile reports and parse them.

    Dispatches each report to the VERBOSE or SUMMARY parser, then feeds
    the combined results into the general stats table, the report
    section and the parsed-data file. Returns the number of samples.
    """
    data = _parse_reports_by_type(self)
    if not data:
        return len(data)
    # Strip out ignored sample names (REQUIRED)
    data = self.ignore_samples(data)
    # Populate the general stats table
    _add_data_to_general_stats(self, data)
    # Add any found data to the report
    _add_section_to_report(self, data)
    # Write parsed data to a file
    self.write_data_file(data, 'multiqc_picard_validatesamfile')
    self.picard_ValidateSamFile_data = data  # Seems like the right thing to do
    return len(data)
"resource": ""
} |
def _parse_reports_by_type(self):
    """Collect and parse all ValidateSamFile logs, keyed by sample name.

    The first line of each file decides the parser: 'No errors found'
    reports, VERBOSE reports (first line starts with ERROR/WARNING),
    and SUMMARY reports for everything else.
    """
    data = dict()
    for file_meta in self.find_log_files('picard/sam_file_validation', filehandles=True):
        sample = file_meta['s_name']
        if sample in data:
            log.debug("Duplicate sample name found! Overwriting: {}".format(sample))
        filehandle = file_meta['f']
        first_line = filehandle.readline().rstrip()
        filehandle.seek(0)  # Rewind so the chosen parser sees the whole file
        if 'No errors found' in first_line:
            data[sample] = _parse_no_error_report()
        elif first_line.startswith(('ERROR', 'WARNING')):
            data[sample] = _parse_verbose_report(filehandle)
        else:
            data[sample] = _parse_summary_report(filehandle)
    return data
"resource": ""
} |
q28267 | _histogram_data | train | def _histogram_data(iterator):
""" Yields only the row contents that contain the histogram entries """
histogram_started = False
header_passed = False
for l in iterator:
if '## HISTOGRAM' in l:
histogram_started = True
elif histogram_started:
if header_passed:
values = l.rstrip().split("\t")
problem_type, name = values[0].split(':')
yield problem_type, name, int(values[1])
elif l.startswith('Error Type'):
header_passed = True | python | {
"resource": ""
} |
def _add_data_to_general_stats(self, data):
    """Push per-sample error/warning counts and validation status into
    the module's general stats table (Picard-module specific)."""
    headers = _get_general_stats_headers()
    self.general_stats_headers.update(headers)
    header_names = ('ERROR_count', 'WARNING_count', 'file_validation_status')
    for sample, sample_data in data.items():
        row = {column: sample_data[column] for column in header_names}
        if sample not in self.general_stats_data:
            self.general_stats_data[sample] = dict()
        # Reveal the status column as soon as any sample fails validation
        if sample_data['file_validation_status'] != 'pass':
            headers['file_validation_status']['hidden'] = False
        self.general_stats_data[sample].update(row)
"resource": ""
} |
q28269 | _generate_overview_note | train | def _generate_overview_note(pass_count, only_warning_count, error_count, total_count):
""" Generates and returns the HTML note that provides a summary of validation status. """
note_html = ['<div class="progress">']
pbars = [
[ float(error_count), 'danger', 'had errors' ],
[ float(only_warning_count), 'warning', 'had warnings' ],
[ float(pass_count), 'success', 'passed' ]
]
for b in pbars:
if b[0]:
note_html.append(
'<div class="progress-bar progress-bar-{pbcol}" style="width: {pct}%" data-toggle="tooltip" title="{count} {sample} {txt}">{count}</div>'. \
format(
pbcol = b[1],
count = int(b[0]),
pct = (b[0]/float(total_count))*100.0,
txt = b[2],
sample = 'samples' if b[0] > 1 else 'sample'
)
)
note_html.append('</div>')
return "\n".join(note_html) | python | {
"resource": ""
} |
def _generate_detailed_table(data):
    """Build the HTML table detailing every warning/error type found.

    Extra columns are created lazily: only problem types that actually
    occur in the parsed data get a (hidden-by-default) column, and the
    corresponding count column is un-hidden.
    """
    headers = _get_general_stats_headers()
    column_specs = (
        (WARNING_DESCRIPTIONS, 'WARNING_count', 'WARNING', 'warnings'),
        (ERROR_DESCRIPTIONS, 'ERROR_count', 'ERROR', 'errors'),
    )
    for problems in data.values():
        for problem in problems:
            for descriptions, count_col, namespace, shared in column_specs:
                # Re-check membership each time so a problem type is
                # only ever added once
                if problem not in headers and problem in descriptions:
                    headers[count_col]['hidden'] = False
                    headers[problem] = {
                        'description': descriptions[problem],
                        'namespace': namespace,
                        'scale': headers[count_col]['scale'],
                        'format': '{:.0f}',
                        'shared_key': shared,
                        'hidden': True,  # Hide by default; to unclutter things.
                    }
    table_config = {
        'table_title': 'Picard: SAM/BAM File Validation',
    }
    return table.plot(data=data, headers=headers, pconfig=table_config)
"resource": ""
} |
def busco_plot (self, lin):
    """Bar graph of BUSCO results for one lineage dataset.

    Only samples whose report used the given lineage are included.
    """
    lineage_data = {
        s_name: d for s_name, d in self.busco_data.items()
        if d.get('lineage_dataset') == lin
    }
    # Category order and colours for the stacked bars
    colours = {
        'complete_single_copy': '#7CB5EC',
        'complete_duplicated': '#434348',
        'fragmented': '#F7A35C',
        'missing': '#FF3C50',
    }
    keys = OrderedDict()
    for k in ('complete_single_copy', 'complete_duplicated', 'fragmented', 'missing'):
        keys[k] = {'name': self.busco_keys[k], 'color': colours[k]}
    # Config for the plot - the lineage name is sanitised into the id
    config = {
        'id': 'busco_plot_{}'.format(re.sub('\W+', '_', str(lin))),
        'title': 'BUSCO: Assessment Results' if lin is None else 'BUSCO Assessment Results: {}'.format(lin),
        'ylab': '# BUSCOs',
        'cpswitch_counts_label': 'Number of BUSCOs'
    }
    return bargraph.plot(lineage_data, keys, config)
"resource": ""
} |
def trimmomatic_barplot (self):
    """Bar graph of read survival categories through Trimmomatic."""
    categories = [
        ('surviving', '#437bb1', 'Surviving Reads'),
        ('both_surviving', '#f7a35c', 'Both Surviving'),
        ('forward_only_surviving', '#e63491', 'Forward Only Surviving'),
        ('reverse_only_surviving', '#b1084c', 'Reverse Only Surviving'),
        ('dropped', '#7f0000', 'Dropped'),
    ]
    # Display order of the categories matters, so build an OrderedDict
    keys = OrderedDict()
    for key, colour, label in categories:
        keys[key] = {'color': colour, 'name': label}
    pconfig = {
        'id': 'trimmomatic_plot',
        'title': 'Trimmomatic: Surviving Reads',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads'
    }
    self.add_section(plot=bargraph.plot(self.trimmomatic, keys, pconfig))
"resource": ""
} |
def parse_peddy_summary(self, f):
    """Parse the tab-separated peddy summary file.

    Returns {sample_id: {column: value}} with numeric values cast to
    float, or None if no data rows were parsed. Column index 1 is the
    sample id and is not stored as a stat.
    """
    parsed_data = dict()
    headers = None
    for line in f['f'].splitlines():
        fields = line.split("\t")
        if headers is None:
            # First line is the header row; strip any leading '#'
            fields[0] = fields[0].lstrip('#')
            headers = fields
            continue
        row = dict()
        for idx, value in enumerate(fields):
            if idx == 1:
                continue  # sample name column, used as the key below
            try:
                row[headers[idx]] = float(value)
            except ValueError:
                row[headers[idx]] = value
        parsed_data[fields[1]] = row
    if len(parsed_data) == 0:
        return None
    return parsed_data
"resource": ""
} |
def parse_peddy_csv(self, f, pattern):
    """Parse one of peddy's CSV outputs (e.g. het_check, sex_check,
    ped_check).

    Column names get '_<pattern>' appended so outputs from different
    checks can be merged per sample. For sex_check the 'error' flag is
    inverted so that "True" means the sex prediction was correct.
    Sample rows are keyed by sample_id, or 'sample_a-sample_b' for
    pairwise outputs. Returns None when no samples could be parsed.
    """
    parsed_data = dict()
    headers = None
    s_name_idx = None
    for line in f['f'].splitlines():
        fields = line.split(",")
        if headers is None:
            headers = fields
            try:
                s_name_idx = [headers.index("sample_id")]
            except ValueError:
                try:
                    s_name_idx = [headers.index("sample_a"), headers.index("sample_b")]
                except ValueError:
                    log.warn("Could not find sample name in Peddy output: {}".format(f['fn']))
                    return None
            continue
        s_name = '-'.join([fields[idx] for idx in s_name_idx])
        row = dict()
        for idx, value in enumerate(fields):
            if idx in s_name_idx:
                continue
            if headers[idx] == "error" and pattern == "sex_check":
                # Invert so the column reports "sex prediction correct"
                value = "True" if value == "False" else "False"
            # Suffix the key with the pattern name
            key = headers[idx] + "_" + pattern
            try:
                row[key] = float(value)
            except ValueError:
                row[key] = value
        parsed_data[s_name] = row
    if len(parsed_data) == 0:
        return None
    return parsed_data
"resource": ""
} |
def peddy_general_stats_table(self):
    """Add key peddy columns to the report's general statistics table.

    The family ID column is hidden when every sample shares one family.
    """
    family_ids = [x.get('family_id') for x in self.peddy_data.values()]
    # Hide the family column when it carries no information
    single_family = True if all([v == family_ids[0] for v in family_ids]) else False
    headers = OrderedDict()
    headers['family_id'] = {
        'title': 'Family ID',
        'hidden': single_family,
    }
    headers['ancestry-prediction'] = {
        'title': 'Ancestry',
        'description': 'Ancestry Prediction',
    }
    headers['ancestry-prob_het_check'] = {
        'title': 'P(Ancestry)',
        'description': 'Probability predicted ancestry is correct.'
    }
    headers['sex_het_ratio'] = {
        'title': 'Sex / Het Ratio',
    }
    headers['error_sex_check'] = {
        'title': 'Correct Sex',
        'description': 'Displays False if error in sample sex prediction',
    }
    headers['predicted_sex_sex_check'] = {
        'title': 'Sex',
        'description': 'Predicted sex'
    }
    self.general_stats_addcols(self.peddy_data, headers)
"resource": ""
} |
def add_barplot(self):
    """Bar graph of duplicate vs non-duplicate reads found by Samblaster."""
    cats = OrderedDict()
    for key, label in (('n_nondups', 'Non-duplicates'), ('n_dups', 'Duplicates')):
        cats[key] = {'name': label}
    pconfig = {
        'id': 'samblaster_duplicates',
        'title': 'Samblaster: Number of duplicate reads',
        'ylab': 'Number of reads'
    }
    self.add_section(plot=bargraph.plot(self.samblaster_data, cats, pconfig))
"resource": ""
} |
def parse_samblaster(self, f):
    """Parse a samblaster stderr log for duplicate counts.

    The sample name is taken, in order of preference, from the input
    file name samblaster reports, from the RG ID tag of a preceding
    'bwa mem' command (when samblaster runs in a pipe), or from the log
    file name itself.
    """
    # Raw strings so the regex escapes are not mangled by Python string
    # escaping (previously these were plain strings with \d / \S / \\\\t,
    # which rely on deprecated invalid-escape behaviour). The RG pattern
    # matches a literal backslash-t written out in the logged command.
    dups_regex = r"samblaster: (Removed|Marked) (\d+) of (\d+) \((\d+.\d+)%\) read ids as duplicates"
    input_file_regex = r"samblaster: Opening (\S+) for read."
    rgtag_name_regex = r"\\tID:(\S*?)\\t"
    data = {}
    s_name = None
    fh = f['f']
    for l in fh:
        # Try to find the name from the RG tag: if bwa mem is piped into
        # samblaster, the full bwa command (incl. read group) is logged
        match = re.search(rgtag_name_regex, l)
        if match:
            s_name = self.clean_s_name(match.group(1), f['root'])
        # Try to find the name from the input file name, if used
        match = re.search(input_file_regex, l)
        if match:
            basefn = os.path.basename(match.group(1))
            fname, ext = os.path.splitext(basefn)
            # If it's stdin, keep any RG-tag derived name instead
            if fname != 'stdin':
                s_name = self.clean_s_name(fname, f['root'])
        match = re.search(dups_regex, l)
        if match:
            data['n_dups'] = int(match.group(2))
            data['n_tot'] = int(match.group(3))
            data['n_nondups'] = data['n_tot'] - data['n_dups']
            data['pct_dups'] = float(match.group(4))
    if s_name is None:
        s_name = f['s_name']
    if len(data) > 0:
        if s_name in self.samblaster_data:
            log.debug("Duplicate sample name found in {}! Overwriting: {}".format(f['fn'], s_name))
        self.add_data_source(f, s_name)
        self.samblaster_data[s_name] = data
"resource": ""
} |
def _short_chrom(self, chrom):
    """Map a chromosome name to its short form for plotting.

    Standard autosomes become ints (for numeric sorting) and 'X' is
    kept; anything else is dropped (returns None) unless listed in the
    goleft_indexcov_config 'chromosomes' config option, which supports
    non-standard genomes.
    """
    default_allowed = set(["X"])
    allowed_chroms = set(getattr(config, "goleft_indexcov_config", {}).get("chromosomes", []))
    chrom_clean = chrom.replace("chr", "")
    try:
        chrom_clean = int(chrom_clean)
    except ValueError:
        # Non-numeric names survive only if explicitly allowed
        if chrom_clean not in default_allowed and chrom_clean not in allowed_chroms:
            chrom_clean = None
    if allowed_chroms:
        # Config-driven whitelist: match on either original or short name
        if chrom in allowed_chroms or chrom_clean in allowed_chroms:
            return chrom_clean
        return None
    if isinstance(chrom_clean, int) or chrom_clean in default_allowed:
        return chrom_clean
    return None
"resource": ""
} |
def parse_conpair_logs(self, f):
    """Parse a Conpair concordance or contamination log file.

    One set of regexes handles both report types; whichever patterns
    match determine the type. Results are merged into self.conpair_data
    keyed by sample name.
    """
    conpair_regexes = {
        'concordance_concordance': r"Concordance: ([\d\.]+)%",
        'concordance_used_markers': r"Based on (\d+)/\d+ markers",
        'concordance_total_markers': r"Based on \d+/(\d+) markers",
        'concordance_marker_threshold': r"\(coverage per marker threshold : (\d+) reads\)",
        'concordance_min_mapping_quality': r"Minimum mappinq quality: (\d+)",
        'concordance_min_base_quality': r"Minimum base quality: (\d+)",
        'contamination_normal': r"Normal sample contamination level: ([\d\.]+)%",
        'contamination_tumor': r"Tumor sample contamination level: ([\d\.]+)%"
    }
    parsed_data = {}
    for k, r in conpair_regexes.items():
        match = re.search(r, f['f'])
        if match:
            parsed_data[k] = float(match.group(1))

    def _cp_type(data):
        """Classify a parsed-data dict as 'concordance' or 'contamination'.

        Bug fix: this previously inspected the enclosing parsed_data
        instead of its argument, so comparing the types of an existing
        and a new report always compared the new report with itself.
        """
        if 'concordance_concordance' in data:
            return 'concordance'
        elif 'contamination_normal' in data:
            return 'contamination'

    if len(parsed_data) > 0:
        if f['s_name'] in self.conpair_data:
            # Only warn about overwriting when the new report is the
            # same type as the stored one
            if _cp_type(self.conpair_data[f['s_name']]) == _cp_type(parsed_data):
                log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
        else:
            self.conpair_data[f['s_name']] = dict()
        self.add_data_source(f, section=_cp_type(parsed_data))
        self.conpair_data[f['s_name']].update(parsed_data)
"resource": ""
} |
def conpair_general_stats_table(self):
    """Add Conpair concordance and contamination columns to the general
    statistics table at the top of the report."""
    # Shared settings for all three percentage columns
    percent_defaults = {
        'max': 100,
        'min': 0,
        'suffix': '%',
    }
    headers = {}
    headers['concordance_concordance'] = dict(percent_defaults, **{
        'title': 'Concordance',
        'format': '{:,.2f}',
        'scale': 'RdYlGn'
    })
    headers['contamination_normal'] = dict(percent_defaults, **{
        'title': 'N Contamination',
        'description': 'Normal sample contamination level',
        'format': '{:,.3f}',
        'scale': 'RdYlBu-rev'
    })
    headers['contamination_tumor'] = dict(percent_defaults, **{
        'title': 'T Contamination',
        'description': 'Tumor sample contamination level',
        'format': '{:,.3f}',
        'scale': 'RdYlBu-rev'
    })
    self.general_stats_addcols(self.conpair_data, headers)
"resource": ""
} |
def parse_plotPCA(self):
    """Locate and parse plotPCA output, adding a PC1-vs-PC2 scatter
    section when data is found.

    Returns the number of parsed samples, or None when no sample has
    usable PC1/PC2 values.
    """
    self.deeptools_plotPCAData = dict()
    for f in self.find_log_files('deeptools/plotPCAData', filehandles=False):
        parsed_data = self.parsePlotPCAData(f)
        for sample, values in parsed_data.items():
            if sample in self.deeptools_plotPCAData:
                log.warning("Replacing duplicate sample {}.".format(sample))
            self.deeptools_plotPCAData[sample] = values
        if len(parsed_data) > 0:
            self.add_data_source(f, section='plotPCA')
    if len(self.deeptools_plotPCAData) > 0:
        pconfig = {
            'id': 'deeptools_pca_plot',
            'title': 'deeptools: PCA Plot',
            'xlab': 'PC1',
            'ylab': 'PC2',
            'tt_label': 'PC1 {point.x:.2f}: PC2 {point.y:.2f}',
        }
        # Components 1 and 2 become the x/y scatter coordinates
        points = dict()
        for s_name, pcs in self.deeptools_plotPCAData.items():
            try:
                points[s_name] = {'x': pcs[1], 'y': pcs[2]}
            except KeyError:
                pass
        if len(points) == 0:
            log.debug('No valid data for PCA plot')
            return None
        self.add_section(
            name="PCA plot",
            anchor="deeptools_pca",
            description="PCA plot with the top two principal components calculated based on genome-wide distribution of sequence reads",
            plot=scatter.plot(points, pconfig)
        )
    return len(self.deeptools_plotPCAData)
"resource": ""
} |
def mqc_load_userconfig(paths=()):
    """Overwrite config defaults with user config files.

    Sources are applied in increasing priority: installation dir,
    home dir, $MULTIQC_CONFIG_PATH, the working directory, then any
    paths given on the command line.
    """
    # Installation-wide config
    mqc_load_config(os.path.join(os.path.dirname(MULTIQC_DIR), 'multiqc_config.yaml'))
    # Per-user config
    mqc_load_config(os.path.expanduser('~/.multiqc_config.yaml'))
    # Config file pointed to by an environment variable
    env_config = os.environ.get('MULTIQC_CONFIG_PATH')
    if env_config is not None:
        mqc_load_config(env_config)
    # Config in the current working directory
    mqc_load_config('multiqc_config.yaml')
    # Custom command line config(s)
    for p in paths:
        mqc_load_config(p)
"resource": ""
} |
def mqc_load_config(yaml_config):
    """Load and parse a single YAML config file, if it exists.

    Missing files are silently skipped (debug log only); YAML parse
    errors abort the run.
    """
    if not os.path.isfile(yaml_config):
        logger.debug("No MultiQC config found: {}".format(yaml_config))
        return
    try:
        with open(yaml_config) as f:
            new_config = yaml.safe_load(f)
        logger.debug("Loading config settings from: {}".format(yaml_config))
        mqc_add_config(new_config, yaml_config)
    except (IOError, AttributeError) as e:
        logger.debug("Config error: {}".format(e))
    except yaml.scanner.ScannerError as e:
        logger.error("Error parsing config YAML: {}".format(e))
        sys.exit(1)
"resource": ""
} |
def mqc_add_config(conf, conf_path=None):
    """Merge a parsed MultiQC config dict into the global config.

    Search patterns and filename-cleaning lists are merged/prepended
    rather than replaced; 'custom_logo' paths are resolved relative to
    the cwd or the config file they came from; everything else
    overwrites the existing setting directly.
    """
    global fn_clean_exts, fn_clean_trim
    for key, val in conf.items():
        if key == 'sp':
            # Merge filename search patterns instead of replacing
            sp.update(val)
            logger.debug("Added to filename patterns: {}".format(val))
        elif key == 'extra_fn_clean_exts':
            # Prepend so user-supplied cleaning patterns take priority
            fn_clean_exts[0:0] = val
            logger.debug("Added to filename clean extensions: {}".format(val))
        elif key == 'extra_fn_clean_trim':
            # Prepend so user-supplied trimming patterns take priority
            fn_clean_trim[0:0] = val
            logger.debug("Added to filename clean trimmings: {}".format(val))
        elif key in ['custom_logo'] and val:
            # Resolve file paths - absolute or cwd, or relative to the
            # config file the setting came from
            fpath = val
            if os.path.exists(val):
                fpath = os.path.abspath(val)
            elif conf_path is not None and os.path.exists(os.path.join(os.path.dirname(conf_path), val)):
                fpath = os.path.abspath(os.path.join(os.path.dirname(conf_path), val))
            else:
                logger.error("Config '{}' path not found, skipping ({})".format(key, fpath))
                continue
            logger.debug("New config '{}': {}".format(key, fpath))
            update({key: fpath})
        else:
            logger.debug("New config '{}': {}".format(key, val))
            update({key: val})
"resource": ""
} |
def update_dict(d, u):
    """Recursively update nested dict d in place from nested dict u.

    Nested mappings are merged key-by-key (creating missing levels);
    any other value in u replaces the corresponding value in d.
    Returns d.
    """
    for key, val in u.items():
        # collections.abc.Mapping - the bare collections.Mapping alias
        # was removed in Python 3.10
        if isinstance(val, collections.abc.Mapping):
            d[key] = update_dict(d.get(key, {}), val)
        else:
            d[key] = val
    return d
"resource": ""
} |
q28286 | _parse_preseq_logs | train | def _parse_preseq_logs(f):
""" Go through log file looking for preseq output """
lines = f['f'].splitlines()
header = lines.pop(0)
data_is_bases = False
if header.startswith('TOTAL_READS EXPECTED_DISTINCT'):
pass
elif header.startswith('TOTAL_BASES EXPECTED_DISTINCT'):
data_is_bases = True
elif header.startswith('total_reads distinct_reads'):
pass
else:
log.debug("First line of preseq file {} did not look right".format(f['fn']))
return None, None
data = dict()
for l in lines:
s = l.split()
# Sometimes the Expected_distinct count drops to 0, not helpful
if float(s[1]) == 0 and float(s[0]) > 0:
continue
data[float(s[0])] = float(s[1])
return data, data_is_bases | python | {
"resource": ""
} |
def get_colour(self, val, colformat='hex'):
    """Given a value, return a hex colour within the colour scale.

    Non-numeric characters are stripped from val and the result is
    clamped to [minval, maxval] before interpolation. Any failure
    returns '' so colour lookup can never crash MultiQC.
    """
    try:
        # Sanity checks: raw string so the regex escape isn't mangled
        val = re.sub(r"[^0-9\.]", "", str(val))
        if val == '':
            val = self.minval
        val = float(val)
        val = max(val, self.minval)
        val = min(val, self.maxval)
        domain_nums = list(np.linspace(self.minval, self.maxval, len(self.colours)))
        my_scale = spectra.scale(self.colours).domain(domain_nums)
        # Weird, I know. I ported this from the original JavaScript for continuity
        # Seems to work better than adjusting brightness / saturation / luminosity
        rgb_converter = lambda x: max(0, min(1, 1 + ((x - 1) * 0.3)))
        thecolour = spectra.rgb(*[rgb_converter(v) for v in my_scale(val).rgb])
        return thecolour.hexcode
    except Exception:
        # Catch Exception, not a bare 'except:' - a bare clause would
        # also swallow KeyboardInterrupt/SystemExit
        return ''
"resource": ""
} |
def parse_tophat_log (self, raw_data):
    """Parse a Tophat alignment summary, returning a stats dict or None.

    Paired-end logs are detected by the presence of 'Aligned pairs';
    missing optional fields default to zero before the derived counts
    are calculated.
    """
    paired_regexes = {
        'overall_aligned_percent': r"([\d\.]+)% overall read mapping rate.",
        'concordant_aligned_percent': r"([\d\.]+)% concordant pair alignment rate.",
        'aligned_total': r"Aligned pairs:\s+(\d+)",
        'aligned_multimap': r"Aligned pairs:\s+\d+\n\s+of these:\s+(\d+)",
        'aligned_discordant': r"(\d+) \([\s\d\.]+%\) are discordant alignments",
        'total_reads': r"[Rr]eads:\n\s+Input\s*:\s+(\d+)",
    }
    single_regexes = {
        'total_reads': r"[Rr]eads:\n\s+Input\s*:\s+(\d+)",
        'aligned_total': r"Mapped\s*:\s+(\d+)",
        'aligned_multimap': r"of these\s*:\s+(\d+)",
        'overall_aligned_percent': r"([\d\.]+)% overall read mapping rate.",
    }
    regexes = paired_regexes if 'Aligned pairs' in raw_data else single_regexes
    parsed_data = {}
    for field, regex in regexes.items():
        match = re.search(regex, raw_data, re.MULTILINE)
        if match:
            parsed_data[field] = float(match.group(1))
    if not parsed_data:
        return None
    # Fill optional fields with zero before deriving the summary counts
    for field in ('concordant_aligned_percent', 'aligned_total', 'aligned_multimap', 'aligned_discordant'):
        parsed_data[field] = parsed_data.get(field, 0)
    parsed_data['unaligned_total'] = parsed_data['total_reads'] - parsed_data['aligned_total']
    parsed_data['aligned_not_multimapped_discordant'] = (
        parsed_data['aligned_total'] - parsed_data['aligned_multimap'] - parsed_data['aligned_discordant']
    )
    return parsed_data
"resource": ""
} |
def tophat_general_stats_table(self):
    """Add Tophat alignment columns to the general statistics table at
    the top of the report."""
    headers = OrderedDict([
        ('overall_aligned_percent', {
            'title': '% Aligned',
            'description': 'overall read mapping rate',
            'max': 100,
            'min': 0,
            'suffix': '%',
            'scale': 'YlGn'
        }),
        ('aligned_not_multimapped_discordant', {
            'title': '{} Aligned'.format(config.read_count_prefix),
            'description': 'Aligned reads, not multimapped or discordant ({})'.format(config.read_count_desc),
            'min': 0,
            'scale': 'PuRd',
            'modify': lambda x: x * config.read_count_multiplier,
            'shared_key': 'read_count'
        }),
    ])
    self.general_stats_addcols(self.tophat_data, headers)
"resource": ""
} |
def cutadapt_length_trimmed_plot (self):
    """Add the adapter-trimming length distribution line graph.

    Two switchable datasets are shown: raw counts per trimmed length,
    and counts divided by the number expected from sequencing errors
    (Obs/Exp).
    """
    description = 'This plot shows the number of reads with certain lengths of adapter trimmed. \n\
    Obs/Exp shows the raw counts divided by the number expected due to sequencing errors. A defined peak \n\
    may be related to adapter length. See the \n\
    <a href="http://cutadapt.readthedocs.org/en/latest/guide.html#how-to-read-the-report" target="_blank">cutadapt documentation</a> \n\
    for more information on how these numbers are generated.'
    pconfig = {
        'id': 'cutadapt_plot',
        'title': 'Cutadapt: Lengths of Trimmed Sequences',
        'ylab': 'Counts',
        'xlab': 'Length Trimmed (bp)',
        'xDecimals': False,
        'ymin': 0,
        'tt_label': '<b>{point.x} bp trimmed</b>: {point.y:.0f}',
        'data_labels': [{'name': 'Counts', 'ylab': 'Count'},
                        {'name': 'Obs/Exp', 'ylab': 'Observed / Expected'}]
    }
    datasets = [self.cutadapt_length_counts, self.cutadapt_length_obsexp]
    self.add_section(
        description=description,
        plot=linegraph.plot(datasets, pconfig)
    )
"resource": ""
} |
def bowtie2_general_stats_table(self):
    """Add the Bowtie 2 overall alignment rate to the general
    statistics table at the top of the report."""
    headers = OrderedDict()
    headers['overall_alignment_rate'] = dict(
        title='% Aligned',
        description='overall alignment rate',
        max=100,
        min=0,
        suffix='%',
        scale='YlGn',
    )
    self.general_stats_addcols(self.bowtie2_data, headers)
"resource": ""
} |
def parseJSON(self, f):
    """Parse the JSON output from DamageProfiler and store the per-sample
    damage, length-distribution and summary statistics.

    Returns None when the file cannot be parsed as JSON.
    """
    try:
        parsed_json = json.load(f['f'])
    except Exception as e:
        # Log the failure (the old code also print()-ed the exception to
        # stdout, which bypasses the MultiQC log handling)
        log.warn("Could not parse DamageProfiler JSON: '{}' - {}".format(f['fn'], e))
        return None
    # The sample name comes from the JSON metadata, not the file name
    s_name = self.clean_s_name(parsed_json['metadata']['sample_name'], '')
    self.add_data_source(f, s_name)
    # 3' G>A misincorporation frequencies
    self.threepGtoAfreq_data[s_name] = parsed_json['dmg_3p']
    # 5' C>T misincorporation frequencies
    self.fivepCtoTfreq_data[s_name] = parsed_json['dmg_5p']
    # Read length distribution, forward strand
    self.lgdist_fw_data[s_name] = parsed_json['lendist_fw']
    # Read length distribution, reverse strand
    self.lgdist_rv_data[s_name] = parsed_json['lendist_rv']
    # Summary metrics table
    self.summary_metrics_data[s_name] = parsed_json['summary_stats']
"resource": ""
} |
q28293 | MultiqcModule.lgdistplot | train | def lgdistplot(self,dict_to_use,orientation):
"""Generate a read length distribution plot"""
data = dict()
for s_name in dict_to_use:
try:
data[s_name] = {int(d): int (dict_to_use[s_name][d]) for d in dict_to_use[s_name]}
except KeyError:
pass
if len(data) == 0:
log.debug('No valid data for forward read lgdist input!')
return None
config = {
'id': 'length-distribution-{}'.format(orientation),
'title': 'DamageProfiler: Read length distribution: {} '.format(orientation),
'ylab': 'Number of reads',
'xlab': 'Readlength (bp)',
'xDecimals': False,
'tt_label': '{point.y} reads of length {point.x}',
'ymin': 0,
'xmin': 0
}
return linegraph.plot(data,config) | python | {
"resource": ""
} |
q28294 | MultiqcModule.threeprime_plot | train | def threeprime_plot(self):
"""Generate a 3' G>A linegraph plot"""
data = dict()
dict_to_add = dict()
# Create tuples out of entries
for key in self.threepGtoAfreq_data:
pos = list(range(1,len(self.threepGtoAfreq_data.get(key))))
#Multiply values by 100 to get %
tmp = [i * 100.0 for i in self.threepGtoAfreq_data.get(key)]
tuples = list(zip(pos,tmp))
# Get a dictionary out of it
data = dict((x, y) for x, y in tuples)
dict_to_add[key] = data
config = {
'id': 'threeprime_misinc_plot',
'title': 'DamageProfiler: 3P G>A misincorporation plot',
'ylab': '% G to A substituted',
'xlab': 'Nucleotide position from 3\'',
'tt_label': '{point.y:.2f} % G>A misincorporations at nucleotide position {point.x}',
'ymin': 0,
'xmin': 1
}
return linegraph.plot(dict_to_add,config) | python | {
"resource": ""
} |
q28295 | MultiqcModule.parse_fastqc_report | train | def parse_fastqc_report(self, file_contents, s_name=None, f=None):
""" Takes contents from a fastq_data.txt file and parses out required
statistics and data. Returns a dict with keys 'stats' and 'data'.
Data is for plotting graphs, stats are for top table. """
# Make the sample name from the input filename if we find it
fn_search = re.search(r"Filename\s+(.+)", file_contents)
if fn_search:
s_name = self.clean_s_name(fn_search.group(1) , f['root'])
if s_name in self.fastqc_data.keys():
log.debug("Duplicate sample name found! Overwriting: {}".format(s_name))
self.add_data_source(f, s_name)
self.fastqc_data[s_name] = { 'statuses': dict() }
# Parse the report
section = None
s_headers = None
self.dup_keys = []
for l in file_contents.splitlines():
if l == '>>END_MODULE':
section = None
s_headers = None
elif l.startswith('>>'):
(section, status) = l[2:].split("\t", 1)
section = section.lower().replace(' ', '_')
self.fastqc_data[s_name]['statuses'][section] = status
elif section is not None:
if l.startswith('#'):
s_headers = l[1:].split("\t")
# Special case: Total Deduplicated Percentage header line
if s_headers[0] == 'Total Deduplicated Percentage':
self.fastqc_data[s_name]['basic_statistics'].append({
'measure': 'total_deduplicated_percentage',
'value': float(s_headers[1])
})
else:
# Special case: Rename dedup header in old versions of FastQC (v10)
if s_headers[1] == 'Relative count':
s_headers[1] = 'Percentage of total'
s_headers = [s.lower().replace(' ', '_') for s in s_headers]
self.fastqc_data[s_name][section] = list()
elif s_headers is not None:
s = l.split("\t")
row = dict()
for (i, v) in enumerate(s):
v.replace('NaN','0')
try:
v = float(v)
except ValueError:
pass
row[s_headers[i]] = v
self.fastqc_data[s_name][section].append(row)
# Special case - need to remember order of duplication keys
if section == 'sequence_duplication_levels':
try:
self.dup_keys.append(float(s[0]))
except ValueError:
self.dup_keys.append(s[0])
# Tidy up the Basic Stats
self.fastqc_data[s_name]['basic_statistics'] = {d['measure']: d['value'] for d in self.fastqc_data[s_name]['basic_statistics']}
# Calculate the average sequence length (Basic Statistics gives a range)
length_bp = 0
total_count = 0
for d in self.fastqc_data[s_name].get('sequence_length_distribution', {}):
length_bp += d['count'] * self.avg_bp_from_range(d['length'])
total_count += d['count']
if total_count > 0:
self.fastqc_data[s_name]['basic_statistics']['avg_sequence_length'] = length_bp / total_count | python | {
"resource": ""
} |
q28296 | MultiqcModule.fastqc_general_stats | train | def fastqc_general_stats(self):
""" Add some single-number stats to the basic statistics
table at the top of the report """
# Prep the data
data = dict()
for s_name in self.fastqc_data:
bs = self.fastqc_data[s_name]['basic_statistics']
data[s_name] = {
'percent_gc': bs['%GC'],
'avg_sequence_length': bs['avg_sequence_length'],
'total_sequences': bs['Total Sequences'],
}
try:
data[s_name]['percent_duplicates'] = 100 - bs['total_deduplicated_percentage']
except KeyError:
pass # Older versions of FastQC don't have this
# Add count of fail statuses
num_statuses = 0
num_fails = 0
for s in self.fastqc_data[s_name]['statuses'].values():
num_statuses += 1
if s == 'fail':
num_fails += 1
data[s_name]['percent_fails'] = (float(num_fails)/float(num_statuses))*100.0
# Are sequence lengths interesting?
seq_lengths = [x['avg_sequence_length'] for x in data.values()]
hide_seq_length = False if max(seq_lengths) - min(seq_lengths) > 10 else True
headers = OrderedDict()
headers['percent_duplicates'] = {
'title': '% Dups',
'description': '% Duplicate Reads',
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'RdYlGn-rev'
}
headers['percent_gc'] = {
'title': '% GC',
'description': 'Average % GC Content',
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'Set1',
'format': '{:,.0f}'
}
headers['avg_sequence_length'] = {
'title': 'Length',
'description': 'Average Sequence Length (bp)',
'min': 0,
'suffix': ' bp',
'scale': 'RdYlGn',
'format': '{:,.0f}',
'hidden': hide_seq_length
}
headers['percent_fails'] = {
'title': '% Failed',
'description': 'Percentage of modules failed in FastQC report (includes those not plotted here)',
'max': 100,
'min': 0,
'suffix': '%',
'scale': 'Reds',
'format': '{:,.0f}',
'hidden': True
}
headers['total_sequences'] = {
'title': '{} Seqs'.format(config.read_count_prefix),
'description': 'Total Sequences ({})'.format(config.read_count_desc),
'min': 0,
'scale': 'Blues',
'modify': lambda x: x * config.read_count_multiplier,
'shared_key': 'read_count'
}
self.general_stats_addcols(data, headers) | python | {
"resource": ""
} |
q28297 | MultiqcModule.get_status_cols | train | def get_status_cols(self, section):
""" Helper function - returns a list of colours according to the FastQC
status of this module for each sample. """
colours = dict()
for s_name in self.fastqc_data:
status = self.fastqc_data[s_name]['statuses'].get(section, 'default')
colours[s_name] = self.status_colours[status]
return colours | python | {
"resource": ""
} |
q28298 | TagDirReportMixin.homer_tagdirectory | train | def homer_tagdirectory(self):
""" Find HOMER tagdirectory logs and parse their data """
self.parse_gc_content()
self.parse_re_dist()
self.parse_tagLength_dist()
self.parse_tagInfo_data()
self.parse_FreqDistribution_data()
self.homer_stats_table_tagInfo()
return sum([len(v) for v in self.tagdir_data.values()]) | python | {
"resource": ""
} |
q28299 | TagDirReportMixin.parse_gc_content | train | def parse_gc_content(self):
"""parses and plots GC content and genome GC content files"""
# Find and parse GC content:
for f in self.find_log_files('homer/GCcontent', filehandles=True):
# Get the s_name from the parent directory
s_name = os.path.basename(f['root'])
s_name = self.clean_s_name(s_name, f['root'])
parsed_data = self.parse_twoCol_file(f)
if parsed_data is not None:
if s_name in self.tagdir_data['GCcontent']:
log.debug("Duplicate GCcontent sample log found! Overwriting: {}".format(s_name))
self.add_data_source(f, s_name, section='GCcontent')
self.tagdir_data['GCcontent'][s_name] = parsed_data
## get esimated genome content distribution:
for f in self.find_log_files('homer/genomeGCcontent', filehandles=True):
parsed_data = self.parse_twoCol_file(f)
if parsed_data is not None:
if s_name + "_genome" in self.tagdir_data['GCcontent']:
log.debug("Duplicate genome GCcontent sample log found! Overwriting: {}".format(s_name+ "_genome"))
self.add_data_source(f, s_name + "_genome", section='GCcontent')
self.tagdir_data['GCcontent'][s_name + "_genome"] = parsed_data
self.tagdir_data['GCcontent'] = self.ignore_samples(self.tagdir_data['GCcontent'])
if len(self.tagdir_data['GCcontent']) > 0:
self.add_section (
name = 'Per Sequence GC Content',
anchor = 'homer_per_sequence_gc_content',
description = 'This plot shows the distribution of GC content.',
plot = self.GCcontent_plot()
) | python | {
"resource": ""
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.