_id               stringlengths (2-7)
title             stringlengths (1-88)
partition         stringclasses (3 values)
text              stringlengths (75-19.8k)
language          stringclasses (1 value)
meta_information  dict
q28300
TagDirReportMixin.parse_tagLength_dist
train
def parse_tagLength_dist(self):
    """Parses and plots tag length distribution files."""
    # Find and parse HOMER tag length distribution reports
    for f in self.find_log_files('homer/LengthDistribution', filehandles=True):
        s_name = os.path.basename(f['root'])
        s_name = self.clean_s_name(s_name, f['root'])
        parsed_data = self.parse_length_dist(f)
        if parsed_data is not None:
            if s_name in self.tagdir_data['length']:
                log.debug("Duplicate Length Distribution sample log found! Overwriting: {}".format(s_name))
            self.add_data_source(f, s_name, section='length')
            self.tagdir_data['length'][s_name] = parsed_data
    self.tagdir_data['length'] = self.ignore_samples(self.tagdir_data['length'])
    if len(self.tagdir_data['length']) > 0:
        self.add_section(
            name='Tag Length Distribution',
            anchor='homer-tagLength',
            description='This plot shows the distribution of tag lengths.',
            helptext='This is a good quality control for the tag lengths input into HOMER.',
            plot=self.length_dist_chart()
        )
python
{ "resource": "" }
q28301
TagDirReportMixin.homer_stats_table_tagInfo
train
def homer_stats_table_tagInfo(self):
    """Add core HOMER stats to the general stats table from the tagInfo file."""
    if len(self.tagdir_data['header']) == 0:
        return None
    headers = OrderedDict()
    headers['UniqPositions'] = {
        'title': 'Uniq Pos',
        'description': 'Number of Unique Di-Tags Passed Through HOMER',
        'format': '{:,.0f}',
        'modify': lambda x: x * 0.000001,
        'suffix': 'M'
    }
    headers['TotalPositions'] = {
        'title': 'Total Pos',
        'description': 'Number of Total Di-Tags Passed Through HOMER',
        'format': '{:,.0f}',
        'modify': lambda x: x * 0.000001,
        'suffix': 'M'
    }
    headers['fragmentLengthEstimate'] = {
        'title': 'Fragment Length',
        'description': 'Estimate of Fragment Length',
        'format': '{:,.0f}'
    }
    headers['peakSizeEstimate'] = {
        'title': 'Peak Size',
        'description': 'Estimate of Peak Size',
        'format': '{:,.0f}'
    }
    headers['tagsPerBP'] = {
        'title': 'tagsPerBP',
        'description': 'Average Tags Per Base Pair',
        'format': '{:,.3f}',
    }
    headers['TagsPerPosition'] = {
        'title': 'averageTagsPerPosition',
        'description': 'Average Tags Per Position',
        'format': '{:,.2f}'
    }
    headers['averageTagLength'] = {
        'title': 'TagLength',
        'description': 'Average Tag Length',
        'format': '{:,.0f}'
    }
    headers['averageFragmentGCcontent'] = {
        'title': 'GCcontent',
        'description': 'Average Fragment GC Content',
        'max': 1,
        'min': 0,
        'format': '{:,.2f}'
    }
    self.general_stats_addcols(self.tagdir_data['header'], headers, 'HOMER')
python
{ "resource": "" }
q28302
TagDirReportMixin.homer_stats_table_interChr
train
def homer_stats_table_interChr(self):
    """Add core HOMER stats to the general stats table from the FrequencyDistribution file."""
    headers = OrderedDict()
    headers['InterChr'] = {
        'title': 'InterChr',
        'description': 'Fraction of reads forming inter-chromosomal interactions',
        'format': '{:,.4f}'
    }
    self.general_stats_addcols(self.tagdir_data['FreqDistribution'], headers, 'Homer-InterChr')
python
{ "resource": "" }
q28303
TagDirReportMixin.parse_restriction_dist
train
def parse_restriction_dist(self, f):
    """Parse HOMER tagdirectory petagRestrictionDistribution file."""
    parsed_data = dict()
    firstline = True
    for l in f['f']:
        if firstline:  # skip first line
            firstline = False
            continue
        s = l.split("\t")
        if len(s) > 1:
            nuc = float(s[0].strip())
            v1 = float(s[1].strip())
            v2 = float(s[2].strip())
            v = v1 + v2  # sum the two count columns
            parsed_data.update({nuc: v})
    return parsed_data
python
{ "resource": "" }
q28304
TagDirReportMixin.parse_length_dist
train
def parse_length_dist(self, f):
    """Parse HOMER tagdirectory tagLengthDistribution file."""
    parsed_data = dict()
    firstline = True
    for l in f['f']:
        if firstline:  # skip first line
            firstline = False
            continue
        s = l.split("\t")
        if len(s) > 1:
            k = float(s[0].strip())
            v = float(s[1].strip())
            parsed_data[k] = v
    return parsed_data
python
{ "resource": "" }
q28305
TagDirReportMixin.parse_tag_info
train
def parse_tag_info(self, f):
    """Parse HOMER tagdirectory taginfo.txt file to extract statistics from the first 11 lines."""
    # General Stats Table
    tag_info = dict()
    for l in f['f']:
        s = l.split("=")
        if len(s) > 1:
            if s[0].strip() == 'genome':
                ss = s[1].split("\t")
                if len(ss) > 2:
                    tag_info['genome'] = ss[0].strip()
                    try:
                        tag_info['UniqPositions'] = float(ss[1].strip())
                        tag_info['TotalPositions'] = float(ss[2].strip())
                    except ValueError:
                        tag_info['UniqPositions'] = ss[1].strip()
                        tag_info['TotalPositions'] = ss[2].strip()
            try:
                tag_info[s[0].strip()] = float(s[1].strip())
            except ValueError:
                tag_info[s[0].strip()] = s[1].strip()
    return tag_info
python
{ "resource": "" }
q28306
TagDirReportMixin.parse_tag_info_chrs
train
def parse_tag_info_chrs(self, f, convChr=True):
    """Parse HOMER tagdirectory taginfo.txt file to extract chromosome coverage."""
    parsed_data_total = OrderedDict()
    parsed_data_uniq = OrderedDict()
    remove = ["hap", "random", "chrUn", "cmd", "EBV", "GL", "NT_"]
    for l in f['f']:
        s = l.split("\t")
        key = s[0].strip()
        # Skip header
        if '=' in l or len(s) != 3:
            continue
        if convChr:
            if any(x in key for x in remove):
                continue
        try:
            vT = float(s[1].strip())
            vU = float(s[2].strip())
        except ValueError:
            continue
        parsed_data_total[key] = vT
        parsed_data_uniq[key] = vU
    return [parsed_data_total, parsed_data_uniq]
python
{ "resource": "" }
q28307
TagDirReportMixin.parse_FreqDist
train
def parse_FreqDist(self, f):
    """Parse HOMER tagdirectory petag.FreqDistribution_1000 file."""
    parsed_data = dict()
    firstline = True
    for l in f['f']:
        if firstline:  # skip the header line
            firstline = False
            continue
        s = l.split("\t")
        if len(s) > 1:
            k = s[0].strip()
            if k.startswith("More than "):
                k = re.sub("More than ", "", k)
            k = float(k)
            v = float(s[1].strip())
            parsed_data[k] = v
    return parsed_data
python
{ "resource": "" }
q28308
TagDirReportMixin.parse_FreqDist_interChr
train
def parse_FreqDist_interChr(self, f):
    """Parse HOMER tagdirectory petag.FreqDistribution_1000 file to get inter-chromosomal interactions."""
    parsed_data = dict()
    firstline = True
    for l in f['f']:
        if firstline:
            firstline = False
            interChr = float(re.sub(r"\)", "", l.split(":")[1]))
        else:
            break
    parsed_data['interChr'] = interChr
    return parsed_data
python
{ "resource": "" }
q28309
TagDirReportMixin.restriction_dist_chart
train
def restriction_dist_chart(self):
    """Make the petagRestrictionDistribution plot."""
    pconfig = {
        'id': 'petagRestrictionDistribution',
        'title': 'Restriction Distribution',
        'ylab': 'Reads',
        'xlab': 'Distance from cut site (bp)',
        'data_labels': [
            {'name': 'Number of Tags'},
            {'name': 'Percentage'}
        ]
    }
    datasets = [
        self.tagdir_data['restriction'],
        self.tagdir_data['restriction_norm']
    ]
    return linegraph.plot(datasets, pconfig)
python
{ "resource": "" }
q28310
TagDirReportMixin.GCcontent_plot
train
def GCcontent_plot(self):
    """Create the HTML for the Homer GC content plot."""
    pconfig = {
        'id': 'homer-tag-directory-gc-content',
        'title': 'Homer: Tag Directory Per Sequence GC Content',
        'smooth_points': 200,
        'smooth_points_sumcounts': False,
        'ylab': 'Normalized Count',
        'xlab': '% GC',
        'ymin': 0,
        'xmax': 1,
        'xmin': 0,
        'yDecimals': True,
        'tt_label': '<b>{point.x}% GC</b>: {point.y}'
    }
    return linegraph.plot(self.tagdir_data['GCcontent'], pconfig)
python
{ "resource": "" }
q28311
TagDirReportMixin.tag_info_chart
train
def tag_info_chart(self):
    """Make the taginfo.txt plot."""
    ## TODO: human chrs on hg19. How will this work with a GRCh genome or other, non-human, genomes?
    # Nice if they are ordered by size
    ucsc = ["chr" + str(i) for i in range(1, 23)] + ["chrX", "chrY", "chrM"]
    ensembl = [str(i) for i in range(1, 23)] + ["X", "Y", "MT"]  # keys are parsed as strings
    pconfig = {
        'id': 'tagInfo',
        'title': 'Homer: Tag Info Distribution',
        'ylab': 'Tags',
        'cpswitch_counts_label': 'Number of Tags'
    }
    ## Check whether chromosome names start with "chr" (UCSC) or not (Ensembl)
    sample1 = next(iter(self.tagdir_data['taginfo_total']))
    chrFormat = next(iter(self.tagdir_data['taginfo_total'][sample1]))
    if "chr" in chrFormat:
        chrs = ucsc
    else:
        chrs = ensembl
    return bargraph.plot(self.tagdir_data['taginfo_total'], chrs, pconfig)
python
{ "resource": "" }
q28312
TagDirReportMixin.FreqDist_chart
train
def FreqDist_chart(self):
    """Make the petag.FreqDistribution_1000 plot."""
    # Take a log of the data before plotting so that we can
    # reduce the number of points to plot evenly
    pdata = {}
    for s_name in self.tagdir_data['FreqDistribution']:
        pdata[s_name] = {}
        for x, y in self.tagdir_data['FreqDistribution'][s_name].items():
            try:
                # Base 10, to match the x-axis label below
                pdata[s_name][math.log10(float(x))] = y
            except ValueError:
                pass
    pconfig = {
        'id': 'FreqDistribution',
        'title': 'Frequency Distribution',
        'ylab': 'Fraction of Reads',
        'xlab': 'Log10(Distance between regions)',
        'data_labels': ['Reads', 'Percent'],
        'smooth_points': 500,
        'smooth_points_sumcounts': False,
        'yLog': True
    }
    return linegraph.plot(pdata, pconfig)
python
{ "resource": "" }
q28313
MultiqcModule.parse_bismark_report
train
def parse_bismark_report(self, report, regexes):
    """Search a bismark report with a set of regexes."""
    parsed_data = {}
    for k, r in regexes.items():
        r_search = re.search(r, report, re.MULTILINE)
        if r_search:
            try:
                parsed_data[k] = float(r_search.group(1))
            except ValueError:
                parsed_data[k] = r_search.group(1)  # NaN
    if len(parsed_data) == 0:
        return None
    return parsed_data
python
{ "resource": "" }
q28314
MultiqcModule.parse_bismark_mbias
train
def parse_bismark_mbias(self, f):
    """Parse the Bismark M-Bias plot data."""
    s = f['s_name']
    # Initialise a per-sample dict for every context / read combination
    for t in ['meth', 'cov']:
        for ctx in ['CpG', 'CHG', 'CHH']:
            for r in ['_R1', '_R2']:
                self.bismark_mbias_data[t][ctx + r][s] = {}
    key = None
    for l in f['f']:
        if 'context' in l:
            if 'CpG' in l:
                key = 'CpG'
            elif 'CHG' in l:
                key = 'CHG'
            elif 'CHH' in l:
                key = 'CHH'
            if '(R1)' in l:
                key += '_R1'
            elif '(R2)' in l:
                key += '_R2'
            else:
                key += '_R1'
        if key is not None:
            sections = l.split()
            try:
                pos = int(sections[0])
                self.bismark_mbias_data['meth'][key][s][pos] = float(sections[3])
                self.bismark_mbias_data['cov'][key][s][pos] = int(sections[4])
            except (IndexError, ValueError):
                continue
    # Remove empty dicts (eg. R2 for SE data)
    for t in self.bismark_mbias_data:
        for k in self.bismark_mbias_data[t]:
            self.bismark_mbias_data[t][k] = {
                s_name: self.bismark_mbias_data[t][k][s_name]
                for s_name in self.bismark_mbias_data[t][k]
                if len(self.bismark_mbias_data[t][k][s_name]) > 0
            }
python
{ "resource": "" }
q28315
MultiqcModule.parse_bismark_bam2nuc
train
def parse_bismark_bam2nuc(self, f):
    """Parse reports generated by Bismark bam2nuc."""
    if f['s_name'] in self.bismark_data['bam2nuc']:
        log.debug("Duplicate bam2nuc sample log found! Overwriting: {}".format(f['s_name']))
    self.add_data_source(f, section='bam2nuc')
    self.bismark_data['bam2nuc'][f['s_name']] = dict()
    headers = None
    for l in f['f']:
        sections = l.rstrip().split("\t")
        if headers is None:
            headers = sections
        else:
            for i, h in enumerate(headers):
                if i == 0:
                    k = sections[0]
                else:
                    key = "{}_{}".format(k, h.lower().replace(' ', '_'))
                    self.bismark_data['bam2nuc'][f['s_name']][key] = sections[i]
python
{ "resource": "" }
q28316
MultiqcModule.bismark_stats_table
train
def bismark_stats_table(self):
    """Take the parsed stats from the Bismark reports and add them to the
    basic stats table at the top of the report."""
    headers = {
        'alignment': OrderedDict(),
        'dedup': OrderedDict(),
        'methextract': OrderedDict(),
        'bam2nuc': OrderedDict()
    }
    headers['methextract']['percent_cpg_meth'] = {
        'title': '% mCpG',
        'description': '% Cytosines methylated in CpG context',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'Greens'
    }
    headers['methextract']['percent_chg_meth'] = {
        'title': '% mCHG',
        'description': '% Cytosines methylated in CHG context',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'Oranges'
    }
    headers['methextract']['percent_chh_meth'] = {
        'title': '% mCHH',
        'description': '% Cytosines methylated in CHH context',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'Oranges'
    }
    headers['methextract']['total_c'] = {
        'title': "M C's",
        'description': "Total number of C's analysed, in millions",
        'min': 0,
        'scale': 'Purples',
        'modify': lambda x: x / 1000000
    }
    headers['bam2nuc']['C_coverage'] = {
        'title': 'C Coverage',
        'description': 'Cytosine Coverage',
        'min': 0,
        'suffix': 'X',
        'scale': 'Greens',
        'format': '{:,.2f}'
    }
    headers['dedup']['dup_reads_percent'] = {
        'title': '% Dups',
        'description': 'Percent Duplicated Alignments',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'RdYlGn-rev',
    }
    headers['dedup']['dedup_reads'] = {
        'title': '{} Unique'.format(config.read_count_prefix),
        'description': 'Deduplicated Alignments ({})'.format(config.read_count_desc),
        'min': 0,
        'scale': 'Greens',
        'modify': lambda x: x * config.read_count_multiplier,
        'shared_key': 'read_count',
        'hidden': True
    }
    headers['alignment']['aligned_reads'] = {
        'title': '{} Aligned'.format(config.read_count_prefix),
        'description': 'Total Aligned Sequences ({})'.format(config.read_count_desc),
        'min': 0,
        'scale': 'PuRd',
        'modify': lambda x: x * config.read_count_multiplier,
        'shared_key': 'read_count',
        'hidden': True
    }
    headers['alignment']['percent_aligned'] = {
        'title': '% Aligned',
        'description': 'Percent Aligned Sequences',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'YlGn'
    }
    self.general_stats_addcols(self.bismark_data['methextract'], headers['methextract'])
    self.general_stats_addcols(self.bismark_data['bam2nuc'], headers['bam2nuc'])
    self.general_stats_addcols(self.bismark_data['dedup'], headers['dedup'])
    self.general_stats_addcols(self.bismark_data['alignment'], headers['alignment'])
python
{ "resource": "" }
q28317
MultiqcModule.bismark_alignment_chart
train
def bismark_alignment_chart(self):
    """Make the alignment plot."""
    # Specify the order of the different possible categories
    keys = OrderedDict()
    keys['aligned_reads'] = {'color': '#2f7ed8', 'name': 'Aligned Uniquely'}
    keys['ambig_reads'] = {'color': '#492970', 'name': 'Aligned Ambiguously'}
    keys['no_alignments'] = {'color': '#0d233a', 'name': 'Did Not Align'}
    keys['discarded_reads'] = {'color': '#f28f43', 'name': 'No Genomic Sequence'}
    # Config for the plot
    config = {
        'id': 'bismark_alignment',
        'title': 'Bismark: Alignment Scores',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads'
    }
    self.add_section(
        name='Alignment Rates',
        anchor='bismark-alignment',
        plot=bargraph.plot(self.bismark_data['alignment'], keys, config)
    )
python
{ "resource": "" }
q28318
MultiqcModule.bismark_strand_chart
train
def bismark_strand_chart(self):
    """Make the strand alignment plot."""
    # Specify the order of the different possible categories
    keys = OrderedDict()
    keys['strand_ob'] = {'name': 'Original bottom strand'}
    keys['strand_ctob'] = {'name': 'Complementary to original bottom strand'}
    keys['strand_ctot'] = {'name': 'Complementary to original top strand'}
    keys['strand_ot'] = {'name': 'Original top strand'}
    # See if we have any directional samples
    directional = 0
    d_mode = ''
    for sn in self.bismark_data['alignment'].values():
        if 'strand_directional' in sn.keys():
            directional += 1
    if directional == len(self.bismark_data['alignment']):
        keys.pop('strand_ctob', None)
        keys.pop('strand_ctot', None)
        d_mode = 'All samples were run with <code>--directional</code> mode; alignments to complementary strands (CTOT, CTOB) were ignored.'
    elif directional > 0:
        d_mode = '{} samples were run with <code>--directional</code> mode; alignments to complementary strands (CTOT, CTOB) were ignored.'.format(directional)
    # Config for the plot
    config = {
        'id': 'bismark_strand_alignment',
        'title': 'Bismark: Alignment to Individual Bisulfite Strands',
        'ylab': '% Reads',
        'cpswitch_c_active': False,
        'cpswitch_counts_label': 'Number of Reads'
    }
    self.add_section(
        name='Strand Alignment',
        anchor='bismark-strands',
        description=d_mode,
        plot=bargraph.plot(self.bismark_data['alignment'], keys, config)
    )
python
{ "resource": "" }
q28319
MultiqcModule.bismark_dedup_chart
train
def bismark_dedup_chart(self):
    """Make the deduplication plot."""
    # Specify the order of the different possible categories
    keys = OrderedDict()
    keys['dedup_reads'] = {'name': 'Deduplicated reads (remaining)'}
    keys['dup_reads'] = {'name': 'Duplicate reads (removed)'}
    # Config for the plot
    config = {
        'id': 'bismark_deduplication',
        'title': 'Bismark: Deduplication',
        'ylab': '% Reads',
        'cpswitch_c_active': False,
        'cpswitch_counts_label': 'Number of Reads'
    }
    self.add_section(
        name='Deduplication',
        anchor='bismark-deduplication',
        plot=bargraph.plot(self.bismark_data['dedup'], keys, config)
    )
python
{ "resource": "" }
q28320
MultiqcModule.bismark_methlyation_chart
train
def bismark_methlyation_chart(self):
    """Make the methylation plot."""
    # Config for the plot
    keys = OrderedDict()
    defaults = {
        'max': 100,
        'min': 0,
        'suffix': '%',
        'decimalPlaces': 1
    }
    keys['percent_cpg_meth'] = dict(defaults, **{'title': 'Methylated CpG'})
    keys['percent_chg_meth'] = dict(defaults, **{'title': 'Methylated CHG'})
    keys['percent_chh_meth'] = dict(defaults, **{'title': 'Methylated CHH'})
    self.add_section(
        name='Cytosine Methylation',
        anchor='bismark-methylation',
        plot=beeswarm.plot(self.bismark_data['methextract'], keys, {'id': 'bismark-methylation-dp'})
    )
python
{ "resource": "" }
q28321
MultiqcModule.bismark_mbias_plot
train
def bismark_mbias_plot(self):
    """Make the M-Bias plot."""
    description = '<p>This plot shows the average percentage methylation and coverage across reads. See the \
<a href="https://rawgit.com/FelixKrueger/Bismark/master/Docs/Bismark_User_Guide.html#m-bias-plot" target="_blank">bismark user guide</a> \
for more information on how these numbers are generated.</p>'
    pconfig = {
        'id': 'bismark_mbias',
        'title': 'Bismark: M-Bias',
        'ylab': '% Methylation',
        'xlab': 'Position (bp)',
        'xDecimals': False,
        'ymax': 100,
        'ymin': 0,
        'tt_label': '<b>{point.x} bp</b>: {point.y:.1f}%',
        'data_labels': [
            {'name': 'CpG R1', 'ylab': '% Methylation', 'ymax': 100},
            {'name': 'CHG R1', 'ylab': '% Methylation', 'ymax': 100},
            {'name': 'CHH R1', 'ylab': '% Methylation', 'ymax': 100}
        ]
    }
    datasets = [
        self.bismark_mbias_data['meth']['CpG_R1'],
        self.bismark_mbias_data['meth']['CHG_R1'],
        self.bismark_mbias_data['meth']['CHH_R1']
    ]
    # Add the R2 datasets for paired-end data
    if len(self.bismark_mbias_data['meth']['CpG_R2']) > 0:
        for ctx in ['CpG', 'CHG', 'CHH']:
            pconfig['data_labels'].append({'name': '{} R2'.format(ctx), 'ylab': '% Methylation', 'ymax': 100})
        for ctx in ['CpG', 'CHG', 'CHH']:
            datasets.append(self.bismark_mbias_data['meth']['{}_R2'.format(ctx)])
    self.add_section(
        name='M-Bias',
        anchor='bismark-mbias',
        description=description,
        plot=linegraph.plot(datasets, pconfig)
    )
python
{ "resource": "" }
q28322
MultiqcModule.parse_afterqc_log
train
def parse_afterqc_log(self, f):
    """Parse the JSON output from AfterQC and save the summary statistics."""
    try:
        parsed_json = json.load(f['f'])
    except ValueError:
        log.warning("Could not parse AfterQC JSON: '{}'".format(f['fn']))
        return None
    # AfterQC changed the name of their summary key at some point
    if 'summary' in parsed_json:
        summaryk = 'summary'
    elif 'afterqc_main_summary' in parsed_json:
        summaryk = 'afterqc_main_summary'
    else:
        log.warning("AfterQC JSON did not have a 'summary' or 'afterqc_main_summary' key, skipping: '{}'".format(f['fn']))
        return None
    s_name = f['s_name']
    self.add_data_source(f, s_name)
    self.afterqc_data[s_name] = {}
    for k in parsed_json[summaryk]:
        try:
            self.afterqc_data[s_name][k] = float(parsed_json[summaryk][k])
        except ValueError:
            self.afterqc_data[s_name][k] = parsed_json[summaryk][k]
    try:
        self.afterqc_data[s_name]['pct_good_bases'] = (self.afterqc_data[s_name]['good_bases'] / self.afterqc_data[s_name]['total_bases']) * 100.0
    except KeyError:
        pass
python
{ "resource": "" }
q28323
MultiqcModule.afterqc_general_stats_table
train
def afterqc_general_stats_table(self):
    """Take the parsed stats from the AfterQC report and add it to the
    General Statistics table at the top of the report."""
    headers = OrderedDict()
    headers['pct_good_bases'] = {
        'title': '% Good Bases',
        'description': 'Percent Good Bases',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'BuGn',
    }
    headers['good_reads'] = {
        'title': '{} Good Reads'.format(config.read_count_prefix),
        'description': 'Good Reads ({})'.format(config.read_count_desc),
        'min': 0,
        'modify': lambda x: x * config.read_count_multiplier,
        'scale': 'GnBu',
        'shared_key': 'read_count'
    }
    headers['total_reads'] = {
        'title': '{} Total Reads'.format(config.read_count_prefix),
        'description': 'Total Reads ({})'.format(config.read_count_desc),
        'min': 0,
        'modify': lambda x: x * config.read_count_multiplier,
        'scale': 'Blues',
        'shared_key': 'read_count'
    }
    headers['readlen'] = {
        'title': 'Read Length',
        'description': 'Read Length',
        'min': 0,
        'suffix': ' bp',
        'format': '{:,.0f}',
        'scale': 'YlGn'
    }
    self.general_stats_addcols(self.afterqc_data, headers)
python
{ "resource": "" }
q28324
MultiqcModule.after_qc_bad_reads_chart
train
def after_qc_bad_reads_chart(self):
    """Function to generate the AfterQC bad reads bar plot."""
    # Specify the order of the different possible categories
    keys = OrderedDict()
    keys['good_reads'] = {'name': 'Good Reads'}
    keys['bad_reads_with_bad_barcode'] = {'name': 'Bad Barcode'}
    keys['bad_reads_with_bad_overlap'] = {'name': 'Bad Overlap'}
    keys['bad_reads_with_bad_read_length'] = {'name': 'Bad Read Length'}
    keys['bad_reads_with_low_quality'] = {'name': 'Low Quality'}
    keys['bad_reads_with_polyX'] = {'name': 'PolyX'}
    keys['bad_reads_with_reads_in_bubble'] = {'name': 'Reads In Bubble'}
    keys['bad_reads_with_too_many_N'] = {'name': 'Too many N'}
    # Config for the plot
    pconfig = {
        'id': 'afterqc_bad_reads_plot',
        'title': 'AfterQC: Filtered Reads',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads',
        'hide_zero_cats': False,
    }
    return bargraph.plot(self.afterqc_data, keys, pconfig)
python
{ "resource": "" }
q28325
MultiqcModule.plot
train
def plot(self, file_type):
    """Call file_type plotting function."""
    samples = self.mod_data[file_type]
    plot_title = file_types[file_type]['title']
    plot_func = file_types[file_type]['plot_func']
    plot_params = file_types[file_type]['plot_params']
    return plot_func(samples, file_type, plot_title=plot_title, plot_params=plot_params)
python
{ "resource": "" }
q28326
MultiqcModule.make_basic_table
train
def make_basic_table(self, file_type):
    """Create table of key-value items in 'file_type'."""
    table_data = {sample: items['kv'] for sample, items in self.mod_data[file_type].items()}
    table_headers = {}
    for column_header, (description, header_options) in file_types[file_type]['kv_descriptions'].items():
        table_headers[column_header] = {
            'rid': '{}_{}_bbmstheader'.format(file_type, column_header),
            'title': column_header,
            'description': description,
        }
        table_headers[column_header].update(header_options)
    tconfig = {
        'id': file_type + '_bbm_table',
        'namespace': 'BBTools'
    }
    # Coerce numeric values to floats where possible
    for sample in table_data:
        for key, value in table_data[sample].items():
            try:
                table_data[sample][key] = float(value)
            except ValueError:
                pass
    return table.plot(table_data, table_headers, tconfig)
python
{ "resource": "" }
q28327
TsTvByCountMixin.parse_tstv_by_count
train
def parse_tstv_by_count(self):
    """Create the HTML for the TsTv by alternative allele count linegraph plot."""
    self.vcftools_tstv_by_count = dict()
    for f in self.find_log_files('vcftools/tstv_by_count', filehandles=True):
        d = {}
        for line in f['f'].readlines()[1:]:  # don't add the header line (first row)
            key = float(line.split()[0])  # take the first column (alternative allele count) as key
            val = float(line.split()[3])  # take Ts/Tv as value
            d[key] = val
        self.vcftools_tstv_by_count[f['s_name']] = d
    # Filter out ignored sample names
    self.vcftools_tstv_by_count = self.ignore_samples(self.vcftools_tstv_by_count)
    if len(self.vcftools_tstv_by_count) == 0:
        return 0
    pconfig = {
        'id': 'vcftools_tstv_by_count',
        'title': 'VCFTools: TsTv by Count',
        'ylab': 'TsTv Ratio',
        'xlab': 'Alternative Allele Count',
        'xmin': 0,
        'ymin': 0,
        'smooth_points': 400,  # limits huge file sizes and prevents the browser crashing
        'smooth_points_sumcounts': False
    }
    helptext = '''
    A `transition` is a purine-to-purine or pyrimidine-to-pyrimidine point mutation.
    A `transversion` is a purine-to-pyrimidine or pyrimidine-to-purine point mutation.
    `Alternative allele count` is the number of alternative alleles at the site.
    Note: only bi-allelic SNPs are used (multi-allelic sites and INDELs are skipped).
    Refer to the VCFtools manual (https://vcftools.github.io/man_latest.html) on `--TsTv-by-count`.
    '''
    self.add_section(
        name='TsTv by Count',
        anchor='vcftools-tstv-by-count',
        description="Plot of `TSTV-BY-COUNT` - the transition to transversion ratio as a function of alternative allele count, from the output of vcftools TsTv-by-count.",
        helptext=helptext,
        plot=linegraph.plot(self.vcftools_tstv_by_count, pconfig)
    )
    return len(self.vcftools_tstv_by_count)
python
{ "resource": "" }
q28328
plotEnrichmentMixin.parse_plotEnrichment
train
def parse_plotEnrichment(self):
    """Find plotEnrichment output."""
    self.deeptools_plotEnrichment = dict()
    for f in self.find_log_files('deeptools/plotEnrichment'):
        parsed_data = self.parsePlotEnrichment(f)
        for k, v in parsed_data.items():
            if k in self.deeptools_plotEnrichment:
                log.warning("Replacing duplicate sample {}.".format(k))
            self.deeptools_plotEnrichment[k] = v
        if len(parsed_data) > 0:
            self.add_data_source(f, section='plotEnrichment')
    if len(self.deeptools_plotEnrichment) > 0:
        dCounts = OrderedDict()
        dPercents = OrderedDict()
        for sample, v in self.deeptools_plotEnrichment.items():
            dCounts[sample] = OrderedDict()
            dPercents[sample] = OrderedDict()
            for category, v2 in v.items():
                dCounts[sample][category] = v2['count']
                dPercents[sample][category] = v2['percent']
        config = {
            'data_labels': [
                {'name': 'Counts in features', 'ylab': 'Counts in feature'},
                {'name': 'Percents in features', 'ylab': 'Percent of reads in feature'}
            ],
            'id': 'deeptools_enrichment_plot',
            'title': 'deepTools: Signal enrichment per feature',
            'ylab': 'Counts in feature',
            'categories': True,
            'ymin': 0.0
        }
        self.add_section(
            name="Feature enrichment",
            description="Signal enrichment per feature according to plotEnrichment",
            anchor="deeptools_enrichment",
            plot=linegraph.plot([dCounts, dPercents], pconfig=config)
        )
    return len(self.deeptools_plotEnrichment)
python
{ "resource": "" }
q28329
MultiqcModule.slamdunkGeneralStatsTable
train
def slamdunkGeneralStatsTable(self):
    """Take the parsed summary stats from Slamdunk and add it to the
    basic stats table at the top of the report."""
    headers = OrderedDict()
    headers['counted'] = {
        'title': '{} Counted'.format(config.read_count_prefix),
        'description': "# reads counted within 3'UTRs ({})".format(config.read_count_desc),
        'shared_key': 'read_count',
        'min': 0,
        'format': '{:,.2f}',
        'scale': 'YlGn',
        'modify': lambda x: float(x) * config.read_count_multiplier,
    }
    headers['retained'] = {
        'title': '{} Retained'.format(config.read_count_prefix),
        'description': '# retained reads after filtering ({})'.format(config.read_count_desc),
        'shared_key': 'read_count',
        'min': 0,
        'format': '{:,.2f}',
        'scale': 'YlGn',
        'modify': lambda x: float(x) * config.read_count_multiplier,
    }
    headers['mapped'] = {
        'title': '{} Mapped'.format(config.read_count_prefix),
        'description': '# mapped reads ({})'.format(config.read_count_desc),
        'shared_key': 'read_count',
        'min': 0,
        'format': '{:,.2f}',
        'scale': 'YlGn',
        'modify': lambda x: float(x) * config.read_count_multiplier,
    }
    headers['sequenced'] = {
        'title': '{} Sequenced'.format(config.read_count_prefix),
        'description': '# sequenced reads ({})'.format(config.read_count_desc),
        'shared_key': 'read_count',
        'min': 0,
        'format': '{:,.2f}',
        'scale': 'YlGn',
        'modify': lambda x: float(x) * config.read_count_multiplier,
    }
    self.general_stats_addcols(self.slamdunk_data, headers)
python
{ "resource": "" }
q28330
MultiqcModule.slamdunkFilterStatsTable
train
def slamdunkFilterStatsTable(self):
    """Take the parsed filter stats from Slamdunk and add them to a separate table."""
    headers = OrderedDict()
    headers['mapped'] = {
        'namespace': 'Slamdunk',
        'title': '{} Mapped'.format(config.read_count_prefix),
        'description': '# mapped reads ({})'.format(config.read_count_desc),
        'shared_key': 'read_count',
        'min': 0,
        'format': '{:,.2f}',
        'suffix': config.read_count_prefix,
        'scale': 'YlGn',
        'modify': lambda x: float(x) * config.read_count_multiplier,
    }
    headers['multimapper'] = {
        'namespace': 'Slamdunk',
        'title': '{} Multimap-Filtered'.format(config.read_count_prefix),
        'description': '# multimap-filtered reads ({})'.format(config.read_count_desc),
        'shared_key': 'read_count',
        'min': 0,
        'format': '{:,.2f}',
        'suffix': config.read_count_prefix,
        'scale': 'OrRd',
        'modify': lambda x: float(x) * config.read_count_multiplier,
    }
    headers['nmfiltered'] = {
        'namespace': 'Slamdunk',
        'title': '{} NM-Filtered'.format(config.read_count_prefix),
        'description': '# NM-filtered reads ({})'.format(config.read_count_desc),
        'shared_key': 'read_count',
        'min': 0,
        'format': '{:,.2f}',
        'suffix': config.read_count_prefix,
        'scale': 'OrRd',
        'modify': lambda x: float(x) * config.read_count_multiplier,
    }
    headers['idfiltered'] = {
        'namespace': 'Slamdunk',
        'title': '{} Identity-Filtered'.format(config.read_count_prefix),
        'description': '# identity-filtered reads ({})'.format(config.read_count_desc),
        'shared_key': 'read_count',
        'min': 0,
        'format': '{:,.2f}',
        'suffix': config.read_count_prefix,
        'scale': 'OrRd',
        'modify': lambda x: float(x) * config.read_count_multiplier,
    }
    headers['mqfiltered'] = {
        'namespace': 'Slamdunk',
        'title': '{} MQ-Filtered'.format(config.read_count_prefix),
        'description': '# MQ-filtered reads ({})'.format(config.read_count_desc),
        'shared_key': 'read_count',
        'min': 0,
        'format': '{:,.2f}',
        'suffix': config.read_count_prefix,
        'scale': 'OrRd',
        'modify': lambda x: float(x) * config.read_count_multiplier,
    }
    pconfig = {
        'id': 'slamdunk_filtering_table',
        'min': 0,
    }
    self.add_section(
        name='Filter statistics',
        anchor='slamdunk_filtering',
        description='This table shows the number of reads filtered with each filter criterion during the filtering phase of slamdunk.',
        plot=table.plot(self.slamdunk_data, headers, pconfig)
    )
python
{ "resource": "" }
q28331
MultiqcModule.bowtie_general_stats_table
train
def bowtie_general_stats_table(self):
    """Take the parsed stats from the Bowtie report and add it to the
    basic stats table at the top of the report."""
    headers = OrderedDict()
    headers['reads_aligned_percentage'] = {
        'title': '% Aligned',
        'description': '% reads with at least one reported alignment',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'YlGn'
    }
    headers['reads_aligned'] = {
        'title': '{} Aligned'.format(config.read_count_prefix),
        'description': 'reads with at least one reported alignment ({})'.format(config.read_count_desc),
        'min': 0,
        'scale': 'PuRd',
        'modify': lambda x: x * config.read_count_multiplier,
        'shared_key': 'read_count'
    }
    self.general_stats_addcols(self.bowtie_data, headers)
python
{ "resource": "" }
q28332
search_file
train
def search_file(pattern, f):
    """Search a single file for a single search pattern."""
    fn_matched = False
    contents_matched = False
    # Use mimetypes to exclude binary files where possible
    if not re.match(r'.+_mqc\.(png|jpg|jpeg)', f['fn']):
        (ftype, encoding) = mimetypes.guess_type(os.path.join(f['root'], f['fn']))
        if encoding is not None:
            return False
        if ftype is not None and ftype.startswith('image'):
            return False
    # Search pattern specific filesize limit
    if pattern.get('max_filesize') is not None and 'filesize' in f:
        if f['filesize'] > pattern.get('max_filesize'):
            logger.debug("Ignoring because it exceeded the search pattern filesize limit: {}".format(f['fn']))
            return False
    # Search by file name (glob)
    if pattern.get('fn') is not None:
        if fnmatch.fnmatch(f['fn'], pattern['fn']):
            fn_matched = True
            if pattern.get('contents') is None and pattern.get('contents_re') is None:
                return True
    # Search by file name (regex)
    if pattern.get('fn_re') is not None:
        if re.match(pattern['fn_re'], f['fn']):
            fn_matched = True
            if pattern.get('contents') is None and pattern.get('contents_re') is None:
                return True
    # Search by file contents
    if pattern.get('contents') is not None or pattern.get('contents_re') is not None:
        if pattern.get('contents_re') is not None:
            repattern = re.compile(pattern['contents_re'])
        try:
            # Use a different variable name to avoid shadowing the 'f' dict
            with io.open(os.path.join(f['root'], f['fn']), "r", encoding='utf-8') as fh:
                l = 1
                for line in fh:
                    # Search by file contents (string)
                    if pattern.get('contents') is not None:
                        if pattern['contents'] in line:
                            contents_matched = True
                            if pattern.get('fn') is None and pattern.get('fn_re') is None:
                                return True
                            break
                    # Search by file contents (regex)
                    elif pattern.get('contents_re') is not None:
                        if re.search(repattern, line):
                            contents_matched = True
                            if pattern.get('fn') is None and pattern.get('fn_re') is None:
                                return True
                            break
                    # Break if we've searched enough lines for this pattern
                    if pattern.get('num_lines') and l >= pattern.get('num_lines'):
                        break
                    l += 1
        except (IOError, OSError, ValueError, UnicodeDecodeError):
            if config.report_readerrors:
                logger.debug("Couldn't read file when looking for output: {}".format(f['fn']))
            return False
    return fn_matched and contents_matched
python
{ "resource": "" }
q28333
exclude_file
train
def exclude_file(sp, f):
    """Exclude discovered files if they match the special exclude_ search pattern keys."""
    # Make everything a list if it isn't already
    for k in sp:
        if k in ['exclude_fn', 'exclude_fn_re', 'exclude_contents', 'exclude_contents_re']:
            if not isinstance(sp[k], list):
                sp[k] = [sp[k]]
    # Search by file name (glob)
    if 'exclude_fn' in sp:
        for pat in sp['exclude_fn']:
            if fnmatch.fnmatch(f['fn'], pat):
                return True
    # Search by file name (regex)
    if 'exclude_fn_re' in sp:
        for pat in sp['exclude_fn_re']:
            if re.match(pat, f['fn']):
                return True
    # Search the contents of the file
    if 'exclude_contents' in sp or 'exclude_contents_re' in sp:
        # Compile regex patterns if we have any
        if 'exclude_contents_re' in sp:
            sp['exclude_contents_re'] = [re.compile(pat) for pat in sp['exclude_contents_re']]
        with io.open(os.path.join(f['root'], f['fn']), "r", encoding='utf-8') as fh:
            for line in fh:
                if 'exclude_contents' in sp:
                    for pat in sp['exclude_contents']:
                        if pat in line:
                            return True
                if 'exclude_contents_re' in sp:
                    for pat in sp['exclude_contents_re']:
                        if re.search(pat, line):
                            return True
    return False
python
{ "resource": "" }
q28334
save_htmlid
train
def save_htmlid(html_id, skiplint=False):
    """Take an HTML ID, sanitise it for HTML, check for duplicates and save.
    Returns a sanitised, unique ID."""
    global html_ids
    global lint_errors
    # Trailing whitespace
    html_id_clean = html_id.strip()
    # Trailing underscores
    html_id_clean = html_id_clean.strip('_')
    # Must begin with a letter
    if re.match(r'^[a-zA-Z]', html_id_clean) is None:
        html_id_clean = 'mqc_{}'.format(html_id_clean)
    # Replace illegal characters
    html_id_clean = re.sub('[^a-zA-Z0-9_-]+', '_', html_id_clean)
    # Validate if linting
    if config.lint and not skiplint:
        modname = ''
        codeline = ''
        callstack = inspect.stack()
        for n in callstack:
            if 'multiqc/modules/' in n[1] and 'base_module.py' not in n[1]:
                callpath = n[1].split('multiqc/modules/', 1)[-1]
                modname = '>{}< '.format(callpath)
                codeline = n[4][0].strip()
                break
    if config.lint and not skiplint and html_id != html_id_clean:
        errmsg = "LINT: {}HTML ID was not clean ('{}' -> '{}') ## {}".format(modname, html_id, html_id_clean, codeline)
        logger.error(errmsg)
        lint_errors.append(errmsg)
    # Check for duplicates
    i = 1
    html_id_base = html_id_clean
    while html_id_clean in html_ids:
        html_id_clean = '{}-{}'.format(html_id_base, i)
        i += 1
        if config.lint and not skiplint:
            errmsg = "LINT: {}HTML ID was a duplicate ({}) ## {}".format(modname, html_id_clean, codeline)
            logger.error(errmsg)
            lint_errors.append(errmsg)
    # Remember and return
    html_ids.append(html_id_clean)
    return html_id_clean
python
{ "resource": "" }
q28335
compress_json
train
def compress_json(data):
    """Take a Python data object. Convert to JSON and compress using lzstring."""
    json_string = json.dumps(data).encode('utf-8', 'ignore').decode('utf-8')
    # JSON.parse() doesn't handle `NaN`, but it does handle `null`.
    json_string = json_string.replace('NaN', 'null')
    x = lzstring.LZString()
    return x.compressToBase64(json_string)
python
{ "resource": "" }
q28336
MultiqcModule.methylqa_general_stats_table
train
def methylqa_general_stats_table(self):
    """Take the parsed stats from the methylQA report and add it to the
    basic stats table at the top of the report."""
    headers = OrderedDict()
    headers['coverage'] = {
        'title': 'Fold Coverage',
        'min': 0,
        'suffix': 'X',
        'scale': 'YlGn'
    }
    self.general_stats_addcols(self.methylqa_data, headers)
python
{ "resource": "" }
q28337
MultiqcModule.rsem_stats_table
train
def rsem_stats_table(self):
    """Take the parsed stats from the rsem report and add them to the
    basic stats table at the top of the report."""
    headers = OrderedDict()
    headers['alignable_percent'] = {
        'title': '% Alignable',
        'description': '% Alignable reads',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'YlGn'
    }
    self.general_stats_addcols(self.rsem_mapped_data, headers)
python
{ "resource": "" }
q28338
MultiqcModule.rsem_mapped_reads_plot
train
def rsem_mapped_reads_plot(self):
    """Make the rsem assignment rates plot."""
    # Plot categories
    keys = OrderedDict()
    keys['Unique'] = {'color': '#437bb1', 'name': 'Aligned uniquely to a gene'}
    keys['Multi'] = {'color': '#e63491', 'name': 'Aligned to multiple genes'}
    keys['Filtered'] = {'color': '#b1084c', 'name': 'Filtered due to too many alignments'}
    keys['Unalignable'] = {'color': '#7f0000', 'name': 'Unalignable reads'}
    # Config for the plot
    config = {
        'id': 'rsem_assignment_plot',
        'title': 'RSEM: Mapped reads',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads',
        'hide_zero_cats': False
    }
    self.add_section(
        name='Mapped Reads',
        anchor='rsem_mapped_reads',
        description='A breakdown of how all reads were aligned for each sample.',
        plot=bargraph.plot(self.rsem_mapped_data, keys, config)
    )
python
{ "resource": "" }
q28339
TsTvByQualMixin.parse_tstv_by_qual
train
def parse_tstv_by_qual(self):
    """Create the HTML for the TsTv by quality linegraph plot."""
    self.vcftools_tstv_by_qual = dict()
    for f in self.find_log_files('vcftools/tstv_by_qual', filehandles=True):
        d = {}
        for line in f['f'].readlines()[1:]:  # don't add the header line (first row)
            key = float(line.split()[0])  # take the first column (QUAL_THRESHOLD) as key
            val = float(line.split()[6])  # take Ts/Tv_GT_QUAL_THRESHOLD as value
            if (val == float('inf')) or (val == float('-inf')):
                val = float('nan')
            d[key] = val
        self.vcftools_tstv_by_qual[f['s_name']] = d
    # Filter out ignored sample names
    self.vcftools_tstv_by_qual = self.ignore_samples(self.vcftools_tstv_by_qual)
    if len(self.vcftools_tstv_by_qual) == 0:
        return 0
    pconfig = {
        'id': 'vcftools_tstv_by_qual',
        'title': 'VCFTools: TsTv by Qual',
        'ylab': 'TsTv Ratio',
        'xlab': 'SNP Quality Threshold',
        'xmin': 0,
        'ymin': 0,
        'smooth_points': 400,  # limits huge file sizes and prevents the browser crashing
        'smooth_points_sumcounts': False
    }
    helptext = '''
    A `transition` is a purine-to-purine or pyrimidine-to-pyrimidine point mutation.
    A `transversion` is a purine-to-pyrimidine or pyrimidine-to-purine point mutation.
    `Quality` here is the Phred-scaled quality score as given in the QUAL column of the VCF.
    Note: only bi-allelic SNPs are used (multi-allelic sites and INDELs are skipped).
    Refer to the VCFtools manual (https://vcftools.github.io/man_latest.html) on `--TsTv-by-qual`.
    '''
    self.add_section(
        name='TsTv by Qual',
        anchor='vcftools-tstv-by-qual',
        description="Plot of `TSTV-BY-QUAL` - the transition to transversion ratio as a function of SNP quality, from the output of vcftools TsTv-by-qual.",
        helptext=helptext,
        plot=linegraph.plot(self.vcftools_tstv_by_qual, pconfig)
    )
    return len(self.vcftools_tstv_by_qual)
python
{ "resource": "" }
q28340
MultiqcModule.general_stats
train
def general_stats(self):
    """Add key SnpEff stats to the general stats table."""
    headers = OrderedDict()
    headers['Change_rate'] = {
        'title': 'Change rate',
        'scale': 'RdYlBu-rev',
        'min': 0,
        'format': '{:,.0f}'
    }
    headers['Ts_Tv_ratio'] = {
        'title': 'Ts/Tv',
        'description': 'Transitions / Transversions ratio',
        'format': '{:,.3f}'
    }
    headers['Number_of_variants_before_filter'] = {
        'title': 'M Variants',
        'description': 'Number of variants before filter (millions)',
        'scale': 'PuRd',
        'modify': lambda x: x / 1000000,
        'min': 0,
        'format': '{:,.2f}'
    }
    self.general_stats_addcols(self.snpeff_data, headers)
python
{ "resource": "" }
q28341
MultiqcModule.count_genomic_region_plot
train
def count_genomic_region_plot(self):
    """Generate the SnpEff Counts by Genomic Region plot."""
    # Sort the keys based on the total counts
    keys = self.snpeff_section_totals['# Count by genomic region']
    sorted_keys = sorted(keys, reverse=True, key=keys.get)
    # Make nicer label names
    pkeys = OrderedDict()
    for k in sorted_keys:
        pkeys[k] = {'name': k.replace('_', ' ').title().replace('Utr', 'UTR')}
    # Config for the plot
    pconfig = {
        'id': 'snpeff_variant_effects_region',
        'title': 'SnpEff: Counts by Genomic Region',
        'ylab': '# Reads',
        'logswitch': True
    }
    return bargraph.plot(self.snpeff_data, pkeys, pconfig)
python
{ "resource": "" }
q28342
MultiqcModule.effects_impact_plot
train
def effects_impact_plot(self):
    """Generate the SnpEff Counts by Effects Impact plot."""
    # Put keys in a more logical order
    keys = ['MODIFIER', 'LOW', 'MODERATE', 'HIGH']
    # Make nicer label names
    pkeys = OrderedDict()
    for k in keys:
        pkeys[k] = {'name': k.title()}
    # Config for the plot
    pconfig = {
        'id': 'snpeff_variant_effects_impact',
        'title': 'SnpEff: Counts by Effects Impact',
        'ylab': '# Reads',
        'logswitch': True
    }
    return bargraph.plot(self.snpeff_data, pkeys, pconfig)
python
{ "resource": "" }
q28343
MultiqcModule.parse_prokka
train
def parse_prokka(self, f):
    """Parse prokka txt summary files.

    Prokka summary files are difficult to identify as there are
    practically no distinct prokka identifiers in the filenames or file
    contents. This parser makes an attempt using the first three lines,
    expected to contain organism, contigs, and bases statistics.
    """
    s_name = None
    # Look at the first three lines, they are always the same
    first_line = f['f'].readline()
    contigs_line = f['f'].readline()
    bases_line = f['f'].readline()
    # If any of these fail, it's probably not a prokka summary file
    if not all((first_line.startswith("organism:"),
                contigs_line.startswith("contigs:"),
                bases_line.startswith("bases:"))):
        return
    # Get organism and sample name from the first line
    # Assumes organism name only consists of two words,
    # i.e. 'Genusname speciesname', and that the remaining
    # text on the organism line is the sample name.
    try:
        organism = " ".join(first_line.strip().split(":", 1)[1].split()[:2])
        s_name = self.clean_s_name(" ".join(first_line.split()[3:]), f['root'])
    except KeyError:
        organism = first_line.strip().split(":", 1)[1]
        s_name = f['s_name']
    # Don't try to guess sample name if requested in the config
    if getattr(config, 'prokka_fn_snames', False):
        s_name = f['s_name']
    if s_name in self.prokka:
        log.debug("Duplicate sample name found! Overwriting: {}".format(s_name))
    self.prokka[s_name] = dict()
    self.prokka[s_name]['organism'] = organism
    self.prokka[s_name]['contigs'] = int(contigs_line.split(":")[1])
    self.prokka[s_name]['bases'] = int(bases_line.split(":")[1])
    # Get additional info from remaining lines
    for line in f['f']:
        description, value = line.split(":")
        try:
            self.prokka[s_name][description] = int(value)
        except ValueError:
            log.warning("Unable to parse line: '%s'", line)
    self.add_data_source(f, s_name)
python
{ "resource": "" }
q28344
MultiqcModule.prokka_table
train
def prokka_table(self):
    """Make basic table of the annotation stats."""
    # Specify the order of the different possible categories
    headers = OrderedDict()
    headers['organism'] = {
        'title': 'Organism',
        'description': 'Organism name',
    }
    headers['contigs'] = {
        'title': '# contigs',
        'description': 'Number of contigs in assembly',
        'format': '{:,d}',
    }
    headers['bases'] = {
        'title': '# bases',
        'description': 'Number of nucleotide bases in assembly',
        'format': '{:,d}',
    }
    headers['CDS'] = {
        'title': '# CDS',
        'description': 'Number of annotated CDS',
        'format': '{:,d}',
    }
    headers['rRNA'] = {
        'title': '# rRNA',
        'description': 'Number of annotated rRNA',
        'format': '{:,d}',
    }
    headers['tRNA'] = {
        'title': '# tRNA',
        'description': 'Number of annotated tRNA',
        'format': '{:,d}',
    }
    headers['tmRNA'] = {
        'title': '# tmRNA',
        'description': 'Number of annotated tmRNA',
        'format': '{:,d}',
    }
    headers['misc_RNA'] = {
        'title': '# misc RNA',
        'description': 'Number of annotated misc. RNA',
        'format': '{:,d}',
    }
    headers['sig_peptide'] = {
        'title': '# sig_peptide',
        'description': 'Number of annotated sig_peptide',
        'format': '{:,d}',
    }
    headers['repeat_region'] = {
        'title': '# CRISPR arrays',
        'description': 'Number of annotated CRISPR arrays',
        'format': '{:,d}',
    }
    table_config = {
        'namespace': 'prokka',
        'min': 0,
    }
    return table.plot(self.prokka, headers, table_config)
python
{ "resource": "" }
q28345
MultiqcModule.prokka_barplot
train
def prokka_barplot(self):
    """Make a basic plot of the annotation stats."""
    # Specify the order of the different categories
    keys = OrderedDict()
    keys['CDS'] = {'name': 'CDS'}
    keys['rRNA'] = {'name': 'rRNA'}
    keys['tRNA'] = {'name': 'tRNA'}
    keys['tmRNA'] = {'name': 'tmRNA'}
    keys['misc_RNA'] = {'name': 'misc RNA'}
    keys['sig_peptide'] = {'name': 'Signal peptides'}
    keys['repeat_region'] = {'name': 'CRISPR array'}
    plot_config = {
        'id': 'prokka_plot',
        'title': 'Prokka: Feature Types',
        'ylab': '# Counts',
        'cpswitch_counts_label': 'Features'
    }
    return bargraph.plot(self.prokka, keys, plot_config)
python
{ "resource": "" }
q28346
plot_bhist
train
def plot_bhist(samples, file_type, **plot_args):
    """Create line graph plot of histogram data for BBMap 'bhist' output.

    The 'samples' parameter could be from the bbmap mod_data dictionary:
    samples = bbmap.MultiqcModule.mod_data[file_type]
    """
    all_x = set()
    for item in sorted(chain(*[samples[sample]['data'].items() for sample in samples])):
        all_x.add(item[0])
    columns_to_plot = {
        'GC': {
            1: 'C',
            2: 'G',
        },
        'AT': {
            0: 'A',
            3: 'T',
        },
        'N': {
            4: 'N'
        },
    }
    nucleotide_data = []
    for column_type in columns_to_plot:
        nucleotide_data.append({
            sample + '.' + column_name: {
                x: samples[sample]['data'][x][column] * 100 if x in samples[sample]['data'] else 0
                for x in all_x
            }
            for sample in samples
            for column, column_name in columns_to_plot[column_type].items()
        })
    plot_params = {
        'id': 'bbmap-' + file_type + '_plot',
        'title': 'BBTools: ' + plot_args['plot_title'],
        'xlab': 'Read position',
        'ymin': 0,
        'ymax': 100,
        'data_labels': [
            {'name': 'Percentage of G+C bases'},
            {'name': 'Percentage of A+T bases'},
            {'name': 'Percentage of N bases'},
        ]
    }
    plot_params.update(plot_args['plot_params'])
    plot = linegraph.plot(nucleotide_data, plot_params)
    return plot
python
{ "resource": "" }
q28347
MultiqcModule.add_readlen_dist_plot
train
def add_readlen_dist_plot(self):
    """Generate plot HTML for read length distribution plot."""
    pconfig = {
        'id': 'skewer_read_length_histogram',
        'title': 'Skewer: Read Length Distribution after trimming',
        'xDecimals': False,
        'ylab': '% of Reads',
        'xlab': 'Read Length',
        'xmin': 0,
        'ymin': 0,
        'tt_label': '<b>{point.x}</b>: {point.y:.1f}%',
    }
    self.add_section(
        plot=linegraph.plot(self.skewer_readlen_dist, pconfig)
    )
python
{ "resource": "" }
q28348
MultiqcModule.parse_skewer_log
train
def parse_skewer_log(self, f):
    """Go through the log file looking for skewer output."""
    fh = f['f']
    regexes = {
        'fq1': r"Input file:\s+(.+)",
        'fq2': r"Paired file:\s+(.+)",
        'r_processed': r"(\d+) read|reads pairs? processed",
        'r_short_filtered': r"(\d+) \(\s*\d+.\d+%\) short read",
        'r_empty_filtered': r"(\d+) \(\s*\d+.\d+%\) empty read",
        'r_avail': r"(\d+) \(\s*\d+.\d+%\) read",
        'r_trimmed': r"(\d+) \(\s*\d+.\d+%\) trimmed read",
        'r_untrimmed': r"(\d+) \(\s*\d+.\d+%\) untrimmed read"
    }
    regex_hist = r"\s?(\d+)\s+(\d+)\s+(\d+.\d+)%"
    data = dict()
    for k, v in regexes.items():
        data[k] = 0
    data['fq1'] = None
    data['fq2'] = None
    readlen_dist = OrderedDict()
    for l in fh:
        for k, r in regexes.items():
            match = re.search(r, l)
            if match:
                data[k] = match.group(1).replace(',', '')
        match = re.search(regex_hist, l)
        if match:
            read_length = int(match.group(1))
            pct_at_rl = float(match.group(3))
            readlen_dist[read_length] = pct_at_rl
    if data['fq1'] is not None:
        s_name = self.clean_s_name(data['fq1'], f['root'])
        if s_name in self.skewer_readlen_dist:
            log.debug("Duplicate sample name found in {}! Overwriting: {}".format(f['fn'], s_name))
        self.add_data_source(f, s_name)
        self.add_skewer_data(s_name, data, f)
        self.skewer_readlen_dist[s_name] = readlen_dist
    if data['fq2'] is not None:
        s_name = self.clean_s_name(data['fq2'], f['root'])
        if s_name in self.skewer_readlen_dist:
            log.debug("Duplicate sample name found in {}! Overwriting: {}".format(f['fn'], s_name))
        self.add_data_source(f, s_name)
        self.add_skewer_data(s_name, data, f)
        self.skewer_readlen_dist[s_name] = readlen_dist
python
{ "resource": "" }
q28349
parse_reports
train
def parse_reports(self):
    """Find RSeQC read_duplication reports and parse their data."""
    # Set up vars
    self.read_dups = dict()
    # Go through files and parse data
    for f in self.find_log_files('rseqc/read_duplication_pos'):
        if f['f'].startswith('Occurrence UniqReadNumber'):
            if f['s_name'] in self.read_dups:
                log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
            self.add_data_source(f, section='read_duplication')
            self.read_dups[f['s_name']] = OrderedDict()
            for l in f['f'].splitlines():
                s = l.split()
                try:
                    if int(s[0]) <= 500:
                        self.read_dups[f['s_name']][int(s[0])] = int(s[1])
                except (IndexError, ValueError):
                    pass
    # Filter to strip out ignored sample names
    self.read_dups = self.ignore_samples(self.read_dups)
    if len(self.read_dups) > 0:
        # Add line graph to section
        pconfig = {
            'smooth_points': 200,
            'id': 'rseqc_read_dups_plot',
            'title': 'RSeQC: Read Duplication',
            'ylab': 'Number of Reads (log10)',
            'xlab': "Occurrence of read",
            'yLog': True,
            'tt_label': "<strong>{point.x} occurrences</strong>: {point.y} reads",
        }
        self.add_section(
            name='Read Duplication',
            anchor='rseqc-read_dups',
            description='<a href="http://rseqc.sourceforge.net/#read-duplication-py" target="_blank">read_duplication.py</a>'
                        " calculates how many alignment positions have a certain number of exact duplicates."
                        " Note - plot truncated at 500 occurrences and binned.",
            plot=linegraph.plot(self.read_dups, pconfig)
        )
    # Return number of samples found
    return len(self.read_dups)
python
{ "resource": "" }
q28350
read_sample_name
train
def read_sample_name(line_iter, clean_fn):
    """Consumes lines from the provided line_iter and parses those lines
    as a header for the picard base distribution file. The header file is
    assumed to contain a line with both 'INPUT' and 'BaseDistributionByCycle'.

    If the header parses correctly, the sample name is returned.
    If the header does not parse correctly, None is returned.
    """
    try:
        while True:
            new_line = next(line_iter)
            new_line = new_line.strip()
            if 'BaseDistributionByCycle' in new_line and 'INPUT' in new_line:
                # Pull sample name from input
                fn_search = re.search(r"INPUT=?\s*(\[?[^\s]+\]?)", new_line, flags=re.IGNORECASE)
                if fn_search:
                    s_name = os.path.basename(fn_search.group(1).strip('[]'))
                    s_name = clean_fn(s_name)
                    return s_name
    except StopIteration:
        return None
python
{ "resource": "" }
q28351
MultiqcModule.fastp_general_stats_table
train
def fastp_general_stats_table(self):
    """Take the parsed stats from the fastp report and add it to the
    General Statistics table at the top of the report."""
    headers = OrderedDict()
    headers['pct_duplication'] = {
        'title': '% Duplication',
        'description': 'Duplication rate in filtered reads',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'RdYlGn-rev'
    }
    headers['after_filtering_q30_rate'] = {
        'title': '% > Q30',
        'description': 'Percentage of reads > Q30 after filtering',
        'min': 0,
        'max': 100,
        'modify': lambda x: x * 100.0,
        'scale': 'GnBu',
        'suffix': '%',
        'hidden': True
    }
    headers['after_filtering_q30_bases'] = {
        'title': '{} Q30 bases'.format(config.base_count_prefix),
        'description': 'Bases > Q30 after filtering ({})'.format(config.base_count_desc),
        'min': 0,
        'modify': lambda x: x * config.base_count_multiplier,
        'scale': 'GnBu',
        'shared_key': 'base_count',
        'hidden': True
    }
    headers['after_filtering_gc_content'] = {
        'title': 'GC content',
        'description': 'GC content after filtering',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'Blues',
        'modify': lambda x: x * 100.0
    }
    headers['pct_surviving'] = {
        'title': '% PF',
        'description': 'Percent reads passing filter',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'BuGn',
    }
    headers['pct_adapter'] = {
        'title': '% Adapter',
        'description': 'Percentage adapter-trimmed reads',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'RdYlGn-rev',
    }
    self.general_stats_addcols(self.fastp_data, headers)
python
{ "resource": "" }
q28352
MultiqcModule.fastp_filtered_reads_chart
train
def fastp_filtered_reads_chart(self):
    """Function to generate the fastp filtered reads bar plot."""
    # Specify the order of the different possible categories
    keys = OrderedDict()
    keys['filtering_result_passed_filter_reads'] = {'name': 'Passed Filter'}
    keys['filtering_result_low_quality_reads'] = {'name': 'Low Quality'}
    keys['filtering_result_too_many_N_reads'] = {'name': 'Too Many N'}
    keys['filtering_result_too_short_reads'] = {'name': 'Too short'}
    # Config for the plot
    pconfig = {
        'id': 'fastp_filtered_reads_plot',
        'title': 'Fastp: Filtered Reads',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads',
        'hide_zero_cats': False,
    }
    return bargraph.plot(self.fastp_data, keys, pconfig)
python
{ "resource": "" }
q28353
MultiqcModule.fastp_read_qual_plot
train
def fastp_read_qual_plot(self):
    """Make the read quality plot for Fastp."""
    data_labels, pdata = self.filter_pconfig_pdata_subplots(self.fastp_qual_plotdata, 'Sequence Quality')
    pconfig = {
        'id': 'fastp-seq-quality-plot',
        'title': 'Fastp: Sequence Quality',
        'xlab': 'Read Position',
        'ylab': 'R1 Before filtering: Sequence Quality',
        'ymin': 0,
        'xDecimals': False,
        'data_labels': data_labels
    }
    return linegraph.plot(pdata, pconfig)
python
{ "resource": "" }
q28354
MultiqcModule.fastp_read_gc_plot
train
def fastp_read_gc_plot(self):
    """Make the read GC plot for Fastp."""
    data_labels, pdata = self.filter_pconfig_pdata_subplots(self.fastp_gc_content_data, 'Base Content Percent')
    pconfig = {
        'id': 'fastp-seq-content-gc-plot',
        'title': 'Fastp: Read GC Content',
        'xlab': 'Read Position',
        'ylab': 'R1 Before filtering: Base Content Percent',
        'ymax': 100,
        'ymin': 0,
        'xDecimals': False,
        'yLabelFormat': '{value}%',
        'tt_label': '{point.x}: {point.y:.2f}%',
        'data_labels': data_labels
    }
    return linegraph.plot(pdata, pconfig)
python
{ "resource": "" }
q28355
MultiqcModule.fastp_read_n_plot
train
def fastp_read_n_plot(self):
    """Make the read N content plot for Fastp."""
    data_labels, pdata = self.filter_pconfig_pdata_subplots(self.fastp_n_content_data, 'Base Content Percent')
    pconfig = {
        'id': 'fastp-seq-content-n-plot',
        'title': 'Fastp: Read N Content',
        'xlab': 'Read Position',
        'ylab': 'R1 Before filtering: Base Content Percent',
        'yCeiling': 100,
        'yMinRange': 5,
        'ymin': 0,
        'xDecimals': False,
        'yLabelFormat': '{value}%',
        'tt_label': '{point.x}: {point.y:.2f}%',
        'data_labels': data_labels
    }
    return linegraph.plot(pdata, pconfig)
python
{ "resource": "" }
q28356
plotProfileMixin.parse_plotProfile
train
def parse_plotProfile(self):
    """Find plotProfile output."""
    self.deeptools_plotProfile = dict()
    for f in self.find_log_files('deeptools/plotProfile', filehandles=False):
        parsed_data, bin_labels, converted_bin_labels = self.parsePlotProfileData(f)
        for k, v in parsed_data.items():
            if k in self.deeptools_plotProfile:
                log.warning("Replacing duplicate sample {}.".format(k))
            self.deeptools_plotProfile[k] = v
        if len(parsed_data) > 0:
            self.add_data_source(f, section='plotProfile')
    if len(self.deeptools_plotProfile) > 0:
        config = {
            'id': 'read_distribution_profile',
            'title': 'deepTools: Read Distribution Profile after Annotation',
            'ylab': 'Occurrence',
            'xlab': None,
            'smooth_points': 100,
            'xPlotBands': [
                {'from': converted_bin_labels[bin_labels.index('TES')], 'to': converted_bin_labels[-1], 'color': '#f7cfcf'},
                {'from': converted_bin_labels[bin_labels.index('TSS')], 'to': converted_bin_labels[bin_labels.index('TES')], 'color': '#ffffe2'},
                {'from': converted_bin_labels[0], 'to': converted_bin_labels[bin_labels.index('TSS')], 'color': '#e5fce0'},
            ],
            'xPlotLines': [
                {'width': 1, 'value': converted_bin_labels[bin_labels.index('TES')], 'dashStyle': 'Dash', 'color': '#000000'},
                {'width': 1, 'value': converted_bin_labels[bin_labels.index('TSS')], 'dashStyle': 'Dash', 'color': '#000000'},
            ],
        }
        bl = list(filter(None, bin_labels))
        self.add_section(
            name='Read Distribution Profile after Annotation',
            anchor='read_distribution_profile_plot',
            description=("Accumulated view of the distribution of sequence reads related to the closest annotated gene. "
                         "All annotated genes have been normalized to the same size. "
                         "Green: {} upstream of gene to {}; Yellow: {} to {}; Pink: {} to {} downstream of gene").format(
                             bl[0], bl[1], bl[1], bl[2], bl[2], bl[3]),
            plot=linegraph.plot(self.deeptools_plotProfile, config)
        )
    return len(self.deeptools_plotProfile)
python
{ "resource": "" }
q28357
MultiqcModule.qorts_general_stats
train
def qorts_general_stats(self):
    """ Add columns to the General Statistics table """
    headers = OrderedDict()
    headers['Genes_PercentWithNonzeroCounts'] = {
        'title': '% Genes with Counts',
        'description': 'Percent of Genes with Non-Zero Counts',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'YlGn'
    }
    headers['NumberOfChromosomesCovered'] = {
        'title': 'Chrs Covered',
        'description': 'Number of Chromosomes Covered',
        'format': '{:,.0f}'
    }
    self.general_stats_addcols(self.qorts_data, headers)
python
{ "resource": "" }
q28358
MultiqcModule.parse_metrics
train
def parse_metrics(self, f):
    """ Parse the metrics.tsv file from RNA-SeQC """
    headers = None
    for l in f['f'].splitlines():
        s = l.strip().split("\t")
        if headers is None:
            headers = s
        else:
            s_name = s[headers.index('Sample')]
            data = dict()
            for idx, h in enumerate(headers):
                try:
                    data[h] = float(s[idx])
                except ValueError:
                    data[h] = s[idx]
            self.rna_seqc_metrics[s_name] = data
python
{ "resource": "" }
q28359
MultiqcModule.rnaseqc_general_stats
train
def rnaseqc_general_stats(self):
    """ Add alignment rate to the general stats table """
    headers = OrderedDict()
    headers['Expression Profiling Efficiency'] = {
        'title': '% Expression Efficiency',
        'description': 'Expression Profiling Efficiency: Ratio of exon reads to total reads',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'YlGn',
        'modify': lambda x: float(x) * 100.0
    }
    headers['Genes Detected'] = {
        'title': '# Genes',
        'description': 'Number of genes detected with at least 5 reads.',
        'min': 0,
        'scale': 'Bu',
        'format': '{:,.0f}'
    }
    headers['rRNA rate'] = {
        'title': '% rRNA Alignment',
        'description': 'rRNA reads (non-duplicate and duplicate reads) per total reads',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'Reds',
        'modify': lambda x: float(x) * 100.0
    }
    self.general_stats_addcols(self.rna_seqc_metrics, headers)
python
{ "resource": "" }
q28360
MultiqcModule.transcript_associated_plot
train
def transcript_associated_plot(self):
    """ Plot a bargraph showing the Transcript-associated reads """
    # Plot bar graph of groups
    keys = OrderedDict()
    keys['Exonic Rate'] = {'name': 'Exonic', 'color': '#2f7ed8'}
    keys['Intronic Rate'] = {'name': 'Intronic', 'color': '#8bbc21'}
    keys['Intergenic Rate'] = {'name': 'Intergenic', 'color': '#0d233a'}

    # Config for the plot
    pconfig = {
        'id': 'rna_seqc_position_plot',
        'title': 'RNA-SeQC: Transcript-associated reads',
        'ylab': 'Ratio of Reads',
        'cpswitch': False,
        'ymax': 1,
        'ymin': 0,
        'tt_decimals': 3,
        'cpswitch_c_active': False
    }

    self.add_section(
        name='Transcript-associated reads',
        anchor='Transcript_associated',
        helptext='All of the above rates are per mapped read. Exonic Rate is the fraction mapping within exons. '
                 'Intronic Rate is the fraction mapping within introns. '
                 'Intergenic Rate is the fraction mapping in the genomic space between genes.',
        plot=bargraph.plot(self.rna_seqc_metrics, keys, pconfig)
    )
python
{ "resource": "" }
q28361
MultiqcModule.strand_barplot
train
def strand_barplot(self):
    """ Plot a bargraph showing the strandedness of alignments """
    # Plot bar graph of groups
    keys = ['End 1 Sense', 'End 1 Antisense', 'End 2 Sense', 'End 2 Antisense']

    # Config for the plot
    pconfig = {
        'id': 'rna_seqc_strandedness_plot',
        'title': 'RNA-SeQC: Strand Specificity',
        'ylab': '% Reads',
        'cpswitch_counts_label': '# Reads',
        'cpswitch_percent_label': '% Reads',
        'ymin': 0,
        'cpswitch_c_active': False
    }

    self.add_section(
        name='Strand Specificity',
        anchor='rna_seqc_strand_specificity',
        helptext='End 1/2 Sense are the number of End 1 or 2 reads that were sequenced in the sense direction. '
                 'Similarly, End 1/2 Antisense are the number of End 1 or 2 reads that were sequenced in the '
                 'antisense direction.',
        plot=bargraph.plot(self.rna_seqc_metrics, keys, pconfig)
    )
python
{ "resource": "" }
q28362
MultiqcModule.parse_coverage
train
def parse_coverage(self, f):
    """ Parse the RNA-SeQC Normalised Coverage Files """
    data = dict()
    s_names = None
    j = 1
    for l in f['f'].splitlines():
        s = l.strip().split("\t")
        if s_names is None:
            s_names = s
            for s_name in s_names:
                data[s_name] = dict()
        else:
            for i, v in enumerate(s):
                data[s_names[i]][j] = float(v)
            j += 1
    if f['fn'] == 'meanCoverageNorm_high.txt':
        self.rna_seqc_norm_high_cov.update(data)
    elif f['fn'] == 'meanCoverageNorm_medium.txt':
        self.rna_seqc_norm_medium_cov.update(data)
    elif f['fn'] == 'meanCoverageNorm_low.txt':
        self.rna_seqc_norm_low_cov.update(data)
python
{ "resource": "" }
q28363
MultiqcModule.coverage_lineplot
train
def coverage_lineplot(self):
    """ Make HTML for coverage line plots """
    # Add line graph to section
    data = list()
    data_labels = list()
    if len(self.rna_seqc_norm_high_cov) > 0:
        data.append(self.rna_seqc_norm_high_cov)
        data_labels.append({'name': 'High Expressed'})
    if len(self.rna_seqc_norm_medium_cov) > 0:
        data.append(self.rna_seqc_norm_medium_cov)
        data_labels.append({'name': 'Medium Expressed'})
    if len(self.rna_seqc_norm_low_cov) > 0:
        data.append(self.rna_seqc_norm_low_cov)
        data_labels.append({'name': 'Low Expressed'})

    pconfig = {
        'id': 'rna_seqc_mean_coverage_plot',
        'title': 'RNA-SeQC: Gene Body Coverage',
        'ylab': '% Coverage',
        'xlab': "Gene Body Percentile (5' -> 3')",
        'xmin': 0,
        'xmax': 100,
        'tt_label': "<strong>{point.x}% from 5'</strong>: {point.y:.2f}",
        'data_labels': data_labels
    }

    if len(data) > 0:
        self.add_section(
            name='Gene Body Coverage',
            anchor='rseqc-rna_seqc_mean_coverage',
            helptext='The metrics are calculated across the transcripts with tiered expression levels.',
            plot=linegraph.plot(data, pconfig)
        )
python
{ "resource": "" }
q28364
MultiqcModule.parse_correlation
train
def parse_correlation(self, f):
    """ Parse RNA-SeQC correlation matrices """
    s_names = None
    data = list()
    for l in f['f'].splitlines():
        s = l.strip().split("\t")
        if s_names is None:
            s_names = [x for x in s if x != '']
        else:
            data.append(s[1:])
    if f['fn'] == 'corrMatrixPearson.txt':
        self.rna_seqc_pearson = (s_names, data)
    elif f['fn'] == 'corrMatrixSpearman.txt':
        self.rna_seqc_spearman = (s_names, data)
python
{ "resource": "" }
q28365
MultiqcModule.plot_correlation_heatmap
train
def plot_correlation_heatmap(self):
    """ Return HTML for correlation heatmap """
    data = None
    corr_type = None
    correlation_type = getattr(config, 'rna_seqc', {}).get('default_correlation', 'spearman')
    if self.rna_seqc_spearman is not None and correlation_type != 'pearson':
        data = self.rna_seqc_spearman
        corr_type = 'Spearman'
    elif self.rna_seqc_pearson is not None:
        data = self.rna_seqc_pearson
        corr_type = 'Pearson'
    if data is not None:
        pconfig = {
            'id': 'rna_seqc_correlation_heatmap',
            'title': 'RNA-SeQC: {} Sample Correlation'.format(corr_type)
        }
        self.add_section(
            name='{} Correlation'.format(corr_type),
            anchor='rseqc-rna_seqc_correlation',
            plot=heatmap.plot(data[1], data[0], data[0], pconfig)
        )
python
{ "resource": "" }
q28366
MultiqcModule.parse_theta2_report
train
def parse_theta2_report(self, fh):
    """ Parse the final THetA2 log file. """
    parsed_data = {}
    for l in fh:
        if l.startswith('#'):
            continue
        else:
            s = l.split("\t")
            purities = s[1].split(',')
            parsed_data['proportion_germline'] = float(purities[0]) * 100.0
            for i, v in enumerate(purities[1:]):
                if i <= 5:
                    parsed_data['proportion_tumour_{}'.format(i + 1)] = float(v) * 100.0
                else:
                    parsed_data['proportion_tumour_gt5'] = (float(v) * 100.0) + parsed_data.get('proportion_tumour_gt5', 0)
            break
    return parsed_data
python
{ "resource": "" }
q28367
parse_single_report
train
def parse_single_report(f):
    """ Parse a GATK VariantEval report """
    # Fixme: Separate GATKReport parsing and data subsetting.
    # A GATKReport parser is now available from the GATK MultiqcModule.
    data = dict()
    in_CompOverlap = False
    in_CountVariants = False
    in_TiTv = False
    for l in f:
        # Detect section headers
        if '#:GATKTable:CompOverlap' in l:
            in_CompOverlap = True
        elif '#:GATKTable:CountVariants' in l:
            in_CountVariants = True
        elif '#:GATKTable:TiTvVariantEvaluator' in l:
            in_TiTv = True
        else:
            # Parse contents using nested loops
            if in_CompOverlap:
                headers = l.split()
                while in_CompOverlap:
                    l = f.readline().strip("\n")
                    d = dict()
                    try:
                        for i, s in enumerate(l.split()):
                            d[headers[i]] = s
                        if d['Novelty'] == 'all':
                            data['reference'] = d['CompRod']
                            data['comp_rate'] = float(d['compRate'])
                            data['concordant_rate'] = float(d['concordantRate'])
                            data['eval_variants'] = int(d['nEvalVariants'])
                            data['novel_sites'] = int(d['novelSites'])
                        elif d['Novelty'] == 'known':
                            data['known_sites'] = int(d['nEvalVariants'])
                    except KeyError:
                        in_CompOverlap = False
            elif in_CountVariants:
                headers = l.split()
                while in_CountVariants:
                    l = f.readline().strip("\n")
                    d = dict()
                    try:
                        for i, s in enumerate(l.split()):
                            d[headers[i]] = s
                        if d['Novelty'] == 'all':
                            data['snps'] = int(d['nSNPs'])
                            data['mnps'] = int(d['nMNPs'])
                            data['insertions'] = int(d['nInsertions'])
                            data['deletions'] = int(d['nDeletions'])
                            data['complex'] = int(d['nComplex'])
                            data['symbolic'] = int(d['nSymbolic'])
                            data['mixed'] = int(d['nMixed'])
                            data['nocalls'] = int(d['nNoCalls'])
                    except KeyError:
                        in_CountVariants = False
            elif in_TiTv:
                headers = l.split()
                data['titv_reference'] = 'unknown'
                while in_TiTv:
                    l = f.readline().strip("\n")
                    d = dict()
                    try:
                        for i, s in enumerate(l.split()):
                            d[headers[i]] = s
                        if d['Novelty'] == 'known':
                            data['titv_reference'] = d['CompRod']
                            data['known_titv'] = float(d['tiTvRatio'])
                        elif d['Novelty'] == 'novel':
                            data['novel_titv'] = float(d['tiTvRatio'])
                    except KeyError:
                        in_TiTv = False
    return data
python
{ "resource": "" }
q28368
comp_overlap_table
train
def comp_overlap_table(data):
    """Build a table from the comp overlaps output."""
    headers = OrderedDict()
    headers['comp_rate'] = {
        'title': 'Compare rate',
        'description': 'Ratio of known variants found in the reference set.',
        'namespace': 'GATK',
        'min': 0,
        'max': 100,
        'suffix': '%',
        'format': '{:,.2f}',
        'scale': 'Blues',
    }
    headers['concordant_rate'] = {
        'title': 'Concordant rate',
        'description': 'Ratio of variants matching alleles in the reference set.',
        'namespace': 'GATK',
        'min': 0,
        'max': 100,
        'suffix': '%',
        'format': '{:,.2f}',
        'scale': 'Blues',
    }
    headers['eval_variants'] = {
        'title': 'M Evaluated variants',
        'description': 'Number of called variants (millions)',
        'namespace': 'GATK',
        'min': 0,
        'modify': lambda x: float(x) / 1000000.0
    }
    headers['known_sites'] = {
        'title': 'M Known sites',
        'description': 'Number of known variants (millions)',
        'namespace': 'GATK',
        'min': 0,
        'modify': lambda x: float(x) / 1000000.0
    }
    headers['novel_sites'] = {
        'title': 'M Novel sites',
        'description': 'Number of novel variants (millions)',
        'namespace': 'GATK',
        'min': 0,
        'modify': lambda x: float(x) / 1000000.0
    }
    table_html = table.plot(data, headers, {'id': 'gatk_compare_overlap', 'table_title': 'GATK - Compare Overlap'})
    return table_html
python
{ "resource": "" }
q28369
VariantEvalMixin.parse_gatk_varianteval
train
def parse_gatk_varianteval(self):
    """ Find GATK varianteval logs and parse their data """
    self.gatk_varianteval = dict()
    for f in self.find_log_files('gatk/varianteval', filehandles=True):
        parsed_data = parse_single_report(f['f'])
        if len(parsed_data) > 0:
            if f['s_name'] in self.gatk_varianteval:
                log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
            self.add_data_source(f, section='varianteval')
            self.gatk_varianteval[f['s_name']] = parsed_data

    # Filter to strip out ignored sample names
    self.gatk_varianteval = self.ignore_samples(self.gatk_varianteval)

    n_reports_found = len(self.gatk_varianteval)
    if n_reports_found > 0:
        log.info("Found {} VariantEval reports".format(n_reports_found))

        # Write parsed report data to a file (restructure first)
        self.write_data_file(self.gatk_varianteval, 'multiqc_gatk_varianteval')

        # Get consensus TiTv references
        titv_ref = None
        for s_name in self.gatk_varianteval:
            if titv_ref is None:
                titv_ref = self.gatk_varianteval[s_name]['titv_reference']
            elif titv_ref != self.gatk_varianteval[s_name]['titv_reference']:
                titv_ref = 'Multiple'
                break

        # General Stats Table
        varianteval_headers = dict()
        varianteval_headers['known_titv'] = {
            'title': 'TiTV ratio (known)',
            'description': "TiTV ratio from variants found in '{}'".format(titv_ref),
            'min': 0,
            'scale': 'Blues',
            'shared_key': 'titv_ratio'
        }
        varianteval_headers['novel_titv'] = {
            'title': 'TiTV ratio (novel)',
            'description': "TiTV ratio from variants NOT found in '{}'".format(titv_ref),
            'min': 0,
            'scale': 'Blues',
            'shared_key': 'titv_ratio'
        }
        self.general_stats_addcols(self.gatk_varianteval, varianteval_headers, 'GATK VariantEval')

        # Variant Counts plot
        self.add_section(
            name='Variant Counts',
            anchor='gatk-count-variants',
            plot=count_variants_barplot(self.gatk_varianteval)
        )

        # Compare Overlap Table
        self.add_section(
            name='Compare Overlap',
            anchor='gatk-compare-overlap',
            plot=comp_overlap_table(self.gatk_varianteval)
        )

    return n_reports_found
python
{ "resource": "" }
q28370
MultiqcModule.parse_sargasso_logs
train
def parse_sargasso_logs(self, f):
    """ Parse the sargasso log file. """
    species_name = list()
    items = list()
    header = list()
    is_first_line = True
    for l in f['f'].splitlines():
        s = l.split(",")
        # Check that this actually is a Sargasso file
        if is_first_line and s[0] != 'Sample':
            return None
        if len(s) < 7:
            continue
        if is_first_line:
            # Prepare header
            is_first_line = False
            header = s
            for i in header[1:]:
                # Find out what species are included
                sname = i.split('-')[-1]
                if sname not in species_name:
                    species_name.append(sname)
                # Find out what is being counted
                kname = "-".join(i.split('-')[-3:-1])
                if kname not in items:
                    items.append(kname)
        else:
            # Start sample lines
            sample_name = s.pop(0)
            chunk_by_species = [s[i:i + len(items)] for i in range(0, len(s), len(items))]
            for idx, v in enumerate(chunk_by_species):
                # Adding species name to the sample name for easy interpretation
                new_sample_name = '_'.join([sample_name, species_name[idx]])
                # Clean up sample name
                new_sample_name = self.clean_s_name(new_sample_name, f['root'])
                if new_sample_name in self.sargasso_data.keys():
                    log.debug("Duplicate sample name found! Overwriting: {}".format(new_sample_name))
                try:
                    self.sargasso_data[new_sample_name] = dict(zip(items, map(int, v)))
                except ValueError:
                    pass

    self.sargasso_keys = items
    for idx, f_name in enumerate(self.sargasso_data.keys()):
        # Reorganise parsed data for this sample
        # Collect total READ count number
        self.sargasso_data[f_name]['Total'] = 0
        for key, value in list(self.sargasso_data[f_name].items()):  # iterate on both keys and values
            if key.endswith("Reads"):
                self.sargasso_data[f_name]['Total'] += value
        # Calculate the percent aligned if we can
        try:
            self.sargasso_data[f_name]['sargasso_percent_assigned'] = (float(self.sargasso_data[f_name]['Assigned-Reads']) / float(self.sargasso_data[f_name]['Total'])) * 100.0
        except (KeyError, ZeroDivisionError):
            pass
python
{ "resource": "" }
q28371
MultiqcModule.sargasso_stats_table
train
def sargasso_stats_table(self):
    """ Take the parsed stats from the sargasso report and add them to the
    basic stats table at the top of the report """
    headers = OrderedDict()
    headers['sargasso_percent_assigned'] = {
        'title': '% Assigned',
        'description': 'Sargasso % Assigned reads',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'RdYlGn'
    }
    headers['Assigned-Reads'] = {
        'title': '{} Assigned'.format(config.read_count_prefix),
        'description': 'Sargasso Assigned reads ({})'.format(config.read_count_desc),
        'min': 0,
        'scale': 'PuBu',
        'modify': lambda x: float(x) * config.read_count_multiplier,
        'shared_key': 'read_count'
    }
    self.general_stats_addcols(self.sargasso_data, headers)
python
{ "resource": "" }
q28372
MultiqcModule.sargasso_chart
train
def sargasso_chart(self):
    """ Make the sargasso plot """
    # Config for the plot
    config = {
        'id': 'sargasso_assignment_plot',
        'title': 'Sargasso: Assigned Reads',
        'ylab': '# Reads',
        'cpswitch_counts_label': 'Number of Reads'
    }
    # We only want to plot the READs at the moment
    return bargraph.plot(self.sargasso_data,
                         [name for name in self.sargasso_keys if 'Reads' in name],
                         config)
python
{ "resource": "" }
q28373
plot_aqhist
train
def plot_aqhist(samples, file_type, **plot_args):
    """ Create line graph plot of histogram data for BBMap 'aqhist' output.

    The 'samples' parameter could be from the bbmap mod_data dictionary:
    samples = bbmap.MultiqcModule.mod_data[file_type]
    """
    sumy = sum([int(samples[sample]['data'][x][0]) for sample in samples for x in samples[sample]['data']])
    cutoff = sumy * 0.999
    all_x = set()
    for item in sorted(chain(*[samples[sample]['data'].items() for sample in samples])):
        all_x.add(item[0])
        cutoff -= item[1][0]
        if cutoff < 0:
            xmax = item[0]
            break
    else:
        xmax = max(all_x)

    columns_to_plot = {
        'Counts': {0: 'Read1', 2: 'Read2'},
        'Proportions': {1: 'Read1', 3: 'Read2'},
    }

    plot_data = []
    for column_type in columns_to_plot:
        plot_data.append({
            sample + '.' + column_name: {
                x: samples[sample]['data'][x][column] if x in samples[sample]['data'] else 0
                for x in all_x
            }
            for sample in samples
            for column, column_name in columns_to_plot[column_type].items()
        })

    plot_params = {
        'id': 'bbmap-' + file_type + '_plot',
        'title': 'BBTools: ' + plot_args['plot_title'],
        'xmax': xmax,
        'xlab': 'Quality score',
        'data_labels': [
            {'name': 'Count data', 'ylab': 'Read count'},
            {'name': 'Proportion data', 'ylab': 'Proportion of reads'},
        ]
    }
    plot_params.update(plot_args['plot_params'])
    plot = linegraph.plot(plot_data, plot_params)
    return plot
python
{ "resource": "" }
q28374
TsTvSummaryMixin.parse_tstv_summary
train
def parse_tstv_summary(self):
    """ Create the HTML for the TsTv summary plot. """
    self.vcftools_tstv_summary = dict()
    for f in self.find_log_files('vcftools/tstv_summary', filehandles=True):
        d = {}
        for line in f['f'].readlines()[1:]:  # don't add the header line (first row)
            key = line.split()[0]       # taking the first column (MODEL) as key
            val = int(line.split()[1])  # taking the second column (COUNT) as value
            d[key] = val
        self.vcftools_tstv_summary[f['s_name']] = d

    # Filter out ignored sample names
    self.vcftools_tstv_summary = self.ignore_samples(self.vcftools_tstv_summary)

    if len(self.vcftools_tstv_summary) == 0:
        return 0

    # Specifying the categories of the bargraph
    keys = ['AC', 'AG', 'AT', 'CG', 'CT', 'GT', 'Ts', 'Tv']

    pconfig = {
        'id': 'vcftools_tstv_summary',
        'title': 'VCFTools: TsTv Summary',
        'ylab': 'Counts',
    }

    self.add_section(
        name='TsTv Summary',
        anchor='vcftools-tstv-summary',
        description="Plot of `TSTV-SUMMARY` - count of different types of transition and transversion SNPs.",
        plot=bargraph.plot(self.vcftools_tstv_summary, keys, pconfig)
    )

    return len(self.vcftools_tstv_summary)
python
{ "resource": "" }
q28375
FindPeaksReportMixin.parse_homer_findpeaks
train
def parse_homer_findpeaks(self):
    """ Find HOMER findpeaks logs and parse their data """
    self.homer_findpeaks = dict()
    for f in self.find_log_files('homer/findpeaks', filehandles=True):
        self.parse_findPeaks(f)

    # Filter to strip out ignored sample names
    self.homer_findpeaks = self.ignore_samples(self.homer_findpeaks)

    if len(self.homer_findpeaks) > 0:
        # Write parsed report data to a file
        self.write_data_file(self.homer_findpeaks, 'multiqc_homer_findpeaks')

        # General Stats Table
        stats_headers = OrderedDict()
        stats_headers['approximate_ip_efficiency'] = {
            'title': '% Efficiency',
            'description': 'Approximate IP efficiency',
            'min': 0,
            'max': 100,
            'suffix': '%',
            'scale': 'RdYlGn'
        }
        stats_headers['total_peaks'] = {
            'title': 'Total Peaks',
            'min': 0,
            'format': '{:,.0f}',
            'scale': 'GnBu'
        }
        stats_headers['expected_tags_per_peak'] = {
            'title': 'Tags/Peak',
            'description': 'Expected tags per peak',
            'min': 0,
            'format': '{:,.0f}',
            'scale': 'PuRd'
        }
        self.general_stats_addcols(self.homer_findpeaks, stats_headers, 'HOMER findpeaks')

    return len(self.homer_findpeaks)
python
{ "resource": "" }
q28376
FindPeaksReportMixin.parse_findPeaks
train
def parse_findPeaks(self, f):
    """ Parse HOMER findPeaks file headers. """
    parsed_data = dict()
    s_name = f['s_name']
    for l in f['f']:
        # Start of data
        if l.strip() and not l.strip().startswith('#'):
            break
        # Automatically parse header lines by the = symbol
        s = l[2:].split('=')
        if len(s) > 1:
            k = s[0].strip().replace(' ', '_').lower()
            v = s[1].strip().replace('%', '')
            try:
                parsed_data[k] = float(v)
            except ValueError:
                parsed_data[k] = v
            if k == 'tag_directory':
                s_name = self.clean_s_name(os.path.basename(v), os.path.dirname(v))
    if len(parsed_data) > 0:
        if s_name in self.homer_findpeaks:
            log.debug("Duplicate sample name found in {}! Overwriting: {}".format(f['fn'], s_name))
        self.add_data_source(f, s_name, section='findPeaks')
        self.homer_findpeaks[s_name] = parsed_data
python
{ "resource": "" }
q28377
move_tmp_log
train
def move_tmp_log(logger):
    """ Move the temporary log file to the MultiQC data directory if it exists. """
    try:
        # https://stackoverflow.com/questions/15435652/python-does-not-release-filehandles-to-logfile
        logging.shutdown()
        shutil.move(log_tmp_fn, os.path.join(config.data_dir, 'multiqc.log'))
        util_functions.robust_rmtree(log_tmp_dir)
    except (AttributeError, TypeError, IOError):
        pass
python
{ "resource": "" }
q28378
get_log_stream
train
def get_log_stream(logger):
    """ Returns a stream to the root log file.
    If there is no logfile, return the stderr log stream.

    Returns:
        A stream to the root log file or stderr stream.
    """
    file_stream = None
    log_stream = None
    for handler in logger.handlers:
        if isinstance(handler, logging.FileHandler):
            file_stream = handler.stream
        else:
            log_stream = handler.stream

    if file_stream:
        return file_stream
    return log_stream
python
{ "resource": "" }
q28379
MultiqcModule.parse_jellyfish_data
train
def parse_jellyfish_data(self, f):
    """ Go through the hist file and memorise it """
    histogram = {}
    occurrence = 0
    for line in f['f']:
        line = line.rstrip('\n')
        occurrence = int(line.split(" ")[0])
        count = int(line.split(" ")[1])
        histogram[occurrence] = occurrence * count
    # Delete the last occurrence, as it is the sum of all k-mers occurring more often than it
    del histogram[occurrence]
    # Sanity check
    self.max_key = max(histogram, key=histogram.get)
    self.jellyfish_max_x = max(self.jellyfish_max_x, self.max_key)
    if len(histogram) > 0:
        if f['s_name'] in self.jellyfish_data:
            log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
        self.add_data_source(f)
        self.jellyfish_data[f['s_name']] = histogram
python
{ "resource": "" }
q28380
MultiqcModule.frequencies_plot
train
def frequencies_plot(self, xmin=0, xmax=200):
    """ Generate the qualities plot """
    helptext = '''
    A possible way to assess the complexity of a library even in absence of
    a reference sequence is to look at the kmer profile of the reads.
    The idea is to count all the kmers (_i.e._, sequences of length `k`) that occur
    in the reads. In this way it is possible to know how many kmers occur
    `1,2,.., N` times and represent this as a plot.
    This plot tells us, for each x, how many k-mers (y-axis) are present in the
    dataset in exactly x copies.

    In an ideal world (no errors in sequencing, no bias, no repeated regions)
    this plot should be as close as possible to a Gaussian distribution.
    In reality we will always see a peak for `x=1` (_i.e._, the errors)
    and another peak close to the expected coverage. If the genome is highly
    heterozygous a second peak at half of the coverage can be expected.'''

    pconfig = {
        'id': 'Jellyfish_kmer_plot',
        'title': 'Jellyfish: K-mer plot',
        'ylab': 'Counts',
        'xlab': 'k-mer frequency',
        'xDecimals': False,
        'xmin': xmin,
        'xmax': xmax
    }

    self.add_section(
        anchor='jellyfish_kmer_plot',
        description='The K-mer plot lets you estimate library complexity and coverage from k-mer content.',
        helptext=helptext,
        plot=linegraph.plot(self.jellyfish_data, pconfig)
    )
python
{ "resource": "" }
q28381
parse_reports
train
def parse_reports(self):
    """ Find RSeQC infer_experiment reports and parse their data """

    # Set up vars
    self.infer_exp = dict()
    regexes = {
        'pe_sense': r"\"1\+\+,1--,2\+-,2-\+\": (\d\.\d+)",
        'pe_antisense': r"\"1\+-,1-\+,2\+\+,2--\": (\d\.\d+)",
        'se_sense': r"\"\+\+,--\": (\d\.\d+)",
        'se_antisense': r"\"\+-,-\+\": (\d\.\d+)",
        'failed': r"Fraction of reads failed to determine: (\d\.\d+)"
    }

    # Go through files and parse data using regexes
    for f in self.find_log_files('rseqc/infer_experiment'):
        d = dict()
        for k, r in regexes.items():
            r_search = re.search(r, f['f'], re.MULTILINE)
            if r_search:
                d[k] = float(r_search.group(1))
        if len(d) > 0:
            if f['s_name'] in self.infer_exp:
                log.debug("Duplicate sample name found! Overwriting: {}".format(f['s_name']))
            self.add_data_source(f, section='infer_experiment')
            self.infer_exp[f['s_name']] = d

    # Filter to strip out ignored sample names
    self.infer_exp = self.ignore_samples(self.infer_exp)

    if len(self.infer_exp) > 0:

        # Write to file
        self.write_data_file(self.infer_exp, 'multiqc_rseqc_infer_experiment')

        # Merge PE and SE for plot
        pdata = dict()
        for s_name, vals in self.infer_exp.items():
            pdata[s_name] = dict()
            for k, v in vals.items():
                v *= 100.0  # Multiply to get percentage
                if k[:2] == 'pe' or k[:2] == 'se':
                    k = k[3:]
                pdata[s_name][k] = v + pdata[s_name].get(k, 0)

        # Plot bar graph of groups
        keys = OrderedDict()
        keys['sense'] = {'name': "Sense"}
        keys['antisense'] = {'name': "Antisense"}
        keys['failed'] = {'name': "Undetermined"}

        # Config for the plot
        pconfig = {
            'id': 'rseqc_infer_experiment_plot',
            'title': 'RSeQC: Infer experiment',
            'ylab': '% Tags',
            'ymin': 0,
            'ymax': 100,
            'tt_percentages': False,
            'ylab_format': '{value}%',
            'cpswitch': False
        }

        self.add_section(
            name='Infer experiment',
            anchor='rseqc-infer_experiment',
            description='<a href="http://rseqc.sourceforge.net/#infer-experiment-py" target="_blank">Infer experiment</a>'
                        " counts the percentage of reads and read pairs that match the strandedness of overlapping transcripts."
                        " It can be used to infer whether RNA-seq library preps are stranded (sense or antisense).",
            plot=bargraph.plot(pdata, keys, pconfig)
        )

    # Return number of samples found
    return len(self.infer_exp)
python
{ "resource": "" }
q28382
MultiqcModule.parse_leehom_logs
train
def parse_leehom_logs(self, f):
    """ Go through log file looking for leehom output """
    regexes = {
        'total': r"Total reads[\s\:]+(\d+)",
        'merged_trimming': r"Merged \(trimming\)\s+(\d+)",
        'merged_overlap': r"Merged \(overlap\)\s+(\d+)",
        'kept': r"Kept PE/SR\s+(\d+)",
        'trimmed': r"Trimmed SR\s+(\d+)",
        'adapter_dimers_chimeras': r"Adapter dimers/chimeras\s+(\d+)",
        'failed_key': r"Failed Key\s+(\d+)"
    }
    parsed_data = dict()
    for l in f['f']:
        # Search regexes for overview stats
        for k, r in regexes.items():
            match = re.search(r, l)
            if match:
                parsed_data[k] = int(match.group(1))
    return parsed_data
python
{ "resource": "" }
q28383
MultiqcModule.leehom_general_stats_table
train
def leehom_general_stats_table(self):
    """ Take the parsed stats from the leeHom report and add it to the
    basic stats table at the top of the report """
    headers = {}
    headers['merged_trimming'] = {
        'title': '{} Merged (Trimming)'.format(config.read_count_prefix),
        'description': 'Merged clusters from trimming ({})'.format(config.read_count_desc),
        'min': 0,
        'scale': 'PuRd',
        'modify': lambda x: x * config.read_count_multiplier,
        'shared_key': 'read_count'
    }
    headers['merged_overlap'] = {
        'title': '{} Merged (Overlap)'.format(config.read_count_prefix),
        'description': 'Merged clusters from overlapping reads ({})'.format(config.read_count_desc),
        'min': 0,
        'scale': 'PuRd',
        'modify': lambda x: x * config.read_count_multiplier,
        'shared_key': 'read_count'
    }
    self.general_stats_addcols(self.leehom_data, headers)
python
{ "resource": "" }
q28384
MultiqcModule.dedup_general_stats_table
train
def dedup_general_stats_table(self):
    """ Take the parsed stats from the DeDup report and add it to the
    basic stats table at the top of the report """
    headers = OrderedDict()
    headers['duplication_rate'] = {
        'title': 'Duplication Rate',
        'description': 'Percentage of reads categorised as a technical duplicate',
        'min': 0,
        'max': 100,
        'suffix': '%',
        'scale': 'OrRd',
        'format': '{:,.0f}',
        'modify': lambda x: x * 100.0
    }
    self.general_stats_addcols(self.dedup_data, headers)
python
{ "resource": "" }
q28385
MultiqcModule.macs_filtered_reads_plot
train
def macs_filtered_reads_plot(self):
    """ Plot of filtered reads for control and treatment samples """
    data = dict()
    req_cats = ['control_fragments_total', 'control_fragments_after_filtering',
                'treatment_fragments_total', 'treatment_fragments_after_filtering']
    for s_name, d in self.macs_data.items():
        if all([c in d for c in req_cats]):
            data['{}: Control'.format(s_name)] = dict()
            data['{}: Treatment'.format(s_name)] = dict()
            data['{}: Control'.format(s_name)]['fragments_filtered'] = d['control_fragments_total'] - d['control_fragments_after_filtering']
            data['{}: Control'.format(s_name)]['fragments_not_filtered'] = d['control_fragments_after_filtering']
            data['{}: Treatment'.format(s_name)]['fragments_filtered'] = d['treatment_fragments_total'] - d['treatment_fragments_after_filtering']
            data['{}: Treatment'.format(s_name)]['fragments_not_filtered'] = d['treatment_fragments_after_filtering']

    # Check that we have something to plot
    if len(data) == 0:
        return

    # Specify the order of the different possible categories
    keys = OrderedDict()
    keys['fragments_not_filtered'] = {'color': '#437BB1', 'name': 'Remaining fragments'}
    keys['fragments_filtered'] = {'color': '#B1084C', 'name': 'Filtered fragments'}

    # Config for the plot
    pconfig = {
        'id': 'macs2_filtered',
        'title': 'MACS2: Filtered Fragments',
        'ylab': '# Fragments',
        'cpswitch_counts_label': 'Number of Fragments',
        'hide_zero_cats': False
    }

    self.add_section(
        plot=bargraph.plot(data, keys, pconfig)
    )
python
{ "resource": "" }
q28386
MultiqcModule.split_log
train
def split_log(logf):
    """split concat log into individual samples"""
    flashpatt = re.compile(
        r'\[FLASH\] Fast Length Adjustment of SHort reads\n(.+?)\[FLASH\] FLASH',
        flags=re.DOTALL)
    return flashpatt.findall(logf)
python
{ "resource": "" }
q28387
MultiqcModule.get_field
train
def get_field(field, slog, fl=False):
    """Parse sample log for a field.

    Set fl=True to return a float; otherwise, returns an int.
    """
    field += r'\:\s+([\d\.]+)'
    match = re.search(field, slog)
    if match:
        if fl:
            return float(match.group(1))
        return int(match.group(1))
    return 0
python
{ "resource": "" }
q28388
MultiqcModule.clean_pe_name
train
def clean_pe_name(self, nlog, root):
    """additional name cleaning for paired end data"""
    use_output_name = getattr(config, 'flash', {}).get('use_output_name', False)
    if use_output_name:
        name = re.search(r'Output files\:\n\[FLASH\]\s+(.+?)\n', nlog)
    else:
        name = re.search(r'Input files\:\n\[FLASH\]\s+(.+?)\n', nlog)
    if not name:
        return None
    name = name.group(1)
    name = self.clean_s_name(name, root)
    return name
python
{ "resource": "" }
q28389
MultiqcModule.parse_flash_log
train
def parse_flash_log(self, logf):
    """parse flash logs"""
    data = OrderedDict()
    samplelogs = self.split_log(logf['f'])
    for slog in samplelogs:
        try:
            sample = dict()
            ## Sample name ##
            s_name = self.clean_pe_name(slog, logf['root'])
            if s_name is None:
                continue
            sample['s_name'] = s_name

            ## Log attributes ##
            sample['totalpairs'] = self.get_field('Total pairs', slog)
            sample['discardpairs'] = self.get_field('Discarded pairs', slog)
            sample['percdiscard'] = self.get_field('Percent Discarded', slog, fl=True)
            sample['combopairs'] = self.get_field('Combined pairs', slog)
            sample['inniepairs'] = self.get_field('Innie pairs', slog)
            sample['outiepairs'] = self.get_field('Outie pairs', slog)
            sample['uncombopairs'] = self.get_field('Uncombined pairs', slog)
            sample['perccombo'] = self.get_field('Percent combined', slog, fl=True)

            data[s_name] = sample
        except Exception as err:
            log.warning("Error parsing record in {}. {}".format(logf['fn'], err))
            log.debug(traceback.format_exc())
            continue
    return data
python
{ "resource": "" }
q28390
MultiqcModule.stats_table
train
def stats_table(self, data):
    """Add percent combined to general stats table"""
    headers = OrderedDict()
    headers['combopairs'] = {
        'title': 'Combined pairs',
        'description': 'Num read pairs combined',
        'shared_key': 'read_count',
        'hidden': True,
        'scale': False
    }
    headers['perccombo'] = {
        'title': '% Combined',
        'description': '% read pairs combined',
        'max': 100,
        'min': 0,
        'suffix': '%',
        'scale': 'PiYG'
    }
    self.general_stats_addcols(data, headers)
python
{ "resource": "" }
q28391
MultiqcModule.summary_plot
train
def summary_plot(data):
    """Barplot of combined pairs"""
    cats = OrderedDict()
    cats['inniepairs'] = {'name': 'Combined innie pairs', 'color': '#191970'}
    cats['outiepairs'] = {'name': 'Combined outie pairs', 'color': '#00A08A'}
    cats['uncombopairs'] = {'name': 'Uncombined pairs', 'color': '#cd1076'}
    cats['discardpairs'] = {'name': 'Discarded pairs', 'color': '#ffd700'}
    splotconfig = {
        'id': 'flash_combo_stats_plot',
        'title': 'FLASh: Read combination statistics',
        'ylab': 'Number of read pairs',
        'hide_zero_cats': False
    }
    return bargraph.plot(data, cats, splotconfig)
python
{ "resource": "" }
q28392
MultiqcModule.parse_hist_files
train
def parse_hist_files(histf):
    """parse histogram files"""
    nameddata = dict()
    data = dict()
    try:
        for l in histf['f'].splitlines():
            s = l.split()
            if s:
                if len(s) != 2:
                    raise RuntimeError("invalid format: " + str(len(s)) + " column(s) found in row. must be exactly 2.")
                data[int(s[0])] = int(s[1])
    except Exception as err:
        log.warning("Error parsing %s. %s", histf['fn'], err)
        log.debug(traceback.format_exc())
    else:
        if data:
            nameddata[histf['s_name']] = data
        else:
            log.debug("%s is empty.", histf['fn'])
    return nameddata
python
{ "resource": "" }
q28393
MultiqcModule.get_colors
train
def get_colors(n):
    """get colors for freqpoly graph"""
    cb_palette = ["#E69F00", "#56B4E9", "#009E73", "#F0E442", "#0072B2",
                  "#D55E00", "#CC79A7", "#001F3F", "#0074D9", "#7FDBFF",
                  "#39CCCC", "#3D9970", "#2ECC40", "#01FF70", "#FFDC00",
                  "#FF851B", "#FF4136", "#F012BE", "#B10DC9", "#85144B",
                  "#AAAAAA", "#000000"]
    whole = int(n / 22)
    extra = n % 22
    cols = cb_palette * whole
    if extra >= 0:
        cols.extend(cb_palette[0:extra])
    return cols
python
{ "resource": "" }
q28394
MultiqcModule.freqpoly_plot
train
def freqpoly_plot(data):
    """make freqpoly plot of merged read lengths"""
    rel_data = OrderedDict()
    for key, val in data.items():
        tot = sum(val.values(), 0)
        rel_data[key] = {k: v / tot for k, v in val.items()}
    fplotconfig = {
        'data_labels': [
            {'name': 'Absolute', 'ylab': 'Frequency', 'xlab': 'Merged Read Length'},
            {'name': 'Relative', 'ylab': 'Relative Frequency', 'xlab': 'Merged Read Length'}
        ],
        'id': 'flash_freqpoly_plot',
        'title': 'FLASh: Frequency of merged read lengths',
        'colors': dict(zip(data.keys(), MultiqcModule.get_colors(len(data))))
    }
    return linegraph.plot([data, rel_data], fplotconfig)
python
{ "resource": "" }
q28395
MultiqcModule.hist_results
train
def hist_results(self):
    """process flash numeric histograms"""
    self.hist_data = OrderedDict()
    for histfile in self.find_log_files('flash/hist'):
        self.hist_data.update(self.parse_hist_files(histfile))

    # ignore sample names
    self.hist_data = self.ignore_samples(self.hist_data)

    try:
        if not self.hist_data:
            raise UserWarning
        log.info("Found %d histogram reports", len(self.hist_data))
        self.add_section(
            name='Frequency polygons of merged read lengths',
            anchor='flash-histogram',
            description='This plot is made from the numerical histograms output by FLASh.',
            plot=self.freqpoly_plot(self.hist_data))
    except UserWarning:
        pass
    except Exception as err:
        log.error(err)
        log.debug(traceback.format_exc())
    return len(self.hist_data)
python
{ "resource": "" }
q28396
generate_dummy_graph
train
def generate_dummy_graph(network):
    """Generate a dummy graph to feed to the FIAS libraries.

    It adds the "pos" attribute and removes the 380 kV duplicate buses
    when the buses have been split, so that all load and generation is
    attached to the 220 kV bus.
    """
    graph = pypsa.descriptors.OrderedGraph()
    graph.add_nodes_from([bus for bus in network.buses.index if bus not in buses_to_split])

    # Add positions to the graph for the Voronoi cell computation
    for node in graph.nodes():
        graph.node[node]["pos"] = np.array(network.buses.loc[node, ["x", "y"]], dtype=float)

    return graph
python
{ "resource": "" }
q28397
voronoi_partition
train
def voronoi_partition(G, outline):
    """
    For 2D-embedded graph `G`, within the boundary given by the shapely polygon
    `outline`, returns `G` with the Voronoi cell region as an additional node
    attribute.
    """
    # The following line from vresutils.graph caused a bug:
    # G = polygon_subgraph(G, outline, copy=False)
    points = list(vresutils.graph.get_node_attributes(G, 'pos').values())
    regions = vresutils.graph.voronoi_partition_pts(points, outline, no_multipolygons=True)
    nx.set_node_attributes(G, 'region', dict(zip(G.nodes(), regions)))
    return G
python
{ "resource": "" }
q28398
area_from_lon_lat_poly
train
def area_from_lon_lat_poly(geometry):
    """
    Compute the area in km^2 of a shapely geometry, whose points are in
    longitude and latitude.

    Parameters
    ----------
    geometry: shapely geometry
        Points must be in longitude and latitude.

    Returns
    -------
    area: float
        Area in km^2.
    """
    import pyproj
    from shapely.ops import transform
    from functools import partial

    project = partial(
        pyproj.transform,
        pyproj.Proj(init='epsg:4326'),  # Source: lon-lat
        pyproj.Proj(proj='aea'))        # Target: Albers Equal Area Conical, https://en.wikipedia.org/wiki/Albers_projection

    new_geometry = transform(project, geometry)

    # Default area is in m^2
    return new_geometry.area / 1e6
python
{ "resource": "" }
q28399
define_sub_network_cycle_constraints
train
def define_sub_network_cycle_constraints(subnetwork, snapshots, passive_branch_p, attribute):
    """ Constructs cycle_constraints for a particular subnetwork """
    sub_network_cycle_constraints = {}
    sub_network_cycle_index = []

    matrix = subnetwork.C.tocsc()
    branches = subnetwork.branches()

    for col_j in range(matrix.shape[1]):
        cycle_is = matrix.getcol(col_j).nonzero()[0]
        if len(cycle_is) == 0:
            continue

        sub_network_cycle_index.append((subnetwork.name, col_j))

        branch_idx_attributes = []
        for cycle_i in cycle_is:
            branch_idx = branches.index[cycle_i]
            attribute_value = 1e5 * branches.at[branch_idx, attribute] * subnetwork.C[cycle_i, col_j]
            branch_idx_attributes.append((branch_idx, attribute_value))

        for snapshot in snapshots:
            expression_list = [(attribute_value, passive_branch_p[branch_idx[0], branch_idx[1], snapshot])
                               for (branch_idx, attribute_value) in branch_idx_attributes]
            lhs = LExpression(expression_list)
            sub_network_cycle_constraints[subnetwork.name, col_j, snapshot] = LConstraint(lhs, "==", LExpression())

    return (sub_network_cycle_index, sub_network_cycle_constraints)
python
{ "resource": "" }