Columns: sentence1 (string, lengths 52 to 3.87M), sentence2 (string, lengths 1 to 47.2k), label (1 class: entailment)
def overlaps(self, other):
    """ check for overlap with the other interval """
    if self.chrom != other.chrom:
        return False
    if self.start >= other.end:
        return False
    if other.start >= self.end:
        return False
    return True
check for overlap with the other interval
entailment
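A quick way to check the half-open overlap semantics used by `overlaps` above: coordinates are treated as [start, end), so an interval whose start equals another's end is adjacent, not overlapping. A minimal standalone sketch (`SimpleInterval` is a hypothetical stand-in, not part of the library):

from collections import namedtuple

# Hypothetical stand-in exposing the three attributes overlaps() reads.
SimpleInterval = namedtuple("SimpleInterval", "chrom start end")

def overlaps(a, b):
    # same three tests as the method above, condensed
    return a.chrom == b.chrom and a.start < b.end and b.start < a.end

a = SimpleInterval("chr1", 100, 200)
assert overlaps(a, SimpleInterval("chr1", 199, 250))       # shares base 199
assert not overlaps(a, SimpleInterval("chr1", 200, 250))   # touching, not overlapping
assert not overlaps(a, SimpleInterval("chr2", 100, 200))   # different chrom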
def is_upstream_of(self, other):
    """ check if this is upstream of the `other` interval taking the
    strand of the other interval into account """
    if self.chrom != other.chrom:
        return None
    if getattr(other, "strand", None) == "+":
        return self.end <= other.start
    # other feature is on - strand, so this must have higher start
    return self.start >= other.end
check if this is upstream of the `other` interval taking the strand of the other interval into account
entailment
def distance(self, other_or_start=None, end=None, features=False):
    """
    check the distance between this and another interval
    Parameters
    ----------
    other_or_start : Interval or int
        either an integer or an Interval with a start attribute
        indicating the start of the interval
    end : int
        if `other_or_start` is an integer, this must be an integer
        indicating the end of the interval
    features : bool
        if True, the features, such as CDS, intron, etc. that this
        feature overlaps are returned.
    """
    if end is None:
        assert other_or_start.chrom == self.chrom
    other_start, other_end = get_start_end(other_or_start, end)

    if other_start > self.end:
        return other_start - self.end
    if self.start > other_end:
        return self.start - other_end
    return 0
check the distance between this and another interval Parameters ---------- other_or_start : Interval or int either an integer or an Interval with a start attribute indicating the start of the interval end : int if `other_or_start` is an integer, this must be an integer indicating the end of the interval features : bool if True, the features, such as CDS, intron, etc. that this feature overlaps are returned.
entailment
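Worked through by hand, the gap arithmetic in `distance` returns the number of bases between the intervals, or 0 when they overlap. A self-contained sketch with plain integers (bypassing the library's `get_start_end` helper):

def gap(self_start, self_end, other_start, other_end):
    # mirrors the three branches in distance() above
    if other_start > self_end:
        return other_start - self_end
    if self_start > other_end:
        return self_start - other_end
    return 0

assert gap(100, 200, 210, 220) == 10   # other is 10 bases to the right
assert gap(100, 200, 50, 90) == 10     # other is 10 bases to the left
assert gap(100, 200, 150, 250) == 0    # overlapping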
def exons(self):
    """ return a list of exons [(start, stop)] for this object if appropriate """
    if not self.is_gene_pred:
        return []
    if hasattr(self, "exonStarts"):
        # drop the trailing comma
        try:
            starts = (long(s) for s in self.exonStarts[:-1].split(","))
            ends = (long(s) for s in self.exonEnds[:-1].split(","))
        except TypeError:
            starts = (long(s) for s in self.exonStarts[:-1].decode().split(","))
            ends = (long(s) for s in self.exonEnds[:-1].decode().split(","))
    else:  # it is bed12
        starts = [self.start + long(s)
                  for s in self.chromStarts[:-1].decode().split(",")]
        ends = [starts[i] + long(size)
                for i, size in enumerate(self.blockSizes[:-1].decode().split(","))]
    return zip(starts, ends)
return a list of exons [(start, stop)] for this object if appropriate
entailment
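The `[:-1]` slicing in `exons` exists because UCSC stores exon coordinates as comma-terminated strings. A small illustration with made-up coordinates:

# UCSC genePred fields end with a trailing comma, e.g. "1000,2000,3000,"
exonStarts = "1000,2000,3000,"
exonEnds = "1100,2100,3100,"
starts = [int(s) for s in exonStarts[:-1].split(",")]
ends = [int(s) for s in exonEnds[:-1].split(",")]
print(list(zip(starts, ends)))   # [(1000, 1100), (2000, 2100), (3000, 3100)]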
def gene_features(self):
    """
    return a list of features for the gene features of this object.
    This would include exons, introns, utrs, etc.
    """
    nm, strand = self.gene_name, self.strand
    feats = [(self.chrom, self.start, self.end, nm, strand, 'gene')]
    for feat in ('introns', 'exons', 'utr5', 'utr3', 'cdss'):
        fname = feat[:-1] if feat[-1] == 's' else feat
        res = getattr(self, feat)
        if res is None or all(r is None for r in res):
            continue
        if not isinstance(res, list):
            res = [res]
        feats.extend((self.chrom, s, e, nm, strand, fname) for s, e in res)

    tss = self.tss(down=1)
    if tss is not None:
        feats.append((self.chrom, tss[0], tss[1], nm, strand, 'tss'))
        prom = self.promoter()
        feats.append((self.chrom, prom[0], prom[1], nm, strand, 'promoter'))

    return sorted(feats, key=itemgetter(1))
return a list of features for the gene features of this object. This would include exons, introns, utrs, etc.
entailment
def tss(self, up=0, down=0):
    """
    Return a start, end tuple of positions around the
    transcription-start site

    Parameters
    ----------
    up : int
        if greater than 0, the strand is used to add this many
        upstream bases in the appropriate direction
    down : int
        if greater than 0, the strand is used to add this many
        downstream bases into the gene.
    """
    if not self.is_gene_pred:
        return None
    tss = self.txEnd if self.strand == '-' else self.txStart
    start, end = tss, tss
    if self.strand == '+':
        start -= up
        end += down
    else:
        start += up
        end -= down
        start, end = end, start
    return max(0, start), max(end, start, 0)
Return a start, end tuple of positions around the transcription-start site Parameters ---------- up : int if greater than 0, the strand is used to add this many upstream bases in the appropriate direction down : int if greater than 0, the strand is used to add this many downstream bases into the gene.
entailment
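Tracing `tss` for both strands shows why the swap on the minus strand is needed: upstream means higher coordinates there. A standalone version of the same arithmetic:

def tss(txStart, txEnd, strand, up=0, down=0):
    # same logic as the method above, with attributes as parameters
    t = txEnd if strand == '-' else txStart
    start, end = t, t
    if strand == '+':
        start -= up
        end += down
    else:
        start += up
        end -= down
        start, end = end, start   # keep start <= end
    return max(0, start), max(end, start, 0)

assert tss(1000, 2000, '+', up=100, down=50) == (900, 1050)
assert tss(1000, 2000, '-', up=100, down=50) == (1950, 2100)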
def promoter(self, up=2000, down=0):
    """
    Return a start, end tuple of positions for the promoter region of
    this gene

    Parameters
    ----------
    up : int
        the distance upstream that is considered the promoter
    down : int
        the strand is used to add this many downstream bases
        into the gene.
    """
    if not self.is_gene_pred:
        return None
    return self.tss(up=up, down=down)
Return a start, end tuple of positions for the promoter region of this gene Parameters ---------- up : int the distance upstream that is considered the promoter down : int the strand is used to add this many downstream bases into the gene.
entailment
def coding_exons(self):
    """
    includes the entire exon as long as any of it is
    > cdsStart and < cdsEnd
    """
    # drop the trailing comma
    starts = (long(s) for s in self.exonStarts[:-1].split(","))
    ends = (long(s) for s in self.exonEnds[:-1].split(","))
    return [(s, e) for s, e in zip(starts, ends)
            if e > self.cdsStart and s < self.cdsEnd]
includes the entire exon as long as any of it is > cdsStart and < cdsEnd
entailment
def cds(self):
    """just the parts of the exons that are translated"""
    ces = self.coding_exons
    if len(ces) < 1:
        return ces
    ces[0] = (self.cdsStart, ces[0][1])
    ces[-1] = (ces[-1][0], self.cdsEnd)
    assert all(s < e for s, e in ces)
    return ces
just the parts of the exons that are translated
entailment
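Only the first and last coding exons are trimmed by `cds`; interior exons pass through untouched. For example:

# coding exons spanning the CDS boundaries
ces = [(100, 200), (300, 400), (500, 600)]
cdsStart, cdsEnd = 150, 550
ces[0] = (cdsStart, ces[0][1])
ces[-1] = (ces[-1][0], cdsEnd)
print(ces)   # [(150, 200), (300, 400), (500, 550)]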
def is_downstream_of(self, other):
    """
    return a boolean indicating whether this feature is downstream of
    `other` taking the strand of other into account
    """
    if self.chrom != other.chrom:
        return None
    if getattr(other, "strand", None) == "-":
        # other feature is on - strand, so this must have higher start
        return self.end <= other.start
    return self.start >= other.end
return a boolean indicating whether this feature is downstream of `other` taking the strand of other into account
entailment
def features(self, other_start, other_end):
    """
    return e.g. "intron;exon" if the other_start, end overlap
    introns and exons
    """
    # completely encases gene.
    if other_start <= self.start and other_end >= self.end:
        return ['gene' if self.cdsStart != self.cdsEnd else 'nc_gene']
    other = Interval(other_start, other_end)
    ovls = []
    tx = 'txEnd' if self.strand == "-" else 'txStart'
    if hasattr(self, tx) and other_start <= getattr(self, tx) <= other_end \
            and self.cdsStart != self.cdsEnd:
        ovls = ["TSS"]
    for ftype in ('introns', 'exons', 'utr5', 'utr3', 'cdss'):
        feats = getattr(self, ftype)
        if not isinstance(feats, list):
            feats = [feats]
        if any(Interval(f[0], f[1]).overlaps(other) for f in feats):
            ovls.append(ftype[:-1] if ftype[-1] == 's' else ftype)
    if 'cds' in ovls:
        ovls = [ft for ft in ovls if ft != 'exon']
    if self.cdsStart == self.cdsEnd:
        ovls = ['nc_' + ft for ft in ovls]
    return ovls
return e.g. "intron;exon" if the other_start, end overlap introns and exons
entailment
def upstream(self, distance):
    """
    return the (start, end) of the region before the geneStart
    """
    if getattr(self, "strand", None) == "+":
        e = self.start
        s = e - distance
    else:
        s = self.end
        e = s + distance
    return self._xstream(s, e)
return the (start, end) of the region before the geneStart
entailment
def utr5(self):
    """
    return the 5' UTR if appropriate
    """
    if not self.is_coding or len(self.exons) < 2:
        return (None, None)
    if self.strand == "+":
        s, e = (self.txStart, self.cdsStart)
    else:
        s, e = (self.cdsEnd, self.txEnd)
    if s == e:
        return (None, None)
    return s, e
return the 5' UTR if appropriate
entailment
def sequence(self, per_exon=False):
    """
    Return the sequence for this feature.
    if per-exon is True, return an array of exon sequences
    This sequence is never reverse complemented
    """
    db = self.db
    if not per_exon:
        start = self.txStart + 1
        return _sequence(db, self.chrom, start, self.txEnd)
    else:
        # TODO: use same strategy as cds_sequence to reduce # of requests.
        seqs = []
        for start, end in self.exons:
            seqs.append(_sequence(db, self.chrom, start + 1, end))
        return seqs
Return the sequence for this feature. if per-exon is True, return an array of exon sequences This sequence is never reverse complemented
entailment
def ncbi_blast(self, db="nr", megablast=True, sequence=None):
    """
    perform an NCBI blast against the sequence of this feature
    """
    import requests
    requests.defaults.max_retries = 4
    assert sequence in (None, "cds", "mrna")
    seq = self.sequence() if sequence is None else ("".join(
        self.cds_sequence if sequence == "cds" else self.mrna_sequence))

    r = requests.post('http://blast.ncbi.nlm.nih.gov/Blast.cgi',
                      timeout=20,
                      data=dict(
                          PROGRAM="blastn",
                          # EXPECT=2,
                          DESCRIPTIONS=100,
                          ALIGNMENTS=0,
                          FILTER="L",  # low complexity
                          CMD="Put",
                          MEGABLAST=True,
                          DATABASE=db,
                          QUERY=">%s\n%s" % (self.name, seq)))

    if not ("RID =" in r.text and "RTOE" in r.text):
        print("no results", file=sys.stderr)
        # end the generator; raising StopIteration inside a generator
        # is an error from Python 3.7 onwards
        return
    rid = r.text.split("RID = ")[1].split("\n")[0]

    import time
    time.sleep(4)
    print("checking...", file=sys.stderr)
    r = requests.post('http://blast.ncbi.nlm.nih.gov/Blast.cgi',
                      data=dict(RID=rid, format="Text",
                                DESCRIPTIONS=100,
                                DATABASE=db,
                                CMD="Get"))
    while "Status=WAITING" in r.text:
        print("checking...", file=sys.stderr)
        time.sleep(10)
        r = requests.post('http://blast.ncbi.nlm.nih.gov/Blast.cgi',
                          data=dict(RID=rid, format="Text", CMD="Get"))
    for rec in _ncbi_parse(r.text):
        yield rec
perform an NCBI blast against the sequence of this feature
entailment
def blat(self, db=None, sequence=None, seq_type="DNA"):
    """
    make a request to the genome-browser's BLAT interface
    sequence is one of None, "mrna", "cds"
    returns a list of features that are hits to this sequence.
    """
    from . blat_blast import blat, blat_all
    assert sequence in (None, "cds", "mrna")
    seq = self.sequence() if sequence is None else ("".join(
        self.cds_sequence if sequence == "cds" else self.mrna_sequence))
    if isinstance(db, (tuple, list)):
        return blat_all(seq, self.gene_name, db, seq_type)
    else:
        return blat(seq, self.gene_name, db or self.db, seq_type)
make a request to the genome-browser's BLAT interface sequence is one of None, "mrna", "cds" returns a list of features that are hits to this sequence.
entailment
def bed(self, *attrs, **kwargs):
    """
    return a bed formatted string of this feature
    """
    exclude = ("chrom", "start", "end", "txStart", "txEnd",
               "chromStart", "chromEnd")
    if self.is_gene_pred:
        return self.bed12(**kwargs)
    return "\t".join(map(str, (
        [self.chrom, self.start, self.end] +
        [getattr(self, attr) for attr in attrs if attr not in exclude]
    )))
return a bed formatted string of this feature
entailment
def bed12(self, score="0", rgb="."):
    """
    return a bed12 (http://genome.ucsc.edu/FAQ/FAQformat.html#format1)
    representation of this interval
    """
    if not self.is_gene_pred:
        raise CruzException("can't create bed12 from non genepred feature")
    exons = list(self.exons)
    # go from global start, stop, to relative start, length...
    sizes = ",".join([str(e[1] - e[0]) for e in exons]) + ","
    starts = ",".join([str(e[0] - self.txStart) for e in exons]) + ","
    name = self.name2 + "," + self.name if hasattr(self, "name2") \
        else self.name
    return "\t".join(map(str, (
        self.chrom, self.txStart, self.txEnd, name,
        score, self.strand, self.cdsStart, self.cdsEnd, rgb,
        len(exons), sizes, starts)))
return a bed12 (http://genome.ucsc.edu/FAQ/FAQformat.html#format1) representation of this interval
entailment
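The blockSizes/blockStarts encoding used by `bed12` converts absolute exon coordinates to sizes and offsets relative to txStart, each list comma-terminated as the BED spec requires:

txStart = 1000
exons = [(1000, 1100), (2000, 2150)]
sizes = ",".join(str(e - s) for s, e in exons) + ","
starts = ",".join(str(s - txStart) for s, e in exons) + ","
print(sizes)    # "100,150,"
print(starts)   # "0,1000,"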
def localize(self, *positions, **kwargs):
    """
    convert global coordinate(s) to local taking introns into account
    and cds/tx-Start depending on cdna=True kwarg
    """
    cdna = kwargs.get('cdna', False)
    # TODO: account for strand ?? add kwarg ??
    # if it's to the CDNA, then it's based on the cdsStart
    start, end = (self.cdsStart, self.cdsEnd) if cdna else \
        (self.start, self.end)
    introns = self.introns or None
    if cdna:
        if not self.is_coding:
            return ([None] * len(positions)) if len(positions) > 1 else None
        introns = self._introns(self.cds) or None

    if introns is None:
        local_ps = [p - start if (start <= p < end) else None
                    for p in positions]
        return local_ps[0] if len(positions) == 1 else local_ps

    introns = [(s - start, e - start) for s, e in introns]
    positions = [p - start for p in positions]
    # now both introns and positions are local starts based on cds/tx-Start
    local_ps = []
    l = end - start
    for original_p in positions:
        subtract = 0
        p = original_p
        print(p, l, file=sys.stderr)
        if p < 0 or p >= l:  # outside of transcript
            local_ps.append(None)
            continue
        for s, e in introns:
            # within intron
            if s <= p <= e:
                subtract = None
                break
            # otherwise, adjust for intron length.
            elif p >= e:
                subtract += (e - s)
        local_ps.append(p - subtract if subtract is not None else None)

    assert all(p is None or p >= 0 for p in local_ps), (local_ps)
    return local_ps[0] if len(positions) == 1 else local_ps
convert global coordinate(s) to local taking introns into account and cds/tx-Start depending on cdna=True kwarg
entailment
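The core of `localize` is: shift each position left by the total length of the introns before it, and drop positions that fall inside an intron or outside the transcript. A reduced sketch for a single position, with intron coordinates already made local:

introns = [(50, 80)]     # local coordinates, one 30-base intron
length = 200             # transcript length in local coordinates

def localize_one(p):
    if p < 0 or p >= length:
        return None                 # outside the transcript
    subtract = 0
    for s, e in introns:
        if s <= p <= e:
            return None             # falls inside an intron
        elif p >= e:
            subtract += (e - s)     # intron lies before p
    return p - subtract

assert localize_one(40) == 40      # before the intron
assert localize_one(60) is None    # inside the intron
assert localize_one(100) == 70     # shifted left by the 30-base intron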
def distance(self, other_or_start=None, end=None, features="unused",
             shore_dist=3000):
    """
    check the distance between this and another interval
    Parameters
    ----------
    other_or_start : Interval or int
        either an integer or an Interval with a start attribute
        indicating the start of the interval
    end : int
        if `other_or_start` is an integer, this must be an integer
        indicating the end of the interval
    features : bool
        if True, the features, such as CDS, intron, etc. that this
        feature overlaps are returned.
    """
    # leave features kwarg to match signature from Feature.distance
    if end is None:
        assert other_or_start.chrom == self.chrom
    other_start, other_end = get_start_end(other_or_start, end)

    dist = 0
    if other_start > self.end:
        dist = other_start - self.end
    elif self.start > other_end:
        dist = self.start - other_end
    assert dist >= 0

    if dist > 0:
        dist = (dist, "shore" if abs(dist) <= shore_dist else "")
    else:
        dist = (0, "island")
    return dist
check the distance between this and another interval Parameters ---------- other_or_start : Interval or int either an integer or an Interval with a start attribute indicating the start of the interval end : int if `other_or_start` is an integer, this must be an integer indicating the end of the interval features : bool if True, the features, such as CDS, intron, etc. that this feature overlaps are returned.
entailment
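The tuple returned by this variant classifies a position relative to a CpG island: overlap is an "island", within `shore_dist` bases is a "shore", and anything farther gets an empty tag. The classification step in isolation:

def classify(dist, shore_dist=3000):
    # dist is the non-negative gap computed above
    if dist > 0:
        return (dist, "shore" if dist <= shore_dist else "")
    return (0, "island")

assert classify(0) == (0, "island")
assert classify(1200) == (1200, "shore")
assert classify(5000) == (5000, "")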
def annotate(g, fname, tables, feature_strand=False, in_memory=False,
             header=None, out=sys.stdout, _chrom=None, parallel=False):
    """
    annotate bed file in fname with tables.
    distances are integers for distance and intron/exon/utr5 etc. for
    gene-pred tables.
    if the annotation features have a strand, the distance reported is
    negative if the annotation feature is upstream of the feature in
    question. if feature_strand is True, then the distance is negative
    if the query feature is upstream of the annotation feature.
    """
    close = False
    if isinstance(out, basestring):
        out = nopen(out, "w")
        close = True
    if parallel:
        import multiprocessing
        import signal
        p = multiprocessing.Pool(
            initializer=lambda: signal.signal(signal.SIGINT, signal.SIG_IGN))
        chroms = _split_chroms(fname)

        def write_result(fanno, written=[False]):
            for i, d in enumerate(reader(fanno, header="ordered")):
                if i == 0 and written[0] == False:
                    print >>out, "\t".join(d.keys())
                    written[0] = True
                print >>out, "\t".join(x if x else "NA" for x in d.values())
            os.unlink(fanno)
            os.unlink(fanno.replace(".anno", ""))

        for fchrom, (fout, fanno) in chroms:
            p.apply_async(annotate,
                          args=(g.db, fout.name, tables, feature_strand,
                                True, header, fanno, fchrom),
                          callback=write_result)
        p.close()
        p.join()
        return out.name

    if isinstance(g, basestring):
        from . import Genome
        g = Genome(g)
    if in_memory:
        from . intersecter import Intersecter
        intersecters = []  # 1 per table.
        for t in tables:
            q = getattr(g, t) if isinstance(t, basestring) else t
            if _chrom is not None:
                q = q.filter_by(chrom=_chrom)
            table_iter = q  # page_query(q, g.session)
            intersecters.append(Intersecter(table_iter))
    elif isinstance(fname, basestring) and os.path.exists(fname) \
            and sum(1 for _ in nopen(fname)) > 25000:
        print >>sys.stderr, \
            "annotating many intervals, may be faster using in_memory=True"

    if header is None:
        header = []
    extra_header = []
    for j, toks in enumerate(reader(fname, header=False)):
        if j == 0 and not header:
            if not (toks[1] + toks[2]).isdigit():
                header = toks
        if j == 0:
            for t in tables:
                annos = (getattr(g, t) if isinstance(t, basestring)
                         else t).first().anno_cols
                h = t if isinstance(t, basestring) else \
                    t._table.name if hasattr(t, "_table") else \
                    t.first()._table.name
                extra_header += ["%s_%s" % (h, a) for a in annos]
            if 0 != len(header):
                if not header[0].startswith("#"):
                    header[0] = "#" + header[0]
                print >>out, "\t".join(header + extra_header)
                if header == toks:
                    continue

        if not isinstance(toks, ABase):
            f = Feature()
            f.chrom = toks[0]
            f.txStart = int(toks[1])
            f.txEnd = int(toks[2])
            try:
                f.strand = toks[header.index('strand')]
            except ValueError:
                pass
        else:
            f = toks
            # for now, use the objects str to get the columns
            # might want to use getattr on the original cols
            toks = f.bed(*header).split("\t")

        sep = "^*^"
        for ti, tbl in enumerate(tables):
            if in_memory:
                objs = intersecters[ti].knearest(int(toks[1]), int(toks[2]),
                                                 chrom=toks[0], k=1)
            else:
                objs = g.knearest(tbl, toks[0], int(toks[1]), int(toks[2]),
                                  k=1)
            if len(objs) == 0:
                print >>out, "\t".join(toks + ["", "", ""])
                continue
            gp = hasattr(objs[0], "exonStarts")
            names = [o.gene_name for o in objs]
            if feature_strand:
                strands = [-1 if f.is_upstream_of(o) else 1 for o in objs]
            else:
                strands = [-1 if o.is_upstream_of(f) else 1 for o in objs]
            # dists can be a list of tuples where the 2nd item is something
            # like 'island' or 'shore'
            dists = [o.distance(f, features=gp) for o in objs]
            pure_dists = [d[0] if isinstance(d, (tuple, list)) else d
                          for d in dists]
            # convert to negative if the feature is upstream of the query
            for i, s in enumerate(strands):
                if s == 1:
                    continue
                if isinstance(pure_dists[i], basestring):
                    continue
                pure_dists[i] *= -1

            for i, (pd, d) in enumerate(zip(pure_dists, dists)):
                if isinstance(d, tuple):
                    if len(d) > 1:
                        dists[i] = "%s%s%s" % (pd, sep, sep.join(d[1:]))
                    else:
                        dists[i] = pd

            # keep unique name, dist combinations (occurs because of
            # transcripts)
            name_dists = set(["%s%s%s" % (n, sep, d)
                              for (n, d) in zip(names, dists)])
            name_dists = [nd.split(sep) for nd in name_dists]
            # just take the first gene name if they are all the same
            if len(set(nd[0] for nd in name_dists)) == 1:
                toks.append(name_dists[0][0])
            else:
                toks.append(";".join(nd[0] for nd in name_dists))
            # iterate over the feat type, dist cols
            for i in range(1, len(name_dists[0])):
                toks.append(";".join(nd[i] for nd in name_dists))
        print >>out, "\t".join(toks)

    if close:
        out.close()
    return out.name
annotate bed file in fname with tables. distances are integers for distance and intron/exon/utr5 etc. for gene-pred tables. if the annotation features have a strand, the distance reported is negative if the annotation feature is upstream of the feature in question. if feature_strand is True, then the distance is negative if the query feature is upstream of the annotation feature.
entailment
def entry_point():
    """
    External entry point which calls main() and
    if Stop is raised, calls sys.exit()
    """
    try:
        main("omego", items=[
            (InstallCommand.NAME, InstallCommand),
            (UpgradeCommand.NAME, UpgradeCommand),
            (ConvertCommand.NAME, ConvertCommand),
            (DownloadCommand.NAME, DownloadCommand),
            (DbCommand.NAME, DbCommand),
            (Version.NAME, Version)])
    except Stop, stop:
        if stop.rc != 0:
            print "ERROR:", stop
        else:
            print stop
        sys.exit(stop.rc)
External entry point which calls main() and if Stop is raised, calls sys.exit()
entailment
def open_url(url, httpuser=None, httppassword=None, method=None):
    """
    Open a URL using an opener that will simulate a browser user-agent
    url: The URL
    httpuser, httppassword: HTTP authentication credentials (either both
      or neither must be provided)
    method: The HTTP method

    Caller is responsible for calling close() on the returned object
    """
    if os.getenv('OMEGO_SSL_NO_VERIFY') == '1':
        # This needs to come first to override the default HTTPS handler
        log.debug('OMEGO_SSL_NO_VERIFY=1')
        try:
            sslctx = ssl.create_default_context()
        except Exception as e:
            log.error('Failed to create Default SSL context: %s' % e)
            raise Stop(
                'Failed to create Default SSL context, OMEGO_SSL_NO_VERIFY '
                'is not supported on older versions of Python')
        sslctx.check_hostname = False
        sslctx.verify_mode = ssl.CERT_NONE
        opener = urllib2.build_opener(urllib2.HTTPSHandler(context=sslctx))
    else:
        opener = urllib2.build_opener()

    if 'USER_AGENT' in os.environ:
        opener.addheaders = [('User-agent', os.environ.get('USER_AGENT'))]
        log.debug('Setting user-agent: %s', os.environ.get('USER_AGENT'))

    if httpuser and httppassword:
        mgr = urllib2.HTTPPasswordMgrWithDefaultRealm()
        mgr.add_password(None, url, httpuser, httppassword)
        log.debug('Enabling HTTP authentication')
        opener.add_handler(urllib2.HTTPBasicAuthHandler(mgr))
        opener.add_handler(urllib2.HTTPDigestAuthHandler(mgr))
    elif httpuser or httppassword:
        raise FileException(
            'httpuser and httppassword must be used together', url)

    # Override method http://stackoverflow.com/a/4421485
    req = urllib2.Request(url)
    if method:
        req.get_method = lambda: method

    return opener.open(req)
Open a URL using an opener that will simulate a browser user-agent url: The URL httpuser, httppassword: HTTP authentication credentials (either both or neither must be provided) method: The HTTP method Caller is responsible for calling close() on the returned object
entailment
def dereference_url(url):
    """
    Makes a HEAD request to find the final destination of a URL after
    following any redirects
    """
    res = open_url(url, method='HEAD')
    res.close()
    return res.url
Makes a HEAD request to find the final destination of a URL after following any redirects
entailment
def read(url, **kwargs):
    """
    Read the contents of a URL into memory and return them
    """
    response = open_url(url, **kwargs)
    try:
        return response.read()
    finally:
        response.close()
Read the contents of a URL into memory and return them
entailment
def download(url, filename=None, print_progress=0, delete_fail=True,
             **kwargs):
    """
    Download a file, optionally printing a simple progress bar
    url: The URL to download
    filename: The filename to save to, default is to use the URL basename
    print_progress: The length of the progress bar, use 0 to disable
    delete_fail: If True delete the file if the download was not
      successful, default is to keep the temporary file
    return: The downloaded filename
    """
    blocksize = 1024 * 1024
    downloaded = 0
    progress = None

    log.info('Downloading %s', url)
    response = open_url(url, **kwargs)

    if not filename:
        filename = os.path.basename(url)

    output = None
    try:
        total = int(response.headers['Content-Length'])

        if print_progress:
            progress = ProgressBar(print_progress, total)

        with tempfile.NamedTemporaryFile(
                prefix=filename + '.', dir='.', delete=False) as output:
            while downloaded < total:
                block = response.read(blocksize)
                output.write(block)
                downloaded += len(block)
                if progress:
                    progress.update(downloaded)
        os.rename(output.name, filename)
        output = None
        return filename
    finally:
        response.close()
        if delete_fail and output:
            os.unlink(output.name)
Download a file, optionally printing a simple progress bar url: The URL to download filename: The filename to save to, default is to use the URL basename print_progress: The length of the progress bar, use 0 to disable delete_fail: If True delete the file if the download was not successful, default is to keep the temporary file return: The downloaded filename
entailment
def rename_backup(name, suffix='.bak'):
    """
    Append a backup suffix to a file or directory, with an increasing
    numeric suffix (.N) if a file already exists
    """
    newname = '%s%s' % (name, suffix)
    n = 0
    while os.path.exists(newname):
        n += 1
        newname = '%s%s.%d' % (name, suffix, n)
    log.info('Renaming %s to %s', name, newname)
    os.rename(name, newname)
    return newname
Append a backup suffix to a file or directory, with an increasing numeric suffix (.N) if a file already exists
entailment
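The naming sequence produced by `rename_backup` is name.bak, then name.bak.1, name.bak.2, and so on. A side-effect-free sketch of just the name selection (the `exists` parameter is a hypothetical injection point for testing, not part of the original function):

import os

def backup_name(name, suffix='.bak', exists=os.path.exists):
    # same loop as rename_backup, without logging or renaming
    newname = '%s%s' % (name, suffix)
    n = 0
    while exists(newname):
        n += 1
        newname = '%s%s.%d' % (name, suffix, n)
    return newname

taken = {'data.bak', 'data.bak.1'}
print(backup_name('data', exists=lambda p: p in taken))   # data.bak.2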
def timestamp_filename(basename, ext=None):
    """
    Return a string of the form [basename-TIMESTAMP.ext]
    where TIMESTAMP is of the form YYYYMMDD-HHMMSS-MILSEC
    """
    dt = datetime.now().strftime('%Y%m%d-%H%M%S-%f')
    if ext:
        return '%s-%s.%s' % (basename, dt, ext)
    return '%s-%s' % (basename, dt)
Return a string of the form [basename-TIMESTAMP.ext] where TIMESTAMP is of the form YYYYMMDD-HHMMSS-MILSEC
entailment
def check_extracted_paths(namelist, subdir=None):
    """
    Check whether zip file paths are all relative, and optionally in a
    specified subdirectory, raises an exception if not

    namelist: A list of paths from the zip file
    subdir: If specified then check whether all paths in the zip file are
      under this subdirectory

    Python docs are unclear about the security of extract/extractall:
    https://docs.python.org/2/library/zipfile.html#zipfile.ZipFile.extractall
    https://docs.python.org/2/library/zipfile.html#zipfile.ZipFile.extract
    """
    def relpath(p):
        # relpath strips a trailing sep
        # Windows paths may also use unix sep
        q = os.path.relpath(p)
        if p.endswith(os.path.sep) or p.endswith('/'):
            q += os.path.sep
        return q

    parent = os.path.abspath('.')
    if subdir:
        if os.path.isabs(subdir):
            raise FileException('subdir must be a relative path', subdir)
        subdir = relpath(subdir + os.path.sep)

    for name in namelist:
        if os.path.commonprefix([parent, os.path.abspath(name)]) != parent:
            raise FileException('Insecure path in zipfile', name)

        if subdir and os.path.commonprefix(
                [subdir, relpath(name)]) != subdir:
            raise FileException(
                'Path in zipfile is not in required subdir', name)
Check whether zip file paths are all relative, and optionally in a specified subdirectory, raises an exception if not namelist: A list of paths from the zip file subdir: If specified then check whether all paths in the zip file are under this subdirectory Python docs are unclear about the security of extract/extractall: https://docs.python.org/2/library/zipfile.html#zipfile.ZipFile.extractall https://docs.python.org/2/library/zipfile.html#zipfile.ZipFile.extract
entailment
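The `commonprefix` test above guards against "zip-slip" style entries: a relative member name resolves under the extraction directory, while `../` or absolute names escape it and are rejected:

import os

parent = os.path.abspath('.')
safe = os.path.abspath('subdir/file.txt')
evil = os.path.abspath('../../outside.txt')

assert os.path.commonprefix([parent, safe]) == parent   # stays inside
assert os.path.commonprefix([parent, evil]) != parent   # escapes, rejected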
def unzip(filename, match_dir=False, destdir=None):
    """
    Extract all files from a zip archive
    filename: The path to the zip file
    match_dir: If True all files in the zip must be contained in a
      subdirectory named after the archive file with extension removed
    destdir: Extract the zip into this directory, default current directory

    return: If match_dir is True then returns the subdirectory (including
      destdir), otherwise returns destdir or '.'
    """
    if not destdir:
        destdir = '.'

    z = zipfile.ZipFile(filename)
    unzipped = '.'

    if match_dir:
        if not filename.endswith('.zip'):
            raise FileException('Expected .zip file extension', filename)
        unzipped = os.path.basename(filename)[:-4]
        check_extracted_paths(z.namelist(), unzipped)
    else:
        check_extracted_paths(z.namelist())

    # File permissions, see
    # http://stackoverflow.com/a/6297838
    # http://stackoverflow.com/a/3015466
    for info in z.infolist():
        log.debug('Extracting %s to %s', info.filename, destdir)
        z.extract(info, destdir)
        perms = info.external_attr >> 16 & 4095
        if perms > 0:
            os.chmod(os.path.join(destdir, info.filename), perms)

    return os.path.join(destdir, unzipped)
Extract all files from a zip archive filename: The path to the zip file match_dir: If True all files in the zip must be contained in a subdirectory named after the archive file with extension removed destdir: Extract the zip into this directory, default current directory return: If match_dir is True then returns the subdirectory (including destdir), otherwise returns destdir or '.'
entailment
def zip(filename, paths, strip_prefix=''):
    """
    Create a new zip archive containing files
    filename: The name of the zip file to be created
    paths: A list of files or directories
    strip_prefix: Remove this prefix from all file-paths before adding
      to zip
    """
    if isinstance(paths, basestring):
        paths = [paths]
    filelist = set()
    for p in paths:
        if os.path.isfile(p):
            filelist.add(p)
        else:
            for root, dirs, files in os.walk(p):
                for f in files:
                    filelist.add(os.path.join(root, f))

    z = zipfile.ZipFile(filename, 'w', zipfile.ZIP_DEFLATED)
    for f in sorted(filelist):
        arcname = f
        if arcname.startswith(strip_prefix):
            arcname = arcname[len(strip_prefix):]
        if arcname.startswith(os.path.sep):
            arcname = arcname[1:]
        log.debug('Adding %s to %s[%s]', f, filename, arcname)
        z.write(f, arcname)
    z.close()
Create a new zip archive containing files filename: The name of the zip file to be created paths: A list of files or directories strip_prefix: Remove this prefix from all file-paths before adding to zip
entailment
def get_as_local_path(path, overwrite, progress=0,
                      httpuser=None, httppassword=None):
    """
    Automatically handle local and remote URLs, files and directories

    path: Either a local directory, file or remote URL. If a URL is given
      it will be fetched. If this is a zip it will be automatically
      expanded by default.
    overwrite: Whether to overwrite an existing file:
      'error': Raise an exception
      'backup': Rename the old file and use the new one
      'keep': Keep the old file, don't overwrite or raise an exception
    progress: Number of progress dots, default 0 (don't print)
    httpuser, httppassword: Credentials for HTTP authentication
    return: A tuple (type, localpath)
      type:
        'file': localpath is the path to a local file
        'directory': localpath is the path to a local directory
        'unzipped': localpath is the path to a local unzipped directory
    """
    m = re.match('([A-Za-z]+)://', path)
    if m:
        # url_open handles multiple protocols so don't bother validating
        log.debug('Detected URL protocol: %s', m.group(1))

        # URL should use / as the pathsep
        localpath = path.split('/')[-1]
        if not localpath:
            raise FileException(
                'Remote path appears to be a directory', path)

        if os.path.exists(localpath):
            if overwrite == 'error':
                raise FileException('File already exists', localpath)
            elif overwrite == 'keep':
                log.info('Keeping existing %s', localpath)
            elif overwrite == 'backup':
                rename_backup(localpath)
                download(path, localpath, progress, httpuser=httpuser,
                         httppassword=httppassword)
            else:
                raise Exception('Invalid overwrite flag: %s' % overwrite)
        else:
            download(path, localpath, progress, httpuser=httpuser,
                     httppassword=httppassword)
    else:
        localpath = path
    log.debug("Local path: %s", localpath)

    if os.path.isdir(localpath):
        return 'directory', localpath
    if os.path.exists(localpath):
        return 'file', localpath

    # Something's gone very wrong
    raise Exception('Local path does not exist: %s' % localpath)
Automatically handle local and remote URLs, files and directories path: Either a local directory, file or remote URL. If a URL is given it will be fetched. If this is a zip it will be automatically expanded by default. overwrite: Whether to overwrite an existing file: 'error': Raise an exception 'backup': Rename the old file and use the new one 'keep': Keep the old file, don't overwrite or raise an exception progress: Number of progress dots, default 0 (don't print) httpuser, httppassword: Credentials for HTTP authentication return: A tuple (type, localpath) type: 'file': localpath is the path to a local file 'directory': localpath is the path to a local directory 'unzipped': localpath is the path to a local unzipped directory
entailment
def create(fs, channels, application):
    """Allocates and initializes an encoder state."""
    result_code = ctypes.c_int()

    result = _create(fs, channels, application, ctypes.byref(result_code))
    # compare by value; 'is not' identity checks on ints are fragile
    if result_code.value != constants.OK:
        raise OpusError(result_code.value)

    return result
Allocates and initializes an encoder state.
entailment
def encode(encoder, pcm, frame_size, max_data_bytes):
    """Encodes an Opus frame

    Returns string output payload
    """
    pcm = ctypes.cast(pcm, c_int16_pointer)
    data = (ctypes.c_char * max_data_bytes)()

    result = _encode(encoder, pcm, frame_size, data, max_data_bytes)
    if result < 0:
        raise OpusError(result)

    return array.array('c', data[:result]).tostring()
Encodes an Opus frame Returns string output payload
entailment
def encode_float(encoder, pcm, frame_size, max_data_bytes):
    """Encodes an Opus frame from floating point input"""
    pcm = ctypes.cast(pcm, c_float_pointer)
    data = (ctypes.c_char * max_data_bytes)()

    result = _encode_float(encoder, pcm, frame_size, data, max_data_bytes)
    if result < 0:
        raise OpusError(result)

    return array.array('c', data[:result]).tostring()
Encodes an Opus frame from floating point input
entailment
def __parse_tostr(self, text, **kwargs):
    '''Builds and returns the MeCab function for parsing Unicode text.

    Args:
        fn_name: MeCab function name that determines the function
            behavior, either 'mecab_sparse_tostr' or
            'mecab_nbest_sparse_tostr'.

    Returns:
        A function definition, tailored to parsing Unicode text and
        returning the result as a string suitable for display on stdout,
        using either the default or N-best behavior.
    '''
    n = self.options.get('nbest', 1)

    if self._KW_BOUNDARY in kwargs:
        patt = kwargs.get(self._KW_BOUNDARY, '.')
        tokens = list(self.__split_pattern(text, patt))
        text = ''.join([t[0] for t in tokens])

        btext = self.__str2bytes(text)
        self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)

        bpos = 0
        self.__mecab.mecab_lattice_set_boundary_constraint(
            self.lattice, bpos, self.MECAB_TOKEN_BOUNDARY)

        for (token, match) in tokens:
            bpos += 1
            if match:
                mark = self.MECAB_INSIDE_TOKEN
            else:
                mark = self.MECAB_ANY_BOUNDARY

            for _ in range(1, len(self.__str2bytes(token))):
                self.__mecab.mecab_lattice_set_boundary_constraint(
                    self.lattice, bpos, mark)
                bpos += 1
            self.__mecab.mecab_lattice_set_boundary_constraint(
                self.lattice, bpos, self.MECAB_TOKEN_BOUNDARY)
    elif self._KW_FEATURE in kwargs:
        features = kwargs.get(self._KW_FEATURE, ())
        fd = {morph: self.__str2bytes(feat) for morph, feat in features}

        tokens = self.__split_features(text, [e[0] for e in features])
        text = ''.join([t[0] for t in tokens])

        btext = self.__str2bytes(text)
        self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)

        bpos = 0
        for chunk, match in tokens:
            c = len(self.__str2bytes(chunk))
            if match:
                self.__mecab.mecab_lattice_set_feature_constraint(
                    self.lattice, bpos, bpos + c, fd[chunk])
            bpos += c
    else:
        btext = self.__str2bytes(text)
        self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)

    self.__mecab.mecab_parse_lattice(self.tagger, self.lattice)

    if n > 1:
        res = self.__mecab.mecab_lattice_nbest_tostr(self.lattice, n)
    else:
        res = self.__mecab.mecab_lattice_tostr(self.lattice)

    if res != self.__ffi.NULL:
        raw = self.__ffi.string(res)
        return self.__bytes2str(raw).strip()
    else:
        err = self.__mecab.mecab_lattice_strerror(self.lattice)
        logger.error(self.__bytes2str(self.__ffi.string(err)))
        raise MeCabError(self.__bytes2str(self.__ffi.string(err)))
Builds and returns the MeCab function for parsing Unicode text. Args: fn_name: MeCab function name that determines the function behavior, either 'mecab_sparse_tostr' or 'mecab_nbest_sparse_tostr'. Returns: A function definition, tailored to parsing Unicode text and returning the result as a string suitable for display on stdout, using either the default or N-best behavior.
entailment
def __parse_tonodes(self, text, **kwargs):
    '''Builds and returns the MeCab function for parsing to nodes using
    morpheme boundary constraints.

    Args:
        format_feature: flag indicating whether or not to format the
            feature value for each node yielded.

    Returns:
        A function which returns a Generator, tailored to using boundary
        constraints and parsing as nodes, using either the default or
        N-best behavior.
    '''
    n = self.options.get('nbest', 1)

    try:
        if self._KW_BOUNDARY in kwargs:
            patt = kwargs.get(self._KW_BOUNDARY, '.')
            tokens = list(self.__split_pattern(text, patt))
            text = ''.join([t[0] for t in tokens])

            btext = self.__str2bytes(text)
            self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)

            bpos = 0
            self.__mecab.mecab_lattice_set_boundary_constraint(
                self.lattice, bpos, self.MECAB_TOKEN_BOUNDARY)

            for (token, match) in tokens:
                bpos += 1
                if match:
                    mark = self.MECAB_INSIDE_TOKEN
                else:
                    mark = self.MECAB_ANY_BOUNDARY

                for _ in range(1, len(self.__str2bytes(token))):
                    self.__mecab.mecab_lattice_set_boundary_constraint(
                        self.lattice, bpos, mark)
                    bpos += 1
                self.__mecab.mecab_lattice_set_boundary_constraint(
                    self.lattice, bpos, self.MECAB_TOKEN_BOUNDARY)
        elif self._KW_FEATURE in kwargs:
            features = kwargs.get(self._KW_FEATURE, ())
            fd = {morph: self.__str2bytes(feat) for morph, feat in features}

            tokens = self.__split_features(text, [e[0] for e in features])
            text = ''.join([t[0] for t in tokens])

            btext = self.__str2bytes(text)
            self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)

            bpos = 0
            for chunk, match in tokens:
                c = len(self.__str2bytes(chunk))
                if match:
                    self.__mecab.mecab_lattice_set_feature_constraint(
                        self.lattice, bpos, bpos + c, fd[chunk])
                bpos += c
        else:
            btext = self.__str2bytes(text)
            self.__mecab.mecab_lattice_set_sentence(self.lattice, btext)

        self.__mecab.mecab_parse_lattice(self.tagger, self.lattice)

        for _ in range(n):
            check = self.__mecab.mecab_lattice_next(self.lattice)
            if n == 1 or check:
                nptr = self.__mecab.mecab_lattice_get_bos_node(self.lattice)
                while nptr != self.__ffi.NULL:
                    # skip over any BOS nodes, since mecab does
                    if nptr.stat != MeCabNode.BOS_NODE:
                        raws = self.__ffi.string(
                            nptr.surface[0:nptr.length])
                        surf = self.__bytes2str(raws).strip()

                        if 'output_format_type' in self.options or \
                                'node_format' in self.options:
                            sp = self.__mecab.mecab_format_node(
                                self.tagger, nptr)
                            if sp != self.__ffi.NULL:
                                rawf = self.__ffi.string(sp)
                            else:
                                err = self.__mecab.mecab_strerror(
                                    self.tagger)
                                err = self.__bytes2str(
                                    self.__ffi.string(err))
                                msg = self._ERROR_NODEFORMAT.format(
                                    surf, err)
                                raise MeCabError(msg)
                        else:
                            rawf = self.__ffi.string(nptr.feature)
                        feat = self.__bytes2str(rawf).strip()

                        mnode = MeCabNode(nptr, surf, feat)
                        yield mnode
                    nptr = getattr(nptr, 'next')
    except GeneratorExit:
        logger.debug('close invoked on generator')
    except MeCabError:
        raise
    except:
        err = self.__mecab.mecab_lattice_strerror(self.lattice)
        logger.error(self.__bytes2str(self.__ffi.string(err)))
        raise MeCabError(self.__bytes2str(self.__ffi.string(err)))
Builds and returns the MeCab function for parsing to nodes using morpheme boundary constraints. Args: format_feature: flag indicating whether or not to format the feature value for each node yielded. Returns: A function which returns a Generator, tailored to using boundary constraints and parsing as nodes, using either the default or N-best behavior.
entailment
def parse(self, text, **kwargs):
    '''Parse the given text and return result from MeCab.

    :param text: the text to parse.
    :type text: str
    :param as_nodes: return generator of MeCabNodes if True;
        or string if False.
    :type as_nodes: bool, defaults to False
    :param boundary_constraints: regular expression for morpheme boundary
        splitting; if non-None and feature_constraints is None, then
        boundary constraint parsing will be used.
    :type boundary_constraints: str or re
    :param feature_constraints: tuple containing tuple instances of
        target morpheme and corresponding feature string in order of
        precedence; if non-None and boundary_constraints is None, then
        feature constraint parsing will be used.
    :type feature_constraints: tuple
    :return: A single string containing the entire MeCab output;
        or a Generator yielding the MeCabNode instances.
    :raises: MeCabError
    '''
    if text is None:
        logger.error(self._ERROR_EMPTY_STR)
        raise MeCabError(self._ERROR_EMPTY_STR)
    elif not isinstance(text, str):
        logger.error(self._ERROR_NOTSTR)
        raise MeCabError(self._ERROR_NOTSTR)
    elif 'partial' in self.options and not text.endswith("\n"):
        logger.error(self._ERROR_MISSING_NL)
        raise MeCabError(self._ERROR_MISSING_NL)

    if self._KW_BOUNDARY in kwargs:
        val = kwargs[self._KW_BOUNDARY]
        if not isinstance(val, self._REGEXTYPE) and not isinstance(val, str):
            logger.error(self._ERROR_BOUNDARY)
            raise MeCabError(self._ERROR_BOUNDARY)
    elif self._KW_FEATURE in kwargs:
        val = kwargs[self._KW_FEATURE]
        if not isinstance(val, tuple):
            logger.error(self._ERROR_FEATURE)
            raise MeCabError(self._ERROR_FEATURE)

    as_nodes = kwargs.get(self._KW_ASNODES, False)

    if as_nodes:
        return self.__parse_tonodes(text, **kwargs)
    else:
        return self.__parse_tostr(text, **kwargs)
Parse the given text and return result from MeCab. :param text: the text to parse. :type text: str :param as_nodes: return generator of MeCabNodes if True; or string if False. :type as_nodes: bool, defaults to False :param boundary_constraints: regular expression for morpheme boundary splitting; if non-None and feature_constraints is None, then boundary constraint parsing will be used. :type boundary_constraints: str or re :param feature_constraints: tuple containing tuple instances of target morpheme and corresponding feature string in order of precedence; if non-None and boundary_constraints is None, then feature constraint parsing will be used. :type feature_constraints: tuple :return: A single string containing the entire MeCab output; or a Generator yielding the MeCabNode instances. :raises: MeCabError
entailment
def parse(filename, MAX_TERM_COUNT=1000):
    """
    MAX_TERM_COUNT = 10000    # There are 39,000 terms in the GO!
    """
    # NB: `terms` is a module-level dict that this function populates.
    with open(filename, "r") as f:
        termId = None
        name = None
        desc = None
        parents = []
        termCount = 0
        for l in f.readlines():
            if l.startswith("id:"):
                termId = l.strip()[4:]
            if l.startswith("name:"):
                name = l.strip()[6:]
            elif l.startswith("def:"):
                desc = l.strip()[5:]
            elif l.startswith("is_a:"):
                pid = l.strip()[6:].split(" ", 1)[0]
                parents.append(pid)
            if len(l) == 1:     # newline
                # save
                if termId is not None and name is not None:
                    terms[termId] = {'name': name,
                                     'desc': desc,
                                     'parents': parents[:],
                                     'children': []}
                termId = None
                name = None
                parents = []
                termCount += 1
                if MAX_TERM_COUNT is not None and \
                        termCount > MAX_TERM_COUNT:
                    break

    count = 0
    for tid, tdict in terms.items():
        count += 1      # purely for display
        for p in tdict['parents']:
            if p in terms.keys():
                terms[p]['children'].append(tid)

    # Get unique term IDs for Tag Groups.
    tagGroups = set()
    for tid, tdict in terms.items():
        # Only create Tags for GO:terms that are 'leafs' of the tree
        if len(tdict['children']) == 0:
            for p in tdict['parents']:
                tagGroups.add(p)

    return tagGroups, terms
MAX_TERM_COUNT = 10000 # There are 39,000 terms in the GO!
entailment
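For reference, the line-prefix arithmetic above (`[4:]`, `[6:]`, `[5:]`) matches OBO stanza fields, and `is_a:` keeps only the term ID before the first space:

line = 'is_a: GO:0048308 ! organelle inheritance'
pid = line.strip()[6:].split(" ", 1)[0]
assert pid == 'GO:0048308'

name_line = 'name: mitochondrion inheritance'
assert name_line.strip()[6:] == 'mitochondrion inheritance'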
def generate(tagGroups, terms):
    """
    create Tag Groups and Child Tags using data from terms dict
    """
    rv = []
    for pid in tagGroups:
        # In testing we may not have complete set
        if pid not in terms.keys():
            continue
        groupData = terms[pid]
        groupName = "[%s] %s" % (pid, groupData['name'])
        groupDesc = groupData['desc']
        children = []
        group = dict(name=groupName, desc=groupDesc, set=children)
        rv.append(group)
        for cid in groupData['children']:
            cData = terms[cid]
            cName = "[%s] %s" % (cid, cData['name'])
            cDesc = cData['desc']
            child = dict(name=cName, desc=cDesc)
            children.append(child)
    return json.dumps(rv, indent=2)
create Tag Groups and Child Tags using data from terms dict
entailment
def _handle_args(self, cmd, args):
    """
    We need to support deprecated behaviour for now which makes this
    quite complicated

    Current behaviour:
    - install: Installs a new server, existing server causes an error
    - install --upgrade: Installs or upgrades a server
    - install --managedb: Automatically initialise or upgrade the db

    Deprecated:
    - install --upgradedb --initdb: Replaced by install --managedb
    - install --upgradedb: upgrade the db, must exist
    - install --initdb: initialise the db
    - upgrade: Upgrades a server, must already exist
    - upgrade --upgradedb: Automatically upgrade the db

    returns:
    - Modified args object, flag to indicate new/existing/auto install
    """
    if cmd == 'install':
        if args.upgrade:
            # Current behaviour: install or upgrade
            if args.initdb or args.upgradedb:
                raise Stop(10, (
                    'Deprecated --initdb --upgradedb flags '
                    'are incompatible with --upgrade'))
            newinstall = None
        else:
            # Current behaviour: Server must not exist
            newinstall = True
        if args.managedb:
            # Current behaviour
            if args.initdb or args.upgradedb:
                raise Stop(10, (
                    'Deprecated --initdb --upgradedb flags '
                    'are incompatible with --managedb'))
            args.initdb = True
            args.upgradedb = True
        else:
            if args.initdb or args.upgradedb:
                log.warn('--initdb and --upgradedb are deprecated, '
                         'use --managedb')
    elif cmd == 'upgrade':
        # Deprecated behaviour
        log.warn(
            '"omero upgrade" is deprecated, use "omego install --upgrade"')
        cmd = 'install'
        args.upgrade = True
        # Deprecated behaviour: Server must exist
        newinstall = False
    else:
        raise Exception('Unexpected command: %s' % cmd)

    return args, newinstall
We need to support deprecated behaviour for now which makes this quite complicated Current behaviour: - install: Installs a new server, existing server causes an error - install --upgrade: Installs or upgrades a server - install --managedb: Automatically initialise or upgrade the db Deprecated: - install --upgradedb --initdb: Replaced by install --managedb - install --upgradedb: upgrade the db, must exist - install --initdb: initialise the db - upgrade: Upgrades a server, must already exist - upgrade --upgradedb: Automatically upgrade the db returns: - Modified args object, flag to indicate new/existing/auto install
entailment
def get_server_dir(self):
    """
    Either downloads and/or unzips the server if necessary
    return: the directory of the unzipped server
    """
    if not self.args.server:
        if self.args.skipunzip:
            raise Stop(0, 'Unzip disabled, exiting')
        log.info('Downloading server')
        # The downloader automatically symlinks the server, however if
        # we are upgrading we want to delay the symlink swap, so this
        # overrides args.sym
        # TODO: Find a nicer way to do this?
        artifact_args = copy.copy(self.args)
        artifact_args.sym = ''
        artifacts = Artifacts(artifact_args)
        server = artifacts.download('server')
    else:
        progress = 0
        if self.args.verbose:
            progress = 20
        ptype, server = fileutils.get_as_local_path(
            self.args.server, self.args.overwrite, progress=progress,
            httpuser=self.args.httpuser,
            httppassword=self.args.httppassword)
        if ptype == 'file':
            if self.args.skipunzip:
                raise Stop(0, 'Unzip disabled, exiting')
            log.info('Unzipping %s', server)
            server = fileutils.unzip(
                server, match_dir=True, destdir=self.args.unzipdir)

    log.debug('Server directory: %s', server)
    return server
Either downloads and/or unzips the server if necessary return: the directory of the unzipped server
entailment
def handle_database(self):
    """
    Handle database initialisation and upgrade, taking into account
    command line arguments
    """
    # TODO: When initdb and upgradedb are dropped we can just test
    # managedb, but for backwards compatibility we need to support
    # initdb without upgradedb and vice-versa
    if self.args.initdb or self.args.upgradedb:
        db = DbAdmin(self.dir, None, self.args, self.external)
        status = db.check()
        log.debug('OMERO database upgrade status: %s', status)
    else:
        log.warn('OMERO database check disabled')
        return DB_INIT_NEEDED

    if status == DB_INIT_NEEDED:
        if self.args.initdb:
            log.debug('Initialising OMERO database')
            db.init()
        else:
            log.error('OMERO database not found')
            raise Stop(DB_INIT_NEEDED,
                       'Install/Upgrade failed: OMERO database not found')
    elif status == DB_UPGRADE_NEEDED:
        log.warn('OMERO database exists but is out of date')
        if self.args.upgradedb:
            log.debug('Upgrading OMERO database')
            db.upgrade()
        else:
            raise Stop(
                DB_UPGRADE_NEEDED,
                'Pass --managedb or upgrade your OMERO database manually')
    else:
        assert status == DB_UPTODATE

    return status
Handle database initialisation and upgrade, taking into account command line arguments
entailment
def run(self, command):
    """
    Runs a command as if from the command-line without the need
    for using popen or subprocess
    """
    if isinstance(command, basestring):
        command = command.split()
    else:
        command = list(command)
    self.external.omero_cli(command)
Runs a command as if from the command-line without the need for using popen or subprocess
entailment
def bin(self, command):
    """
    Runs the omero command-line client with an array of arguments using
    the old environment
    """
    if isinstance(command, basestring):
        command = command.split()
    self.external.omero_bin(command)
Runs the omero command-line client with an array of arguments using the old environment
entailment
def symlink_check_and_set(self):
    """
    The default symlink was changed from OMERO-CURRENT to OMERO.server.
    If `--sym` was not specified and OMERO-CURRENT exists in the current
    directory, stop and warn.
    """
    if self.args.sym == '':
        if os.path.exists('OMERO-CURRENT'):
            log.error('Deprecated OMERO-CURRENT found but --sym not set')
            raise Stop(
                30,
                'The default for --sym has changed to OMERO.server '
                'but the current directory contains OMERO-CURRENT. '
                'Either remove OMERO-CURRENT or explicitly pass --sym.')
    if self.args.sym in ('', 'auto'):
        self.args.sym = 'OMERO.server'
The default symlink was changed from OMERO-CURRENT to OMERO.server. If `--sym` was not specified and OMERO-CURRENT exists in the current directory stop and warn.
entailment
def query(request):
    """Query encoder/decoder with a request value"""
    def inner(func, obj):
        result_code = func(obj, request)
        # compare by value; 'is not' identity checks on ints are fragile
        if result_code != constants.OK:
            raise OpusError(result_code)
        return result_code
    return inner
Query encoder/decoder with a request value
entailment
def get(request, result_type):
    """Get CTL value from an encoder/decoder"""
    def inner(func, obj):
        result = result_type()
        result_code = func(obj, request, ctypes.byref(result))
        # compare by value; 'is not' identity checks on ints are fragile
        if result_code != constants.OK:
            raise OpusError(result_code)
        return result.value
    return inner
Get CTL value from an encoder/decoder
entailment
def set(request):
    """Set new CTL value to an encoder/decoder"""
    def inner(func, obj, value):
        result_code = func(obj, request, value)
        # compare by value; 'is not' identity checks on ints are fragile
        if result_code != constants.OK:
            raise OpusError(result_code)
    return inner
Set new CTL value to an encoder/decoder
entailment
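These three factories are how CTL wrappers are typically built: bind a request code once, then call the resulting function with the raw ctl function and an encoder/decoder state. The request codes below are hypothetical placeholders, not the real Opus constants:

import ctypes

GET_BITRATE_REQUEST = 4003   # hypothetical request code
SET_BITRATE_REQUEST = 4002   # hypothetical request code

get_bitrate = get(GET_BITRATE_REQUEST, ctypes.c_int32)
set_bitrate = set(SET_BITRATE_REQUEST)

# usage (sketch): get_bitrate(encoder_ctl, enc) -> current bitrate
#                 set_bitrate(encoder_ctl, enc, 64000)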
def sort_schemas(schemas):
    """Sort a list of SQL schemas in order"""
    def keyfun(v):
        x = SQL_SCHEMA_REGEXP.match(v).groups()
        # x3: 'DEV' should come before ''
        return (int(x[0]), x[1], int(x[2]) if x[2] else None,
                x[3] if x[3] else 'zzz', int(x[4]))

    return sorted(schemas, key=keyfun)
Sort a list of SQL schemas in order
entailment
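A sketch of how `sort_schemas` orders version strings, with a hypothetical `SQL_SCHEMA_REGEXP` standing in for the real omego pattern; the five groups match what `keyfun` unpacks, and the 'zzz' sentinel makes DEV schemas sort before final ones:

import re

# Hypothetical pattern for names like OMERO5.1DEV__4 / OMERO5.1__0.
SQL_SCHEMA_REGEXP = re.compile(r'OMERO(\d+)(\.)(\d+)(DEV)?__(\d+)$')

def keyfun(v):
    x = SQL_SCHEMA_REGEXP.match(v).groups()
    return (int(x[0]), x[1], int(x[2]) if x[2] else None,
            x[3] if x[3] else 'zzz', int(x[4]))

schemas = ['OMERO5.1__0', 'OMERO5.1DEV__4', 'OMERO5.0__0']
print(sorted(schemas, key=keyfun))
# ['OMERO5.0__0', 'OMERO5.1DEV__4', 'OMERO5.1__0']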
def parse_schema_files(files):
    """
    Parse a list of SQL files and return a dictionary of valid schema
    files where each key is a valid schema file and the corresponding
    value is a tuple containing the source and the target schema.
    """
    f_dict = {}
    for f in files:
        root, ext = os.path.splitext(f)
        if ext != ".sql":
            continue
        vto, vfrom = os.path.split(root)
        vto = os.path.split(vto)[1]
        if is_schema(vto) and is_schema(vfrom):
            f_dict[f] = (vfrom, vto)
    return f_dict
Parse a list of SQL files and return a dictionary of valid schema files where each key is a valid schema file and the corresponding value is a tuple containing the source and the target schema.
entailment
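The path layout `parse_schema_files` expects is <target-schema>/<source-schema>.sql; splitting one such path (a hypothetical upgrade script name) shows how the (source, target) pair is recovered:

import os

f = 'sql/psql/OMERO5.1__0/OMERO5.0__0.sql'   # hypothetical upgrade script
root, ext = os.path.splitext(f)
vto, vfrom = os.path.split(root)
vto = os.path.split(vto)[1]
print(vfrom, '->', vto)   # OMERO5.0__0 -> OMERO5.1__0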
def dump(self):
    """
    Dump the database using the postgres custom format
    """
    dumpfile = self.args.dumpfile
    if not dumpfile:
        db, env = self.get_db_args_env()
        dumpfile = fileutils.timestamp_filename(
            'omero-database-%s' % db['name'], 'pgdump')

    log.info('Dumping database to %s', dumpfile)
    if not self.args.dry_run:
        self.pgdump('-Fc', '-f', dumpfile)
Dump the database using the postgres custom format
entailment
def get_db_args_env(self):
    """
    Get a dictionary of database connection parameters, and create an
    environment for running postgres commands.
    Falls back to omego defaults.
    """
    db = {
        'name': self.args.dbname,
        'host': self.args.dbhost,
        'user': self.args.dbuser,
        'pass': self.args.dbpass
    }

    if not self.args.no_db_config:
        try:
            c = self.external.get_config(force=True)
        except Exception as e:
            log.warn('config.xml not found: %s', e)
            c = {}

        for k in db:
            try:
                db[k] = c['omero.db.%s' % k]
            except KeyError:
                log.info(
                    'Failed to lookup parameter omero.db.%s, using %s',
                    k, db[k])

    if not db['name']:
        raise Exception('Database name required')

    env = os.environ.copy()
    env['PGPASSWORD'] = db['pass']
    return db, env
Get a dictionary of database connection parameters, and create an environment for running postgres commands. Falls back to omego defaults.
entailment
def psql(self, *psqlargs):
    """
    Run a psql command
    """
    db, env = self.get_db_args_env()

    args = ['-v', 'ON_ERROR_STOP=on',
            '-d', db['name'],
            '-h', db['host'],
            '-U', db['user'],
            '-w', '-A', '-t'] + list(psqlargs)
    stdout, stderr = External.run('psql', args, capturestd=True, env=env)
    if stderr:
        log.warn('stderr: %s', stderr)
    log.debug('stdout: %s', stdout)
    return stdout
Run a psql command
entailment
def pgdump(self, *pgdumpargs):
    """
    Run a pg_dump command
    """
    db, env = self.get_db_args_env()

    args = ['-d', db['name'],
            '-h', db['host'],
            '-U', db['user'],
            '-w'] + list(pgdumpargs)
    stdout, stderr = External.run(
        'pg_dump', args, capturestd=True, env=env)
    if stderr:
        log.warn('stderr: %s', stderr)
    log.debug('stdout: %s', stdout)
    return stdout
Run a pg_dump command
entailment
def set_server_dir(self, dir):
    """
    Set the directory of the server to be controlled
    """
    self.dir = os.path.abspath(dir)
    config = os.path.join(self.dir, 'etc', 'grid', 'config.xml')
    self.configured = os.path.exists(config)
Set the directory of the server to be controlled
entailment
def get_config(self, force=False):
    """
    Returns a dictionary of all config.xml properties

    If `force = True` then ignore any cached state and read config.xml
    if possible

    setup_omero_cli() must be called before this method to import the
    correct omero module to minimise the possibility of version conflicts
    """
    if not force and not self.has_config():
        raise Exception('No config file')

    configxml = os.path.join(self.dir, 'etc', 'grid', 'config.xml')
    if not os.path.exists(configxml):
        raise Exception('No config file')

    try:
        # Attempt to open config.xml read-only, though this flag is not
        # present in early versions of OMERO 5.0
        c = self._omero.config.ConfigXml(
            configxml, exclusive=False, read_only=True)
    except TypeError:
        c = self._omero.config.ConfigXml(configxml, exclusive=False)
    try:
        return c.as_map()
    finally:
        c.close()
Returns a dictionary of all config.xml properties If `force = True` then ignore any cached state and read config.xml if possible setup_omero_cli() must be called before this method to import the correct omero module to minimise the possibility of version conflicts
entailment
def setup_omero_cli(self):
    """
    Imports the omero CLI module so that commands can be run directly.
    Note Python does not allow a module to be imported multiple times,
    so this will only work with a single omero instance.

    This can have several surprising effects, so setup_omero_cli()
    must be explicitly called.
    """
    if not self.dir:
        raise Exception('No server directory set')

    if 'omero.cli' in sys.modules:
        raise Exception('omero.cli can only be imported once')

    log.debug("Setting up omero CLI")
    lib = os.path.join(self.dir, "lib", "python")
    if not os.path.exists(lib):
        raise Exception("%s does not exist!" % lib)
    sys.path.insert(0, lib)

    import omero
    import omero.cli

    log.debug("Using omero CLI from %s", omero.cli.__file__)

    self.cli = omero.cli.CLI()
    self.cli.loadplugins()
    self._omero = omero
Imports the omero CLI module so that commands can be run directly. Note Python does not allow a module to be imported multiple times, so this will only work with a single omero instance. This can have several surprising effects, so setup_omero_cli() must be explicitly called.
entailment
def setup_previous_omero_env(self, olddir, savevarsfile):
    """
    Create a copy of the current environment for interacting with the
    current OMERO server installation
    """
    env = self.get_environment(savevarsfile)

    def addpath(varname, p):
        if not os.path.exists(p):
            raise Exception("%s does not exist!" % p)
        current = env.get(varname)
        if current:
            env[varname] = p + os.pathsep + current
        else:
            env[varname] = p

    olddir = os.path.abspath(olddir)
    lib = os.path.join(olddir, "lib", "python")
    addpath("PYTHONPATH", lib)
    bin = os.path.join(olddir, "bin")
    addpath("PATH", bin)
    self.old_env = env
Create a copy of the current environment for interacting with the current OMERO server installation
entailment
def omero_cli(self, command):
    """
    Runs a command as if from the OMERO command-line without the need
    for using popen or subprocess.
    """
    assert isinstance(command, list)
    if not self.cli:
        raise Exception('omero.cli not initialised')
    log.info("Invoking CLI [current environment]: %s", " ".join(command))
    self.cli.invoke(command, strict=True)
Runs a command as if from the OMERO command-line without the need for using popen or subprocess.
entailment
def omero_bin(self, command):
    """
    Runs the omero command-line client with an array of arguments using
    the old environment
    """
    assert isinstance(command, list)
    if not self.old_env:
        raise Exception('Old environment not initialised')
    log.info("Running [old environment]: %s", " ".join(command))
    self.run('omero', command, capturestd=True, env=self.old_env)
Runs the omero command-line client with an array of arguments using the old environment
entailment
def run(exe, args, capturestd=False, env=None):
    """
    Runs an executable with an array of arguments, optionally in the
    specified environment.
    Returns stdout and stderr
    """
    command = [exe] + args
    if env:
        log.info("Executing [custom environment]: %s", " ".join(command))
    else:
        log.info("Executing : %s", " ".join(command))
    start = time.time()

    # Temp files will be automatically deleted on close()
    # If run() throws the garbage collector should call close(), so don't
    # bother with try-finally
    outfile = None
    errfile = None
    if capturestd:
        outfile = tempfile.TemporaryFile()
        errfile = tempfile.TemporaryFile()

    # Use call instead of Popen so that stdin is connected to the console,
    # in case user input is required
    # On Windows shell=True is needed otherwise the modified environment
    # PATH variable is ignored. On Unix this breaks things.
    r = subprocess.call(
        command, env=env, stdout=outfile, stderr=errfile, shell=WINDOWS)

    stdout = None
    stderr = None
    if capturestd:
        outfile.seek(0)
        stdout = outfile.read()
        outfile.close()
        errfile.seek(0)
        stderr = errfile.read()
        errfile.close()

    end = time.time()
    if r != 0:
        log.error("Failed [%.3f s]", end - start)
        raise RunException(
            "Non-zero return code", exe, args, r, stdout, stderr)
    log.info("Completed [%.3f s]", end - start)
    return stdout, stderr
Runs an executable with an array of arguments, optionally in the specified environment. Returns stdout and stderr
entailment
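A minimal usage sketch for the run() helper above; the command is illustrative, and RunException is assumed to be the exception class defined alongside run() in the same module.

try:
    # capturestd=True returns (stdout, stderr) as bytes read back
    # from the temporary files
    out, err = run('git', ['--version'], capturestd=True)
    print(out.decode().strip())
except RunException as e:
    # Non-zero exit codes raise, with the captured streams attached.
    print('command failed:', e)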
def string_support(py3enc): '''Create byte-to-string and string-to-byte conversion functions for internal use. :param py3enc: Encoding used by Python 3 environment. :type py3enc: str ''' if sys.version < '3': def bytes2str(b): '''Identity, returns the argument string (bytes).''' return b def str2bytes(s): '''Identity, returns the argument string (bytes).''' return s else: def bytes2str(b): '''Transforms bytes into string (Unicode).''' return b.decode(py3enc) def str2bytes(u): '''Transforms Unicode into string (bytes).''' return u.encode(py3enc) return (bytes2str, str2bytes)
Create byte-to-string and string-to-byte conversion functions for internal use. :param py3enc: Encoding used by Python 3 environment. :type py3enc: str
entailment
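A short sketch of the converters under Python 3 (on Python 2 both functions are the identity), assuming a UTF-8 environment:

bytes2str, str2bytes = string_support('utf-8')

assert str2bytes(u'すもも') == u'すもも'.encode('utf-8')
assert bytes2str(u'すもも'.encode('utf-8')) == u'すもも'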
def splitter_support(py2enc): '''Create tokenizer for use in boundary constraint parsing. :param py2enc: Encoding used by Python 2 environment. :type py2enc: str ''' if sys.version < '3': def _fn_sentence(pattern, sentence): if REGEXTYPE == type(pattern): if pattern.flags & re.UNICODE: return sentence.decode(py2enc) else: return sentence else: return sentence def _fn_token2str(pattern): if REGEXTYPE == type(pattern): if pattern.flags & re.UNICODE: def _fn(token): return token.encode(py2enc) else: def _fn(token): return token else: def _fn(token): return token return _fn else: def _fn_sentence(pattern, sentence): return sentence def _fn_token2str(pattern): def _fn(token): return token return _fn def _fn_tokenize_pattern(text, pattern): pos = 0 sentence = _fn_sentence(pattern, text) postprocess = _fn_token2str(pattern) for m in re.finditer(pattern, sentence): if pos < m.start(): token = postprocess(sentence[pos:m.start()]) yield (token.strip(), False) pos = m.start() token = postprocess(sentence[pos:m.end()]) yield (token.strip(), True) pos = m.end() if pos < len(sentence): token = postprocess(sentence[pos:]) yield (token.strip(), False) def _fn_tokenize_features(text, features): acc = [] acc.append((text.strip(), False)) for feat in features: for i,e in enumerate(acc): if e[1]==False: tmp = list(_fn_tokenize_pattern(e[0], feat)) if len(tmp) > 0: acc.pop(i) acc[i:i] = tmp return acc return _fn_tokenize_pattern, _fn_tokenize_features
Create tokenizer for use in boundary constraint parsing. :param py2enc: Encoding used by Python 2 environment. :type py2enc: str
entailment
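A sketch of the pattern tokenizer under Python 3; the encoding argument only matters on Python 2. Tokens that match the pattern are flagged True, the text between matches False:

import re

tokenize_pattern, tokenize_features = splitter_support('utf-8')

pattern = re.compile(r'\d+')
print(list(tokenize_pattern('abc123def', pattern)))
# [('abc', False), ('123', True), ('def', False)]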
def update(self, document_id, update_spec, namespace, timestamp): """Apply updates given in update_spec to the document whose id matches that of doc. """ index, doc_type = self._index_and_mapping(namespace) with self.lock: # Check if document source is stored in local buffer document = self.BulkBuffer.get_from_sources(index, doc_type, u(document_id)) if document: # Document source collected from local buffer # Perform apply_update on it and then it will be # ready for committing to Elasticsearch updated = self.apply_update(document, update_spec) # _id is immutable in MongoDB, so won't have changed in update updated['_id'] = document_id self.upsert(updated, namespace, timestamp) else: # Document source needs to be retrieved from Elasticsearch # before performing update. Pass update_spec to upsert function updated = {"_id": document_id} self.upsert(updated, namespace, timestamp, update_spec) # upsert() strips metadata, so only _id + fields in _source still here return updated
Apply updates given in update_spec to the document whose id matches that of doc.
entailment
def upsert(self, doc, namespace, timestamp, update_spec=None): """Insert a document into Elasticsearch.""" index, doc_type = self._index_and_mapping(namespace) # No need to duplicate '_id' in source document doc_id = u(doc.pop("_id")) metadata = { 'ns': namespace, '_ts': timestamp } # Index the source document, using lowercase namespace as index name. action = { '_op_type': 'index', '_index': index, '_type': doc_type, '_id': doc_id, '_source': self._formatter.format_document(doc) } # Index document metadata with original namespace (mixed upper/lower). meta_action = { '_op_type': 'index', '_index': self.meta_index_name, '_type': self.meta_type, '_id': doc_id, '_source': bson.json_util.dumps(metadata) } self.index(action, meta_action, doc, update_spec) # Leave _id, since it's part of the original document doc['_id'] = doc_id
Insert a document into Elasticsearch.
entailment
def bulk_upsert(self, docs, namespace, timestamp): """Insert multiple documents into Elasticsearch.""" def docs_to_upsert(): doc = None for doc in docs: # Remove metadata and redundant _id index, doc_type = self._index_and_mapping(namespace) doc_id = u(doc.pop("_id")) document_action = { '_index': index, '_type': doc_type, '_id': doc_id, '_source': self._formatter.format_document(doc) } document_meta = { '_index': self.meta_index_name, '_type': self.meta_type, '_id': doc_id, '_source': { 'ns': namespace, '_ts': timestamp } } yield document_action yield document_meta if doc is None: raise errors.EmptyDocsError( "Cannot upsert an empty sequence of " "documents into Elastic Search") try: kw = {} if self.chunk_size > 0: kw['chunk_size'] = self.chunk_size responses = streaming_bulk(client=self.elastic, actions=docs_to_upsert(), **kw) for ok, resp in responses: if not ok: LOG.error( "Could not bulk-upsert document " "into ElasticSearch: %r" % resp) if self.auto_commit_interval == 0: self.commit() except errors.EmptyDocsError: # This can happen when mongo-connector starts up, there is no # config file, but nothing to dump pass
Insert multiple documents into Elasticsearch.
entailment
def remove(self, document_id, namespace, timestamp): """Remove a document from Elasticsearch.""" index, doc_type = self._index_and_mapping(namespace) action = { '_op_type': 'delete', '_index': index, '_type': doc_type, '_id': u(document_id) } meta_action = { '_op_type': 'delete', '_index': self.meta_index_name, '_type': self.meta_type, '_id': u(document_id) } self.index(action, meta_action)
Remove a document from Elasticsearch.
entailment
def send_buffered_operations(self): """Send buffered operations to Elasticsearch. This method is periodically called by the AutoCommitThread. """ with self.lock: try: action_buffer = self.BulkBuffer.get_buffer() if action_buffer: successes, errors = bulk(self.elastic, action_buffer) LOG.debug("Bulk request finished, successfully sent %d " "operations", successes) if errors: LOG.error( "Bulk request finished with errors: %r", errors) except es_exceptions.ElasticsearchException: LOG.exception("Bulk request failed with exception")
Send buffered operations to Elasticsearch. This method is periodically called by the AutoCommitThread.
entailment
def get_last_doc(self): """Get the most recently modified document from Elasticsearch. This method is used to help define a time window within which documents may be in conflict after a MongoDB rollback. """ try: result = self.elastic.search( index=self.meta_index_name, body={ "query": {"match_all": {}}, "sort": [{"_ts": "desc"}], }, size=1 )["hits"]["hits"] for r in result: r['_source']['_id'] = r['_id'] return r['_source'] except es_exceptions.RequestError: # no documents so ES returns 400 because of undefined _ts mapping return None
Get the most recently modified document from Elasticsearch. This method is used to help define a time window within which documents may be in conflict after a MongoDB rollback.
entailment
def split_sig(params): """ Split a list of parameters/types by commas, whilst respecting brackets. For example: String arg0, int arg2 = 1, List<int> arg3 = [1, 2, 3] => ['String arg0', 'int arg2 = 1', 'List<int> arg3 = [1, 2, 3]'] """ result = [] current = '' level = 0 for char in params: if char in ('<', '{', '['): level += 1 elif char in ('>', '}', ']'): level -= 1 if char != ',' or level > 0: current += char elif char == ',' and level == 0: result.append(current) current = '' if current.strip() != '': result.append(current) return result
Split a list of parameters/types by commas, whilst respecting brackets. For example: String arg0, int arg2 = 1, List<int> arg3 = [1, 2, 3] => ['String arg0', 'int arg2 = 1', 'List<int> arg3 = [1, 2, 3]']
entailment
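A quick check of the bracket-aware split; note that segments after the first keep their leading whitespace, which the downstream signature parsers strip:

parts = [p.strip() for p in split_sig('Dictionary<string, int> map, int n = 0')]
assert parts == ['Dictionary<string, int> map', 'int n = 0']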
def parse_method_signature(sig): """ Parse a method signature of the form: modifier* type name (params) """ match = METH_SIG_RE.match(sig.strip()) if not match: raise RuntimeError('Method signature invalid: ' + sig) modifiers, return_type, name, generic_types, params = match.groups() if params.strip() != '': params = split_sig(params) params = [parse_param_signature(x) for x in params] else: params = [] return (modifiers.split(), return_type, name, generic_types, params)
Parse a method signature of the form: modifier* type name (params)
entailment
def parse_property_signature(sig): """ Parse a property signature of the form: modifier* type name { (get;)? (set;)? } """ match = PROP_SIG_RE.match(sig.strip()) if not match: raise RuntimeError('Property signature invalid: ' + sig) groups = match.groups() if groups[0] is not None: modifiers = [x.strip() for x in groups[:-4]] groups = groups[-4:] else: modifiers = [] groups = groups[1:] typ, name, getter, setter = groups return (modifiers, typ, name, getter is not None, setter is not None)
Parse a property signature of the form: modifier* type name { (get;)? (set;)? }
entailment
def parse_indexer_signature(sig): """ Parse an indexer signature of the form: modifier* type this[params] { (get;)? (set;)? } """ match = IDXR_SIG_RE.match(sig.strip()) if not match: raise RuntimeError('Indexer signature invalid: ' + sig) modifiers, return_type, params, getter, setter = match.groups() params = split_sig(params) params = [parse_param_signature(x) for x in params] return (modifiers.split(), return_type, params, getter is not None, setter is not None)
Parse an indexer signature of the form: modifier* type this[params] { (get;)? (set;)? }
entailment
def parse_param_signature(sig): """ Parse a parameter signature of the form: type name (= default)? """ match = PARAM_SIG_RE.match(sig.strip()) if not match: raise RuntimeError('Parameter signature invalid, got ' + sig) groups = match.groups() modifiers = groups[0].split() typ, name, _, default = groups[-4:] return ParamTuple(name=name, typ=typ, default=default, modifiers=modifiers)
Parse a parameter signature of the form: type name (= default)?
entailment
def parse_type_signature(sig): """ Parse a type signature """ match = TYPE_SIG_RE.match(sig.strip()) if not match: raise RuntimeError('Type signature invalid, got ' + sig) groups = match.groups() typ = groups[0] generic_types = groups[1] if not generic_types: generic_types = [] else: generic_types = split_sig(generic_types[1:-1]) is_array = (groups[2] is not None) return typ, generic_types, is_array
Parse a type signature
entailment
def parse_attr_signature(sig): """ Parse an attribute signature """ match = ATTR_SIG_RE.match(sig.strip()) if not match: raise RuntimeError('Attribute signature invalid, got ' + sig) name, _, params = match.groups() if params is not None and params.strip() != '': params = split_sig(params) params = [parse_param_signature(x) for x in params] else: params = [] return (name, params)
Parse an attribute signature
entailment
def get_msdn_ref(name): """ Try and create a reference to a type on MSDN """ in_msdn = False if name in MSDN_VALUE_TYPES: name = MSDN_VALUE_TYPES[name] in_msdn = True if name.startswith('System.'): in_msdn = True if in_msdn: link = name.split('<')[0] if link in MSDN_LINK_MAP: link = MSDN_LINK_MAP[link] else: link = link.lower() url = 'https://msdn.microsoft.com/en-us/library/'+link+'.aspx' node = nodes.reference(name, shorten_type(name)) node['refuri'] = url node['reftitle'] = name return node else: return None
Try and create a reference to a type on MSDN
entailment
def shorten_type(typ): """ Shorten a type. E.g. drops 'System.' """ offset = 0 for prefix in SHORTEN_TYPE_PREFIXES: if typ.startswith(prefix): if len(prefix) > offset: offset = len(prefix) return typ[offset:]
Shorten a type. E.g. drops 'System.'
entailment
def parse_mecab_options(self, options): '''Parses the MeCab options, returning them in a dictionary. Lattice-level option has been deprecated; please use marginal or nbest instead. :options string or dictionary of options to use when instantiating the MeCab instance. May be in short- or long-form, or in a Python dictionary. Returns: A dictionary of the specified MeCab options, where the keys are snake-cased names of the long-form of the option names. Raises: MeCabError: An invalid value for N-best was passed in. ''' class MeCabArgumentParser(argparse.ArgumentParser): '''MeCab option parser for natto-py.''' def error(self, message): '''error(message: string) Raises ValueError. ''' raise ValueError(message) options = options or {} dopts = {} if type(options) is dict: for name in iter(list(self._SUPPORTED_OPTS.values())): if name in options: if options[name] or options[name] == '': val = options[name] if isinstance(val, bytes): val = self.__bytes2str(options[name]) dopts[name] = val else: p = MeCabArgumentParser() p.add_argument('-r', '--rcfile', help='use FILE as a resource file', action='store', dest='rcfile') p.add_argument('-d', '--dicdir', help='set DIR as a system dicdir', action='store', dest='dicdir') p.add_argument('-u', '--userdic', help='use FILE as a user dictionary', action='store', dest='userdic') p.add_argument('-l', '--lattice-level', help='lattice information level (DEPRECATED)', action='store', dest='lattice_level', type=int) p.add_argument('-O', '--output-format-type', help='set output format type (wakati, none,...)', action='store', dest='output_format_type') p.add_argument('-a', '--all-morphs', help='output all morphs (default false)', action='store_true', default=False) p.add_argument('-N', '--nbest', help='output N best results (default 1)', action='store', dest='nbest', type=int) p.add_argument('-p', '--partial', help='partial parsing mode (default false)', action='store_true', default=False) p.add_argument('-m', '--marginal', help='output marginal probability (default false)', action='store_true', default=False) p.add_argument('-M', '--max-grouping-size', help=('maximum grouping size for unknown words ' '(default 24)'), action='store', dest='max_grouping_size', type=int) p.add_argument('-F', '--node-format', help='use STR as the user-defined node format', action='store', dest='node_format') p.add_argument('-U', '--unk-format', help=('use STR as the user-defined unknown ' 'node format'), action='store', dest='unk_format') p.add_argument('-B', '--bos-format', help=('use STR as the user-defined ' 'beginning-of-sentence format'), action='store', dest='bos_format') p.add_argument('-E', '--eos-format', help=('use STR as the user-defined ' 'end-of-sentence format'), action='store', dest='eos_format') p.add_argument('-S', '--eon-format', help=('use STR as the user-defined end-of-NBest ' 'format'), action='store', dest='eon_format') p.add_argument('-x', '--unk-feature', help='use STR as the feature for unknown word', action='store', dest='unk_feature') p.add_argument('-b', '--input-buffer-size', help='set input buffer size (default 8192)', action='store', dest='input_buffer_size', type=int) p.add_argument('-C', '--allocate-sentence', help='allocate new memory for input sentence', action='store_true', dest='allocate_sentence', default=False) p.add_argument('-t', '--theta', help=('set temperature parameter theta ' '(default 0.75)'), action='store', dest='theta', type=float) p.add_argument('-c', '--cost-factor', help='set cost factor (default 700)', action='store', dest='cost_factor', type=int) opts = p.parse_args([o.replace('\"', '').replace('\'', '') for o in options.split()]) for name in iter(list(self._SUPPORTED_OPTS.values())): if hasattr(opts, name): v = getattr(opts, name) if v or v == '': dopts[name] = v # final checks if 'nbest' in dopts and (dopts['nbest'] < 1 or dopts['nbest'] > self._NBEST_MAX): logger.error(self._ERROR_NVALUE) raise ValueError(self._ERROR_NVALUE) # warning for lattice-level deprecation if 'lattice_level' in dopts: logger.warning('WARNING: {}\n'.format(self._WARN_LATTICE_LEVEL)) return dopts
Parses the MeCab options, returning them in a dictionary. Lattice-level option has been deprecated; please use marginal or nbest instead. :options string or dictionary of options to use when instantiating the MeCab instance. May be in short- or long-form, or in a Python dictionary. Returns: A dictionary of the specified MeCab options, where the keys are snake-cased names of the long-form of the option names. Raises: MeCabError: An invalid value for N-best was passed in.
entailment
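In natto-py these options are normally supplied to the MeCab constructor, which routes them through parse_mecab_options(); a hedged sketch, assuming MeCab and a system dictionary are installed. Both calls below should normalize to {'output_format_type': 'wakati'}:

from natto import MeCab

with MeCab('-Owakati') as nm:                                 # short-form string
    print(nm.parse('ピンチはチャンス'))

with MeCab(options={'output_format_type': 'wakati'}) as nm:   # dict form
    print(nm.parse('ピンチはチャンス'))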
def build_options_str(self, options): '''Returns a string concatenation of the MeCab options. Args: options: dictionary of options to use when instantiating the MeCab instance. Returns: A string concatenation of the options used when instantiating the MeCab instance, in long-form. ''' opts = [] for name in iter(list(self._SUPPORTED_OPTS.values())): if name in options: key = name.replace('_', '-') if key in self._BOOLEAN_OPTIONS: if options[name]: opts.append('--{}'.format(key)) else: opts.append('--{}={}'.format(key, options[name])) return self.__str2bytes(' '.join(opts))
Returns a string concatenation of the MeCab options. Args: options: dictionary of options to use when instantiating the MeCab instance. Returns: A string concatenation of the options used when instantiating the MeCab instance, in long-form.
entailment
def create(fs, channels): """Allocates and initializes a decoder state""" result_code = ctypes.c_int() result = _create(fs, channels, ctypes.byref(result_code)) if result_code.value != 0: raise OpusError(result_code.value) return result
Allocates and initializes a decoder state
entailment
def packet_get_bandwidth(data): """Gets the bandwidth of an Opus packet.""" data_pointer = ctypes.c_char_p(data) result = _packet_get_bandwidth(data_pointer) if result < 0: raise OpusError(result) return result
Gets the bandwidth of an Opus packet.
entailment
def packet_get_nb_channels(data): """Gets the number of channels from an Opus packet""" data_pointer = ctypes.c_char_p(data) result = _packet_get_nb_channels(data_pointer) if result < 0: raise OpusError(result) return result
Gets the number of channels from an Opus packet
entailment
def packet_get_nb_frames(data, length=None): """Gets the number of frames in an Opus packet""" data_pointer = ctypes.c_char_p(data) if length is None: length = len(data) result = _packet_get_nb_frames(data_pointer, ctypes.c_int(length)) if result < 0: raise OpusError(result) return result
Gets the number of frames in an Opus packet
entailment
def packet_get_samples_per_frame(data, fs): """Gets the number of samples per frame from an Opus packet""" data_pointer = ctypes.c_char_p(data) # The original called _packet_get_nb_frames here, an apparent copy-paste # slip; this assumes the module also binds opus_packet_get_samples_per_frame # as _packet_get_samples_per_frame. result = _packet_get_samples_per_frame(data_pointer, ctypes.c_int(fs)) if result < 0: raise OpusError(result) return result
Gets the number of samples per frame from an Opus packet
entailment
def decode(decoder, data, length, frame_size, decode_fec, channels=2): """Decode an Opus frame Unlike the `opus_decode` function, this function takes an additional parameter `channels`, which indicates the number of channels in the frame """ pcm_size = frame_size * channels * ctypes.sizeof(ctypes.c_int16) pcm = (ctypes.c_int16 * pcm_size)() pcm_pointer = ctypes.cast(pcm, c_int16_pointer) # Converting from a boolean to int decode_fec = int(bool(decode_fec)) result = _decode(decoder, data, length, pcm_pointer, frame_size, decode_fec) if result < 0: raise OpusError(result) # result is the number of decoded samples per channel; return only those # bytes (tostring() was removed in Python 3.9, so use tobytes()) return array.array('h', pcm[:result * channels]).tobytes()
Decode an Opus frame Unlike the `opus_decode` function, this function takes an additional parameter `channels`, which indicates the number of channels in the frame
entailment
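A hedged usage sketch: decoding a single 20 ms stereo frame at 48 kHz. Here packet is a placeholder for one Opus packet (bytes) obtained from your container or transport, and create()/decode() are the wrappers above.

fs, channels = 48000, 2
frame_size = fs * 20 // 1000        # samples per channel in a 20 ms frame

st = create(fs, channels)           # allocate and initialize decoder state
pcm = decode(st, packet, len(packet), frame_size,
             decode_fec=False, channels=channels)
# pcm is interleaved native-endian 16-bit PCM, ready to write to a sink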
def label_list_parser(self, url): """ Extracts comma-separated tag=value pairs from a string Assumes all characters other than / and , are valid """ labels = re.findall('([^/,]+=[^/,]+)', url) slabels = set(labels) if '' in slabels: slabels.remove('') return slabels
Extracts comma-separated tag=value pairs from a string Assumes all characters other than / and , are valid
entailment
def init(app, register_blueprint=True, url_prefix='/fm', access_control_function=None, custom_config_json_path=None, custom_init_js_path=None): """ :param app: The Flask app :param register_blueprint: Override to False to stop the blueprint from automatically being registered to the app :param url_prefix: The URL prefix for the blueprint, defaults to /fm :param access_control_function: Pass in a function here to implement access control. The function will be called any time someone tries to access the filemanager, and a 404 will be returned if this function returns False :param custom_config_json_path: Set this to the full path of your filemanager.config.json file if you want to use a custom config. Example: os.path.join(app.root_path, 'static/filemanager.config.json') :param custom_init_js_path: Set this to the full path of your filemanager.init.js file if you want to use a custom init.js. Example: os.path.join(app.root_path, 'static/filemanager.init.js') """ global _initialised, _FILE_PATH, _URL_PREFIX if _initialised: raise Exception('Flask Filemanager can only be registered once!') _initialised = True _FILE_PATH = app.config.get('FLASKFILEMANAGER_FILE_PATH') if not _FILE_PATH: raise Exception('No FLASKFILEMANAGER_FILE_PATH value in config') log.info('File Manager using file path: {}'.format(_FILE_PATH)) util.ensure_dir(_FILE_PATH) if access_control_function: set_access_control_function(access_control_function) if custom_config_json_path: set_custom_config_json_path(custom_config_json_path) log.info('File Manager using custom config.json path: {}'.format(custom_config_json_path)) if custom_init_js_path: set_custom_init_js_path(custom_init_js_path) log.info('File Manager using custom init.js path: {}'.format(custom_init_js_path)) if register_blueprint: log.info('Registering filemanager blueprint to {}'.format(url_prefix)) app.register_blueprint(filemanager_blueprint, url_prefix=url_prefix)
:param app: The Flask app :param register_blueprint: Override to False to stop the blueprint from automatically being registered to the app :param url_prefix: The URL prefix for the blueprint, defaults to /fm :param access_control_function: Pass in a function here to implement access control. The function will be called any time someone tries to access the filemanager, and a 404 will be returned if this function returns False :param custom_config_json_path: Set this to the full path of your filemanager.config.json file if you want to use a custom config. Example: os.path.join(app.root_path, 'static/filemanager.config.json') :param custom_init_js_path: Set this to the full path of your filemanager.init.js file if you want to use a custom init.js. Example: os.path.join(app.root_path, 'static/filemanager.init.js')
entailment
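A minimal wiring sketch; the flaskfilemanager import path is an assumption based on the package name, and the access-control callback is illustrative:

import os
from flask import Flask
import flaskfilemanager   # assumed top-level module exposing init()

app = Flask(__name__)
# Required: the on-disk root that the filemanager will serve and manage.
app.config['FLASKFILEMANAGER_FILE_PATH'] = os.path.join(app.root_path, 'files')

flaskfilemanager.init(app, url_prefix='/fm',
                      access_control_function=lambda: True)   # allow everyone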
def get_file(path=None, content=None): """ :param path: relative path, or None to get from request :param content: file content, output in data. Used for editfile """ if path is None: path = request.args.get('path') if path is None: return error('No path in request') filename = os.path.split(path.rstrip('/'))[-1] extension = filename.rsplit('.', 1)[-1] os_file_path = web_path_to_os_path(path) if os.path.isdir(os_file_path): file_type = 'folder' # Ensure trailing slash if path[-1] != '/': path += '/' else: file_type = 'file' ctime = int(os.path.getctime(os_file_path)) mtime = int(os.path.getmtime(os_file_path)) height = 0 width = 0 if extension in ['gif', 'jpg', 'jpeg', 'png']: try: im = PIL.Image.open(os_file_path) # PIL's Image.size is (width, height), so unpack in that order width, height = im.size except OSError: log.exception('Error loading image "{}" to get width and height'.format(os_file_path)) attributes = { 'name': filename, 'path': get_url_path(path), 'readable': 1 if os.access(os_file_path, os.R_OK) else 0, 'writeable': 1 if os.access(os_file_path, os.W_OK) else 0, 'created': datetime.datetime.fromtimestamp(ctime).ctime(), 'modified': datetime.datetime.fromtimestamp(mtime).ctime(), 'timestamp': mtime, 'width': width, 'height': height, 'size': os.path.getsize(os_file_path) } if content: attributes['content'] = content return { 'id': path, 'type': file_type, 'attributes': attributes }
:param path: relative path, or None to get from request :param content: file content, output in data. Used for editfile
entailment
def __get_charset(self): '''Return the character encoding (charset) used internally by MeCab. Charset is that of the system dictionary used by MeCab. Will defer to the user-specified MECAB_CHARSET environment variable, if set. Defaults to shift-jis on Windows. Defaults to utf-8 on Mac OS. Defaults to euc-jp, as per MeCab documentation, when all else fails. Returns: Character encoding (charset) used by MeCab. ''' cset = os.getenv(self.MECAB_CHARSET) if cset: logger.debug(self._DEBUG_CSET_DEFAULT.format(cset)) return cset else: try: res = Popen(['mecab', '-D'], stdout=PIPE).communicate() lines = res[0].decode() if not lines.startswith('unrecognized'): dicinfo = lines.split(os.linesep) t = [t for t in dicinfo if t.startswith('charset')] if len(t) > 0: cset = t[0].split()[1].lower() logger.debug(self._DEBUG_CSET_DEFAULT.format(cset)) return cset else: logger.error('{}\n'.format(self._ERROR_NODIC)) raise EnvironmentError(self._ERROR_NODIC) else: logger.error('{}\n'.format(self._ERROR_NOCMD)) raise EnvironmentError(self._ERROR_NOCMD) except OSError: cset = 'euc-jp' if sys.platform == 'win32': cset = 'shift-jis' elif sys.platform == 'darwin': cset = 'utf8' logger.debug(self._DEBUG_CSET_DEFAULT.format(cset)) return cset
Return the character encoding (charset) used internally by MeCab. Charset is that of the system dictionary used by MeCab. Will defer to the user-specified MECAB_CHARSET environment variable, if set. Defaults to shift-jis on Windows. Defaults to utf-8 on Mac OS. Defaults to euc-jp, as per MeCab documentation, when all else fails. Returns: Character encoding (charset) used by MeCab.
entailment
def __get_libpath(self): '''Return the absolute path to the MeCab library. On Windows, the path to the system dictionary is used to deduce the path to libmecab.dll. Otherwise, mecab-config is used to find the libmecab shared object or dynamic library (*NIX or Mac OS, respectively). Will defer to the user-specified MECAB_PATH environment variable, if set. Returns: The absolute path to the MeCab library. Raises: EnvironmentError: A problem was encountered in trying to locate the MeCab library. ''' libp = os.getenv(self.MECAB_PATH) if libp: return os.path.abspath(libp) else: plat = sys.platform if plat == 'win32': lib = self._LIBMECAB.format(self._WINLIB_EXT) try: v = self.__regkey_value(self._WINHKEY, self._WINVALUE) ldir = v.split('etc')[0] libp = os.path.join(ldir, 'bin', lib) except EnvironmentError as err: logger.error('{}\n'.format(err)) logger.error('{}\n'.format(sys.exc_info()[0])) raise EnvironmentError( self._ERROR_WINREG.format(self._WINVALUE, self._WINHKEY)) else: # UNIX-y OS? if plat == 'darwin': lib = self._LIBMECAB.format(self._MACLIB_EXT) else: lib = self._LIBMECAB.format(self._UNIXLIB_EXT) try: cmd = ['mecab-config', '--libs-only-L'] res = Popen(cmd, stdout=PIPE).communicate() lines = res[0].decode() if not lines.startswith('unrecognized'): linfo = lines.strip() libp = os.path.join(linfo, lib) else: raise EnvironmentError( self._ERROR_MECABCONFIG.format(lib)) except EnvironmentError as err: logger.error('{}\n'.format(err)) logger.error('{}\n'.format(sys.exc_info()[0])) raise EnvironmentError(self._ERROR_NOLIB.format(lib)) if libp and os.path.exists(libp): libp = os.path.abspath(libp) os.environ[self.MECAB_PATH] = libp return libp else: raise EnvironmentError(self._ERROR_NOLIB.format(libp))
Return the absolute path to the MeCab library. On Windows, the path to the system dictionary is used to deduce the path to libmecab.dll. Otherwise, mecab-config is used to find the libmecab shared object or dynamic library (*NIX or Mac OS, respectively). Will defer to the user-specified MECAB_PATH environment variable, if set. Returns: The absolute path to the MeCab library. Raises: EnvironmentError: A problem was encountered in trying to locate the MeCab library.
entailment
def __regkey_value(self, path, name='', start_key=None): r'''Return the data of value mecabrc at MeCab HKEY node. On Windows, the path to the mecabrc as set in the Windows Registry is used to deduce the path to libmecab.dll. Returns: The full path to the mecabrc on Windows. Raises: WindowsError: A problem was encountered in trying to locate the value mecabrc at HKEY_CURRENT_USER\Software\MeCab. ''' if sys.version < '3': import _winreg as reg else: import winreg as reg def _fn(path, name='', start_key=None): if isinstance(path, str): path = path.split('\\') if start_key is None: start_key = getattr(reg, path[0]) return _fn(path[1:], name, start_key) else: subkey = path.pop(0) with reg.OpenKey(start_key, subkey) as handle: if path: return _fn(path, name, handle) else: desc, i = None, 0 while not desc or desc[0] != name: desc = reg.EnumValue(handle, i) i += 1 return desc[1] return _fn(path, name, start_key)
r'''Return the data of value mecabrc at MeCab HKEY node. On Windows, the path to the mecabrc as set in the Windows Registry is used to deduce the path to libmecab.dll. Returns: The full path to the mecabrc on Windows. Raises: WindowsError: A problem was encountered in trying to locate the value mecabrc at HKEY_CURRENT_USER\Software\MeCab.
entailment
def diff(old_html, new_html, cutoff=0.0, plaintext=False, pretty=False): """Show the differences between the old and new html document, as html. Return the document html with extra tags added to show changes. Add <ins> tags around newly added sections, and <del> tags to show sections that have been deleted. """ if plaintext: old_dom = parse_text(old_html) new_dom = parse_text(new_html) else: old_dom = parse_minidom(old_html) new_dom = parse_minidom(new_html) # If the two documents are not similar enough, don't show the changes. if not check_text_similarity(old_dom, new_dom, cutoff): return '<h2>The differences from the previous version are too large to show concisely.</h2>' dom = dom_diff(old_dom, new_dom) # HTML-specific cleanup. if not plaintext: fix_lists(dom) fix_tables(dom) # Only return html for the document body contents. body_elements = dom.getElementsByTagName('body') if len(body_elements) == 1: dom = body_elements[0] return minidom_tostring(dom, pretty=pretty)
Show the differences between the old and new html document, as html. Return the document html with extra tags added to show changes. Add <ins> tags around newly added sections, and <del> tags to show sections that have been deleted.
entailment
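For example (the exact markup depends on the matching heuristics, so the output comment below is indicative rather than exact):

old = '<p>The quick brown fox</p>'
new = '<p>The quick red fox jumps</p>'

print(diff(old, new))
# e.g. '<p>The quick <del>brown</del><ins>red</ins> fox<ins> jumps</ins></p>'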
def adjusted_ops(opcodes): """ Iterate through opcodes, turning them into a series of insert and delete operations, adjusting indices to account for the size of insertions and deletions. >>> def sequence_opcodes(old, new): return difflib.SequenceMatcher(a=old, b=new).get_opcodes() >>> list(adjusted_ops(sequence_opcodes('abc', 'b'))) [('delete', 0, 1, 0, 0), ('delete', 1, 2, 1, 1)] >>> list(adjusted_ops(sequence_opcodes('b', 'abc'))) [('insert', 0, 0, 0, 1), ('insert', 2, 2, 2, 3)] >>> list(adjusted_ops(sequence_opcodes('axxa', 'aya'))) [('delete', 1, 3, 1, 1), ('insert', 1, 1, 1, 2)] >>> list(adjusted_ops(sequence_opcodes('axa', 'aya'))) [('delete', 1, 2, 1, 1), ('insert', 1, 1, 1, 2)] >>> list(adjusted_ops(sequence_opcodes('ab', 'bc'))) [('delete', 0, 1, 0, 0), ('insert', 1, 1, 1, 2)] >>> list(adjusted_ops(sequence_opcodes('bc', 'ab'))) [('insert', 0, 0, 0, 1), ('delete', 2, 3, 2, 2)] """ while opcodes: op = opcodes.pop(0) tag, i1, i2, j1, j2 = op shift = 0 if tag == 'equal': continue if tag == 'replace': # change the single replace op into a delete then insert # pay careful attention to the variables here, there's no typo opcodes = [ ('delete', i1, i2, j1, j1), ('insert', i2, i2, j1, j2), ] + opcodes continue yield op if tag == 'delete': shift = -(i2 - i1) elif tag == 'insert': shift = +(j2 - j1) new_opcodes = [] for tag, i1, i2, j1, j2 in opcodes: new_opcodes.append(( tag, i1 + shift, i2 + shift, j1, j2, )) opcodes = new_opcodes
Iterate through opcodes, turning them into a series of insert and delete operations, adjusting indices to account for the size of insertions and deletions. >>> def sequence_opcodes(old, new): return difflib.SequenceMatcher(a=old, b=new).get_opcodes() >>> list(adjusted_ops(sequence_opcodes('abc', 'b'))) [('delete', 0, 1, 0, 0), ('delete', 1, 2, 1, 1)] >>> list(adjusted_ops(sequence_opcodes('b', 'abc'))) [('insert', 0, 0, 0, 1), ('insert', 2, 2, 2, 3)] >>> list(adjusted_ops(sequence_opcodes('axxa', 'aya'))) [('delete', 1, 3, 1, 1), ('insert', 1, 1, 1, 2)] >>> list(adjusted_ops(sequence_opcodes('axa', 'aya'))) [('delete', 1, 2, 1, 1), ('insert', 1, 1, 1, 2)] >>> list(adjusted_ops(sequence_opcodes('ab', 'bc'))) [('delete', 0, 1, 0, 0), ('insert', 1, 1, 1, 2)] >>> list(adjusted_ops(sequence_opcodes('bc', 'ab'))) [('insert', 0, 0, 0, 1), ('delete', 2, 3, 2, 2)]
entailment
def match_indices(match): """Yield index tuples (old_index, new_index) for each place in the match.""" a, b, size = match for i in range(size): yield a + i, b + i
Yield index tuples (old_index, new_index) for each place in the match.
entailment
def get_opcodes(matching_blocks): """Use difflib to get the opcodes for a set of matching blocks.""" sm = difflib.SequenceMatcher(a=[], b=[]) sm.matching_blocks = matching_blocks return sm.get_opcodes()
Use difflib to get the opcodes for a set of matching blocks.
entailment
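A small demonstration; the block list must end with the usual zero-length sentinel, exactly as SequenceMatcher.get_matching_blocks() produces it:

import difflib

sm = difflib.SequenceMatcher(a='abxcd', b='abycd')
blocks = sm.get_matching_blocks()   # [Match(0,0,2), Match(3,3,2), Match(5,5,0)]
print(get_opcodes(blocks))
# [('equal', 0, 2, 0, 2), ('replace', 2, 3, 2, 3), ('equal', 3, 5, 3, 5)]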
def match_blocks(hash_func, old_children, new_children): """Use difflib to find matching blocks.""" sm = difflib.SequenceMatcher( _is_junk, a=[hash_func(c) for c in old_children], b=[hash_func(c) for c in new_children], ) return sm
Use difflib to find matching blocks.
entailment
def get_nonmatching_blocks(matching_blocks): """Given a list of matching blocks, output the gaps between them. Non-matches have the format (alo, ahi, blo, bhi). This specifies two index ranges, one in the A sequence, and one in the B sequence. """ i = j = 0 for match in matching_blocks: a, b, size = match yield (i, a, j, b) i = a + size j = b + size
Given a list of matching blocks, output the gaps between them. Non-matches have the format (alo, ahi, blo, bhi). This specifies two index ranges, one in the A sequence, and one in the B sequence.
entailment
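For example (zero-length gaps are emitted too, when a match starts at index 0 or matches are adjacent):

blocks = [(0, 0, 2), (5, 4, 2), (7, 6, 0)]   # last entry is the sentinel
print(list(get_nonmatching_blocks(blocks)))
# [(0, 0, 0, 0), (2, 5, 2, 4), (7, 7, 6, 6)]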
def merge_blocks(a_blocks, b_blocks): """Given two lists of blocks, combine them, in the proper order. Ensure that there are no overlaps, and that they are for sequences of the same length. """ # Check sentinels for sequence length. assert a_blocks[-1][2] == b_blocks[-1][2] == 0 # sentinel size is 0 assert a_blocks[-1] == b_blocks[-1] combined_blocks = sorted(list(set(a_blocks + b_blocks))) # Check for overlaps. i = j = 0 for a, b, size in combined_blocks: assert i <= a assert j <= b i = a + size j = b + size return combined_blocks
Given two lists of blocks, combine them, in the proper order. Ensure that there are no overlaps, and that they are for sequences of the same length.
entailment
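For example, merging two disjoint block lists that share the same zero-size sentinel:

a_blocks = [(0, 0, 2), (6, 6, 0)]
b_blocks = [(3, 3, 2), (6, 6, 0)]
print(merge_blocks(a_blocks, b_blocks))
# [(0, 0, 2), (3, 3, 2), (6, 6, 0)]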