Schema: code (string), signature (string), docstring (string), loss_without_docstring (float64), loss_with_docstring (float64), factor (float64)
blockdata = stream.read(blocksize)
return int.from_bytes(blockdata, 'big')
def _read_block(blocksize, stream)
Read block data from network into integer type
3.806349
2.974236
1.279774
while True:
    header = _read_header(stream)
    block = _read_block(header[1], stream)
    yield (header, block)
def read_blocks(stream)
Generate parsed blocks from input stream
4.155685
3.977388
1.044828
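A minimal end-to-end sketch of how these two records compose. The header layout is not shown in the records, so an 8-byte header (4-byte block id, 4-byte payload length) is assumed here:

import io
from itertools import islice

def _read_header(stream):
    # Hypothetical header: 4-byte block id, 4-byte payload length (both big-endian).
    raw = stream.read(8)
    return (int.from_bytes(raw[:4], 'big'), int.from_bytes(raw[4:], 'big'))

def _read_block(blocksize, stream):
    blockdata = stream.read(blocksize)
    return int.from_bytes(blockdata, 'big')

def read_blocks(stream):
    while True:
        header = _read_header(stream)
        block = _read_block(header[1], stream)
        yield (header, block)

wire = io.BytesIO(
    b'\x00\x00\x00\x01\x00\x00\x00\x02\xab\xcd'  # block 1, 2-byte payload
    b'\x00\x00\x00\x02\x00\x00\x00\x01\xef'      # block 2, 1-byte payload
)
# islice bounds the generator, since read_blocks itself loops forever.
for header, block in islice(read_blocks(wire), 2):
    print(header, hex(block))  # (1, 2) 0xabcd, then (2, 1) 0xef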
# We can eliminate this source node
if len(nodes) == 1:
    to_eliminate = list(self.eliminate(next(iter(nodes)), data))

    # Recursively eliminate all nodes that can now be resolved
    while len(to_eliminate):
        other, check = to_eliminate.pop()
        to_eliminate.extend(self.eliminate(other, check))
else:
    # Pass messages from already-resolved source nodes
    for node in list(nodes):
        if node in self.eliminated:
            nodes.remove(node)
            data ^= self.eliminated[node]

    # Resolve if we are left with a single non-resolved source node
    if len(nodes) == 1:
        return self.add_block(nodes, data)
    else:
        # Add edges for all remaining nodes to this check
        check = CheckNode(nodes, data)
        for node in nodes:
            self.checks[node].append(check)

# Are we done yet?
return len(self.eliminated) >= self.num_blocks
def add_block(self, nodes, data)
Adds a new check node and edges between that node and all source nodes it connects, resolving all message passes that become possible as a result.
4.960991
4.589408
1.080965
# Cache resolved value
self.eliminated[node] = data
others = self.checks[node]
del self.checks[node]

# Pass messages to all associated checks
for check in others:
    check.check ^= data
    check.src_nodes.remove(node)

    # Yield all nodes that can now be resolved
    if len(check.src_nodes) == 1:
        yield (next(iter(check.src_nodes)), check.check)
def eliminate(self, node, data)
Resolves a source node, passing the message to all associated checks
7.128788
5.451193
1.307748
return MobileClient( username, device_id, token=token, locale=locale )
def mobileclient(username=None, device_id=None, *, token=None, locale='en_US')
Create and authenticate a Google Music mobile client.

>>> import google_music
>>> mc = google_music.mobileclient('username')

Parameters:
    username (str, Optional): Your Google Music username.
        This is used to store OAuth credentials for different accounts separately.
    device_id (str, Optional): A mobile device ID. Default: MAC address is used.
    token (dict, Optional): An OAuth token compatible with ``requests-oauthlib``.
    locale (str, Optional): `ICU <http://www.localeplanet.com/icu/>`__ locale used to
        localize some responses. This must be a locale supported by Android.
        Default: ``'en_US'``.

Returns:
    MobileClient: An authenticated :class:`~google_music.MobileClient` instance.
3.294534
3.497269
0.94203
# Maximum distance from root.
tree.max_distance_from_root()

# Initialize the data object.
idx = []
data = {
    'type': [],
    'id': [],
    'parent': [],
    'length': [],
    'label': [],
    'distance': []}

if use_uids:
    data['uid'] = []

# Add labels to internal nodes if set to true.
if add_node_labels:
    for i, node in enumerate(tree.internal_nodes()):
        node.label = str(i)

for node in tree.nodes():
    # Get node type
    if node.is_leaf():
        type_ = 'leaf'
        label = str(node.taxon.label).replace(' ', '_')
    elif node.is_internal():
        type_ = 'node'
        label = str(node.label)

    # Set node label and parent.
    id_ = label
    parent_node = node.parent_node
    length = node.edge_length
    distance = node.distance_from_root()

    # Is this node a root?
    if parent_node is None and length is None:
        parent_label = None
        parent_node = None
        length = 0
        distance = 0
        type_ = 'root'
    # Set parent node label
    elif parent_node.is_internal():
        parent_label = str(parent_node.label)
    else:
        raise Exception("Subtree is not attached to tree?")

    # Add this node to the data.
    data['type'].append(type_)
    data['id'].append(id_)
    data['parent'].append(parent_label)
    data['length'].append(length)
    data['label'].append(label)
    data['distance'].append(distance)
    if use_uids:
        data['uid'].append(get_random_id(10))

# Construct dataframe.
df = pandas.DataFrame(data)
return df
def _dendropy_to_dataframe( tree, add_node_labels=True, use_uids=True)
Convert Dendropy tree to Pandas dataframe.
2.740606
2.75552
0.994587
if filename is not None:
    # Use Dendropy to parse tree.
    tree = dendropy.Tree.get(
        path=filename,
        schema=schema,
        preserve_underscores=True)
elif data is not None:
    tree = dendropy.Tree.get(
        data=data,
        schema=schema,
        preserve_underscores=True)
else:
    raise Exception('No tree given?')

df = _dendropy_to_dataframe(
    tree,
    add_node_labels=add_node_labels,
    use_uids=use_uids
)
return df
def _read( filename=None, data=None, schema=None, add_node_labels=True, use_uids=True )
Read a phylogenetic tree into a phylopandas.DataFrame.

The resulting DataFrame has the following columns:
    - name: label for each taxon or node.
    - id: unique id (created by phylopandas) given to each node.
    - type: type of node (leaf, internal, or root).
    - parent: parent id. Necessary for constructing trees.
    - length: length of branch from parent to node.
    - distance: distance from root.

Parameters
----------
filename : str (default is None)
    Newick file to read into DataFrame.
data : str (default is None)
    Newick string to parse and read into DataFrame.
add_node_labels : bool
    If true, labels the internal nodes with numbers.

Returns
-------
df : phylopandas.DataFrame.
2.338515
2.406108
0.971907
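A quick, hedged sketch of calling this reader on an inline newick string, assuming the surrounding phylopandas module context (dendropy, pandas, and the get_random_id helper are importable):

# A tiny three-taxon tree, parsed directly from a newick string.
newick = "((A:1.0,B:2.0):0.5,C:3.0);"
df = _read(data=newick, schema='newick')
print(df[['id', 'type', 'parent', 'length', 'distance']])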
def func(
        filename=None,
        data=None,
        add_node_labels=True,
        use_uids=True,
        **kwargs):
    # Use the generic read function to read data.
    return _read(
        filename=filename,
        data=data,
        schema=schema,
        add_node_labels=add_node_labels,
        use_uids=use_uids,
        **kwargs
    )

# Update docs
func.__doc__ = _read_doc_template(schema)
return func
def _read_function(schema)
Add a read function for the named schema.
4.337422
3.945745
1.099266
seq_records = []

for i, row in df.iterrows():
    # Tries getting sequence data. If a TypeError at the seqrecord
    # creation is thrown, it is assumed that this row does not contain
    # sequence data and therefore the row is ignored.
    try:
        # Get sequence
        seq = Seq(row[sequence_col], alphabet=alphabet)

        # Get id
        id = row[id_col]

        # Build a description
        description = ""
        if extra_data is not None:
            description = " ".join([row[key] for key in extra_data])

        # Build a record
        record = SeqRecord(
            seq=seq,
            id=id,
            description=description,
        )
        seq_records.append(record)
    except TypeError:
        pass

return seq_records
def pandas_df_to_biopython_seqrecord( df, id_col='uid', sequence_col='sequence', extra_data=None, alphabet=None, )
Convert pandas dataframe to biopython seqrecord for easy writing.

Parameters
----------
df : DataFrame
    Pandas dataframe to convert.
id_col : str
    Column in dataframe to use as sequence label.
sequence_col : str
    Column in dataframe to use as sequence data.
extra_data : list
    Extra columns to use in sequence description line.
alphabet : biopython Alphabet object

Returns
-------
seq_records : list of biopython seqrecords.
3.161647
3.233815
0.977683
# Get sequence
seq = Seq(series[sequence_col], alphabet=alphabet)

# Get id
id = series[id_col]

# Build a description
description = ""
if extra_data is not None:
    description = " ".join([series[key] for key in extra_data])

# Build a record
record = SeqRecord(
    seq=seq,
    id=id,
    description=description,
)
seq_records = [record]
return seq_records
def pandas_series_to_biopython_seqrecord( series, id_col='uid', sequence_col='sequence', extra_data=None, alphabet=None )
Convert pandas series to biopython seqrecord for easy writing. Parameters ---------- series : Series Pandas series to convert id_col : str column in dataframe to use as sequence label sequence_col : str column in dataframe to use as sequence data extra_data : list extra columns to use in sequence description line Returns ------- seq_records : List of biopython seqrecords.
2.745793
2.522846
1.088372
# Check Alphabet if given
if alphabet is None:
    alphabet = Bio.Alphabet.Alphabet()
elif alphabet in ['dna', 'rna', 'protein', 'nucleotide']:
    alphabet = getattr(Bio.Alphabet, 'generic_{}'.format(alphabet))
else:
    raise Exception(
        "The alphabet is not recognized. Must be 'dna', 'rna', "
        "'nucleotide', or 'protein'.")

# Build a list of records from a pandas DataFrame
if type(data) is pd.DataFrame:
    seq_records = pandas_df_to_biopython_seqrecord(
        data,
        id_col=id_col,
        sequence_col=sequence_col,
        extra_data=extra_data,
        alphabet=alphabet,
    )
# Build a record from a pandas Series
elif type(data) is pd.Series:
    seq_records = pandas_series_to_biopython_seqrecord(
        data,
        id_col=id_col,
        sequence_col=sequence_col,
        extra_data=extra_data,
        alphabet=alphabet,
    )

# Write to disk or return string
if filename is not None:
    SeqIO.write(seq_records, filename, format=schema, **kwargs)
else:
    return "".join([s.format(schema) for s in seq_records])
def _write( data, filename=None, schema='fasta', id_col='uid', sequence_col='sequence', extra_data=None, alphabet=None, **kwargs)
General write function. Write phylopandas data to biopython format.

Parameters
----------
filename : str
    File to write string to. If no filename is given, a string will be returned.
sequence_col : str (default='sequence')
    Sequence column name in DataFrame.
id_col : str (default='id')
    ID column name in DataFrame.
id_only : bool (default=False)
    If True, use only the ID column to label sequences in fasta.
2.297611
2.392704
0.960257
def method(
        self,
        filename=None,
        schema=schema,
        id_col='uid',
        sequence_col='sequence',
        extra_data=None,
        alphabet=None,
        **kwargs):
    # Use generic write class to write data.
    return _write(
        self._data,
        filename=filename,
        schema=schema,
        id_col=id_col,
        sequence_col=sequence_col,
        extra_data=extra_data,
        alphabet=alphabet,
        **kwargs
    )

# Update docs
method.__doc__ = _write_doc_template(schema)
return method
def _write_method(schema)
Add a write method for named schema to a class.
4.139264
4.03406
1.026079
def func(
        data,
        filename=None,
        schema=schema,
        id_col='uid',
        sequence_col='sequence',
        extra_data=None,
        alphabet=None,
        **kwargs):
    # Use generic write class to write data.
    return _write(
        data,
        filename=filename,
        schema=schema,
        id_col=id_col,
        sequence_col=sequence_col,
        extra_data=extra_data,
        alphabet=alphabet,
        **kwargs
    )

# Update docs
func.__doc__ = _write_doc_template(schema)
return func
def _write_function(schema)
Add a write function for the named schema.
3.832376
3.693339
1.037645
# Check Alphabet if given
if alphabet is None:
    alphabet = Bio.Alphabet.Alphabet()
elif alphabet in ['dna', 'rna', 'protein', 'nucleotide']:
    alphabet = getattr(Bio.Alphabet, 'generic_{}'.format(alphabet))
else:
    raise Exception(
        "The alphabet is not recognized. Must be 'dna', 'rna', "
        "'nucleotide', or 'protein'.")

kwargs.update(alphabet=alphabet)

# Prepare DataFrame fields.
data = {
    'id': [],
    seq_label: [],
    'description': [],
    'label': []
}
if use_uids:
    data['uid'] = []

# Parse Fasta file.
for i, s in enumerate(SeqIO.parse(filename, format=schema, **kwargs)):
    data['id'].append(s.id)
    data[seq_label].append(str(s.seq))
    data['description'].append(s.description)
    data['label'].append(s.name)
    if use_uids:
        data['uid'].append(get_random_id(10))

# Port to DataFrame.
return pd.DataFrame(data)
def _read( filename, schema, seq_label='sequence', alphabet=None, use_uids=True, **kwargs)
Use BioPython's sequence parsing module to convert any file format to a Pandas DataFrame. The resulting DataFrame has the following columns: - name - id - description - sequence
2.805253
2.840962
0.987431
def func(
        self,
        filename,
        seq_label='sequence',
        alphabet=None,
        combine_on='uid',
        use_uids=True,
        **kwargs):
    # Use the generic read function to read data.
    df0 = self._data
    df1 = _read(
        filename=filename,
        schema=schema,
        seq_label=seq_label,
        alphabet=alphabet,
        use_uids=use_uids,
        **kwargs
    )
    return df0.phylo.combine(df1, on=combine_on)

# Update docs
func.__doc__ = _read_doc_template(schema)
return func
def _read_method(schema)
Add a read method for the named schema to a class.
5.914917
5.69205
1.039154
def func(
        filename,
        seq_label='sequence',
        alphabet=None,
        use_uids=True,
        **kwargs):
    # Use the generic read function to read data.
    return _read(
        filename=filename,
        schema=schema,
        seq_label=seq_label,
        alphabet=alphabet,
        use_uids=use_uids,
        **kwargs
    )

# Update docs
func.__doc__ = _read_doc_template(schema)
return func
def _read_function(schema)
Add a read function for the named schema.
5.166357
4.77775
1.081337
# Read file.
with open(filename, 'r') as f:
    blast_record = NCBIXML.read(f)

# Prepare DataFrame fields.
data = {'accession': [],
        'hit_def': [],
        'hit_id': [],
        'title': [],
        'length': [],
        'e_value': [],
        'sequence': []}

# Get alignments from blast result.
for i, s in enumerate(blast_record.alignments):
    # Append (not assign), so every alignment is kept.
    data['accession'].append(s.accession)
    data['hit_def'].append(s.hit_def)
    data['hit_id'].append(s.hit_id)
    data['title'].append(s.title)
    data['length'].append(s.length)
    data['e_value'].append(s.hsps[0].expect)
    data['sequence'].append(s.hsps[0].sbjct)

# Port to DataFrame.
return pd.DataFrame(data)
def read_blast_xml(filename, **kwargs)
Read BLAST XML format.
2.376719
2.386689
0.995822
if isinstance(taxon_col, str) is False:
    raise Exception("taxon_col must be a string.")

if isinstance(node_col, str) is False:
    raise Exception("node_col must be a string.")

# Construct a list of nodes from dataframe.
taxon_namespace = dendropy.TaxonNamespace()
nodes = {}
for idx in df.index:
    # Get node data.
    data = df.loc[idx]

    # Get taxon for node (if leaf node).
    taxon = None
    if data['type'] == 'leaf':
        taxon = dendropy.Taxon(label=data[taxon_col])
        # Add annotations data.
        for ann in taxon_annotations:
            taxon.annotations.add_new(ann, data[ann])
        taxon_namespace.add_taxon(taxon)

    # Get label for node.
    label = data[node_col]

    # Get edge length.
    edge_length = None
    if branch_lengths is True:
        edge_length = data['length']

    # Build a node
    n = dendropy.Node(
        taxon=taxon,
        label=label,
        edge_length=edge_length
    )

    # Add node annotations
    for ann in node_annotations:
        n.annotations.add_new(ann, data[ann])

    nodes[idx] = n

# Build branching pattern for nodes.
root = None
for idx, node in nodes.items():
    # Get node data.
    data = df.loc[idx]

    # Get children nodes
    children_idx = df[df['parent'] == data['id']].index
    children_nodes = [nodes[i] for i in children_idx]

    # Set child nodes
    nodes[idx].set_child_nodes(children_nodes)

    # Check if this is root.
    if data['parent'] is None:
        root = nodes[idx]

# Build tree.
tree = dendropy.Tree(
    seed_node=root,
    taxon_namespace=taxon_namespace
)
return tree
def _pandas_df_to_dendropy_tree( df, taxon_col='uid', taxon_annotations=[], node_col='uid', node_annotations=[], branch_lengths=True, )
Turn a phylopandas dataframe into a dendropy tree.

Parameters
----------
df : DataFrame
    DataFrame containing tree data.
taxon_col : str (optional)
    Column in dataframe to label the taxon. If None, the index will be used.
taxon_annotations : str
    List of columns to use as annotations for the tree taxon.
node_col : str (optional)
    Column in dataframe to label the nodes. If None, the index will be used.
node_annotations : str
    List of columns to use as annotations for the node taxon.
branch_lengths : bool
    If True, includes branch lengths.
2.358789
2.438533
0.967298
tree = _pandas_df_to_dendropy_tree(
    df,
    taxon_col=taxon_col,
    taxon_annotations=taxon_annotations,
    node_col=node_col,
    node_annotations=node_annotations,
    branch_lengths=branch_lengths,
)

# Write out format
if filename is not None:
    tree.write(path=filename, schema=schema, suppress_annotations=False, **kwargs)
else:
    return tree.as_string(schema=schema)
def _write( df, filename=None, schema='newick', taxon_col='uid', taxon_annotations=[], node_col='uid', node_annotations=[], branch_lengths=True, **kwargs )
Write a phylopandas tree DataFrame to various formats.

Parameters
----------
df : DataFrame
    DataFrame containing tree data.
filename : str
    Filepath to write out tree. If None, will return string.
schema : str
    Tree format to write out.
taxon_col : str (optional)
    Column in dataframe to label the taxon. If None, the index will be used.
taxon_annotations : str
    List of columns to use as annotations for the tree taxon.
node_col : str (optional)
    Column in dataframe to label the nodes. If None, the index will be used.
node_annotations : str
    List of columns to use as annotations for the node taxon.
branch_lengths : bool
    If True, includes branch lengths.
2.465716
3.089059
0.798209
def method(
        self,
        filename=None,
        schema=schema,
        taxon_col='uid',
        taxon_annotations=[],
        node_col='uid',
        node_annotations=[],
        branch_lengths=True,
        **kwargs):
    # Use generic write class to write data.
    return _write(
        self._data,
        filename=filename,
        schema=schema,
        taxon_col=taxon_col,
        taxon_annotations=taxon_annotations,
        node_col=node_col,
        node_annotations=node_annotations,
        branch_lengths=branch_lengths,
        **kwargs
    )

# Update docs
method.__doc__ = _write_doc_template(schema)
return method
def _write_method(schema)
Add a write method for named schema to a class.
3.826855
3.775822
1.013516
def func(
        data,
        filename=None,
        schema=schema,
        taxon_col='uid',
        taxon_annotations=[],
        node_col='uid',
        node_annotations=[],
        branch_lengths=True,
        **kwargs):
    # Use generic write class to write data.
    return _write(
        data,
        filename=filename,
        schema=schema,
        taxon_col=taxon_col,
        taxon_annotations=taxon_annotations,
        node_col=node_col,
        node_annotations=node_annotations,
        branch_lengths=branch_lengths,
        **kwargs
    )

# Update docs
func.__doc__ = _write_doc_template(schema)
return func
def _write_function(schema)
Add a write function for the named schema.
3.6173
3.496878
1.034437
alphabet = string.ascii_uppercase + string.ascii_lowercase + string.digits
return ''.join(random.choice(alphabet) for _ in range(length))
def get_random_id(length)
Generate a random, alpha-numerical id.
2.042705
1.943725
1.050923
self.folder = folder
self.templates.directories[0] = folder
self.app.root_path = folder
def set_folder(self, folder)
Sets the folder where the files to serve are located.
9.806851
6.681268
1.467813
waitress.serve(self.app, host=host, port=port)
def run(self, host='0.0.0.0', port=8080)
Launch a development web server.
4.11218
4.386829
0.937392
data = self.app.test_client().get("/%s" % path).data
return data
def get(self, path)
Get the content of a file, identified by its path relative to the folder configured in PyGreen. If the file extension is one of the extensions that should be processed through Mako, it will be processed.
6.912282
7.611513
0.908135
files = []
for l in self.file_listers:
    files += l()
for f in files:
    _logger.info("generating %s" % f)
    content = self.get(f)
    loc = os.path.join(output_folder, f)
    d = os.path.dirname(loc)
    if not os.path.exists(d):
        os.makedirs(d)
    with open(loc, "wb") as file_:
        file_.write(content)
def gen_static(self, output_folder)
Generates a complete static version of the web site. It will be stored in output_folder.
2.750868
2.823557
0.974256
logging.basicConfig(level=logging.INFO, format='%(message)s')
parser = argparse.ArgumentParser(description='PyGreen, micro web framework/static web site generator')
subparsers = parser.add_subparsers(dest='action')

parser_serve = subparsers.add_parser('serve', help='serve the web site')
parser_serve.add_argument('-p', '--port', type=int, default=8080, help='port to serve on')
parser_serve.add_argument('-f', '--folder', default=".", help='folder containing files to serve')
parser_serve.add_argument('-d', '--disable-templates', action='store_true', default=False,
                          help='just serve static files, do not use Mako')

def serve():
    if args.disable_templates:
        self.template_exts = set([])
    self.run(port=args.port)
parser_serve.set_defaults(func=serve)

parser_gen = subparsers.add_parser('gen', help='generate a static version of the site')
parser_gen.add_argument('output', help='folder to store the files')
parser_gen.add_argument('-f', '--folder', default=".", help='folder containing files to generate')

def gen():
    self.gen_static(args.output)
parser_gen.set_defaults(func=gen)

args = parser.parse_args(cmd_args)
self.set_folder(args.folder)
print(parser.description)
print("")
args.func()
def cli(self, cmd_args=None)
The command line interface of PyGreen.
2.499077
2.383259
1.048596
self.switch_configuration = switch_configuration
self.terminal_controller = terminal_controller
self.logger = logger
self.piping_processor = piping_processor
self.sub_processor = None
self.continuing_to = None
self.is_done = False
self.replace_input = False
self.awaiting_keystroke = False
def init(self, switch_configuration, terminal_controller, logger, piping_processor, *args)
:type switch_configuration: fake_switches.switch_configuration.SwitchConfiguration :type terminal_controller: fake_switches.terminal.TerminalController :type logger: logging.Logger :type piping_processor: fake_switches.command_processing.piping_processor_base.PipingProcessorBase
3.44127
3.459437
0.994748
glat1 = lat * np.pi / 180.
glon1 = lon * np.pi / 180.
s = maxdist / 1.852243
faz = azimuth * np.pi / 180.

EPS = 0.00000000005
if ((np.abs(np.cos(glat1)) < EPS) and not (np.abs(np.sin(faz)) < EPS)):
    raise CourseException("Only North-South courses are meaningful")

a = 6378.137 / 1.852243
f = 1 / 298.257223563
r = 1 - f
tu = r * np.tan(glat1)
sf = np.sin(faz)
cf = np.cos(faz)
if (cf == 0):
    b = 0.
else:
    b = 2. * np.arctan2(tu, cf)

cu = 1. / np.sqrt(1 + tu * tu)
su = tu * cu
sa = cu * sf
c2a = 1 - sa * sa
x = 1. + np.sqrt(1. + c2a * (1. / (r * r) - 1.))
x = (x - 2.) / x
c = 1. - x
c = (x * x / 4. + 1.) / c
d = (0.375 * x * x - 1.) * x
tu = s / (r * a * c)
y = tu
c = y + 1
while (np.abs(y - c) > EPS):
    sy = np.sin(y)
    cy = np.cos(y)
    cz = np.cos(b + y)
    e = 2. * cz * cz - 1.
    c = y
    x = e * cy
    y = e + e - 1.
    y = (((sy * sy * 4. - 3.) * y * cz * d / 6. + x) * d / 4. - cz) * sy * d + tu

b = cu * cy * cf - su * sy
c = r * np.sqrt(sa * sa + b * b)
d = su * cy + cu * sy * cf
glat2 = (np.arctan2(d, c) + np.pi) % (2 * np.pi) - np.pi
c = cu * cy - su * sy * cf
x = np.arctan2(sy * sf, c)
c = ((-3. * c2a + 4.) * f + 4.) * c2a * f / 16.
d = ((e * cy * c + cz) * sy * c + y) * sa
glon2 = ((glon1 + x - (1. - c) * d * f + np.pi) % (2 * np.pi)) - np.pi
baz = (np.arctan2(sa, b) + np.pi) % (2 * np.pi)

glon2 *= 180. / np.pi
glat2 *= 180. / np.pi
baz *= 180. / np.pi

return (glon2, glat2, baz)
def _gccalc(lon, lat, azimuth, maxdist=None)
Original javascript on http://williams.best.vwh.net/gccalc.htm Translated into python by Thomas Lecocq This function is a black box, because trigonometry is difficult
2.428364
2.413376
1.00621
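A small usage sketch, assuming numpy is imported as np and a CourseException class exists as the record implies. Heading due east from (0°E, 0°N) for 111 km should land roughly one degree of longitude away:

import numpy as np

class CourseException(Exception):
    # Stand-in for the exception class the record raises.
    pass

lon2, lat2, back_azimuth = _gccalc(lon=0.0, lat=0.0, azimuth=90.0, maxdist=111.0)
print(round(lon2, 2), round(lat2, 2))  # approximately 1.0 0.0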
glon1 = centerlon
glat1 = centerlat
X = []
Y = []
for azimuth in range(0, 360):
    glon2, glat2, baz = _gccalc(glon1, glat1, azimuth, radius)
    X.append(glon2)
    Y.append(glat2)
X.append(X[0])
Y.append(Y[0])

proj_x, proj_y = m(X, Y)
return zip(proj_x, proj_y)
def circle(m, centerlon, centerlat, radius, *args, **kwargs)
Return lon, lat tuples of a "circle" which matches the chosen Basemap projection.

Takes the following arguments:
    m = basemap instance
    centerlon = originating lon
    centerlat = originating lat
    radius = radius
2.662086
2.838176
0.937957
if shortname in _MIME_TYPES:
    raise MimeRenderException('"%s" has already been registered' % shortname)
_MIME_TYPES[shortname] = mime_types
def register_mime(shortname, mime_types)
Register a new mime type.

Usage example:

    mimerender.register_mime('svg', ('application/x-svg', 'application/svg+xml',))

After this you can do:

    @mimerender.mimerender(svg=render_svg)
    def GET(...
        ...
4.070675
5.752341
0.707655
@wraps(app)
def wrapped(environ, start_response):
    status_headers = [None, None]

    def _start_response(status, headers):
        status_headers[:] = [status, headers]

    body = app(environ, _start_response)
    ret = body, status_headers[0], status_headers[1]
    return ret
return wrapped
def wsgi_wrap(app)
Wraps a standard wsgi application e.g.: def app(environ, start_response) It intercepts the start_response callback and grabs the results from it so it can return the status, headers, and body as a tuple
4.021672
1.95181
2.060483
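A short sketch of the wrapper in action on a trivial WSGI app; functools.wraps is the only import the wrapper itself needs:

from functools import wraps

def hello_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'hello']

wrapped = wsgi_wrap(hello_app)
# The wrapper supplies its own start_response, so None is fine here.
body, status, headers = wrapped({}, None)
print(status, headers, list(body))  # 200 OK [('Content-Type', 'text/plain')] [b'hello']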
def wrapper(*args2, **kwargs2):
    # take the function
    def wrapped(f):
        return _WSGIMimeRender(*args, **kwargs)(*args2, **kwargs2)(wsgi_wrap(f))
    return wrapped
return wrapper
def WSGIMimeRender(*args, **kwargs)
A wrapper for _WSGIMimeRender that wraps the inner callable with wsgi_wrap first.
5.993397
3.218132
1.862384
@self.__call__(*args, **kwargs)
def helper(e, status):
    return dict(exception=e), status

def wrap(target):
    @wraps(target)
    def wrapper(*args, **kwargs):
        try:
            return target(*args, **kwargs)
        except BaseException as e:
            for klass, status in mapping:
                if isinstance(e, klass):
                    return helper(e, status)
            raise
    return wrapper
return wrap
def map_exceptions(self, mapping, *args, **kwargs)
Exception mapping helper decorator. Takes the same arguments as the main decorator, plus `mapping`, which is a list of `(exception_class, status_line)` pairs.
3.353108
3.172315
1.056991
scheme = self.scheme
if not scheme:
    return True
return scheme.is_relative(self)
def relative(self)
Identify if this URI is relative to some "current context". For example, if the protocol is missing, it's protocol-relative. If the host is missing, it's host-relative, etc.
7.854592
5.50638
1.426453
if uri:
    result = self.__class__(urljoin(str(self), str(uri)))
else:
    result = self.__class__(self)

for part, value in parts.items():
    if part not in self.__all_parts__:
        raise TypeError("Unknown URI component: " + part)
    setattr(result, part, value)

return result
def resolve(self, uri=None, **parts)
Attempt to resolve a new URI given an updated URI, partial or complete.
3.101179
2.998673
1.034184
# noinspection PyUnresolvedReferences
result = cls.formatterfactory(fmt).parse(date_string)
result = {k: v for k, v in result.items() if k in ('year', 'month', 'day')}
return cls(**result)
def strptime(cls, date_string, fmt)
This is the opposite of :py:meth:`khayyam.JalaliDate.strftime`, and is used to parse date strings into date objects. `ValueError` is raised if the date_string and format can’t be parsed by time.strptime() or if it returns a value which isn’t a time tuple. For a complete list of formatting directives, see :doc:`/directives`. :param date_string: :param fmt: :return: A :py:class:`khayyam.JalaliDate` corresponding to date_string, parsed according to format :rtype: :py:class:`khayyam.JalaliDate`
4.121991
4.857978
0.848499
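A hedged usage sketch; the exact set of supported format directives is assumed to follow the khayyam docs referenced above:

from khayyam import JalaliDate

# Parse a Jalali date string back into a JalaliDate instance.
d = JalaliDate.strptime('1390-05-21', '%Y-%m-%d')
print(d.year, d.month, d.day)  # 1390 5 21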
return JalaliDate(
    year if year else self.year,
    month if month else self.month,
    day if day else self.day
)
def replace(self, year=None, month=None, day=None)
Replaces the given arguments on this instance and returns a new instance. :param year: :param month: :param day: :return: A :py:class:`khayyam.JalaliDate` with the same attributes, except for those attributes given new values by whichever keyword arguments are specified.
3.282195
2.796156
1.173824
arr = get_gregorian_date_from_julian_day(self.tojulianday())
return datetime.date(int(arr[0]), int(arr[1]), int(arr[2]))
def todate(self)
Calculates the corresponding day in the gregorian calendar. This is the main use case of this library. :return: Corresponding date in gregorian calendar. :rtype: :py:class:`datetime.date`
4.308309
4.357344
0.988746
return time.struct_time((
    self.year,
    self.month,
    self.day,
    0, 0, 0,
    self.weekday(),
    self.dayofyear(),
    -1
))
def timetuple(self)
It's equivalent to: >>> time.struct_time((d.year, d.month, d.day, d.hour, d.minute, d.second, d.weekday(), dayofyear, [-1|1|0])) # doctest: +SKIP time.struct_time(tm_year=2015, tm_mon=7, tm_mday=28, tm_hour=0, tm_min=0, tm_sec=0, tm_wday=1, tm_yday=209, tm_isdst=-1) The tm_isdst flag of the result is set according to the dst() method: `tzinfo` is None or dst() returns None, tm_isdst is set to -1; else if dst() returns a non-zero value, tm_isdst is set to 1; else tm_isdst is set to 0. :return: A :py:class:`time.struct_time` such as returned by time.localtime(). :rtype: :py:class:`time.struct_time`
3.350388
3.291408
1.01792
first_day_of_year = self.firstdayofyear()
days = (self - first_day_of_year).days
offset = first_day_of_week - first_day_of_year.weekday()
if offset < 0:
    offset += 7

if days < offset:
    return 0

return int((days - offset) / 7 + 1)
def weekofyear(self, first_day_of_week=SATURDAY)
weekofyear(first_day_of_week=SATURDAY) :param first_day_of_week: One of the :py:data:`khayyam.SATURDAY`, :py:data:`khayyam.SUNDAY`, :py:data:`khayyam.MONDAY`, :py:data:`khayyam.TUESDAY`, :py:data:`khayyam.WEDNESDAY`, :py:data:`khayyam.THURSDAY` or :py:data:`khayyam.FRIDAY` :return: The week number of the year. :rtype: int
2.342079
2.888104
0.81094
return cls(datetime.fromtimestamp(timestamp, tz=tz))
def fromtimestamp(cls, timestamp, tz=None)
Creates a new :py:class:`khayyam.JalaliDatetime` instance from the given posix timestamp. If optional argument tz is :py:obj:`None` or not specified, the timestamp is converted to the platform's local date and time, and the returned datetime object is naive. Else tz must be an instance of a class :py:class:`datetime.tzinfo` subclass, and the timestamp is converted to tz's time zone. In this case the result is equivalent to `tz.fromutc(JalaliDatetime.utcfromtimestamp(timestamp).replace(tzinfo=tz))`. This method may raise `ValueError`, if the timestamp is out of the range of values supported by the platform C localtime() or gmtime() functions. It's common for this to be restricted to years in 1970 through 2038. Note that on non-POSIX systems that include leap seconds in their notion of a timestamp, leap seconds are ignored by fromtimestamp(), and then it's possible to have two timestamps differing by a second that yield identical datetime objects. See also :py:class:`khayyam.JalaliDatetime.utcfromtimestamp`. .. testsetup:: api-datetime-fromtimestamp import khayyam from khayyam import JalaliDatetime .. doctest:: api-datetime-fromtimestamp >>> JalaliDatetime.fromtimestamp(1313132131.21232) khayyam.JalaliDatetime(1390, 5, 21, 11, 25, 31, 212320, Jomeh) :param timestamp: float the posix timestamp, i.e 1014324234.23423423. :param tz: :py:class:`datetime.tzinfo` The optional timezone to get local date & time from the given timestamp. :return: The local date and time corresponding to the POSIX timestamp, such as is returned by :py:func:`time.time()`. :rtype: :py:class:`khayyam.JalaliDatetime`
5.070074
10.060879
0.503939
if isinstance(date, (JalaliDatetime, khayyam.JalaliDate)):
    date = date.todate()
return cls(datetime.combine(date, _time))
def combine(cls, date, _time)
Return a new jalali datetime object whose date members are equal to the given date object's, and whose _time and tzinfo members are equal to the given _time object's. For any datetime object d, d == datetime.combine(d.date(), d.timetz()). If date is a datetime object, its _time and tzinfo members are ignored. :param date: :py:class:`khayyam.JalaliDate` the date object to combine. :param _time: :py:class:`datetime.time` the time object to combine. :return: the combined jalali date & time object. :rtype: :py:class:`khayyam.JalaliDatetime`
6.559509
4.485206
1.462477
arr = get_gregorian_date_from_julian_day(self.tojulianday())
return datetime(
    int(arr[0]), int(arr[1]), int(arr[2]),
    self.hour, self.minute, self.second, self.microsecond,
    self.tzinfo
)
def todatetime(self)
Converts the current instance to the python builtins :py:class:`datetime.datetime` instance. :return: the new :py:class:`datetime.datetime` instance representing the current date and time in gregorian calendar. :rtype: :py:class:`datetime.datetime`
3.50139
3.761585
0.930828
return khayyam.JalaliDate(self.year, self.month, self.day)
def date(self)
Return date object with same year, month and day. :rtype: :py:class:`khayyam.JalaliDate`
7.323632
3.343676
2.190294
year, month, day = self._validate(
    year if year else self.year,
    month if month else self.month,
    day if day else self.day
)

result = JalaliDatetime(
    year, month, day,
    self.hour if hour is None else hour,
    self.minute if minute is None else minute,
    self.second if second is None else second,
    self.microsecond if microsecond is None else microsecond,
    tzinfo if tzinfo != self.tzinfo else self.tzinfo
)
return result
def replace(self, year=None, month=None, day=None, hour=None, minute=None, second=None, microsecond=None, tzinfo=None)
Return a :py:class:`khayyam.JalaliDatetime` instance with the same attributes, except for those attributes given new values by whichever keyword arguments are specified. Note that tzinfo=None can be specified to create a naive datetime from an aware datetime with no conversion of date and time data, without adjusting the date and time based on tzinfo. :param year: int :param month: int :param day: int :param hour: int :param minute: int :param second: int :param microsecond: int :param tzinfo: :py:class:`datetime.tzinfo` :rtype: :py:class:`khayyam.JalaliDatetime`
2.012882
2.221555
0.906069
if self.tzinfo is tz:
    return self
if self.tzinfo:
    utc = self - self.utcoffset()
else:
    utc = self
return tz.fromutc(utc.replace(tzinfo=tz))
def astimezone(self, tz)
Return a :py:class:`khayyam.JalaliDatetime` object with new :py:meth:`khayyam.JalaliDatetime.tzinfo` attribute tz, adjusting the date and time data so the result is the same UTC time as self, but in *tz*‘s local time. *tz* must be an instance of a :py:class:`datetime.tzinfo` subclass, and its :py:meth:`datetime.tzinfo.utcoffset()` and :py:meth:`datetime.tzinfo.dst()` methods must not return :py:obj:`None`. *self* must be aware (`self.tzinfo` must not be `None`, and `self.utcoffset()` must not return `None`). If `self.tzinfo` is `tz`, `self.astimezone(tz)` is equal to `self`: no adjustment of date or time data is performed. Else the result is local time in time zone `tz`, representing the same UTC time as `self`: after `astz = dt.astimezone(tz), astz - astz.utcoffset()` will usually have the same date and time data as `dt - dt.utcoffset()`. The discussion of class :py:class:`datetime.tzinfo` explains the cases at Daylight Saving Time transition boundaries where this cannot be achieved (an issue only if `tz` models both standard and daylight time). If you merely want to attach a time zone object `tz` to a datetime dt without adjustment of date and time data, use `dt.replace(tzinfo=tz)`. If you merely want to remove the time zone object from an aware datetime dt without conversion of date and time data, use `dt.replace(tzinfo=None)`. Note that the default :py:meth:`datetime.tzinfo.fromutc()` method can be overridden in a :py:class:`datetime.tzinfo` subclass to affect the result returned by :py:meth:`khayyam.JalaliDatetime.astimezone()`. Ignoring error cases, :py:meth:`khayyam.JalaliDatetime.astimezone()` acts like: .. code-block:: python :emphasize-lines: 3,5 def astimezone(self, tz): # doctest: +SKIP if self.tzinfo is tz: return self if self.tzinfo: utc = self - self.utcoffset() else: utc = self return tz.fromutc(utc.replace(tzinfo=tz)) :param tz: :py:class:`datetime.tzinfo` :rtype: :py:class:`khayyam.JalaliDatetime`
2.810619
1.690543
1.662554
res = self.hour
if res > 12:
    res -= 12
elif res == 0:
    res = 12
return res
def hour12(self)
Return the hour value between 1 and 12. Use :py:meth:`khayyam.JalaliDatetime.ampm()` or :py:meth:`khayyam.JalaliDatetime.ampmascii()` to determine ante meridiem or post meridiem. :rtype: int
3.106803
3.032104
1.024636
if signal.ndim > 1:
    raise ValueError("Array of rank > 1 not supported yet")
if order > signal.size:
    raise ValueError("Input signal must have a length >= lpc order")

if order > 0:
    p = order + 1
    r = np.zeros(p, 'float32')
    # Number of non zero values in autocorrelation one needs for p LPC
    # coefficients
    nx = np.min([p, signal.size])
    x = np.correlate(signal, signal, 'full')
    r[:nx] = x[signal.size - 1:signal.size + order]
    phi = np.dot(sp.linalg.inv(sp.linalg.toeplitz(r[:-1])), -r[1:])
    return np.concatenate(([1.], phi))
else:
    return np.ones(1, dtype='float32')
def lpc_ref(signal, order)
Compute the Linear Prediction Coefficients.

Return the order + 1 LPC coefficients for the signal. c = lpc(x, k) will find the k+1 coefficients of a k order linear filter:

    xp[n] = -c[1] * x[n-1] - ... - c[k] * x[n-k]

such that the sum of the squared error e[i] = xp[i] - x[i] is minimized.

Parameters
----------
signal : array_like
    Input signal.
order : int
    LPC order (the output will have order + 1 items).

Notes
-----
This is just for reference, as it is using the direct inversion of the Toeplitz matrix, which is really slow.
4.233555
3.968893
1.066684
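A hedged sanity check of the reference implementation, assuming numpy and scipy are imported as np and sp (as the snippet's own calls imply). For an AR(1) process x[n] = 0.9*x[n-1] + noise, the order-1 LPC should come out near [1, -0.9]:

import numpy as np
import scipy as sp
import scipy.linalg  # the snippet uses sp.linalg

rng = np.random.default_rng(0)
x = np.zeros(4096, dtype='float32')
e = rng.standard_normal(4096).astype('float32')
for n in range(1, 4096):
    x[n] = 0.9 * x[n - 1] + e[n]

print(lpc_ref(x, 1))  # approximately [1.0, -0.9]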
r = np.atleast_1d(r)
if r.ndim > 1:
    raise ValueError("Only rank 1 are supported for now.")

n = r.size
if n < 1:
    raise ValueError("Cannot operate on empty array !")
elif order > n - 1:
    raise ValueError("Order should be <= size-1")

if not np.isreal(r[0]):
    raise ValueError("First item of input must be real.")
elif not np.isfinite(1 / r[0]):
    raise ValueError("First item should be != 0")

# Estimated coefficients
a = np.empty(order + 1, 'float32')
# temporary array
t = np.empty(order + 1, 'float32')
# Reflection coefficients
k = np.empty(order, 'float32')

a[0] = 1.
e = r[0]

for i in range(1, order + 1):
    acc = r[i]
    for j in range(1, i):
        acc += a[j] * r[i - j]
    k[i - 1] = -acc / e
    a[i] = k[i - 1]

    for j in range(order):
        t[j] = a[j]

    for j in range(1, i):
        a[j] += k[i - 1] * np.conj(t[i - j])

    e *= 1 - k[i - 1] * np.conj(k[i - 1])

return a, e, k
def levinson_1d(r, order)
Levinson-Durbin recursion, to efficiently solve symmetric linear systems with Toeplitz structure.

Parameters
----------
r : array-like
    Input array to invert (since the matrix is symmetric Toeplitz, the corresponding pxp matrix is defined by p items only). Generally the autocorrelation of the signal for linear prediction coefficients estimation. The first item must be a non zero real.

Notes
-----
This implementation is in python, hence unsuitable for any serious computation. Use it for educational and reference purpose only.

Levinson is a well-known algorithm to solve the Hermitian Toeplitz equation (writing x* for the complex conjugate of x):

    [ R[0]    R[1]*   ...  R[p-1]* ]   [ a[1] ]     [ -R[1] ]
    [ R[1]    R[0]    ...    :     ] * [  :   ]  =  [   :   ]
    [   :       :     ...  R[1]*   ]   [  :   ]     [   :   ]
    [ R[p-1]  R[p-2]  ...  R[0]    ]   [ a[p] ]     [ -R[p] ]

Using the special symmetry in the matrix, the inversion can be done in O(p^2) instead of O(p^3).
2.647717
2.670364
0.991519
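A small sketch checking the recursion against direct inversion, with hypothetical autocorrelation values and the same numpy/scipy assumptions as above:

import numpy as np
from scipy.linalg import toeplitz, solve

r = np.array([2.0, 1.0, 0.5, 0.25], dtype='float32')
a, e, k = levinson_1d(r, 3)
# levinson_1d solves toeplitz(r[:-1]) @ a[1:] == -r[1:].
direct = solve(toeplitz(r[:-1]), -r[1:])
print(a[1:], direct)  # the two solutions should agree closely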
if not np.isrealobj(x):
    raise ValueError("Complex input not supported yet")

maxlag = x.shape[axis]
nfft = int(2 ** nextpow2(2 * maxlag - 1))
if axis != -1:
    x = np.swapaxes(x, -1, axis)
a = _acorr_last_axis(x, nfft, maxlag)
if axis != -1:
    a = np.swapaxes(a, -1, axis)
return a
def acorr_lpc(x, axis=-1)
Compute autocorrelation of x along the given axis. This computes the biased autocorrelation estimator (divided by the size of the input signal). Notes ----- The reason why we do not use acorr directly is for speed issues.
2.748014
3.015903
0.911174
n = signal.shape[axis]
if order > n:
    raise ValueError("Input signal must have length >= order")

r = acorr_lpc(signal, axis)
return levinson_1d(r, order)
def lpc(signal, order, axis=-1)
Compute the Linear Prediction Coefficients.

Return the order + 1 LPC coefficients for the signal. c = lpc(x, k) will find the k+1 coefficients of a k order linear filter:

    xp[n] = -c[1] * x[n-1] - ... - c[k] * x[n-k]

such that the sum of the squared error e[i] = xp[i] - x[i] is minimized.

Parameters
----------
signal : array_like
    Input signal.
order : int
    LPC order (the output will have order + 1 items).

Returns
-------
a : array-like
    The solution of the inversion.
e : array-like
    The prediction error.
k : array-like
    Reflection coefficients.

Notes
-----
This uses Levinson-Durbin recursion for the autocorrelation matrix inversion, and fft for the autocorrelation computation. For small order, particularly if order << signal size, direct computation of the autocorrelation is faster: use levinson and correlate in this case.
5.300431
6.36873
0.832259
num_cores = int((3 * cpu_count()) / 4)

segments = set()
for x in path_mapping:
    segments.update(x)

if multiprocessing:
    cache = generate_cache_mp(segments, analysis_function, num_cores, call_back, stop_check)
    asim = calculate_distances_mp(path_mapping, cache, distance_function, num_cores, call_back, stop_check)
else:
    cache = generate_cache_th(segments, analysis_function, num_cores, call_back, stop_check)
    asim = calculate_distances_th(path_mapping, cache, distance_function, num_cores, call_back, stop_check)

return asim
def acoustic_similarity_mapping(path_mapping, analysis_function, distance_function, stop_check=None, call_back=None, multiprocessing=True)
Takes in an explicit mapping of full paths to .wav files to have acoustic similarity computed.

Parameters
----------
path_mapping : iterable of iterables
    Explicit mapping of full paths of .wav files, in the form of a list of tuples to be compared.

Returns
-------
list
    Returns a list of tuples corresponding to the `path_mapping` input, with a new final element in the tuple being the similarity/distance score for that mapping.
2.487894
2.629588
0.946116
num_cores = int((3 * cpu_count()) / 4)

segments = set()
for x in path_mapping:
    segments.update(x)

if multiprocessing:
    cache = generate_cache_mp(segments, analysis_function, num_cores, call_back, stop_check)
    asim = calculate_axb_ratio_mp(path_mapping, cache, distance_function, num_cores, call_back, stop_check)
else:
    cache = generate_cache_th(segments, analysis_function, num_cores, call_back, stop_check)
    asim = calculate_axb_ratio_th(path_mapping, cache, distance_function, num_cores, call_back, stop_check)

return asim
def axb_mapping(path_mapping, analysis_function, distance_function, stop_check=None, call_back=None, multiprocessing=True)
Takes in an explicit mapping of full paths to .wav files to have acoustic similarity computed.

Parameters
----------
path_mapping : iterable of iterables
    Explicit mapping of full paths of .wav files, in the form of a list of tuples to be compared.

Returns
-------
list
    Returns a list of tuples corresponding to the `path_mapping` input, with a new final element in the tuple being the similarity/distance score for that mapping.
2.683901
2.79973
0.958629
files = []
if call_back is not None:
    call_back('Mapping directories...')
    call_back(0, len(directories))
    cur = 0

for d in directories:
    if not os.path.isdir(d):
        continue
    if stop_check is not None and stop_check():
        return
    if call_back is not None:
        cur += 1
        if cur % 3 == 0:
            call_back(cur)
    files += [os.path.join(d, x) for x in os.listdir(d) if x.lower().endswith('.wav')]

if len(files) == 0:
    raise ConchError("The directories specified do not contain any wav files")

if call_back is not None:
    call_back('Mapping directories...')
    call_back(0, len(files) * len(files))
    cur = 0

path_mapping = list()
for x in files:
    for y in files:
        if stop_check is not None and stop_check():
            return
        if call_back is not None:
            cur += 1
            if cur % 20 == 0:
                call_back(cur)
        if not x.lower().endswith('.wav'):
            continue
        if not y.lower().endswith('.wav'):
            continue
        if x == y:
            continue
        path_mapping.append((x, y))

result = acoustic_similarity_mapping(path_mapping, analysis_function, distance_function,
                                     stop_check, call_back, multiprocessing)
return result
def acoustic_similarity_directories(directories, analysis_function, distance_function, stop_check=None, call_back=None, multiprocessing=True)
Analyze many directories. Parameters ---------- directories : list of str List of fully specified paths to the directories to be analyzed
2.059234
2.176902
0.945947
if not isinstance(rep_one, np.ndarray):
    rep_one = rep_one.to_array()
if not isinstance(rep_two, np.ndarray):
    rep_two = rep_two.to_array()
assert (rep_one.shape[1] == rep_two.shape[1])

distMat = generate_distance_matrix(rep_one, rep_two)
return regularDTW(distMat, norm=norm)
def dtw_distance(rep_one, rep_two, norm=True)
Computes the distance between two representations with the same number of filters using Dynamic Time Warping. Parameters ---------- rep_one : 2D array First representation to compare. First dimension is time in frames or samples and second dimension is the features. rep_two : 2D array Second representation to compare. First dimension is time in frames or samples and second dimension is the features. Returns ------- float Distance of dynamically time warping `rep_one` to `rep_two`.
2.217274
2.662021
0.832929
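A hedged end-to-end sketch, assuming this record and the two that follow (generate_distance_matrix, regularDTW) live in one module with the bare-name imports their bodies imply:

import operator
import numpy as np
from numpy import zeros, ones  # the snippets use bare zeros/ones
from scipy.spatial.distance import euclidean

a = np.array([[0.0], [1.0], [2.0], [3.0]])  # a short one-feature track
b = np.array([[0.0], [0.9], [2.1], [2.9]])  # a slightly warped version
print(dtw_distance(a, b))  # small normalized distance
print(dtw_distance(a, a))  # 0.0 for identical inputs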
if weights is None:
    weights = ones((source.shape[1], 1))

sLen = source.shape[0]
tLen = target.shape[0]
distMat = zeros((sLen, tLen))
for i in range(sLen):
    for j in range(tLen):
        distMat[i, j] = euclidean(source[i, :], target[j, :])
return distMat
def generate_distance_matrix(source, target, weights=None)
Generates a local distance matrix for use in dynamic time warping. Parameters ---------- source : 2D array Source matrix with features in the second dimension. target : 2D array Target matrix with features in the second dimension. Returns ------- 2D array Local distance matrix.
1.917235
1.998981
0.959106
sLen, tLen = distMat.shape
totalDistance = zeros((sLen, tLen))
totalDistance[0:sLen, 0:tLen] = distMat

minDirection = zeros((sLen, tLen))

for i in range(1, sLen):
    totalDistance[i, 0] = totalDistance[i, 0] + totalDistance[i - 1, 0]

for j in range(1, tLen):
    totalDistance[0, j] = totalDistance[0, j] + totalDistance[0, j - 1]

for i in range(1, sLen):
    for j in range(1, tLen):
        minDirection[i, j], totalDistance[i, j] = min(
            enumerate([totalDistance[i - 1, j - 1] + 2 * totalDistance[i, j],
                       totalDistance[i - 1, j] + totalDistance[i, j],
                       totalDistance[i, j - 1] + totalDistance[i, j]]),
            key=operator.itemgetter(1))

if norm:
    return totalDistance[sLen - 1, tLen - 1] / (sLen + tLen)
return totalDistance[sLen - 1, tLen - 1]
def regularDTW(distMat, norm=True)
Use a local distance matrix to perform dynamic time warping. Parameters ---------- distMat : 2D array Local distance matrix. Returns ------- float Total unweighted distance of the optimal path through the local distance matrix.
1.860937
1.912826
0.972873
oldsr, sig = wavfile.read(path)
try:
    sig = sig[:, 0]
except IndexError:
    pass

if False and sr != oldsr:
    t = len(sig) / oldsr
    numsamp = int(t * sr)
    proc = resample(sig, numsamp)
else:
    proc = sig
    sr = oldsr
# proc = proc / 32768
if alpha is not None and alpha != 0:
    proc = lfilter([1., -alpha], 1, proc)
return sr, proc
def preproc(path, sr=16000, alpha=0.95)
Preprocess a .wav file for later processing. Currently assumes a 16-bit PCM input. Only returns left channel of stereo files.

Parameters
----------
path : str
    Full path to .wav file to load.
sr : int, optional
    Sampling rate to resample at, if specified.
alpha : float, optional
    Alpha for preemphasis, defaults to 0.95.

Returns
-------
int
    Sampling rate.
array
    Processed PCM.
3.742086
3.930345
0.952101
N_x = len(x)
N_b = len(b)

# Determine the FFT length to use:
if len(n):
    # Use the specified FFT length (rounded up to the nearest
    # power of 2), provided that it is no less than the filter
    # length:
    n = n[0]
    if n != int(n) or n <= 0:
        raise ValueError('n must be a nonnegative integer')
    if n < N_b:
        n = N_b
    N_fft = 2 ** nextpow2(n)
else:
    if N_x > N_b:
        # When the filter length is smaller than the signal,
        # choose the FFT length and block size that minimize the
        # FLOPS cost. Since the cost for a length-N FFT is
        # (N/2)*log2(N) and the filtering operation of each block
        # involves 2 FFT operations and N multiplications, the
        # cost of the overlap-add method for 1 length-N block is
        # N*(1+log2(N)). For the sake of efficiency, only FFT
        # lengths that are powers of 2 are considered:
        N = 2 ** np.arange(np.ceil(np.log2(N_b)), np.floor(np.log2(N_x)))
        cost = np.ceil(N_x / (N - N_b + 1)) * N * (np.log2(N) + 1)
        if len(cost) > 0:
            N_fft = N[np.argmin(cost)]
        else:
            N_fft = 2 ** nextpow2(N_b + N_x - 1)
    else:
        # When the filter length is at least as long as the signal,
        # filter the signal using a single block:
        N_fft = 2 ** nextpow2(N_b + N_x - 1)

N_fft = int(N_fft)

# Compute the block length:
L = int(N_fft - N_b + 1)

# Compute the transform of the filter:
H = fft(b, N_fft)

y = np.zeros(N_x, np.float32)
i = 0
while i <= N_x:
    il = np.min([i + L, N_x])
    k = np.min([i + N_fft, N_x])
    yt = ifft(fft(x[i:il], N_fft) * H, N_fft)  # Overlap..
    y[i:k] = y[i:k] + yt[:k - i]               # and add
    i += L
return y
def fftfilt(b, x, *n)
Filter the signal x with the FIR filter described by the coefficients in b using the overlap-add method. If the FFT length n is not specified, it and the overlap-add block length are selected so as to minimize the computational cost of the filtering operation.
3.161326
2.99163
1.056723
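A hedged check of the overlap-add filter against direct convolution, assuming numpy plus bare fft/ifft names and a nextpow2 helper, as the snippet's calls imply:

import numpy as np
from numpy.fft import fft, ifft

def nextpow2(n):
    # Assumed helper: the smallest exponent e with 2**e >= n.
    return int(np.ceil(np.log2(n)))

b = np.array([0.25, 0.5, 0.25])          # small FIR smoother
x = np.random.randn(1000).astype(np.float32)
y = fftfilt(b, x)                        # overlap-add result
y_ref = np.convolve(x, b)[:len(x)]       # direct reference, truncated to len(x)
print(np.max(np.abs(y - y_ref)) < 1e-3)  # True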
ncep = spec.shape[0]
dctm = np.zeros((ncep, ncep))
for i in range(ncep):
    dctm[i, :] = np.cos(i * np.arange(1, 2 * ncep, 2) / (2 * ncep) * np.pi) * np.sqrt(2 / ncep)
dctm *= 0.230258509299405
cep = np.dot(dctm, (10 * np.log10(spec + np.spacing(1))))
return cep
def dct_spectrum(spec)
Convert a spectrum into a cepstrum via type-II DCT (following HTK).

Parameters
----------
spec : array
    Spectrum to perform a DCT on.

Returns
-------
array
    Cepstrum of the input spectrum.
2.877338
2.865686
1.004066
min_mel = freq_to_mel(min_freq)
max_mel = freq_to_mel(max_freq)
mel_points = np.linspace(min_mel, max_mel, num_filters + 2)
bin_freqs = mel_to_freq(mel_points)
# bins = round((nfft - 1) * bin_freqs / sr)

fftfreqs = np.arange(int(nfft / 2 + 1)) / nfft * sr

fbank = np.zeros((num_filters, int(nfft / 2 + 1)))
for i in range(num_filters):
    fs = bin_freqs[i + np.arange(3)]
    fs = fs[1] + (fs - fs[1])
    loslope = (fftfreqs - fs[0]) / (fs[1] - fs[0])
    highslope = (fs[2] - fftfreqs) / (fs[2] - fs[1])
    fbank[i, :] = np.maximum(np.zeros(loslope.shape),
                             np.minimum(loslope, highslope))
return fbank.transpose()
def construct_filterbank(num_filters, nfft, sr, min_freq, max_freq)
Constructs a mel-frequency filter bank. Parameters ---------- nfft : int Number of points in the FFT. Returns ------- array Filter bank to multiply an FFT spectrum to create a mel-frequency spectrum.
2.506494
2.640347
0.949305
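A hedged sketch of building and applying the bank. The freq_to_mel / mel_to_freq helpers are not shown in the record, so the common O'Shaughnessy formulas are assumed as stand-ins:

import numpy as np

def freq_to_mel(f):
    # Assumed mel mapping; the record's real helper may differ.
    return 2595 * np.log10(1 + np.asarray(f) / 700.0)

def mel_to_freq(m):
    return 700 * (10 ** (np.asarray(m) / 2595.0) - 1)

fbank = construct_filterbank(num_filters=26, nfft=512, sr=16000,
                             min_freq=0, max_freq=8000)
spectrum = np.abs(np.fft.rfft(np.random.randn(512))) ** 2  # toy power spectrum
mel_spectrum = np.dot(spectrum, fbank)
print(fbank.shape, mel_spectrum.shape)  # (257, 26) (26,)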
if self.is_initialized:
    view = self.item_view
    upper_limit = view.iterable_index + view.iterable_fetch_size - view.iterable_prefetch
    lower_limit = max(0, view.iterable_index + view.iterable_prefetch)
    offset = int(view.iterable_fetch_size / 2.0)

    upper_visible_row = view.visible_rect[2]
    lower_visible_row = view.visible_rect[0]
    print("Visible rect = %s" % view.visible_rect)

    if upper_visible_row >= upper_limit:
        next_index = max(0, upper_visible_row - offset)  # Center on current row
        # Going up works...
        if next_index > view.iterable_index:
            print("Auto prefetch upper limit %s!" % upper_limit)
            view.iterable_index = next_index
            #view.model().reset()
    # But down doesn't?
    elif view.iterable_index > 0 and lower_visible_row < lower_limit:
        next_index = max(0, lower_visible_row - offset)  # Center on current row
        # Going down works
        if next_index < view.iterable_index:
            print("Auto prefetch lower limit=%s, iterable=%s, setting next=%s!" % (lower_limit, view.iterable_index, next_index))
            view.iterable_index = next_index
def _prefetch_items(self,change)
When the current_row in the model changes (whether from scrolling or set by the application), make sure the results are loaded!
4.405008
4.284744
1.028068
# Seek to offset
effective_offset = max(0, self.item_view.iterable_index)
for i, item in enumerate(self.iterable):
    if i < effective_offset:
        continue
    elif i >= (effective_offset + self.item_view.iterable_fetch_size):
        return
    yield item
def windowed_iterable(self)
An iterator that yields only the items within the current window.
6.000351
6.163778
0.973486
old_items = self.items[:]  # if self._dirty else []
old_iter_data = self._iter_data  # if self._dirty else {}
iterable = self.windowed_iterable
pattern_nodes = self.pattern_nodes
new_iter_data = sortedmap()
new_items = []

if iterable is not None and len(pattern_nodes) > 0:
    for loop_index, loop_item in enumerate(iterable):
        iteration = old_iter_data.get(loop_item)
        if iteration is not None:
            new_iter_data[loop_item] = iteration
            new_items.append(iteration)
            old_items.remove(iteration)
            continue
        iteration = []
        new_iter_data[loop_item] = iteration
        new_items.append(iteration)
        for nodes, key, f_locals in pattern_nodes:
            with new_scope(key, f_locals) as f_locals:
                f_locals['loop_index'] = loop_index
                f_locals['loop_item'] = loop_item
                for node in nodes:
                    child = node(None)
                    if isinstance(child, list):
                        iteration.extend(child)
                    else:
                        iteration.append(child)

# Add to old items list
#self.old_items.extend(old_items)

#if self._dirty:
for iteration in old_items:
    for old in iteration:
        if not old.is_destroyed:
            old.destroy()

if len(new_items) > 0:
    expanded = []
    recursive_expand(sum(new_items, []), expanded)
    self.parent.insert_children(self, expanded)

self.items = new_items  # if self._dirty else new_items+old_items
self._iter_data = new_iter_data
def refresh_items(self)
Refresh the items of the pattern. This method destroys the old items and creates and initializes the new items.
3.454835
3.383116
1.021199
widget = QDoubleSpinBox(self.parent_widget())
widget.setKeyboardTracking(False)
self.widget = widget
def create_widget(self)
Create the underlying QDoubleSpinBox widget.
5.480567
3.39589
1.613882
super(Block, self).initialize()
if self.block:
    self.block.parent.insert_children(self.block, self.children)
def initialize(self)
A reimplemented initializer. This method will add the include objects to the parent of the include and ensure that they are initialized.
7.256163
6.596856
1.099943
if self.is_initialized:
    if change['type'] == 'update':
        old_block = change['oldvalue']
        old_block.parent.remove_children(old_block, self.children)
        new_block = change['value']
        new_block.parent.insert_children(new_block, self.children)
def _observe_block(self, change)
A change handler for the 'objects' list of the Include. If the object is initialized, objects which are removed will be unparented and objects which are added will be reparented. Old objects will be destroyed if the 'destroy_old' flag is True.
3.762754
3.7635
0.999802
d = self.declaration
self.index = self.view.model.index(d.row, d.column)

if self.delegate:
    self._refresh_count += 1
    timed_call(self._loading_interval, self._update_delegate)
def _update_index(self)
Update the reference to the index within the table
9.083059
7.591645
1.196455
self._refresh_count -= 1
if self._refresh_count != 0:
    return
try:
    delegate = self.delegate
    if not self.is_visible():
        return

    # The table destroys when it goes out of view
    # so we always have to make a new one
    delegate.create_widget()
    delegate.init_widget()

    # Set the index widget
    self.view.widget.setIndexWidget(self.index, delegate.widget)
except RuntimeError:
    pass
def _update_delegate(self)
Update the delegate cell widget. This is deferred so it does not get called until the user is done scrolling.
6.637329
5.979093
1.11009
index = self.index
if index:
    self.view.model.dataChanged.emit(index, index)
def data_changed(self, change)
Notify the model that data has changed in this cell!
5.860439
4.594996
1.275396
def GetHandle(self):
    ''' Returns the identifier of the GUI widget. It must be an integer '''
    win_id = self.winId()  # this returns either an int or a voidptr
    if "%s" % type(win_id) == "<type 'PyCObject'>":  # PySide
        ### with PySide, self.winId() does not return an integer
        if sys.platform == "win32":
            ## Be careful, this hack is py27 specific
            ## does not work with python31 or higher
            ## since the PyCObject api was changed
            import ctypes
            ctypes.pythonapi.PyCObject_AsVoidPtr.restype = ctypes.c_void_p
            ctypes.pythonapi.PyCObject_AsVoidPtr.argtypes = [ctypes.py_object]
            win_id = ctypes.pythonapi.PyCObject_AsVoidPtr(win_id)
    elif type(win_id) is not int:  # PyQt4 or 5
        ## below integer cast may be required because self.winId() can
        ## return a sip.voidptr according to the PyQt version used
        ## as well as the python version
        win_id = int(win_id)
    return win_id
Returns the identifier of the GUI widget. It must be an integer
null
null
null
def _loop_topo(self, topologyType, topologicalEntity=None, topologyTypeToAvoid=None):
    '''
    this could be a faces generator for a python TopoShape class
    that way you can just do:
    for face in srf.faces:
        processFace(face)
    '''
    topoTypes = {TopAbs_VERTEX: TopoDS_Vertex,
                 TopAbs_EDGE: TopoDS_Edge,
                 TopAbs_FACE: TopoDS_Face,
                 TopAbs_WIRE: TopoDS_Wire,
                 TopAbs_SHELL: TopoDS_Shell,
                 TopAbs_SOLID: TopoDS_Solid,
                 TopAbs_COMPOUND: TopoDS_Compound,
                 TopAbs_COMPSOLID: TopoDS_CompSolid}

    assert topologyType in topoTypes.keys(), '%s not one of %s' % (topologyType, topoTypes.keys())
    self.topExp = TopExp_Explorer()
    # use self.myShape if nothing is specified
    if topologicalEntity is None and topologyTypeToAvoid is None:
        self.topExp.Init(self.myShape, topologyType)
    elif topologicalEntity is None and topologyTypeToAvoid is not None:
        self.topExp.Init(self.myShape, topologyType, topologyTypeToAvoid)
    elif topologyTypeToAvoid is None:
        self.topExp.Init(topologicalEntity, topologyType)
    elif topologyTypeToAvoid:
        self.topExp.Init(topologicalEntity, topologyType, topologyTypeToAvoid)

    seq = []
    hashes = []  # list that stores hashes to avoid redundancy
    occ_seq = TopTools_ListOfShape()
    while self.topExp.More():
        current_item = self.topExp.Current()
        current_item_hash = current_item.__hash__()
        if not current_item_hash in hashes:
            hashes.append(current_item_hash)
            occ_seq.Append(current_item)
        self.topExp.Next()

    # Convert occ_seq to python list
    occ_iterator = TopTools_ListIteratorOfListOfShape(occ_seq)
    while occ_iterator.More():
        topo_to_add = self.topoFactory[topologyType](occ_iterator.Value())
        seq.append(topo_to_add)
        occ_iterator.Next()

    if self.ignore_orientation:
        # filter out those entities that share the same TShape
        # but do *not* share the same orientation
        filter_orientation_seq = []
        for i in seq:
            _present = False
            for j in filter_orientation_seq:
                if i.IsSame(j):
                    _present = True
                    break
            if _present is False:
                filter_orientation_seq.append(i)
        return filter_orientation_seq
    else:
        return iter(seq)
This could be a faces generator for a python TopoShape class; that way you can just do: for face in srf.faces: processFace(face)
null
null
null
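A hedged usage sketch of the generator above, assuming the surrounding class (called Topo here) is constructed from a TopoDS shape; the import path follows recent pythonocc-core layouts and may differ on older versions:

# Assumed usage; `Topo`, `shape` and `processFace` are placeholders.
from OCC.Core.TopAbs import TopAbs_FACE  # older pythonocc: from OCC.TopAbs import ...

topo = Topo(shape)
for face in topo._loop_topo(TopAbs_FACE):
    # each item is a TopoDS_Face wrapped by self.topoFactory
    processFace(face)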
def _map_shapes_and_ancestors(self, topoTypeA, topoTypeB, topologicalEntity):
    '''
    Yield the ancestors of type `topoTypeB` for a `topologicalEntity`
    of type `topoTypeA`, using topexp_MapShapesAndAncestors.
    @param topoTypeA:
    @param topoTypeB:
    @param topologicalEntity:
    '''
    topo_set = set()
    _map = TopTools_IndexedDataMapOfShapeListOfShape()
    topexp_MapShapesAndAncestors(self.myShape, topoTypeA, topoTypeB, _map)
    results = _map.FindFromKey(topologicalEntity)
    if results.IsEmpty():
        yield None

    topology_iterator = TopTools_ListIteratorOfListOfShape(results)
    while topology_iterator.More():
        topo_entity = self.topoFactory[topoTypeB](topology_iterator.Value())

        # Only yield entities that are not already in the set,
        # to assure we're not returning entities several times.
        if topo_entity not in topo_set:
            if self.ignore_orientation:
                unique = True
                for i in topo_set:
                    if i.IsSame(topo_entity):
                        unique = False
                        break
                if unique:
                    yield topo_entity
            else:
                yield topo_entity

        topo_set.add(topo_entity)
        topology_iterator.Next()
Yield the ancestors of type topoTypeB for a topologicalEntity of type topoTypeA, using topexp_MapShapesAndAncestors. @param topoTypeA: @param topoTypeB: @param topologicalEntity:
null
null
null
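A hedged usage sketch: with topoTypeA=TopAbs_EDGE and topoTypeB=TopAbs_FACE, the generator yields every face of the shape that contains the given edge:

# Assumed usage; `topo` and `edge` are placeholders.
from OCC.Core.TopAbs import TopAbs_EDGE, TopAbs_FACE

for face in topo._map_shapes_and_ancestors(TopAbs_EDGE, TopAbs_FACE, edge):
    if face is not None:  # the generator yields None when no ancestor exists
        print(face)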
def _number_shapes_ancestors(self, topoTypeA, topoTypeB, topologicalEntity):
    '''
    Returns the number of ancestor shapes.

    If you want to know how many faces share a given edge:
    _number_shapes_ancestors(TopAbs_EDGE, TopAbs_FACE, edge)
    will return the number of faces that contain that edge.
    @param topoTypeA:
    @param topoTypeB:
    @param topologicalEntity:
    '''
    topo_set = set()
    _map = TopTools_IndexedDataMapOfShapeListOfShape()
    topexp_MapShapesAndAncestors(self.myShape, topoTypeA, topoTypeB, _map)
    results = _map.FindFromKey(topologicalEntity)
    if results.IsEmpty():
        return None

    topology_iterator = TopTools_ListIteratorOfListOfShape(results)
    while topology_iterator.More():
        topo_set.add(topology_iterator.Value())
        topology_iterator.Next()
    return len(topo_set)
Returns the number of ancestor shapes. If you want to know how many faces share a given edge: _number_shapes_ancestors(TopAbs_EDGE, TopAbs_FACE, edge) will return the number of faces that contain that edge. @param topoTypeA: @param topoTypeB: @param topologicalEntity:
null
null
null
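Correspondingly, a hedged counting example: a manifold interior edge is shared by exactly two faces, so the call below would return 2 for such an edge:

# Assumed usage; returns None when the entity has no ancestors in the map.
n_faces = topo._number_shapes_ancestors(TopAbs_EDGE, TopAbs_FACE, edge)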
def init_layout(self):
    for child in self.children():
        self.child_added(child)
    self.update_shape({})
Initialize the layout of the toolkit shape. This method is called during the bottom-up pass. This method should initialize the layout of the widget. The child widgets will be fully initialized and laid out when this is called.
null
null
null
super(QtKeyEvent, self).init_widget()
d = self.declaration
widget = self.widget
self._keyPressEvent = widget.keyPressEvent
self._keyReleaseEvent = widget.keyReleaseEvent
self.set_enabled(d.enabled)
self.set_keys(d.keys)
def init_widget(self)
The KeyEvent uses the parent_widget as its widget
3.616852
3.115149
1.161053
codes = {}
for key in keys:
    parts = [k.strip().lower() for k in key.split("+")]
    code = KEYS.get(parts[-1])
    modifier = Qt.KeyboardModifiers()
    if code is None:
        raise KeyError("Invalid key code '{}'".format(key))
    if len(parts) > 1:
        for mod in parts[:-1]:
            mod_code = MODIFIERS.get(mod)
            if mod_code is None:
                # Report the offending modifier name, not the None lookup
                raise KeyError("Invalid key modifier '{}'".format(mod))
            modifier |= mod_code
    if code not in codes:
        codes[code] = []
    codes[code].append(modifier)
self.codes = codes
def set_keys(self, keys)
Parse all the key codes and save them
2.91274
2.721281
1.070356
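To illustrate the parse, assuming KEYS maps 's' to Qt.Key_S and MODIFIERS maps 'ctrl'/'shift' to the Qt modifier flags (those tables are not shown in the source):

# Hedged illustration of the resulting `codes` structure.
handler.set_keys(["ctrl+shift+s", "f5"])
# handler.codes would then look like:
# {Qt.Key_S:  [Qt.ControlModifier | Qt.ShiftModifier],
#  Qt.Key_F5: [Qt.KeyboardModifiers()]}   # no modifiers for a bare key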
if change['name'] == 'items':
    self._update_visible_area()
super(TableView, self)._update_proxy(change)
def _update_proxy(self, change)
An observer which sends the state change to the proxy.
6.723823
5.689084
1.181882
features = self._features = self.declaration.features
if not features:
    return
if features & Feature.FocusTraversal:
    self.hook_focus_traversal()
if features & Feature.FocusEvents:
    self.hook_focus_events()
if features & Feature.DragEnabled:
    self.hook_drag()
if features & Feature.DropEnabled:
    self.hook_drop()

features = self._extra_features
if features & GraphicFeature.WheelEvent:
    self.hook_wheel()
if features & GraphicFeature.DrawEvent:
    self.hook_draw()
def _setup_features(self)
Set up the advanced widget feature handlers.
3.851343
3.471331
1.109472
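The feature tests above are plain bitmask checks: each hook fires only when its bit is set. A minimal self-contained sketch of the same idiom using Python's IntFlag; the real Feature/GraphicFeature enums come from the surrounding framework and are assumed, not shown:

# Minimal sketch of the bitmask idiom, not the real Feature enum.
from enum import IntFlag

class Feature(IntFlag):
    FocusTraversal = 0x1
    FocusEvents = 0x2
    DragEnabled = 0x4
    DropEnabled = 0x8

features = Feature.DragEnabled | Feature.DropEnabled
assert features & Feature.DropEnabled        # bit is set -> hook_drop()
assert not (features & Feature.FocusEvents)  # bit is clear -> skipped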
features = self._features
if not features:
    return
if features & Feature.FocusTraversal:
    self.unhook_focus_traversal()
if features & Feature.FocusEvents:
    self.unhook_focus_events()
if features & Feature.DragEnabled:
    self.unhook_drag()
if features & Feature.DropEnabled:
    self.unhook_drop()

features = self._extra_features
if features & GraphicFeature.WheelEvent:
    self.unhook_wheel()
if features & GraphicFeature.DrawEvent:
    self.unhook_draw()
def _teardown_features(self)
Tear down the advanced widget feature handlers.
3.293042
3.057235
1.077131
widget = self.focus_target()
if not widget.focusPolicy & Qt.TabFocus:
    return False
if not widget.isEnabled():
    return False
if not widget.isVisibleTo(widget.window()):
    return False
widget.setFocus(reason)
# Focus was set on the target widget, so report success
# per the documented return contract.
return True
def tab_focus_request(self, reason)
Handle a custom tab focus request.

This method is called when focus is being set on the proxy
as a result of a user-implemented focus traversal handler.
This can be reimplemented by subclasses as needed.

Parameters
----------
reason : Qt.FocusReason
    The reason value for the focus request.

Returns
-------
result : bool
    True if focus was set, False otherwise.
4.558114
5.214193
0.874174
widget = self.widget
widget.focusInEvent = self.focusInEvent
widget.focusOutEvent = self.focusOutEvent
def hook_focus_events(self)
Install the hooks for focus events. This method may be overridden by subclasses as needed.
3.129148
3.111303
1.005736
fd = focus_registry.focused_declaration()
if next_child:
    child = self.declaration.next_focus_child(fd)
    reason = Qt.TabFocusReason
else:
    child = self.declaration.previous_focus_child(fd)
    reason = Qt.BacktabFocusReason
if child is not None and child.proxy_is_active:
    return child.proxy.tab_focus_request(reason)
widget = self.widget
return type(widget).focusNextPrevChild(widget, next_child)
def focusNextPrevChild(self, next_child)
The default 'focusNextPrevChild' implementation.
5.1716
5.058308
1.022397
widget = self.widget
type(widget).focusInEvent(widget, event)
self.declaration.focus_gained()
def focusInEvent(self, event)
The default 'focusInEvent' implementation.
9.340414
7.699146
1.213175
widget = self.widget
type(widget).focusOutEvent(widget, event)
self.declaration.focus_lost()
def focusOutEvent(self, event)
The default 'focusOutEvent' implementation.
9.531964
8.012068
1.189701
widget = self.widget
widget.mousePressEvent = self.mousePressEvent
widget.mouseMoveEvent = self.mouseMoveEvent
widget.mouseReleaseEvent = self.mouseReleaseEvent
def hook_drag(self)
Install the hooks for drag operations.
2.410826
2.373378
1.015778
widget = self.widget
del widget.mousePressEvent
del widget.mouseMoveEvent
del widget.mouseReleaseEvent
def unhook_drag(self)
Remove the hooks for drag operations.
4.816456
4.638313
1.038407
if event.button() == Qt.LeftButton:
    self._drag_origin = event.pos()
widget = self.widget
type(widget).mousePressEvent(widget, event)
def mousePressEvent(self, event)
Handle the mouse press event for a drag operation.
4.534148
4.008402
1.131161
# The drag-distance check below is currently disabled:
# if event.buttons() & Qt.LeftButton and self._drag_origin is not None:
#     dist = (event.pos() - self._drag_origin).manhattanLength()
#     if dist >= QApplication.startDragDistance():
#         self.do_drag(event.widget())
#         self._drag_origin = None
#         return
widget = self.widget
type(widget).mouseMoveEvent(widget, event)
def mouseMoveEvent(self, event)
Handle the mouse move event for a drag operation.
4.192538
4.075589
1.028695
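For reference, the disabled block corresponds to Qt's conventional drag-start test. A hedged method-body reconstruction, assuming do_drag takes the source widget (QMouseEvent has no widget() accessor, so self.widget is used instead):

# Sketch of the standard Qt drag-distance test outlined by the disabled code.
from enaml.qt.QtCore import Qt
from enaml.qt.QtWidgets import QApplication

def mouseMoveEvent(self, event):
    if (event.buttons() & Qt.LeftButton
            and self._drag_origin is not None):
        dist = (event.pos() - self._drag_origin).manhattanLength()
        if dist >= QApplication.startDragDistance():
            self.do_drag(self.widget)  # assumption: pass the source widget
            self._drag_origin = None
            return
    widget = self.widget
    type(widget).mouseMoveEvent(widget, event)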
if event.button() == Qt.LeftButton:
    self._drag_origin = None
widget = self.widget
type(widget).mouseReleaseEvent(widget, event)
def mouseReleaseEvent(self, event)
Handle the mouse release event for the drag operation.
5.200396
4.645566
1.119432
widget = self.widget
widget.setAcceptDrops(True)
widget.dragEnterEvent = self.dragEnterEvent
widget.dragMoveEvent = self.dragMoveEvent
widget.dragLeaveEvent = self.dragLeaveEvent
widget.dropEvent = self.dropEvent
def hook_drop(self)
Install hooks for drop operations.
2.137535
2.065306
1.034973
widget = self.widget
widget.setAcceptDrops(False)
del widget.dragEnterEvent
del widget.dragMoveEvent
del widget.dragLeaveEvent
del widget.dropEvent
def unhook_drop(self)
Remove hooks for drop operations.
3.290944
3.185086
1.033236
drag_data = self.declaration.drag_start()
if drag_data is None:
    return
# `widget` is passed in as a parameter (see the signature below)
qdrag = QDrag(widget)
qdrag.setMimeData(drag_data.mime_data.q_data())
if drag_data.image is not None:
    qimg = get_cached_qimage(drag_data.image)
    qdrag.setPixmap(QPixmap.fromImage(qimg))
# else:
#     if __version_info__ < (5, ):
#         qdrag.setPixmap(QPixmap.grabWidget(self.widget))
#     else:
#         qdrag.setPixmap(widget.grab())
if drag_data.hotspot:
    qdrag.setHotSpot(QPoint(*drag_data.hotspot))
else:
    cursor_position = widget.mapFromGlobal(QCursor.pos())
    qdrag.setHotSpot(cursor_position)
default = Qt.DropAction(drag_data.default_drop_action)
supported = Qt.DropActions(drag_data.supported_actions)
qresult = qdrag.exec_(supported, default)
self.declaration.drag_end(drag_data, DropAction(int(qresult)))
def do_drag(self, widget)
Perform the drag operation for the widget.

Parameters
----------
widget: QWidget
    A reference to the viewport widget.
2.916127
2.950764
0.988261
self.declaration.draw(painter, options, widget)
def draw(self, painter, options, widget)
Handle the draw event for the widget.
8.23778
6.996234
1.177459
action = self._widget_action
if action is None and create:
    action = self._widget_action = QWidgetAction(None)
    action.setDefaultWidget(self.widget)
return action
def get_action(self, create=False)
Get the shared widget action for this widget.

This API is used to support widgets in tool bars and menus.

Parameters
----------
create : bool, optional
    Whether to create the action if it doesn't already exist.
    The default is False.

Returns
-------
result : QWidgetAction or None
    The cached widget action or None, depending on arguments.
4.198897
3.967196
1.058404
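A hedged usage sketch: the shared QWidgetAction lets an arbitrary widget be placed in a QToolBar or QMenu:

# Assumed usage; `proxy` is an instance of the class defining get_action(),
# `toolbar` is an existing QToolBar.
action = proxy.get_action(create=True)
toolbar.addAction(action)  # the toolbar now displays proxy.widget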
self.declaration.mouse_press_event(event)
super(QtGraphicsView, self).mousePressEvent(event)
def mousePressEvent(self, event)
Handle the mouse press event for a drag operation.
7.381222
6.344779
1.163354
self.declaration.mouse_move_event(event)
super(QtGraphicsView, self).mouseMoveEvent(event)
def mouseMoveEvent(self, event)
Handle the mouse move event for a drag operation.
7.277481
6.453633
1.127657
self.declaration.mouse_release_event(event)
super(QtGraphicsView, self).mouseReleaseEvent(event)
def mouseReleaseEvent(self, event)
Handle the mouse release event for the drag operation.
7.165968
6.131428
1.168728
viewport = None
if renderer == 'opengl':
    from enaml.qt.QtWidgets import QOpenGLWidget
    viewport = QOpenGLWidget()
elif renderer == 'default':
    try:
        from enaml.qt.QtWidgets import QOpenGLWidget
        viewport = QOpenGLWidget()
    except ImportError as e:
        warnings.warn(
            "QOpenGLWidget could not be imported: {}".format(e))
self.widget.setViewport(viewport)
def set_renderer(self, renderer)
Set the viewport widget used for rendering.
3.103232
2.843005
1.091532
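A hedged usage note: 'opengl' forces a QOpenGLWidget viewport and lets any ImportError propagate, while 'default' attempts the same but warns and falls back to the standard raster viewport (setViewport(None)) on failure:

# Assumed usage; `view` is the proxy holding the QGraphicsView widget.
view.set_renderer('opengl')   # hardware viewport, ImportError propagates
view.set_renderer('default')  # tries OpenGL, warns and falls back on failure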