Dataset fields:
repository_name: string (length 7-55)
func_path_in_repository: string (length 4-223)
func_name: string (length 1-134)
whole_func_string: string (length 75-104k)
language: string (1 class)
func_code_string: string (length 75-104k)
func_code_tokens: list (length 19-28.4k)
func_documentation_string: string (length 1-46.9k)
func_documentation_tokens: list (length 1-1.97k)
split_name: string (1 class)
func_code_url: string (length 87-315)
gophish/api-client-python
gophish/api/campaigns.py
API.summary
def summary(self, campaign_id=None):
    """ Returns the campaign summary """
    resource_cls = CampaignSummary
    single_resource = False
    if not campaign_id:
        resource_cls = CampaignSummaries
        single_resource = True
    return super(API, self).get(
        resource_id=campaign_id,
        resource_action='summary',
        resource_cls=resource_cls,
        single_resource=single_resource)
python
[ "def", "summary", "(", "self", ",", "campaign_id", "=", "None", ")", ":", "resource_cls", "=", "CampaignSummary", "single_resource", "=", "False", "if", "not", "campaign_id", ":", "resource_cls", "=", "CampaignSummaries", "single_resource", "=", "True", "return", "super", "(", "API", ",", "self", ")", ".", "get", "(", "resource_id", "=", "campaign_id", ",", "resource_action", "=", "'summary'", ",", "resource_cls", "=", "resource_cls", ",", "single_resource", "=", "single_resource", ")" ]
Returns the campaign summary
[ "Returns", "the", "campaign", "summary" ]
train
https://github.com/gophish/api-client-python/blob/28a7790f19e13c92ef0fb7bde8cd89389df5c155/gophish/api/campaigns.py#L38-L51
gophish/api-client-python
gophish/api/campaigns.py
API.results
def results(self, campaign_id):
    """ Returns just the results for a given campaign """
    return super(API, self).get(
        resource_id=campaign_id,
        resource_action='results',
        resource_cls=CampaignResults)
python
[ "def", "results", "(", "self", ",", "campaign_id", ")", ":", "return", "super", "(", "API", ",", "self", ")", ".", "get", "(", "resource_id", "=", "campaign_id", ",", "resource_action", "=", "'results'", ",", "resource_cls", "=", "CampaignResults", ")" ]
Returns just the results for a given campaign
[ "Returns", "just", "the", "results", "for", "a", "given", "campaign" ]
train
https://github.com/gophish/api-client-python/blob/28a7790f19e13c92ef0fb7bde8cd89389df5c155/gophish/api/campaigns.py#L53-L58
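A minimal usage sketch for the two campaign helpers above, assuming the usual gophish client construction from the project README; the host, API key, and campaign id are placeholders.

# Hedged sketch: host, API key, and campaign id below are placeholders.
from gophish import Gophish

api = Gophish('API_KEY', host='https://localhost:3333', verify=False)

all_summaries = api.campaigns.summary()              # summaries for every campaign
one_summary = api.campaigns.summary(campaign_id=1)   # summary for a single campaign
one_results = api.campaigns.results(1)               # raw results for that campaign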
gunthercox/jsondb
jsondb/db.py
Database.set_path
def set_path(self, file_path):
    """
    Set the path of the database.
    Create the file if it does not exist.
    """
    if not file_path:
        self.read_data = self.memory_read
        self.write_data = self.memory_write
    elif not is_valid(file_path):
        self.write_data(file_path, {})

    self.path = file_path
python
[ "def", "set_path", "(", "self", ",", "file_path", ")", ":", "if", "not", "file_path", ":", "self", ".", "read_data", "=", "self", ".", "memory_read", "self", ".", "write_data", "=", "self", ".", "memory_write", "elif", "not", "is_valid", "(", "file_path", ")", ":", "self", ".", "write_data", "(", "file_path", ",", "{", "}", ")", "self", ".", "path", "=", "file_path" ]
Set the path of the database. Create the file if it does not exist.
[ "Set", "the", "path", "of", "the", "database", ".", "Create", "the", "file", "if", "it", "does", "not", "exist", "." ]
train
https://github.com/gunthercox/jsondb/blob/d0d2c7e5fdb8c3c46c0373d59c130641349b1bc9/jsondb/db.py#L38-L49
gunthercox/jsondb
jsondb/db.py
Database.delete
def delete(self, key):
    """
    Removes the specified key from the database.
    """
    obj = self._get_content()

    obj.pop(key, None)

    self.write_data(self.path, obj)
python
[ "def", "delete", "(", "self", ",", "key", ")", ":", "obj", "=", "self", ".", "_get_content", "(", ")", "obj", ".", "pop", "(", "key", ",", "None", ")", "self", ".", "write_data", "(", "self", ".", "path", ",", "obj", ")" ]
Removes the specified key from the database.
[ "Removes", "the", "specified", "key", "from", "the", "database", "." ]
train
https://github.com/gunthercox/jsondb/blob/d0d2c7e5fdb8c3c46c0373d59c130641349b1bc9/jsondb/db.py#L68-L75
gunthercox/jsondb
jsondb/db.py
Database.data
def data(self, **kwargs):
    """
    If a key is passed in, a corresponding value will be returned.
    If a key-value pair is passed in then the corresponding key in the
    database will be set to the specified value.
    A dictionary can be passed in as well.
    If a key does not exist and a value is provided then an entry will
    be created in the database.
    """
    key = kwargs.pop('key', None)
    value = kwargs.pop('value', None)
    dictionary = kwargs.pop('dictionary', None)

    # Fail if a key and a dictionary or a value and a dictionary are given
    if (key is not None and dictionary is not None) or \
            (value is not None and dictionary is not None):
        raise ValueError

    # If only a key was provided return the corresponding value
    if key is not None and value is None:
        return self._get_content(key)

    # if a key and a value are passed in
    if key is not None and value is not None:
        self._set_content(key, value)

    if dictionary is not None:
        for key in dictionary.keys():
            value = dictionary[key]
            self._set_content(key, value)

    return self._get_content()
python
[ "def", "data", "(", "self", ",", "*", "*", "kwargs", ")", ":", "key", "=", "kwargs", ".", "pop", "(", "'key'", ",", "None", ")", "value", "=", "kwargs", ".", "pop", "(", "'value'", ",", "None", ")", "dictionary", "=", "kwargs", ".", "pop", "(", "'dictionary'", ",", "None", ")", "# Fail if a key and a dictionary or a value and a dictionary are given", "if", "(", "key", "is", "not", "None", "and", "dictionary", "is", "not", "None", ")", "or", "(", "value", "is", "not", "None", "and", "dictionary", "is", "not", "None", ")", ":", "raise", "ValueError", "# If only a key was provided return the corresponding value", "if", "key", "is", "not", "None", "and", "value", "is", "None", ":", "return", "self", ".", "_get_content", "(", "key", ")", "# if a key and a value are passed in", "if", "key", "is", "not", "None", "and", "value", "is", "not", "None", ":", "self", ".", "_set_content", "(", "key", ",", "value", ")", "if", "dictionary", "is", "not", "None", ":", "for", "key", "in", "dictionary", ".", "keys", "(", ")", ":", "value", "=", "dictionary", "[", "key", "]", "self", ".", "_set_content", "(", "key", ",", "value", ")", "return", "self", ".", "_get_content", "(", ")" ]
If a key is passed in, a corresponding value will be returned. If a key-value pair is passed in then the corresponding key in the database will be set to the specified value. A dictionary can be passed in as well. If a key does not exist and a value is provided then an entry will be created in the database.
[ "If", "a", "key", "is", "passed", "in", "a", "corresponding", "value", "will", "be", "returned", ".", "If", "a", "key", "-", "value", "pair", "is", "passed", "in", "then", "the", "corresponding", "key", "in", "the", "database", "will", "be", "set", "to", "the", "specified", "value", ".", "A", "dictionary", "can", "be", "passed", "in", "as", "well", ".", "If", "a", "key", "does", "not", "exist", "and", "a", "value", "is", "provided", "then", "an", "entry", "will", "be", "created", "in", "the", "database", "." ]
train
https://github.com/gunthercox/jsondb/blob/d0d2c7e5fdb8c3c46c0373d59c130641349b1bc9/jsondb/db.py#L77-L109
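A short, hedged example of the calling patterns the data() docstring above describes (read by key, set a key-value pair, merge a dictionary), assuming a Database constructed with a file path as in the jsondb README.

# Hedged sketch; 'example.db' is a placeholder path.
from jsondb.db import Database

db = Database('example.db')

db.data(key='color', value='blue')            # set a single key
db.data(dictionary={'size': 'L', 'qty': 2})   # merge a whole dictionary
print(db.data(key='color'))                   # read one value -> 'blue'
print(db.data())                              # read the full contents as a dict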
gunthercox/jsondb
jsondb/db.py
Database.filter
def filter(self, filter_arguments):
    """
    Takes a dictionary of filter parameters.
    Return a list of objects based on a list of parameters.
    """
    results = self._get_content()

    # Filter based on a dictionary of search parameters
    if isinstance(filter_arguments, dict):
        for item, content in iteritems(self._get_content()):
            for key, value in iteritems(filter_arguments):
                keys = key.split('.')
                value = filter_arguments[key]

                if not self._contains_value({item: content}, keys, value):
                    del results[item]

    # Filter based on an input string that should match database key
    if isinstance(filter_arguments, str):
        if filter_arguments in results:
            return [{filter_arguments: results[filter_arguments]}]
        else:
            return []

    return results
python
[ "def", "filter", "(", "self", ",", "filter_arguments", ")", ":", "results", "=", "self", ".", "_get_content", "(", ")", "# Filter based on a dictionary of search parameters", "if", "isinstance", "(", "filter_arguments", ",", "dict", ")", ":", "for", "item", ",", "content", "in", "iteritems", "(", "self", ".", "_get_content", "(", ")", ")", ":", "for", "key", ",", "value", "in", "iteritems", "(", "filter_arguments", ")", ":", "keys", "=", "key", ".", "split", "(", "'.'", ")", "value", "=", "filter_arguments", "[", "key", "]", "if", "not", "self", ".", "_contains_value", "(", "{", "item", ":", "content", "}", ",", "keys", ",", "value", ")", ":", "del", "results", "[", "item", "]", "# Filter based on an input string that should match database key", "if", "isinstance", "(", "filter_arguments", ",", "str", ")", ":", "if", "filter_arguments", "in", "results", ":", "return", "[", "{", "filter_arguments", ":", "results", "[", "filter_arguments", "]", "}", "]", "else", ":", "return", "[", "]", "return", "results" ]
Takes a dictionary of filter parameters. Return a list of objects based on a list of parameters.
[ "Takes", "a", "dictionary", "of", "filter", "parameters", ".", "Return", "a", "list", "of", "objects", "based", "on", "a", "list", "of", "parameters", "." ]
train
https://github.com/gunthercox/jsondb/blob/d0d2c7e5fdb8c3c46c0373d59c130641349b1bc9/jsondb/db.py#L125-L149
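A hedged sketch of the two filter() forms shown above: a dictionary of (possibly dot-separated) keys, and a plain string matched against a top-level key.

# Hedged sketch; relies on the dotted-key split (key.split('.')) seen above.
from jsondb.db import Database

db = Database('people.db')
db.data(dictionary={
    'alice': {'address': {'city': 'Berlin'}},
    'bob': {'address': {'city': 'Paris'}},
})

print(db.filter({'address.city': 'Berlin'}))  # expected to keep only the 'alice' entry
print(db.filter('bob'))                       # [{'bob': {...}}] if the key exists, else []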
gunthercox/jsondb
jsondb/db.py
Database.drop
def drop(self):
    """
    Remove the database by deleting the JSON file.
    """
    import os

    if self.path:
        if os.path.exists(self.path):
            os.remove(self.path)
    else:
        # Clear the in-memory data if there is no file path
        self._data = {}
python
[ "def", "drop", "(", "self", ")", ":", "import", "os", "if", "self", ".", "path", ":", "if", "os", ".", "path", ".", "exists", "(", "self", ".", "path", ")", ":", "os", ".", "remove", "(", "self", ".", "path", ")", "else", ":", "# Clear the in-memory data if there is no file path", "self", ".", "_data", "=", "{", "}" ]
Remove the database by deleting the JSON file.
[ "Remove", "the", "database", "by", "deleting", "the", "JSON", "file", "." ]
train
https://github.com/gunthercox/jsondb/blob/d0d2c7e5fdb8c3c46c0373d59c130641349b1bc9/jsondb/db.py#L151-L162
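A hedged sketch of the set_path/delete/drop life cycle above, assuming the Database constructor takes the file path and hands it to set_path().

# Hedged sketch; 'scratch.db' is a placeholder path.
import os
from jsondb.db import Database

db = Database('scratch.db')             # the file is created if it does not exist
db.data(key='token', value='abc123')

db.delete('token')                      # missing keys are ignored (dict.pop(key, None))
db.drop()                               # deletes the JSON file from disk
assert not os.path.exists('scratch.db')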
gunthercox/jsondb
jsondb/file_writer.py
read_data
def read_data(file_path):
    """
    Reads a file and returns a json encoded representation of the file.
    """
    if not is_valid(file_path):
        write_data(file_path, {})

    db = open_file_for_reading(file_path)
    content = db.read()

    obj = decode(content)
    db.close()

    return obj
python
[ "def", "read_data", "(", "file_path", ")", ":", "if", "not", "is_valid", "(", "file_path", ")", ":", "write_data", "(", "file_path", ",", "{", "}", ")", "db", "=", "open_file_for_reading", "(", "file_path", ")", "content", "=", "db", ".", "read", "(", ")", "obj", "=", "decode", "(", "content", ")", "db", ".", "close", "(", ")", "return", "obj" ]
Reads a file and returns a json encoded representation of the file.
[ "Reads", "a", "file", "and", "returns", "a", "json", "encoded", "representation", "of", "the", "file", "." ]
train
https://github.com/gunthercox/jsondb/blob/d0d2c7e5fdb8c3c46c0373d59c130641349b1bc9/jsondb/file_writer.py#L4-L19
gunthercox/jsondb
jsondb/file_writer.py
write_data
def write_data(path, obj):
    """
    Writes to a file and returns the updated file content.
    """
    with open_file_for_writing(path) as db:
        db.write(encode(obj))

    return obj
python
[ "def", "write_data", "(", "path", ",", "obj", ")", ":", "with", "open_file_for_writing", "(", "path", ")", "as", "db", ":", "db", ".", "write", "(", "encode", "(", "obj", ")", ")", "return", "obj" ]
Writes to a file and returns the updated file content.
[ "Writes", "to", "a", "file", "and", "returns", "the", "updated", "file", "content", "." ]
train
https://github.com/gunthercox/jsondb/blob/d0d2c7e5fdb8c3c46c0373d59c130641349b1bc9/jsondb/file_writer.py#L21-L28
gunthercox/jsondb
jsondb/file_writer.py
is_valid
def is_valid(file_path):
    """
    Check to see if a file exists or is empty.
    """
    from os import path, stat

    can_open = False

    try:
        with open(file_path) as fp:
            can_open = True
    except IOError:
        return False

    is_file = path.isfile(file_path)

    return path.exists(file_path) and is_file and stat(file_path).st_size > 0
python
[ "def", "is_valid", "(", "file_path", ")", ":", "from", "os", "import", "path", ",", "stat", "can_open", "=", "False", "try", ":", "with", "open", "(", "file_path", ")", "as", "fp", ":", "can_open", "=", "True", "except", "IOError", ":", "return", "False", "is_file", "=", "path", ".", "isfile", "(", "file_path", ")", "return", "path", ".", "exists", "(", "file_path", ")", "and", "is_file", "and", "stat", "(", "file_path", ")", ".", "st_size", ">", "0" ]
Check to see if a file exists or is empty.
[ "Check", "to", "see", "if", "a", "file", "exists", "or", "is", "empty", "." ]
train
https://github.com/gunthercox/jsondb/blob/d0d2c7e5fdb8c3c46c0373d59c130641349b1bc9/jsondb/file_writer.py#L30-L46
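A hedged round trip through the three module-level helpers above: write_data() encodes and persists a dict, is_valid() reports whether the file exists and is non-empty, and read_data() decodes it back (creating an empty file first if needed).

# Hedged sketch; 'cache.json' is a placeholder path.
from jsondb.file_writer import read_data, write_data, is_valid

write_data('cache.json', {'hits': 0})
assert is_valid('cache.json')

obj = read_data('cache.json')
obj['hits'] += 1
write_data('cache.json', obj)
print(read_data('cache.json'))   # {'hits': 1}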
nschloe/meshzoo
meshzoo/helpers.py
_refine
def _refine(node_coords, cells_nodes, edge_nodes, cells_edges):
    """Canonically refine a mesh by inserting nodes at all edge midpoints
    and make four triangular elements where there was one.

    This is a very crude refinement; don't use for actual applications.
    """
    num_nodes = len(node_coords)
    num_new_nodes = len(edge_nodes)

    # new_nodes = numpy.empty(num_new_nodes, dtype=numpy.dtype((float, 2)))
    node_coords.resize(num_nodes + num_new_nodes, 3, refcheck=False)
    # Set starting index for new nodes.
    new_node_gid = num_nodes

    # After the refinement step, all previous edge-node associations will be
    # obsolete, so record *all* the new edges.
    num_edges = len(edge_nodes)
    num_cells = len(cells_nodes)
    assert num_cells == len(cells_edges)
    num_new_edges = 2 * num_edges + 3 * num_cells
    new_edges_nodes = numpy.empty(num_new_edges, dtype=numpy.dtype((int, 2)))

    new_edge_gid = 0

    # After the refinement step, all previous cell-node associations will be
    # obsolete, so record *all* the new cells.
    num_new_cells = 4 * num_cells
    new_cells_nodes = numpy.empty(num_new_cells, dtype=numpy.dtype((int, 3)))
    new_cells_edges = numpy.empty(num_new_cells, dtype=numpy.dtype((int, 3)))
    new_cell_gid = 0

    is_edge_divided = numpy.zeros(num_edges, dtype=bool)
    edge_midpoint_gids = numpy.empty(num_edges, dtype=int)
    edge_newedges_gids = numpy.empty(num_edges, dtype=numpy.dtype((int, 2)))

    # Loop over all elements.
    for cell_id, cell in enumerate(zip(cells_edges, cells_nodes)):
        cell_edges, cell_nodes = cell

        # Divide edges.
        local_edge_midpoint_gids = numpy.empty(3, dtype=int)
        local_edge_newedges = numpy.empty(3, dtype=numpy.dtype((int, 2)))
        local_neighbor_midpoints = [[], [], []]
        local_neighbor_newedges = [[], [], []]
        for k, edge_gid in enumerate(cell_edges):
            edgenodes_gids = edge_nodes[edge_gid]
            if is_edge_divided[edge_gid]:
                # Edge is already divided. Just keep records for the cell
                # creation.
                local_edge_midpoint_gids[k] = edge_midpoint_gids[edge_gid]
                local_edge_newedges[k] = edge_newedges_gids[edge_gid]
            else:
                # Create new node at the edge midpoint.
                node_coords[new_node_gid] = 0.5 * (
                    node_coords[edgenodes_gids[0]] + node_coords[edgenodes_gids[1]]
                )
                local_edge_midpoint_gids[k] = new_node_gid
                new_node_gid += 1
                edge_midpoint_gids[edge_gid] = local_edge_midpoint_gids[k]

                # Divide edge into two.
                new_edges_nodes[new_edge_gid] = numpy.array(
                    [edgenodes_gids[0], local_edge_midpoint_gids[k]]
                )
                new_edge_gid += 1
                new_edges_nodes[new_edge_gid] = numpy.array(
                    [local_edge_midpoint_gids[k], edgenodes_gids[1]]
                )
                new_edge_gid += 1

                local_edge_newedges[k] = [new_edge_gid - 2, new_edge_gid - 1]
                edge_newedges_gids[edge_gid] = local_edge_newedges[k]
                # Do the household.
                is_edge_divided[edge_gid] = True

            # Keep a record of the new neighbors of the old nodes.
            # Get local node IDs.
            edgenodes_lids = [
                numpy.nonzero(cell_nodes == edgenodes_gids[0])[0][0],
                numpy.nonzero(cell_nodes == edgenodes_gids[1])[0][0],
            ]
            local_neighbor_midpoints[edgenodes_lids[0]].append(
                local_edge_midpoint_gids[k]
            )
            local_neighbor_midpoints[edgenodes_lids[1]].append(
                local_edge_midpoint_gids[k]
            )
            local_neighbor_newedges[edgenodes_lids[0]].append(local_edge_newedges[k][0])
            local_neighbor_newedges[edgenodes_lids[1]].append(local_edge_newedges[k][1])

        new_edge_opposite_of_local_node = numpy.empty(3, dtype=int)
        # New edges: Connect the three midpoints.
        for k in range(3):
            new_edges_nodes[new_edge_gid] = local_neighbor_midpoints[k]
            new_edge_opposite_of_local_node[k] = new_edge_gid
            new_edge_gid += 1

        # Create new elements.
        # Center cell:
        new_cells_nodes[new_cell_gid] = local_edge_midpoint_gids
        new_cells_edges[new_cell_gid] = new_edge_opposite_of_local_node
        new_cell_gid += 1
        # The three corner elements:
        for k in range(3):
            new_cells_nodes[new_cell_gid] = numpy.array(
                [
                    cells_nodes[cell_id][k],
                    local_neighbor_midpoints[k][0],
                    local_neighbor_midpoints[k][1],
                ]
            )
            new_cells_edges[new_cell_gid] = numpy.array(
                [
                    new_edge_opposite_of_local_node[k],
                    local_neighbor_newedges[k][0],
                    local_neighbor_newedges[k][1],
                ]
            )
            new_cell_gid += 1

    return node_coords, new_cells_nodes, new_edges_nodes, new_cells_edges
python
[ "def", "_refine", "(", "node_coords", ",", "cells_nodes", ",", "edge_nodes", ",", "cells_edges", ")", ":", "num_nodes", "=", "len", "(", "node_coords", ")", "num_new_nodes", "=", "len", "(", "edge_nodes", ")", "# new_nodes = numpy.empty(num_new_nodes, dtype=numpy.dtype((float, 2)))", "node_coords", ".", "resize", "(", "num_nodes", "+", "num_new_nodes", ",", "3", ",", "refcheck", "=", "False", ")", "# Set starting index for new nodes.", "new_node_gid", "=", "num_nodes", "# After the refinement step, all previous edge-node associations will be", "# obsolete, so record *all* the new edges.", "num_edges", "=", "len", "(", "edge_nodes", ")", "num_cells", "=", "len", "(", "cells_nodes", ")", "assert", "num_cells", "==", "len", "(", "cells_edges", ")", "num_new_edges", "=", "2", "*", "num_edges", "+", "3", "*", "num_cells", "new_edges_nodes", "=", "numpy", ".", "empty", "(", "num_new_edges", ",", "dtype", "=", "numpy", ".", "dtype", "(", "(", "int", ",", "2", ")", ")", ")", "new_edge_gid", "=", "0", "# After the refinement step, all previous cell-node associations will be", "# obsolete, so record *all* the new cells.", "num_new_cells", "=", "4", "*", "num_cells", "new_cells_nodes", "=", "numpy", ".", "empty", "(", "num_new_cells", ",", "dtype", "=", "numpy", ".", "dtype", "(", "(", "int", ",", "3", ")", ")", ")", "new_cells_edges", "=", "numpy", ".", "empty", "(", "num_new_cells", ",", "dtype", "=", "numpy", ".", "dtype", "(", "(", "int", ",", "3", ")", ")", ")", "new_cell_gid", "=", "0", "is_edge_divided", "=", "numpy", ".", "zeros", "(", "num_edges", ",", "dtype", "=", "bool", ")", "edge_midpoint_gids", "=", "numpy", ".", "empty", "(", "num_edges", ",", "dtype", "=", "int", ")", "edge_newedges_gids", "=", "numpy", ".", "empty", "(", "num_edges", ",", "dtype", "=", "numpy", ".", "dtype", "(", "(", "int", ",", "2", ")", ")", ")", "# Loop over all elements.", "for", "cell_id", ",", "cell", "in", "enumerate", "(", "zip", "(", "cells_edges", ",", "cells_nodes", ")", ")", ":", "cell_edges", ",", "cell_nodes", "=", "cell", "# Divide edges.", "local_edge_midpoint_gids", "=", "numpy", ".", "empty", "(", "3", ",", "dtype", "=", "int", ")", "local_edge_newedges", "=", "numpy", ".", "empty", "(", "3", ",", "dtype", "=", "numpy", ".", "dtype", "(", "(", "int", ",", "2", ")", ")", ")", "local_neighbor_midpoints", "=", "[", "[", "]", ",", "[", "]", ",", "[", "]", "]", "local_neighbor_newedges", "=", "[", "[", "]", ",", "[", "]", ",", "[", "]", "]", "for", "k", ",", "edge_gid", "in", "enumerate", "(", "cell_edges", ")", ":", "edgenodes_gids", "=", "edge_nodes", "[", "edge_gid", "]", "if", "is_edge_divided", "[", "edge_gid", "]", ":", "# Edge is already divided. 
Just keep records for the cell", "# creation.", "local_edge_midpoint_gids", "[", "k", "]", "=", "edge_midpoint_gids", "[", "edge_gid", "]", "local_edge_newedges", "[", "k", "]", "=", "edge_newedges_gids", "[", "edge_gid", "]", "else", ":", "# Create new node at the edge midpoint.", "node_coords", "[", "new_node_gid", "]", "=", "0.5", "*", "(", "node_coords", "[", "edgenodes_gids", "[", "0", "]", "]", "+", "node_coords", "[", "edgenodes_gids", "[", "1", "]", "]", ")", "local_edge_midpoint_gids", "[", "k", "]", "=", "new_node_gid", "new_node_gid", "+=", "1", "edge_midpoint_gids", "[", "edge_gid", "]", "=", "local_edge_midpoint_gids", "[", "k", "]", "# Divide edge into two.", "new_edges_nodes", "[", "new_edge_gid", "]", "=", "numpy", ".", "array", "(", "[", "edgenodes_gids", "[", "0", "]", ",", "local_edge_midpoint_gids", "[", "k", "]", "]", ")", "new_edge_gid", "+=", "1", "new_edges_nodes", "[", "new_edge_gid", "]", "=", "numpy", ".", "array", "(", "[", "local_edge_midpoint_gids", "[", "k", "]", ",", "edgenodes_gids", "[", "1", "]", "]", ")", "new_edge_gid", "+=", "1", "local_edge_newedges", "[", "k", "]", "=", "[", "new_edge_gid", "-", "2", ",", "new_edge_gid", "-", "1", "]", "edge_newedges_gids", "[", "edge_gid", "]", "=", "local_edge_newedges", "[", "k", "]", "# Do the household.", "is_edge_divided", "[", "edge_gid", "]", "=", "True", "# Keep a record of the new neighbors of the old nodes.", "# Get local node IDs.", "edgenodes_lids", "=", "[", "numpy", ".", "nonzero", "(", "cell_nodes", "==", "edgenodes_gids", "[", "0", "]", ")", "[", "0", "]", "[", "0", "]", ",", "numpy", ".", "nonzero", "(", "cell_nodes", "==", "edgenodes_gids", "[", "1", "]", ")", "[", "0", "]", "[", "0", "]", ",", "]", "local_neighbor_midpoints", "[", "edgenodes_lids", "[", "0", "]", "]", ".", "append", "(", "local_edge_midpoint_gids", "[", "k", "]", ")", "local_neighbor_midpoints", "[", "edgenodes_lids", "[", "1", "]", "]", ".", "append", "(", "local_edge_midpoint_gids", "[", "k", "]", ")", "local_neighbor_newedges", "[", "edgenodes_lids", "[", "0", "]", "]", ".", "append", "(", "local_edge_newedges", "[", "k", "]", "[", "0", "]", ")", "local_neighbor_newedges", "[", "edgenodes_lids", "[", "1", "]", "]", ".", "append", "(", "local_edge_newedges", "[", "k", "]", "[", "1", "]", ")", "new_edge_opposite_of_local_node", "=", "numpy", ".", "empty", "(", "3", ",", "dtype", "=", "int", ")", "# New edges: Connect the three midpoints.", "for", "k", "in", "range", "(", "3", ")", ":", "new_edges_nodes", "[", "new_edge_gid", "]", "=", "local_neighbor_midpoints", "[", "k", "]", "new_edge_opposite_of_local_node", "[", "k", "]", "=", "new_edge_gid", "new_edge_gid", "+=", "1", "# Create new elements.", "# Center cell:", "new_cells_nodes", "[", "new_cell_gid", "]", "=", "local_edge_midpoint_gids", "new_cells_edges", "[", "new_cell_gid", "]", "=", "new_edge_opposite_of_local_node", "new_cell_gid", "+=", "1", "# The three corner elements:", "for", "k", "in", "range", "(", "3", ")", ":", "new_cells_nodes", "[", "new_cell_gid", "]", "=", "numpy", ".", "array", "(", "[", "cells_nodes", "[", "cell_id", "]", "[", "k", "]", ",", "local_neighbor_midpoints", "[", "k", "]", "[", "0", "]", ",", "local_neighbor_midpoints", "[", "k", "]", "[", "1", "]", ",", "]", ")", "new_cells_edges", "[", "new_cell_gid", "]", "=", "numpy", ".", "array", "(", "[", "new_edge_opposite_of_local_node", "[", "k", "]", ",", "local_neighbor_newedges", "[", "k", "]", "[", "0", "]", ",", "local_neighbor_newedges", "[", "k", "]", "[", "1", "]", ",", "]", ")", 
"new_cell_gid", "+=", "1", "return", "node_coords", ",", "new_cells_nodes", ",", "new_edges_nodes", ",", "new_cells_edges" ]
Canonically refine a mesh by inserting nodes at all edge midpoints and make four triangular elements where there was one. This is a very crude refinement; don't use for actual applications.
[ "Canonically", "refine", "a", "mesh", "by", "inserting", "nodes", "at", "all", "edge", "midpoints", "and", "make", "four", "triangular", "elements", "where", "there", "was", "one", ".", "This", "is", "a", "very", "crude", "refinement", ";", "don", "t", "use", "for", "actual", "applications", "." ]
train
https://github.com/nschloe/meshzoo/blob/623b84c8b985dcc8e5ccc6250d1dbb989736dcef/meshzoo/helpers.py#L7-L124
nschloe/meshzoo
meshzoo/helpers.py
create_edges
def create_edges(cells_nodes):
    """Setup edge-node and edge-cell relations. Adapted from voropy.
    """
    # Create the idx_hierarchy (nodes->edges->cells), i.e., the value of
    # `self.idx_hierarchy[0, 2, 27]` is the index of the node of cell 27, edge
    # 2, node 0. The shape of `self.idx_hierarchy` is `(2, 3, n)`, where `n` is
    # the number of cells. Make sure that the k-th edge is opposite of the k-th
    # point in the triangle.
    local_idx = numpy.array([[1, 2], [2, 0], [0, 1]]).T
    # Map idx back to the nodes. This is useful if quantities which are in
    # idx shape need to be added up into nodes (e.g., equation system rhs).
    nds = cells_nodes.T
    idx_hierarchy = nds[local_idx]

    s = idx_hierarchy.shape
    a = numpy.sort(idx_hierarchy.reshape(s[0], s[1] * s[2]).T)

    b = numpy.ascontiguousarray(a).view(
        numpy.dtype((numpy.void, a.dtype.itemsize * a.shape[1]))
    )
    _, idx, inv, cts = numpy.unique(
        b, return_index=True, return_inverse=True, return_counts=True
    )

    # No edge has more than 2 cells. This assertion fails, for example, if
    # cells are listed twice.
    assert all(cts < 3)

    edge_nodes = a[idx]
    cells_edges = inv.reshape(3, -1).T

    return edge_nodes, cells_edges
python
[ "def", "create_edges", "(", "cells_nodes", ")", ":", "# Create the idx_hierarchy (nodes->edges->cells), i.e., the value of", "# `self.idx_hierarchy[0, 2, 27]` is the index of the node of cell 27, edge", "# 2, node 0. The shape of `self.idx_hierarchy` is `(2, 3, n)`, where `n` is", "# the number of cells. Make sure that the k-th edge is opposite of the k-th", "# point in the triangle.", "local_idx", "=", "numpy", ".", "array", "(", "[", "[", "1", ",", "2", "]", ",", "[", "2", ",", "0", "]", ",", "[", "0", ",", "1", "]", "]", ")", ".", "T", "# Map idx back to the nodes. This is useful if quantities which are in", "# idx shape need to be added up into nodes (e.g., equation system rhs).", "nds", "=", "cells_nodes", ".", "T", "idx_hierarchy", "=", "nds", "[", "local_idx", "]", "s", "=", "idx_hierarchy", ".", "shape", "a", "=", "numpy", ".", "sort", "(", "idx_hierarchy", ".", "reshape", "(", "s", "[", "0", "]", ",", "s", "[", "1", "]", "*", "s", "[", "2", "]", ")", ".", "T", ")", "b", "=", "numpy", ".", "ascontiguousarray", "(", "a", ")", ".", "view", "(", "numpy", ".", "dtype", "(", "(", "numpy", ".", "void", ",", "a", ".", "dtype", ".", "itemsize", "*", "a", ".", "shape", "[", "1", "]", ")", ")", ")", "_", ",", "idx", ",", "inv", ",", "cts", "=", "numpy", ".", "unique", "(", "b", ",", "return_index", "=", "True", ",", "return_inverse", "=", "True", ",", "return_counts", "=", "True", ")", "# No edge has more than 2 cells. This assertion fails, for example, if", "# cells are listed twice.", "assert", "all", "(", "cts", "<", "3", ")", "edge_nodes", "=", "a", "[", "idx", "]", "cells_edges", "=", "inv", ".", "reshape", "(", "3", ",", "-", "1", ")", ".", "T", "return", "edge_nodes", ",", "cells_edges" ]
Setup edge-node and edge-cell relations. Adapted from voropy.
[ "Setup", "edge", "-", "node", "and", "edge", "-", "cell", "relations", ".", "Adapted", "from", "voropy", "." ]
train
https://github.com/nschloe/meshzoo/blob/623b84c8b985dcc8e5ccc6250d1dbb989736dcef/meshzoo/helpers.py#L127-L158
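A hedged sketch wiring the two helpers above together: build the edge relations for a unit square split into two triangles, then run one refinement pass. The import path and the use of the private _refine are assumptions; node coordinates get a z column because _refine resizes node_coords to shape (n, 3).

# Hedged sketch: two triangles sharing one edge, refined once.
import numpy
from meshzoo.helpers import create_edges, _refine   # assumed import path

node_coords = numpy.array(
    [[0.0, 0.0, 0.0], [1.0, 0.0, 0.0], [1.0, 1.0, 0.0], [0.0, 1.0, 0.0]]
)
cells_nodes = numpy.array([[0, 1, 2], [0, 2, 3]])

edge_nodes, cells_edges = create_edges(cells_nodes)
nodes, cells, edges, cells_edges = _refine(
    node_coords, cells_nodes, edge_nodes, cells_edges
)
print(len(nodes), len(cells))   # 4 nodes + 5 edge midpoints = 9; 2 * 4 = 8 cells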
nschloe/meshzoo
meshzoo/helpers.py
plot2d
def plot2d(points, cells, mesh_color="k", show_axes=False):
    """Plot a 2D mesh using matplotlib.
    """
    import matplotlib.pyplot as plt
    from matplotlib.collections import LineCollection

    fig = plt.figure()
    ax = fig.gca()
    plt.axis("equal")
    if not show_axes:
        ax.set_axis_off()

    xmin = numpy.amin(points[:, 0])
    xmax = numpy.amax(points[:, 0])
    ymin = numpy.amin(points[:, 1])
    ymax = numpy.amax(points[:, 1])

    width = xmax - xmin
    xmin -= 0.1 * width
    xmax += 0.1 * width

    height = ymax - ymin
    ymin -= 0.1 * height
    ymax += 0.1 * height

    ax.set_xlim(xmin, xmax)
    ax.set_ylim(ymin, ymax)

    edge_nodes, _ = create_edges(cells)

    # Get edges, cut off z-component.
    e = points[edge_nodes][:, :, :2]
    line_segments = LineCollection(e, color=mesh_color)
    ax.add_collection(line_segments)
    return fig
python
[ "def", "plot2d", "(", "points", ",", "cells", ",", "mesh_color", "=", "\"k\"", ",", "show_axes", "=", "False", ")", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "from", "matplotlib", ".", "collections", "import", "LineCollection", "fig", "=", "plt", ".", "figure", "(", ")", "ax", "=", "fig", ".", "gca", "(", ")", "plt", ".", "axis", "(", "\"equal\"", ")", "if", "not", "show_axes", ":", "ax", ".", "set_axis_off", "(", ")", "xmin", "=", "numpy", ".", "amin", "(", "points", "[", ":", ",", "0", "]", ")", "xmax", "=", "numpy", ".", "amax", "(", "points", "[", ":", ",", "0", "]", ")", "ymin", "=", "numpy", ".", "amin", "(", "points", "[", ":", ",", "1", "]", ")", "ymax", "=", "numpy", ".", "amax", "(", "points", "[", ":", ",", "1", "]", ")", "width", "=", "xmax", "-", "xmin", "xmin", "-=", "0.1", "*", "width", "xmax", "+=", "0.1", "*", "width", "height", "=", "ymax", "-", "ymin", "ymin", "-=", "0.1", "*", "height", "ymax", "+=", "0.1", "*", "height", "ax", ".", "set_xlim", "(", "xmin", ",", "xmax", ")", "ax", ".", "set_ylim", "(", "ymin", ",", "ymax", ")", "edge_nodes", ",", "_", "=", "create_edges", "(", "cells", ")", "# Get edges, cut off z-component.", "e", "=", "points", "[", "edge_nodes", "]", "[", ":", ",", ":", ",", ":", "2", "]", "line_segments", "=", "LineCollection", "(", "e", ",", "color", "=", "mesh_color", ")", "ax", ".", "add_collection", "(", "line_segments", ")", "return", "fig" ]
Plot a 2D mesh using matplotlib.
[ "Plot", "a", "2D", "mesh", "using", "matplotlib", "." ]
train
https://github.com/nschloe/meshzoo/blob/623b84c8b985dcc8e5ccc6250d1dbb989736dcef/meshzoo/helpers.py#L169-L203
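A hedged, self-contained example for plot2d(): a tiny fan of four triangles built by hand, so no other meshzoo generator is assumed.

# Hedged sketch; requires numpy and matplotlib only.
import numpy
import matplotlib.pyplot as plt
from meshzoo.helpers import plot2d   # assumed import path

points = numpy.array(
    [[0.0, 0.0], [1.0, 0.0], [1.0, 1.0], [0.0, 1.0], [0.5, 0.5]]
)
cells = numpy.array([[0, 1, 4], [1, 2, 4], [2, 3, 4], [3, 0, 4]])

fig = plot2d(points, cells, mesh_color="tab:blue", show_axes=True)
plt.show()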
shazow/workerpool
samples/blockingworker.py
BlockingWorkerPool.put
def put(self, job, result):
    "Perform a job by a member in the pool and return the result."
    self.job.put(job)
    r = result.get()
    return r
python
[ "def", "put", "(", "self", ",", "job", ",", "result", ")", ":", "self", ".", "job", ".", "put", "(", "job", ")", "r", "=", "result", ".", "get", "(", ")", "return", "r" ]
Perform a job by a member in the pool and return the result.
[ "Perform", "a", "job", "by", "a", "member", "in", "the", "pool", "and", "return", "the", "result", "." ]
train
https://github.com/shazow/workerpool/blob/2c5b29ec64ffbc94fc3623a4531eaf7c7c1a9ab5/samples/blockingworker.py#L16-L20
shazow/workerpool
samples/blockingworker.py
BlockingWorkerPool.contract
def contract(self, jobs, result):
    """
    Perform a contract on a number of jobs and block until a result is
    retrieved for each job.
    """
    for j in jobs:
        WorkerPool.put(self, j)

    r = []
    for i in xrange(len(jobs)):
        r.append(result.get())
    return r
python
[ "def", "contract", "(", "self", ",", "jobs", ",", "result", ")", ":", "for", "j", "in", "jobs", ":", "WorkerPool", ".", "put", "(", "self", ",", "j", ")", "r", "=", "[", "]", "for", "i", "in", "xrange", "(", "len", "(", "jobs", ")", ")", ":", "r", ".", "append", "(", "result", ".", "get", "(", ")", ")", "return", "r" ]
Perform a contract on a number of jobs and block until a result is retrieved for each job.
[ "Perform", "a", "contract", "on", "a", "number", "of", "jobs", "and", "block", "until", "a", "result", "is", "retrieved", "for", "each", "job", "." ]
train
https://github.com/shazow/workerpool/blob/2c5b29ec64ffbc94fc3623a4531eaf7c7c1a9ab5/samples/blockingworker.py#L22-L34
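The two methods above pair each submitted job with a shared result queue and block on result.get(). Below is a library-free stand-in for that same pattern using only the standard library; the queue and worker names are illustrative and are not the workerpool API.

# Illustrative stand-in for the blocking put/contract pattern; not workerpool itself.
import threading
from queue import Queue

job_queue, result_queue = Queue(), Queue()

def worker():
    # Each worker performs jobs and feeds the shared result queue.
    while True:
        func, arg = job_queue.get()
        result_queue.put(func(arg))
        job_queue.task_done()

for _ in range(4):
    threading.Thread(target=worker, daemon=True).start()

def contract(jobs):
    # Submit every job, then block until one result per job has been retrieved.
    for j in jobs:
        job_queue.put(j)
    return [result_queue.get() for _ in jobs]

print(contract([(lambda x: x * x, n) for n in range(5)]))   # results; order not guaranteed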
nschloe/meshzoo
meshzoo/cube.py
cube
def cube(
    xmin=0.0, xmax=1.0, ymin=0.0, ymax=1.0, zmin=0.0, zmax=1.0, nx=11, ny=11, nz=11
):
    """Canonical tetrahedrization of the cube.

    Input:
    Edge lengths of the cube
    Number of nodes along the edges.
    """
    # Generate suitable ranges for parametrization
    x_range = numpy.linspace(xmin, xmax, nx)
    y_range = numpy.linspace(ymin, ymax, ny)
    z_range = numpy.linspace(zmin, zmax, nz)

    # Create the vertices.
    x, y, z = numpy.meshgrid(x_range, y_range, z_range, indexing="ij")
    # Alternative with slightly different order:
    # ```
    # nodes = numpy.stack([x, y, z]).T.reshape(-1, 3)
    # ```
    nodes = numpy.array([x, y, z]).T.reshape(-1, 3)

    # Create the elements (cells).
    # There is 1 way to split a cube into 5 tetrahedra,
    # and 12 ways to split it into 6 tetrahedra.
    # See
    # <http://www.baumanneduard.ch/Splitting%20a%20cube%20in%20tetrahedras2.htm>
    # Also interesting: <http://en.wikipedia.org/wiki/Marching_tetrahedrons>.

    a0 = numpy.add.outer(numpy.array(range(nx - 1)), nx * numpy.array(range(ny - 1)))
    a = numpy.add.outer(a0, nx * ny * numpy.array(range(nz - 1)))

    # The general scheme here is:
    #   * Initialize everything with `a`, equivalent to
    #     [i + nx * j + nx*ny * k].
    #   * Add the "even" elements.
    #   * Switch the element styles for every other element to make sure the
    #     edges match at the faces of the cubes.
    # The last step requires adapting the original pattern at
    #     [1::2, 0::2, 0::2, :]
    #     [0::2, 1::2, 0::2, :]
    #     [0::2, 0::2, 1::2, :]
    #     [1::2, 1::2, 1::2, :]
    #
    # Tetrahedron 0:
    # [
    #     i + nx*j + nx*ny * k,
    #     i + nx*(j+1) + nx*ny * k,
    #     i+1 + nx*j + nx*ny * k,
    #     i + nx*j + nx*ny * (k+1)
    # ]
    # TODO get
    # ```
    # elems0 = numpy.stack([a, a + nx, a + 1, a + nx*ny]).T
    # ```
    # back.
    elems0 = numpy.concatenate(
        [a[..., None], a[..., None] + nx, a[..., None] + 1, a[..., None] + nx * ny],
        axis=3,
    )
    # Every other element cube:
    # [
    #     i+1 + nx * j + nx*ny * k,
    #     i+1 + nx * (j+1) + nx*ny * k,
    #     i + nx * j + nx*ny * k,
    #     i+1 + nx * j + nx*ny * (k+1)
    # ]
    elems0[1::2, 0::2, 0::2, 0] += 1
    elems0[0::2, 1::2, 0::2, 0] += 1
    elems0[0::2, 0::2, 1::2, 0] += 1
    elems0[1::2, 1::2, 1::2, 0] += 1
    elems0[1::2, 0::2, 0::2, 1] += 1
    elems0[0::2, 1::2, 0::2, 1] += 1
    elems0[0::2, 0::2, 1::2, 1] += 1
    elems0[1::2, 1::2, 1::2, 1] += 1
    elems0[1::2, 0::2, 0::2, 2] -= 1
    elems0[0::2, 1::2, 0::2, 2] -= 1
    elems0[0::2, 0::2, 1::2, 2] -= 1
    elems0[1::2, 1::2, 1::2, 2] -= 1
    elems0[1::2, 0::2, 0::2, 3] += 1
    elems0[0::2, 1::2, 0::2, 3] += 1
    elems0[0::2, 0::2, 1::2, 3] += 1
    elems0[1::2, 1::2, 1::2, 3] += 1

    # Tetrahedron 1:
    # [
    #     i + nx*(j+1) + nx*ny * k,
    #     i+1 + nx*(j+1) + nx*ny * k,
    #     i+1 + nx*j + nx*ny * k,
    #     i+1 + nx*(j+1) + nx*ny * (k+1)
    # ]
    # elems1 = numpy.stack([a + nx, a + 1 + nx, a + 1, a + 1 + nx + nx*ny]).T
    elems1 = numpy.concatenate(
        [
            a[..., None] + nx,
            a[..., None] + 1 + nx,
            a[..., None] + 1,
            a[..., None] + 1 + nx + nx * ny,
        ],
        axis=3,
    )
    # Every other element cube:
    # [
    #     i+1 + nx * (j+1) + nx*ny * k,
    #     i + nx * (j+1) + nx*ny * k,
    #     i + nx * j + nx*ny * k,
    #     i + nx * (j+1) + nx*ny * (k+1)
    # ]
    elems1[1::2, 0::2, 0::2, 0] += 1
    elems1[0::2, 1::2, 0::2, 0] += 1
    elems1[0::2, 0::2, 1::2, 0] += 1
    elems1[1::2, 1::2, 1::2, 0] += 1
    elems1[1::2, 0::2, 0::2, 1] -= 1
    elems1[0::2, 1::2, 0::2, 1] -= 1
    elems1[0::2, 0::2, 1::2, 1] -= 1
    elems1[1::2, 1::2, 1::2, 1] -= 1
    elems1[1::2, 0::2, 0::2, 2] -= 1
    elems1[0::2, 1::2, 0::2, 2] -= 1
    elems1[0::2, 0::2, 1::2, 2] -= 1
    elems1[1::2, 1::2, 1::2, 2] -= 1
    elems1[1::2, 0::2, 0::2, 3] -= 1
    elems1[0::2, 1::2, 0::2, 3] -= 1
    elems1[0::2, 0::2, 1::2, 3] -= 1
    elems1[1::2, 1::2, 1::2, 3] -= 1

    # Tetrahedron 2:
    # [
    #     i + nx*(j+1) + nx*ny * k,
    #     i+1 + nx*j + nx*ny * k,
    #     i + nx*j + nx*ny * (k+1),
    #     i+1 + nx*(j+1) + nx*ny * (k+1)
    # ]
    # elems2 = numpy.stack([a + nx, a + 1, a + nx*ny, a + 1 + nx + nx*ny]).T
    elems2 = numpy.concatenate(
        [
            a[..., None] + nx,
            a[..., None] + 1,
            a[..., None] + nx * ny,
            a[..., None] + 1 + nx + nx * ny,
        ],
        axis=3,
    )
    # Every other element cube:
    # [
    #     i+1 + nx * (j+1) + nx*ny * k,
    #     i + nx * j + nx*ny * k,
    #     i+1 + nx * j + nx*ny * (k+1),
    #     i + nx * (j+1) + nx*ny * (k+1)
    # ]
    elems2[1::2, 0::2, 0::2, 0] += 1
    elems2[0::2, 1::2, 0::2, 0] += 1
    elems2[0::2, 0::2, 1::2, 0] += 1
    elems2[1::2, 1::2, 1::2, 0] += 1
    elems2[1::2, 0::2, 0::2, 1] -= 1
    elems2[0::2, 1::2, 0::2, 1] -= 1
    elems2[0::2, 0::2, 1::2, 1] -= 1
    elems2[1::2, 1::2, 1::2, 1] -= 1
    elems2[1::2, 0::2, 0::2, 2] += 1
    elems2[0::2, 1::2, 0::2, 2] += 1
    elems2[0::2, 0::2, 1::2, 2] += 1
    elems2[1::2, 1::2, 1::2, 2] += 1
    elems2[1::2, 0::2, 0::2, 3] -= 1
    elems2[0::2, 1::2, 0::2, 3] -= 1
    elems2[0::2, 0::2, 1::2, 3] -= 1
    elems2[1::2, 1::2, 1::2, 3] -= 1

    # Tetrahedron 3:
    # [
    #     i + nx * (j+1) + nx*ny * k,
    #     i + nx * j + nx*ny * (k+1),
    #     i + nx * (j+1) + nx*ny * (k+1),
    #     i+1 + nx * (j+1) + nx*ny * (k+1)
    # ]
    # elems3 = numpy.stack([
    #     a + nx,
    #     a + nx*ny,
    #     a + nx + nx*ny,
    #     a + 1 + nx + nx*ny
    #     ]).T
    elems3 = numpy.concatenate(
        [
            a[..., None] + nx,
            a[..., None] + nx * ny,
            a[..., None] + nx + nx * ny,
            a[..., None] + 1 + nx + nx * ny,
        ],
        axis=3,
    )
    # Every other element cube:
    # [
    #     i+1 + nx * (j+1) + nx*ny * k,
    #     i+1 + nx * j + nx*ny * (k+1),
    #     i+1 + nx * (j+1) + nx*ny * (k+1),
    #     i + nx * (j+1) + nx*ny * (k+1)
    # ]
    elems3[1::2, 0::2, 0::2, 0] += 1
    elems3[0::2, 1::2, 0::2, 0] += 1
    elems3[0::2, 0::2, 1::2, 0] += 1
    elems3[1::2, 1::2, 1::2, 0] += 1
    elems3[1::2, 0::2, 0::2, 1] += 1
    elems3[0::2, 1::2, 0::2, 1] += 1
    elems3[0::2, 0::2, 1::2, 1] += 1
    elems3[1::2, 1::2, 1::2, 1] += 1
    elems3[1::2, 0::2, 0::2, 2] += 1
    elems3[0::2, 1::2, 0::2, 2] += 1
    elems3[0::2, 0::2, 1::2, 2] += 1
    elems3[1::2, 1::2, 1::2, 2] += 1
    elems3[1::2, 0::2, 0::2, 3] -= 1
    elems3[0::2, 1::2, 0::2, 3] -= 1
    elems3[0::2, 0::2, 1::2, 3] -= 1
    elems3[1::2, 1::2, 1::2, 3] -= 1

    # Tetrahedron 4:
    # [
    #     i+1 + nx * j + nx*ny * k,
    #     i + nx * j + nx*ny * (k+1),
    #     i+1 + nx * (j+1) + nx*ny * (k+1),
    #     i+1 + nx * j + nx*ny * (k+1)
    # ]
    # elems4 = numpy.stack([
    #     a + 1,
    #     a + nx*ny,
    #     a + 1 + nx + nx*ny,
    #     a + 1 + nx*ny
    #     ]).T
    elems4 = numpy.concatenate(
        [
            a[..., None] + 1,
            a[..., None] + nx * ny,
            a[..., None] + 1 + nx + nx * ny,
            a[..., None] + 1 + nx * ny,
        ],
        axis=3,
    )
    # Every other element cube:
    # [
    #     i + nx * j + nx*ny * k,
    #     i+1 + nx * j + nx*ny * (k+1),
    #     i + nx * (j+1) + nx*ny * (k+1),
    #     i + nx * j + nx*ny * (k+1)
    # ]
    elems4[1::2, 0::2, 0::2, 0] -= 1
    elems4[0::2, 1::2, 0::2, 0] -= 1
    elems4[0::2, 0::2, 1::2, 0] -= 1
    elems4[1::2, 1::2, 1::2, 0] -= 1
    elems4[1::2, 0::2, 0::2, 1] += 1
    elems4[0::2, 1::2, 0::2, 1] += 1
    elems4[0::2, 0::2, 1::2, 1] += 1
    elems4[1::2, 1::2, 1::2, 1] += 1
    elems4[1::2, 0::2, 0::2, 2] -= 1
    elems4[0::2, 1::2, 0::2, 2] -= 1
    elems4[0::2, 0::2, 1::2, 2] -= 1
    elems4[1::2, 1::2, 1::2, 2] -= 1
    elems4[1::2, 0::2, 0::2, 3] -= 1
    elems4[0::2, 1::2, 0::2, 3] -= 1
    elems4[0::2, 0::2, 1::2, 3] -= 1
    elems4[1::2, 1::2, 1::2, 3] -= 1

    elems = numpy.vstack(
        [
            elems0.reshape(-1, 4),
            elems1.reshape(-1, 4),
            elems2.reshape(-1, 4),
            elems3.reshape(-1, 4),
            elems4.reshape(-1, 4),
        ]
    )

    return nodes, elems
python
[ "def", "cube", "(", "xmin", "=", "0.0", ",", "xmax", "=", "1.0", ",", "ymin", "=", "0.0", ",", "ymax", "=", "1.0", ",", "zmin", "=", "0.0", ",", "zmax", "=", "1.0", ",", "nx", "=", "11", ",", "ny", "=", "11", ",", "nz", "=", "11", ")", ":", "# Generate suitable ranges for parametrization", "x_range", "=", "numpy", ".", "linspace", "(", "xmin", ",", "xmax", ",", "nx", ")", "y_range", "=", "numpy", ".", "linspace", "(", "ymin", ",", "ymax", ",", "ny", ")", "z_range", "=", "numpy", ".", "linspace", "(", "zmin", ",", "zmax", ",", "nz", ")", "# Create the vertices.", "x", ",", "y", ",", "z", "=", "numpy", ".", "meshgrid", "(", "x_range", ",", "y_range", ",", "z_range", ",", "indexing", "=", "\"ij\"", ")", "# Alternative with slightly different order:", "# ```", "# nodes = numpy.stack([x, y, z]).T.reshape(-1, 3)", "# ```", "nodes", "=", "numpy", ".", "array", "(", "[", "x", ",", "y", ",", "z", "]", ")", ".", "T", ".", "reshape", "(", "-", "1", ",", "3", ")", "# Create the elements (cells).", "# There is 1 way to split a cube into 5 tetrahedra,", "# and 12 ways to split it into 6 tetrahedra.", "# See", "# <http://www.baumanneduard.ch/Splitting%20a%20cube%20in%20tetrahedras2.htm>", "# Also interesting: <http://en.wikipedia.org/wiki/Marching_tetrahedrons>.", "a0", "=", "numpy", ".", "add", ".", "outer", "(", "numpy", ".", "array", "(", "range", "(", "nx", "-", "1", ")", ")", ",", "nx", "*", "numpy", ".", "array", "(", "range", "(", "ny", "-", "1", ")", ")", ")", "a", "=", "numpy", ".", "add", ".", "outer", "(", "a0", ",", "nx", "*", "ny", "*", "numpy", ".", "array", "(", "range", "(", "nz", "-", "1", ")", ")", ")", "# The general scheme here is:", "# * Initialize everything with `a`, equivalent to", "# [i + nx * j + nx*ny * k].", "# * Add the \"even\" elements.", "# * Switch the element styles for every other element to make sure the", "# edges match at the faces of the cubes.", "# The last step requires adapting the original pattern at", "# [1::2, 0::2, 0::2, :]", "# [0::2, 1::2, 0::2, :]", "# [0::2, 0::2, 1::2, :]", "# [1::2, 1::2, 1::2, :]", "#", "# Tetrahedron 0:", "# [", "# i + nx*j + nx*ny * k,", "# i + nx*(j+1) + nx*ny * k,", "# i+1 + nx*j + nx*ny * k,", "# i + nx*j + nx*ny * (k+1)", "# ]", "# TODO get", "# ```", "# elems0 = numpy.stack([a, a + nx, a + 1, a + nx*ny]).T", "# ```", "# back.", "elems0", "=", "numpy", ".", "concatenate", "(", "[", "a", "[", "...", ",", "None", "]", ",", "a", "[", "...", ",", "None", "]", "+", "nx", ",", "a", "[", "...", ",", "None", "]", "+", "1", ",", "a", "[", "...", ",", "None", "]", "+", "nx", "*", "ny", "]", ",", "axis", "=", "3", ",", ")", "# Every other element cube:", "# [", "# i+1 + nx * j + nx*ny * k,", "# i+1 + nx * (j+1) + nx*ny * k,", "# i + nx * j + nx*ny * k,", "# i+1 + nx * j + nx*ny * (k+1)", "# ]", "elems0", "[", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", "]", "+=", "1", "elems0", "[", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", "]", "+=", "1", "elems0", "[", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", "]", "+=", "1", "elems0", "[", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", "]", "+=", "1", "elems0", "[", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", "]", "+=", "1", "elems0", "[", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", "]", "+=", "1", "elems0", "[", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", 
"1", "]", "+=", "1", "elems0", "[", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", "]", "+=", "1", "elems0", "[", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "2", "]", "-=", "1", "elems0", "[", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "2", "]", "-=", "1", "elems0", "[", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "2", "]", "-=", "1", "elems0", "[", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "2", "]", "-=", "1", "elems0", "[", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "3", "]", "+=", "1", "elems0", "[", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "3", "]", "+=", "1", "elems0", "[", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "3", "]", "+=", "1", "elems0", "[", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "3", "]", "+=", "1", "# Tetrahedron 1:", "# [", "# i + nx*(j+1) + nx*ny * k,", "# i+1 + nx*(j+1) + nx*ny * k,", "# i+1 + nx*j + nx*ny * k,", "# i+1 + nx*(j+1) + nx*ny * (k+1)", "# ]", "# elems1 = numpy.stack([a + nx, a + 1 + nx, a + 1, a + 1 + nx + nx*ny]).T", "elems1", "=", "numpy", ".", "concatenate", "(", "[", "a", "[", "...", ",", "None", "]", "+", "nx", ",", "a", "[", "...", ",", "None", "]", "+", "1", "+", "nx", ",", "a", "[", "...", ",", "None", "]", "+", "1", ",", "a", "[", "...", ",", "None", "]", "+", "1", "+", "nx", "+", "nx", "*", "ny", ",", "]", ",", "axis", "=", "3", ",", ")", "# Every other element cube:", "# [", "# i+1 + nx * (j+1) + nx*ny * k,", "# i + nx * (j+1) + nx*ny * k,", "# i + nx * j + nx*ny * k,", "# i + nx * (j+1) + nx*ny * (k+1)", "# ]", "elems1", "[", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", "]", "+=", "1", "elems1", "[", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", "]", "+=", "1", "elems1", "[", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", "]", "+=", "1", "elems1", "[", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", "]", "+=", "1", "elems1", "[", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", "]", "-=", "1", "elems1", "[", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", "]", "-=", "1", "elems1", "[", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", "]", "-=", "1", "elems1", "[", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", "]", "-=", "1", "elems1", "[", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "2", "]", "-=", "1", "elems1", "[", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "2", "]", "-=", "1", "elems1", "[", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "2", "]", "-=", "1", "elems1", "[", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "2", "]", "-=", "1", "elems1", "[", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "3", "]", "-=", "1", "elems1", "[", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "3", "]", "-=", "1", "elems1", "[", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "3", "]", "-=", "1", "elems1", "[", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "3", "]", "-=", "1", "# 
Tetrahedron 2:", "# [", "# i + nx*(j+1) + nx*ny * k,", "# i+1 + nx*j + nx*ny * k,", "# i + nx*j + nx*ny * (k+1),", "# i+1 + nx*(j+1) + nx*ny * (k+1)", "# ]", "# elems2 = numpy.stack([a + nx, a + 1, a + nx*ny, a + 1 + nx + nx*ny]).T", "elems2", "=", "numpy", ".", "concatenate", "(", "[", "a", "[", "...", ",", "None", "]", "+", "nx", ",", "a", "[", "...", ",", "None", "]", "+", "1", ",", "a", "[", "...", ",", "None", "]", "+", "nx", "*", "ny", ",", "a", "[", "...", ",", "None", "]", "+", "1", "+", "nx", "+", "nx", "*", "ny", ",", "]", ",", "axis", "=", "3", ",", ")", "# Every other element cube:", "# [", "# i+1 + nx * (j+1) + nx*ny * k,", "# i + nx * j + nx*ny * k,", "# i+1 + nx * j + nx*ny * (k+1),", "# i + nx * (j+1) + nx*ny * (k+1)", "# ]", "elems2", "[", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", "]", "+=", "1", "elems2", "[", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", "]", "+=", "1", "elems2", "[", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", "]", "+=", "1", "elems2", "[", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", "]", "+=", "1", "elems2", "[", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", "]", "-=", "1", "elems2", "[", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", "]", "-=", "1", "elems2", "[", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", "]", "-=", "1", "elems2", "[", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", "]", "-=", "1", "elems2", "[", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "2", "]", "+=", "1", "elems2", "[", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "2", "]", "+=", "1", "elems2", "[", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "2", "]", "+=", "1", "elems2", "[", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "2", "]", "+=", "1", "elems2", "[", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "3", "]", "-=", "1", "elems2", "[", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "3", "]", "-=", "1", "elems2", "[", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "3", "]", "-=", "1", "elems2", "[", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "3", "]", "-=", "1", "# Tetrahedron 3:", "# [", "# i + nx * (j+1) + nx*ny * k,", "# i + nx * j + nx*ny * (k+1),", "# i + nx * (j+1) + nx*ny * (k+1),", "# i+1 + nx * (j+1) + nx*ny * (k+1)", "# ]", "# elems3 = numpy.stack([", "# a + nx,", "# a + nx*ny,", "# a + nx + nx*ny,", "# a + 1 + nx + nx*ny", "# ]).T", "elems3", "=", "numpy", ".", "concatenate", "(", "[", "a", "[", "...", ",", "None", "]", "+", "nx", ",", "a", "[", "...", ",", "None", "]", "+", "nx", "*", "ny", ",", "a", "[", "...", ",", "None", "]", "+", "nx", "+", "nx", "*", "ny", ",", "a", "[", "...", ",", "None", "]", "+", "1", "+", "nx", "+", "nx", "*", "ny", ",", "]", ",", "axis", "=", "3", ",", ")", "# Every other element cube:", "# [", "# i+1 + nx * (j+1) + nx*ny * k,", "# i+1 + nx * j + nx*ny * (k+1),", "# i+1 + nx * (j+1) + nx*ny * (k+1),", "# i + nx * (j+1) + nx*ny * (k+1)", "# ]", "elems3", "[", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", "]", "+=", "1", "elems3", "[", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", ":", 
":", "2", ",", "0", "]", "+=", "1", "elems3", "[", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", "]", "+=", "1", "elems3", "[", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", "]", "+=", "1", "elems3", "[", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", "]", "+=", "1", "elems3", "[", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", "]", "+=", "1", "elems3", "[", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", "]", "+=", "1", "elems3", "[", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", "]", "+=", "1", "elems3", "[", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "2", "]", "+=", "1", "elems3", "[", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "2", "]", "+=", "1", "elems3", "[", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "2", "]", "+=", "1", "elems3", "[", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "2", "]", "+=", "1", "elems3", "[", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "3", "]", "-=", "1", "elems3", "[", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "3", "]", "-=", "1", "elems3", "[", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "3", "]", "-=", "1", "elems3", "[", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "3", "]", "-=", "1", "# Tetrahedron 4:", "# [", "# i+1 + nx * j + nx*ny * k,", "# i + nx * j + nx*ny * (k+1),", "# i+1 + nx * (j+1) + nx*ny * (k+1),", "# i+1 + nx * j + nx*ny * (k+1)", "# ]", "# elems4 = numpy.stack([", "# a + 1,", "# a + nx*ny,", "# a + 1 + nx + nx*ny,", "# a + 1 + nx*ny", "# ]).T", "elems4", "=", "numpy", ".", "concatenate", "(", "[", "a", "[", "...", ",", "None", "]", "+", "1", ",", "a", "[", "...", ",", "None", "]", "+", "nx", "*", "ny", ",", "a", "[", "...", ",", "None", "]", "+", "1", "+", "nx", "+", "nx", "*", "ny", ",", "a", "[", "...", ",", "None", "]", "+", "1", "+", "nx", "*", "ny", ",", "]", ",", "axis", "=", "3", ",", ")", "# Every other element cube:", "# [", "# i + nx * j + nx*ny * k,", "# i+1 + nx * j + nx*ny * (k+1),", "# i + nx * (j+1) + nx*ny * (k+1),", "# i + nx * j + nx*ny * (k+1)", "# ]", "elems4", "[", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", "]", "-=", "1", "elems4", "[", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", "]", "-=", "1", "elems4", "[", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", "]", "-=", "1", "elems4", "[", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", "]", "-=", "1", "elems4", "[", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", "]", "+=", "1", "elems4", "[", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", "]", "+=", "1", "elems4", "[", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", "]", "+=", "1", "elems4", "[", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", "]", "+=", "1", "elems4", "[", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "2", "]", "-=", "1", "elems4", "[", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "2", "]", "-=", "1", "elems4", "[", "0", ":", ":", "2", 
",", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "2", "]", "-=", "1", "elems4", "[", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "2", "]", "-=", "1", "elems4", "[", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "3", "]", "-=", "1", "elems4", "[", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "0", ":", ":", "2", ",", "3", "]", "-=", "1", "elems4", "[", "0", ":", ":", "2", ",", "0", ":", ":", "2", ",", "1", ":", ":", "2", ",", "3", "]", "-=", "1", "elems4", "[", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "1", ":", ":", "2", ",", "3", "]", "-=", "1", "elems", "=", "numpy", ".", "vstack", "(", "[", "elems0", ".", "reshape", "(", "-", "1", ",", "4", ")", ",", "elems1", ".", "reshape", "(", "-", "1", ",", "4", ")", ",", "elems2", ".", "reshape", "(", "-", "1", ",", "4", ")", ",", "elems3", ".", "reshape", "(", "-", "1", ",", "4", ")", ",", "elems4", ".", "reshape", "(", "-", "1", ",", "4", ")", ",", "]", ")", "return", "nodes", ",", "elems" ]
Canonical tetrahedrization of the cube. Input: Edge lengths of the cube Number of nodes along the edges.
[ "Canonical", "tetrahedrization", "of", "the", "cube", ".", "Input", ":", "Edge", "lenghts", "of", "the", "cube", "Number", "of", "nodes", "along", "the", "edges", "." ]
train
https://github.com/nschloe/meshzoo/blob/623b84c8b985dcc8e5ccc6250d1dbb989736dcef/meshzoo/cube.py#L6-L294
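A minimal usage sketch for the cube tetrahedrization above; it assumes the function is importable as meshzoo.cube (matching its file path in the repository) and only checks the node and cell counts that follow from the nx, ny, nz grid.

# Usage sketch, assuming the function above is exposed as meshzoo.cube.
import meshzoo

# A 3x3x3 node grid covers the unit cube with 2x2x2 = 8 sub-cubes,
# each of which is split into 5 tetrahedra.
nodes, cells = meshzoo.cube(0.0, 1.0, 0.0, 1.0, 0.0, 1.0, nx=3, ny=3, nz=3)

assert nodes.shape == (27, 3)    # nx * ny * nz vertices
assert cells.shape == (40, 4)    # 5 tetrahedra per sub-cube
assert cells.min() == 0 and cells.max() == 26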
shazow/workerpool
workerpool/pools.py
WorkerPool.grow
def grow(self): "Add another worker to the pool." t = self.worker_factory(self) t.start() self._size += 1
python
def grow(self): "Add another worker to the pool." t = self.worker_factory(self) t.start() self._size += 1
[ "def", "grow", "(", "self", ")", ":", "t", "=", "self", ".", "worker_factory", "(", "self", ")", "t", ".", "start", "(", ")", "self", ".", "_size", "+=", "1" ]
Add another worker to the pool.
[ "Add", "another", "worker", "to", "the", "pool", "." ]
train
https://github.com/shazow/workerpool/blob/2c5b29ec64ffbc94fc3623a4531eaf7c7c1a9ab5/workerpool/pools.py#L66-L70
shazow/workerpool
workerpool/pools.py
WorkerPool.shrink
def shrink(self): "Get rid of one worker from the pool. Raises IndexError if empty." if self._size <= 0: raise IndexError("pool is already empty") self._size -= 1 self.put(SuicideJob())
python
def shrink(self): "Get rid of one worker from the pool. Raises IndexError if empty." if self._size <= 0: raise IndexError("pool is already empty") self._size -= 1 self.put(SuicideJob())
[ "def", "shrink", "(", "self", ")", ":", "if", "self", ".", "_size", "<=", "0", ":", "raise", "IndexError", "(", "\"pool is already empty\"", ")", "self", ".", "_size", "-=", "1", "self", ".", "put", "(", "SuicideJob", "(", ")", ")" ]
Get rid of one worker from the pool. Raises IndexError if empty.
[ "Get", "rid", "of", "one", "worker", "from", "the", "pool", ".", "Raises", "IndexError", "if", "empty", "." ]
train
https://github.com/shazow/workerpool/blob/2c5b29ec64ffbc94fc3623a4531eaf7c7c1a9ab5/workerpool/pools.py#L72-L77
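A short resizing sketch covering both grow() and shrink(); it assumes the WorkerPool constructor accepts a size argument and that shutdown() terminates the remaining workers, as in this package.

# Resizing sketch (assumes workerpool.WorkerPool(size=...) and pool.shutdown()).
import workerpool

pool = workerpool.WorkerPool(size=2)
pool.grow()       # 3 workers
pool.grow()       # 4 workers
pool.shrink()     # queues a SuicideJob; one worker exits once it picks it up
pool.shutdown()   # terminate the remaining workers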
shazow/workerpool
workerpool/pools.py
WorkerPool.map
def map(self, fn, *seq): "Perform a map operation distributed among the workers. Will " "block until done." results = Queue() args = zip(*seq) for seq in args: j = SimpleJob(results, fn, seq) self.put(j) # Aggregate results r = [] for i in range(len(list(args))): r.append(results.get()) return r
python
def map(self, fn, *seq): "Perform a map operation distributed among the workers. Will " "block until done." results = Queue() args = zip(*seq) for seq in args: j = SimpleJob(results, fn, seq) self.put(j) # Aggregate results r = [] for i in range(len(list(args))): r.append(results.get()) return r
[ "def", "map", "(", "self", ",", "fn", ",", "*", "seq", ")", ":", "\"block until done.\"", "results", "=", "Queue", "(", ")", "args", "=", "zip", "(", "*", "seq", ")", "for", "seq", "in", "args", ":", "j", "=", "SimpleJob", "(", "results", ",", "fn", ",", "seq", ")", "self", ".", "put", "(", "j", ")", "# Aggregate results", "r", "=", "[", "]", "for", "i", "in", "range", "(", "len", "(", "list", "(", "args", ")", ")", ")", ":", "r", ".", "append", "(", "results", ".", "get", "(", ")", ")", "return", "r" ]
Perform a map operation distributed among the workers. Will
[ "Perform", "a", "map", "operation", "distributed", "among", "the", "workers", ".", "Will" ]
train
https://github.com/shazow/workerpool/blob/2c5b29ec64ffbc94fc3623a4531eaf7c7c1a9ab5/workerpool/pools.py#L89-L103
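A usage sketch for map(). Note that the aggregation loop above re-measures args after the submission loop has already iterated it, which relies on Python-2 zip semantics (zip returning a list), and that results come back through a shared Queue, so their order is not guaranteed.

# Usage sketch (assumes workerpool.WorkerPool; written against the Python-2-era
# zip semantics that the aggregation loop in map() relies on).
import workerpool

pool = workerpool.WorkerPool(size=4)
results = pool.map(lambda x: x * x, [0, 1, 2, 3, 4])
# Results arrive via a shared Queue, so ordering is not guaranteed.
print(sorted(results))   # [0, 1, 4, 9, 16] under Python 2
pool.shutdown()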
nschloe/meshzoo
meshzoo/moebius.py
moebius
def moebius( num_twists=1, # How many twists are there in the 'paper'? nl=60, # Number of nodes along the length of the strip nw=11, # Number of nodes along the width of the strip (>= 2) mode="classical", ): """Creates a simplistic triangular mesh on a slightly Möbius strip. The Möbius strip here deviates slightly from the ordinary geometry in that it is constructed in such a way that the two halves can be exchanged as to allow better comparison with the pseudo-Möbius geometry. The mode is either `'classical'` or `'smooth'`. The first is the classical Möbius band parametrization, the latter a smoothed variant matching `'pseudo'`. """ # The width of the strip width = 1.0 scale = 10.0 # radius of the strip when flattened out r = 1.0 # seam displacement alpha0 = 0.0 # pi / 2 # How flat the strip will be. # Positive values result in left-turning Möbius strips, negative in # right-turning ones. # Also influences the width of the strip. flatness = 1.0 # Generate suitable ranges for parametrization u_range = numpy.linspace(0.0, 2 * numpy.pi, num=nl, endpoint=False) v_range = numpy.linspace(-0.5 * width, 0.5 * width, num=nw) # Create the vertices. This is based on the parameterization # of the Möbius strip as given in # <http://en.wikipedia.org/wiki/M%C3%B6bius_strip#Geometry_and_topology> sin_u = numpy.sin(u_range) cos_u = numpy.cos(u_range) alpha = num_twists * 0.5 * u_range + alpha0 sin_alpha = numpy.sin(alpha) cos_alpha = numpy.cos(alpha) if mode == "classical": a = cos_alpha b = sin_alpha reverse_seam = num_twists % 2 == 1 elif mode == "smooth": # The fundamental difference with the ordinary Möbius band here are the # squares. # It is also possible to to abs() the respective sines and cosines, but # this results in a non-smooth manifold. a = numpy.copysign(cos_alpha ** 2, cos_alpha) b = numpy.copysign(sin_alpha ** 2, sin_alpha) reverse_seam = num_twists % 2 == 1 else: assert mode == "pseudo" a = cos_alpha ** 2 b = sin_alpha ** 2 reverse_seam = False nodes = ( scale * numpy.array( [ numpy.outer(a * cos_u, v_range) + r * cos_u[:, numpy.newaxis], numpy.outer(a * sin_u, v_range) + r * sin_u[:, numpy.newaxis], numpy.outer(b, v_range) * flatness, ] ) .reshape(3, -1) .T ) elems = _create_elements(nl, nw, reverse_seam) return nodes, elems
python
def moebius( num_twists=1, # How many twists are there in the 'paper'? nl=60, # Number of nodes along the length of the strip nw=11, # Number of nodes along the width of the strip (>= 2) mode="classical", ): """Creates a simplistic triangular mesh on a slightly Möbius strip. The Möbius strip here deviates slightly from the ordinary geometry in that it is constructed in such a way that the two halves can be exchanged as to allow better comparison with the pseudo-Möbius geometry. The mode is either `'classical'` or `'smooth'`. The first is the classical Möbius band parametrization, the latter a smoothed variant matching `'pseudo'`. """ # The width of the strip width = 1.0 scale = 10.0 # radius of the strip when flattened out r = 1.0 # seam displacement alpha0 = 0.0 # pi / 2 # How flat the strip will be. # Positive values result in left-turning Möbius strips, negative in # right-turning ones. # Also influences the width of the strip. flatness = 1.0 # Generate suitable ranges for parametrization u_range = numpy.linspace(0.0, 2 * numpy.pi, num=nl, endpoint=False) v_range = numpy.linspace(-0.5 * width, 0.5 * width, num=nw) # Create the vertices. This is based on the parameterization # of the Möbius strip as given in # <http://en.wikipedia.org/wiki/M%C3%B6bius_strip#Geometry_and_topology> sin_u = numpy.sin(u_range) cos_u = numpy.cos(u_range) alpha = num_twists * 0.5 * u_range + alpha0 sin_alpha = numpy.sin(alpha) cos_alpha = numpy.cos(alpha) if mode == "classical": a = cos_alpha b = sin_alpha reverse_seam = num_twists % 2 == 1 elif mode == "smooth": # The fundamental difference with the ordinary Möbius band here are the # squares. # It is also possible to to abs() the respective sines and cosines, but # this results in a non-smooth manifold. a = numpy.copysign(cos_alpha ** 2, cos_alpha) b = numpy.copysign(sin_alpha ** 2, sin_alpha) reverse_seam = num_twists % 2 == 1 else: assert mode == "pseudo" a = cos_alpha ** 2 b = sin_alpha ** 2 reverse_seam = False nodes = ( scale * numpy.array( [ numpy.outer(a * cos_u, v_range) + r * cos_u[:, numpy.newaxis], numpy.outer(a * sin_u, v_range) + r * sin_u[:, numpy.newaxis], numpy.outer(b, v_range) * flatness, ] ) .reshape(3, -1) .T ) elems = _create_elements(nl, nw, reverse_seam) return nodes, elems
[ "def", "moebius", "(", "num_twists", "=", "1", ",", "# How many twists are there in the 'paper'?", "nl", "=", "60", ",", "# Number of nodes along the length of the strip", "nw", "=", "11", ",", "# Number of nodes along the width of the strip (>= 2)", "mode", "=", "\"classical\"", ",", ")", ":", "# The width of the strip", "width", "=", "1.0", "scale", "=", "10.0", "# radius of the strip when flattened out", "r", "=", "1.0", "# seam displacement", "alpha0", "=", "0.0", "# pi / 2", "# How flat the strip will be.", "# Positive values result in left-turning Möbius strips, negative in", "# right-turning ones.", "# Also influences the width of the strip.", "flatness", "=", "1.0", "# Generate suitable ranges for parametrization", "u_range", "=", "numpy", ".", "linspace", "(", "0.0", ",", "2", "*", "numpy", ".", "pi", ",", "num", "=", "nl", ",", "endpoint", "=", "False", ")", "v_range", "=", "numpy", ".", "linspace", "(", "-", "0.5", "*", "width", ",", "0.5", "*", "width", ",", "num", "=", "nw", ")", "# Create the vertices. This is based on the parameterization", "# of the Möbius strip as given in", "# <http://en.wikipedia.org/wiki/M%C3%B6bius_strip#Geometry_and_topology>", "sin_u", "=", "numpy", ".", "sin", "(", "u_range", ")", "cos_u", "=", "numpy", ".", "cos", "(", "u_range", ")", "alpha", "=", "num_twists", "*", "0.5", "*", "u_range", "+", "alpha0", "sin_alpha", "=", "numpy", ".", "sin", "(", "alpha", ")", "cos_alpha", "=", "numpy", ".", "cos", "(", "alpha", ")", "if", "mode", "==", "\"classical\"", ":", "a", "=", "cos_alpha", "b", "=", "sin_alpha", "reverse_seam", "=", "num_twists", "%", "2", "==", "1", "elif", "mode", "==", "\"smooth\"", ":", "# The fundamental difference with the ordinary Möbius band here are the", "# squares.", "# It is also possible to to abs() the respective sines and cosines, but", "# this results in a non-smooth manifold.", "a", "=", "numpy", ".", "copysign", "(", "cos_alpha", "**", "2", ",", "cos_alpha", ")", "b", "=", "numpy", ".", "copysign", "(", "sin_alpha", "**", "2", ",", "sin_alpha", ")", "reverse_seam", "=", "num_twists", "%", "2", "==", "1", "else", ":", "assert", "mode", "==", "\"pseudo\"", "a", "=", "cos_alpha", "**", "2", "b", "=", "sin_alpha", "**", "2", "reverse_seam", "=", "False", "nodes", "=", "(", "scale", "*", "numpy", ".", "array", "(", "[", "numpy", ".", "outer", "(", "a", "*", "cos_u", ",", "v_range", ")", "+", "r", "*", "cos_u", "[", ":", ",", "numpy", ".", "newaxis", "]", ",", "numpy", ".", "outer", "(", "a", "*", "sin_u", ",", "v_range", ")", "+", "r", "*", "sin_u", "[", ":", ",", "numpy", ".", "newaxis", "]", ",", "numpy", ".", "outer", "(", "b", ",", "v_range", ")", "*", "flatness", ",", "]", ")", ".", "reshape", "(", "3", ",", "-", "1", ")", ".", "T", ")", "elems", "=", "_create_elements", "(", "nl", ",", "nw", ",", "reverse_seam", ")", "return", "nodes", ",", "elems" ]
Creates a simplistic triangular mesh on a slightly Möbius strip. The Möbius strip here deviates slightly from the ordinary geometry in that it is constructed in such a way that the two halves can be exchanged as to allow better comparison with the pseudo-Möbius geometry. The mode is either `'classical'` or `'smooth'`. The first is the classical Möbius band parametrization, the latter a smoothed variant matching `'pseudo'`.
[ "Creates", "a", "simplistic", "triangular", "mesh", "on", "a", "slightly", "Möbius", "strip", ".", "The", "Möbius", "strip", "here", "deviates", "slightly", "from", "the", "ordinary", "geometry", "in", "that", "it", "is", "constructed", "in", "such", "a", "way", "that", "the", "two", "halves", "can", "be", "exchanged", "as", "to", "allow", "better", "comparison", "with", "the", "pseudo", "-", "Möbius", "geometry", "." ]
train
https://github.com/nschloe/meshzoo/blob/623b84c8b985dcc8e5ccc6250d1dbb989736dcef/meshzoo/moebius.py#L7-L83
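A minimal sketch for the Möbius mesh above, assuming it is importable as meshzoo.moebius; the node count is nl * nw and the cells are triangles.

# Usage sketch, assuming the function above is exposed as meshzoo.moebius.
import meshzoo

nodes, cells = meshzoo.moebius(num_twists=1, nl=60, nw=11, mode="classical")

assert nodes.shape == (60 * 11, 3)   # nl * nw vertices on the strip
assert cells.shape[1] == 3           # triangular elements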
shazow/workerpool
workerpool/workers.py
Worker.run
def run(self): "Get jobs from the queue and perform them as they arrive." while 1: # Sleep until there is a job to perform. job = self.jobs.get() # Yawn. Time to get some work done. try: job.run() self.jobs.task_done() except TerminationNotice: self.jobs.task_done() break
python
def run(self): "Get jobs from the queue and perform them as they arrive." while 1: # Sleep until there is a job to perform. job = self.jobs.get() # Yawn. Time to get some work done. try: job.run() self.jobs.task_done() except TerminationNotice: self.jobs.task_done() break
[ "def", "run", "(", "self", ")", ":", "while", "1", ":", "# Sleep until there is a job to perform.", "job", "=", "self", ".", "jobs", ".", "get", "(", ")", "# Yawn. Time to get some work done.", "try", ":", "job", ".", "run", "(", ")", "self", ".", "jobs", ".", "task_done", "(", ")", "except", "TerminationNotice", ":", "self", ".", "jobs", ".", "task_done", "(", ")", "break" ]
Get jobs from the queue and perform them as they arrive.
[ "Get", "jobs", "from", "the", "queue", "and", "perform", "them", "as", "they", "arrive", "." ]
train
https://github.com/shazow/workerpool/blob/2c5b29ec64ffbc94fc3623a4531eaf7c7c1a9ab5/workerpool/workers.py#L28-L40
jwass/geojsonio.py
geojsonio/geojsonio.py
display
def display(contents, domain=DEFAULT_DOMAIN, force_gist=False): """ Open a web browser pointing to geojson.io with the specified content. If the content is large, an anonymous gist will be created on github and the URL will instruct geojson.io to download the gist data and then display. If the content is small, this step is not needed as the data can be included in the URL Parameters ---------- content - (see make_geojson) domain - string, default http://geojson.io force_gist - bool, default False Create an anonymous gist on Github regardless of the size of the contents """ url = make_url(contents, domain, force_gist) webbrowser.open(url) return url
python
def display(contents, domain=DEFAULT_DOMAIN, force_gist=False): """ Open a web browser pointing to geojson.io with the specified content. If the content is large, an anonymous gist will be created on github and the URL will instruct geojson.io to download the gist data and then display. If the content is small, this step is not needed as the data can be included in the URL Parameters ---------- content - (see make_geojson) domain - string, default http://geojson.io force_gist - bool, default False Create an anonymous gist on Github regardless of the size of the contents """ url = make_url(contents, domain, force_gist) webbrowser.open(url) return url
[ "def", "display", "(", "contents", ",", "domain", "=", "DEFAULT_DOMAIN", ",", "force_gist", "=", "False", ")", ":", "url", "=", "make_url", "(", "contents", ",", "domain", ",", "force_gist", ")", "webbrowser", ".", "open", "(", "url", ")", "return", "url" ]
Open a web browser pointing to geojson.io with the specified content. If the content is large, an anonymous gist will be created on github and the URL will instruct geojson.io to download the gist data and then display. If the content is small, this step is not needed as the data can be included in the URL Parameters ---------- content - (see make_geojson) domain - string, default http://geojson.io force_gist - bool, default False Create an anonymous gist on Github regardless of the size of the contents
[ "Open", "a", "web", "browser", "pointing", "to", "geojson", ".", "io", "with", "the", "specified", "content", "." ]
train
https://github.com/jwass/geojsonio.py/blob/8229a48238f128837e6dce49f18310df84968825/geojsonio/geojsonio.py#L18-L38
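A usage sketch for display(); the GeoJSON below is a made-up single-point FeatureCollection, small enough to be embedded directly in the URL rather than uploaded as a gist.

# Usage sketch (assumes geojsonio.display as defined above).
import json
import geojsonio

contents = json.dumps({
    "type": "FeatureCollection",
    "features": [{
        "type": "Feature",
        "geometry": {"type": "Point", "coordinates": [-70.9, 42.3]},
        "properties": {"name": "example point"},
    }],
})
url = geojsonio.display(contents)   # opens a browser tab on geojson.io
print(url)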
jwass/geojsonio.py
geojsonio/geojsonio.py
embed
def embed(contents='', width='100%', height=512, *args, **kwargs): """ Embed geojson.io in an iframe in Jupyter/IPython notebook. Parameters ---------- contents - see make_url() width - string, default '100%' - width of the iframe height - string / int, default 512 - height of the iframe kwargs - additional arguments are passed to `make_url()` """ from IPython.display import HTML url = make_url(contents, *args, **kwargs) html = '<iframe src={url} width={width} height={height}></iframe>'.format( url=url, width=width, height=height) return HTML(html)
python
def embed(contents='', width='100%', height=512, *args, **kwargs): """ Embed geojson.io in an iframe in Jupyter/IPython notebook. Parameters ---------- contents - see make_url() width - string, default '100%' - width of the iframe height - string / int, default 512 - height of the iframe kwargs - additional arguments are passed to `make_url()` """ from IPython.display import HTML url = make_url(contents, *args, **kwargs) html = '<iframe src={url} width={width} height={height}></iframe>'.format( url=url, width=width, height=height) return HTML(html)
[ "def", "embed", "(", "contents", "=", "''", ",", "width", "=", "'100%'", ",", "height", "=", "512", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "from", "IPython", ".", "display", "import", "HTML", "url", "=", "make_url", "(", "contents", ",", "*", "args", ",", "*", "*", "kwargs", ")", "html", "=", "'<iframe src={url} width={width} height={height}></iframe>'", ".", "format", "(", "url", "=", "url", ",", "width", "=", "width", ",", "height", "=", "height", ")", "return", "HTML", "(", "html", ")" ]
Embed geojson.io in an iframe in Jupyter/IPython notebook. Parameters ---------- contents - see make_url() width - string, default '100%' - width of the iframe height - string / int, default 512 - height of the iframe kwargs - additional arguments are passed to `make_url()`
[ "Embed", "geojson", ".", "io", "in", "an", "iframe", "in", "Jupyter", "/", "IPython", "notebook", "." ]
train
https://github.com/jwass/geojsonio.py/blob/8229a48238f128837e6dce49f18310df84968825/geojsonio/geojsonio.py#L43-L60
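A notebook sketch for embed(); it assumes a Jupyter/IPython cell, where the returned HTML object renders as an iframe.

# Notebook sketch (assumes geojsonio.embed as defined above; run in a Jupyter cell).
import geojsonio

contents = '{"type": "Point", "coordinates": [-122.33, 47.61]}'
geojsonio.embed(contents, width="100%", height=400)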
jwass/geojsonio.py
geojsonio/geojsonio.py
make_url
def make_url(contents, domain=DEFAULT_DOMAIN, force_gist=False, size_for_gist=MAX_URL_LEN): """ Returns the URL to open given the domain and contents. If the file contents are large, an anonymous gist will be created. Parameters ---------- contents * string - assumed to be GeoJSON * an object that implements __geo_interface__ A FeatureCollection will be constructed with one feature, the object. * a sequence of objects that each implement __geo_interface__ A FeatureCollection will be constructed with the objects as the features domain - string, default http://geojson.io force_gist - force gist creation regardless of file size. For more information about __geo_interface__ see: https://gist.github.com/sgillies/2217756 If the contents are large, then a gist will be created. """ contents = make_geojson(contents) if len(contents) <= size_for_gist and not force_gist: url = data_url(contents, domain) else: gist = _make_gist(contents) url = gist_url(gist.id, domain) return url
python
def make_url(contents, domain=DEFAULT_DOMAIN, force_gist=False, size_for_gist=MAX_URL_LEN): """ Returns the URL to open given the domain and contents. If the file contents are large, an anonymous gist will be created. Parameters ---------- contents * string - assumed to be GeoJSON * an object that implements __geo_interface__ A FeatureCollection will be constructed with one feature, the object. * a sequence of objects that each implement __geo_interface__ A FeatureCollection will be constructed with the objects as the features domain - string, default http://geojson.io force_gist - force gist creation regardless of file size. For more information about __geo_interface__ see: https://gist.github.com/sgillies/2217756 If the contents are large, then a gist will be created. """ contents = make_geojson(contents) if len(contents) <= size_for_gist and not force_gist: url = data_url(contents, domain) else: gist = _make_gist(contents) url = gist_url(gist.id, domain) return url
[ "def", "make_url", "(", "contents", ",", "domain", "=", "DEFAULT_DOMAIN", ",", "force_gist", "=", "False", ",", "size_for_gist", "=", "MAX_URL_LEN", ")", ":", "contents", "=", "make_geojson", "(", "contents", ")", "if", "len", "(", "contents", ")", "<=", "size_for_gist", "and", "not", "force_gist", ":", "url", "=", "data_url", "(", "contents", ",", "domain", ")", "else", ":", "gist", "=", "_make_gist", "(", "contents", ")", "url", "=", "gist_url", "(", "gist", ".", "id", ",", "domain", ")", "return", "url" ]
Returns the URL to open given the domain and contents. If the file contents are large, an anonymous gist will be created. Parameters ---------- contents * string - assumed to be GeoJSON * an object that implements __geo_interface__ A FeatureCollection will be constructed with one feature, the object. * a sequence of objects that each implement __geo_interface__ A FeatureCollection will be constructed with the objects as the features domain - string, default http://geojson.io force_gist - force gist creation regardless of file size. For more information about __geo_interface__ see: https://gist.github.com/sgillies/2217756 If the contents are large, then a gist will be created.
[ "Returns", "the", "URL", "to", "open", "given", "the", "domain", "and", "contents", "." ]
train
https://github.com/jwass/geojsonio.py/blob/8229a48238f128837e6dce49f18310df84968825/geojsonio/geojsonio.py#L63-L96
jwass/geojsonio.py
geojsonio/geojsonio.py
make_geojson
def make_geojson(contents): """ Return a GeoJSON string from a variety of inputs. See the documentation for make_url for the possible contents input. Returns ------- GeoJSON string """ if isinstance(contents, six.string_types): return contents if hasattr(contents, '__geo_interface__'): features = [_geo_to_feature(contents)] else: try: feature_iter = iter(contents) except TypeError: raise ValueError('Unknown type for input') features = [] for i, f in enumerate(feature_iter): if not hasattr(f, '__geo_interface__'): raise ValueError('Unknown type at index {0}'.format(i)) features.append(_geo_to_feature(f)) data = {'type': 'FeatureCollection', 'features': features} return json.dumps(data)
python
def make_geojson(contents): """ Return a GeoJSON string from a variety of inputs. See the documentation for make_url for the possible contents input. Returns ------- GeoJSON string """ if isinstance(contents, six.string_types): return contents if hasattr(contents, '__geo_interface__'): features = [_geo_to_feature(contents)] else: try: feature_iter = iter(contents) except TypeError: raise ValueError('Unknown type for input') features = [] for i, f in enumerate(feature_iter): if not hasattr(f, '__geo_interface__'): raise ValueError('Unknown type at index {0}'.format(i)) features.append(_geo_to_feature(f)) data = {'type': 'FeatureCollection', 'features': features} return json.dumps(data)
[ "def", "make_geojson", "(", "contents", ")", ":", "if", "isinstance", "(", "contents", ",", "six", ".", "string_types", ")", ":", "return", "contents", "if", "hasattr", "(", "contents", ",", "'__geo_interface__'", ")", ":", "features", "=", "[", "_geo_to_feature", "(", "contents", ")", "]", "else", ":", "try", ":", "feature_iter", "=", "iter", "(", "contents", ")", "except", "TypeError", ":", "raise", "ValueError", "(", "'Unknown type for input'", ")", "features", "=", "[", "]", "for", "i", ",", "f", "in", "enumerate", "(", "feature_iter", ")", ":", "if", "not", "hasattr", "(", "f", ",", "'__geo_interface__'", ")", ":", "raise", "ValueError", "(", "'Unknown type at index {0}'", ".", "format", "(", "i", ")", ")", "features", ".", "append", "(", "_geo_to_feature", "(", "f", ")", ")", "data", "=", "{", "'type'", ":", "'FeatureCollection'", ",", "'features'", ":", "features", "}", "return", "json", ".", "dumps", "(", "data", ")" ]
Return a GeoJSON string from a variety of inputs. See the documentation for make_url for the possible contents input. Returns ------- GeoJSON string
[ "Return", "a", "GeoJSON", "string", "from", "a", "variety", "of", "inputs", ".", "See", "the", "documentation", "for", "make_url", "for", "the", "possible", "contents", "input", "." ]
train
https://github.com/jwass/geojsonio.py/blob/8229a48238f128837e6dce49f18310df84968825/geojsonio/geojsonio.py#L99-L128
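A sketch for the __geo_interface__ path of make_geojson(); shapely is only used here as an example producer of __geo_interface__ objects and is not required by geojsonio itself.

# Sketch for the __geo_interface__ path (shapely is just an example producer
# of __geo_interface__ objects, not a geojsonio dependency).
from shapely.geometry import Point

from geojsonio.geojsonio import make_geojson

features = [Point(0.0, 0.0), Point(1.0, 1.0)]
geojson_str = make_geojson(features)
# A FeatureCollection string with one Feature per input geometry.
print(geojson_str)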
jwass/geojsonio.py
geojsonio/geojsonio.py
data_url
def data_url(contents, domain=DEFAULT_DOMAIN): """ Return the URL for embedding the GeoJSON data in the URL hash Parameters ---------- contents - string of GeoJSON domain - string, default http://geojson.io """ url = (domain + '#data=data:application/json,' + urllib.parse.quote(contents)) return url
python
def data_url(contents, domain=DEFAULT_DOMAIN): """ Return the URL for embedding the GeoJSON data in the URL hash Parameters ---------- contents - string of GeoJSON domain - string, default http://geojson.io """ url = (domain + '#data=data:application/json,' + urllib.parse.quote(contents)) return url
[ "def", "data_url", "(", "contents", ",", "domain", "=", "DEFAULT_DOMAIN", ")", ":", "url", "=", "(", "domain", "+", "'#data=data:application/json,'", "+", "urllib", ".", "parse", ".", "quote", "(", "contents", ")", ")", "return", "url" ]
Return the URL for embedding the GeoJSON data in the URL hash Parameters ---------- contents - string of GeoJSON domain - string, default http://geojson.io
[ "Return", "the", "URL", "for", "embedding", "the", "GeoJSON", "data", "in", "the", "URL", "hash" ]
train
https://github.com/jwass/geojsonio.py/blob/8229a48238f128837e6dce49f18310df84968825/geojsonio/geojsonio.py#L149-L161
jwass/geojsonio.py
geojsonio/geojsonio.py
_make_gist
def _make_gist(contents, description='', filename='data.geojson'): """ Create and return an anonymous gist with a single file and specified contents """ ghapi = github3.GitHub() files = {filename: {'content': contents}} gist = ghapi.create_gist(description, files) return gist
python
def _make_gist(contents, description='', filename='data.geojson'): """ Create and return an anonymous gist with a single file and specified contents """ ghapi = github3.GitHub() files = {filename: {'content': contents}} gist = ghapi.create_gist(description, files) return gist
[ "def", "_make_gist", "(", "contents", ",", "description", "=", "''", ",", "filename", "=", "'data.geojson'", ")", ":", "ghapi", "=", "github3", ".", "GitHub", "(", ")", "files", "=", "{", "filename", ":", "{", "'content'", ":", "contents", "}", "}", "gist", "=", "ghapi", ".", "create_gist", "(", "description", ",", "files", ")", "return", "gist" ]
Create and return an anonymous gist with a single file and specified contents
[ "Create", "and", "return", "an", "anonymous", "gist", "with", "a", "single", "file", "and", "specified", "contents" ]
train
https://github.com/jwass/geojsonio.py/blob/8229a48238f128837e6dce49f18310df84968825/geojsonio/geojsonio.py#L164-L174
nschloe/orthopy
orthopy/line_segment/tools.py
clenshaw
def clenshaw(a, alpha, beta, t): """Clenshaw's algorithm for evaluating S(t) = \\sum a_k P_k(alpha, beta)(t) where P_k(alpha, beta) is the kth orthogonal polynomial defined by the recurrence coefficients alpha, beta. See <https://en.wikipedia.org/wiki/Clenshaw_algorithm> for details. """ n = len(alpha) assert len(beta) == n assert len(a) == n + 1 try: b = numpy.empty((n + 1,) + t.shape) except AttributeError: # 'float' object has no attribute 'shape' b = numpy.empty(n + 1) # b[0] is unused, can be any value # TODO shift the array b[0] = 1.0 b[n] = a[n] b[n - 1] = a[n - 1] + (t - alpha[n - 1]) * b[n] for k in range(n - 2, 0, -1): b[k] = a[k] + (t - alpha[k]) * b[k + 1] - beta[k + 1] * b[k + 2] phi0 = 1.0 phi1 = t - alpha[0] return phi0 * a[0] + phi1 * b[1] - beta[1] * phi0 * b[2]
python
def clenshaw(a, alpha, beta, t): """Clenshaw's algorithm for evaluating S(t) = \\sum a_k P_k(alpha, beta)(t) where P_k(alpha, beta) is the kth orthogonal polynomial defined by the recurrence coefficients alpha, beta. See <https://en.wikipedia.org/wiki/Clenshaw_algorithm> for details. """ n = len(alpha) assert len(beta) == n assert len(a) == n + 1 try: b = numpy.empty((n + 1,) + t.shape) except AttributeError: # 'float' object has no attribute 'shape' b = numpy.empty(n + 1) # b[0] is unused, can be any value # TODO shift the array b[0] = 1.0 b[n] = a[n] b[n - 1] = a[n - 1] + (t - alpha[n - 1]) * b[n] for k in range(n - 2, 0, -1): b[k] = a[k] + (t - alpha[k]) * b[k + 1] - beta[k + 1] * b[k + 2] phi0 = 1.0 phi1 = t - alpha[0] return phi0 * a[0] + phi1 * b[1] - beta[1] * phi0 * b[2]
[ "def", "clenshaw", "(", "a", ",", "alpha", ",", "beta", ",", "t", ")", ":", "n", "=", "len", "(", "alpha", ")", "assert", "len", "(", "beta", ")", "==", "n", "assert", "len", "(", "a", ")", "==", "n", "+", "1", "try", ":", "b", "=", "numpy", ".", "empty", "(", "(", "n", "+", "1", ",", ")", "+", "t", ".", "shape", ")", "except", "AttributeError", ":", "# 'float' object has no attribute 'shape'", "b", "=", "numpy", ".", "empty", "(", "n", "+", "1", ")", "# b[0] is unused, can be any value", "# TODO shift the array", "b", "[", "0", "]", "=", "1.0", "b", "[", "n", "]", "=", "a", "[", "n", "]", "b", "[", "n", "-", "1", "]", "=", "a", "[", "n", "-", "1", "]", "+", "(", "t", "-", "alpha", "[", "n", "-", "1", "]", ")", "*", "b", "[", "n", "]", "for", "k", "in", "range", "(", "n", "-", "2", ",", "0", ",", "-", "1", ")", ":", "b", "[", "k", "]", "=", "a", "[", "k", "]", "+", "(", "t", "-", "alpha", "[", "k", "]", ")", "*", "b", "[", "k", "+", "1", "]", "-", "beta", "[", "k", "+", "1", "]", "*", "b", "[", "k", "+", "2", "]", "phi0", "=", "1.0", "phi1", "=", "t", "-", "alpha", "[", "0", "]", "return", "phi0", "*", "a", "[", "0", "]", "+", "phi1", "*", "b", "[", "1", "]", "-", "beta", "[", "1", "]", "*", "phi0", "*", "b", "[", "2", "]" ]
Clenshaw's algorithm for evaluating S(t) = \\sum a_k P_k(alpha, beta)(t) where P_k(alpha, beta) is the kth orthogonal polynomial defined by the recurrence coefficients alpha, beta. See <https://en.wikipedia.org/wiki/Clenshaw_algorithm> for details.
[ "Clenshaw", "s", "algorithm", "for", "evaluating" ]
train
https://github.com/nschloe/orthopy/blob/64713d0533b0af042810a7535fff411b8e0aea9e/orthopy/line_segment/tools.py#L8-L39
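A small numeric check for clenshaw(), using the monic Legendre recurrence (alpha_k = 0, beta_k = k^2 / (4 k^2 - 1)) as an assumed example input; beta[0] is not used by the algorithm.

# Numeric check sketch: monic Legendre recurrence as example input.
import numpy

from orthopy.line_segment.tools import clenshaw

alpha = numpy.array([0.0, 0.0])
beta = numpy.array([2.0, 1.0 / 3.0])   # beta[0] is unused here
a = numpy.array([1.0, 2.0, 3.0])       # coefficients of P_0, P_1, P_2

t = 0.5
s = clenshaw(a, alpha, beta, t)
# P_0 = 1, P_1 = t, P_2 = t^2 - 1/3, so S(0.5) = 1 + 2*0.5 + 3*(0.25 - 1/3) = 1.75
assert abs(s - 1.75) < 1e-12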
nschloe/orthopy
orthopy/e1r/orth.py
tree
def tree(X, n, alpha=0, symbolic=False): """Recurrence coefficients for generalized Laguerre polynomials. Set alpha=0 (default) to get classical Laguerre. """ args = recurrence_coefficients(n, alpha=alpha, symbolic=symbolic) return line_tree(X, *args)
python
def tree(X, n, alpha=0, symbolic=False): """Recurrence coefficients for generalized Laguerre polynomials. Set alpha=0 (default) to get classical Laguerre. """ args = recurrence_coefficients(n, alpha=alpha, symbolic=symbolic) return line_tree(X, *args)
[ "def", "tree", "(", "X", ",", "n", ",", "alpha", "=", "0", ",", "symbolic", "=", "False", ")", ":", "args", "=", "recurrence_coefficients", "(", "n", ",", "alpha", "=", "alpha", ",", "symbolic", "=", "symbolic", ")", "return", "line_tree", "(", "X", ",", "*", "args", ")" ]
Recurrence coefficients for generalized Laguerre polynomials. Set alpha=0 (default) to get classical Laguerre.
[ "Recurrence", "coefficients", "for", "generalized", "Laguerre", "polynomials", ".", "Set", "alpha", "=", "0", "(", "default", ")", "to", "get", "classical", "Laguerre", "." ]
train
https://github.com/nschloe/orthopy/blob/64713d0533b0af042810a7535fff411b8e0aea9e/orthopy/e1r/orth.py#L12-L17
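A minimal check for the Laguerre tree, assuming the public path orthopy.e1r.tree; with alpha=0 and the default "normal" standardization, the degree-0 and degree-1 orthonormal Laguerre polynomials are 1 and 1 - x.

# Usage sketch (assumes orthopy.e1r.tree as the public entry point).
import numpy
import orthopy

x = numpy.linspace(0.0, 5.0, 6)
vals = orthopy.e1r.tree(x, 3)   # orthonormal Laguerre values, degree by degree

assert numpy.allclose(vals[0], 1.0)       # degree 0: the constant 1
assert numpy.allclose(vals[1], 1.0 - x)   # degree 1: 1 - x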
nschloe/orthopy
orthopy/e1r/orth.py
recurrence_coefficients
def recurrence_coefficients(n, alpha, standardization="normal", symbolic=False): """Recurrence coefficients for generalized Laguerre polynomials. vals_k = vals_{k-1} * (t*a_k - b_k) - vals{k-2} * c_k """ S = sympy.S if symbolic else lambda x: x sqrt = sympy.sqrt if symbolic else numpy.sqrt gamma = sympy.gamma if symbolic else scipy.special.gamma if standardization == "monic": p0 = 1 a = n * [1] b = [2 * k + 1 + alpha for k in range(n)] c = [k * (k + alpha) for k in range(n)] c[0] = gamma(alpha + 1) elif standardization == "classical": p0 = 1 a = [-S(1) / (k + 1) for k in range(n)] b = [-S(2 * k + 1 + alpha) / (k + 1) for k in range(n)] c = [S(k + alpha) / (k + 1) for k in range(n)] c[0] = numpy.nan else: assert ( standardization == "normal" ), "Unknown Laguerre standardization '{}'.".format( standardization ) p0 = 1 / sqrt(gamma(alpha + 1)) a = [-1 / sqrt((k + 1) * (k + 1 + alpha)) for k in range(n)] b = [-(2 * k + 1 + alpha) / sqrt((k + 1) * (k + 1 + alpha)) for k in range(n)] c = [sqrt(k * S(k + alpha) / ((k + 1) * (k + 1 + alpha))) for k in range(n)] c[0] = numpy.nan return p0, numpy.array(a), numpy.array(b), numpy.array(c)
python
def recurrence_coefficients(n, alpha, standardization="normal", symbolic=False): """Recurrence coefficients for generalized Laguerre polynomials. vals_k = vals_{k-1} * (t*a_k - b_k) - vals{k-2} * c_k """ S = sympy.S if symbolic else lambda x: x sqrt = sympy.sqrt if symbolic else numpy.sqrt gamma = sympy.gamma if symbolic else scipy.special.gamma if standardization == "monic": p0 = 1 a = n * [1] b = [2 * k + 1 + alpha for k in range(n)] c = [k * (k + alpha) for k in range(n)] c[0] = gamma(alpha + 1) elif standardization == "classical": p0 = 1 a = [-S(1) / (k + 1) for k in range(n)] b = [-S(2 * k + 1 + alpha) / (k + 1) for k in range(n)] c = [S(k + alpha) / (k + 1) for k in range(n)] c[0] = numpy.nan else: assert ( standardization == "normal" ), "Unknown Laguerre standardization '{}'.".format( standardization ) p0 = 1 / sqrt(gamma(alpha + 1)) a = [-1 / sqrt((k + 1) * (k + 1 + alpha)) for k in range(n)] b = [-(2 * k + 1 + alpha) / sqrt((k + 1) * (k + 1 + alpha)) for k in range(n)] c = [sqrt(k * S(k + alpha) / ((k + 1) * (k + 1 + alpha))) for k in range(n)] c[0] = numpy.nan return p0, numpy.array(a), numpy.array(b), numpy.array(c)
[ "def", "recurrence_coefficients", "(", "n", ",", "alpha", ",", "standardization", "=", "\"normal\"", ",", "symbolic", "=", "False", ")", ":", "S", "=", "sympy", ".", "S", "if", "symbolic", "else", "lambda", "x", ":", "x", "sqrt", "=", "sympy", ".", "sqrt", "if", "symbolic", "else", "numpy", ".", "sqrt", "gamma", "=", "sympy", ".", "gamma", "if", "symbolic", "else", "scipy", ".", "special", ".", "gamma", "if", "standardization", "==", "\"monic\"", ":", "p0", "=", "1", "a", "=", "n", "*", "[", "1", "]", "b", "=", "[", "2", "*", "k", "+", "1", "+", "alpha", "for", "k", "in", "range", "(", "n", ")", "]", "c", "=", "[", "k", "*", "(", "k", "+", "alpha", ")", "for", "k", "in", "range", "(", "n", ")", "]", "c", "[", "0", "]", "=", "gamma", "(", "alpha", "+", "1", ")", "elif", "standardization", "==", "\"classical\"", ":", "p0", "=", "1", "a", "=", "[", "-", "S", "(", "1", ")", "/", "(", "k", "+", "1", ")", "for", "k", "in", "range", "(", "n", ")", "]", "b", "=", "[", "-", "S", "(", "2", "*", "k", "+", "1", "+", "alpha", ")", "/", "(", "k", "+", "1", ")", "for", "k", "in", "range", "(", "n", ")", "]", "c", "=", "[", "S", "(", "k", "+", "alpha", ")", "/", "(", "k", "+", "1", ")", "for", "k", "in", "range", "(", "n", ")", "]", "c", "[", "0", "]", "=", "numpy", ".", "nan", "else", ":", "assert", "(", "standardization", "==", "\"normal\"", ")", ",", "\"Unknown Laguerre standardization '{}'.\"", ".", "format", "(", "standardization", ")", "p0", "=", "1", "/", "sqrt", "(", "gamma", "(", "alpha", "+", "1", ")", ")", "a", "=", "[", "-", "1", "/", "sqrt", "(", "(", "k", "+", "1", ")", "*", "(", "k", "+", "1", "+", "alpha", ")", ")", "for", "k", "in", "range", "(", "n", ")", "]", "b", "=", "[", "-", "(", "2", "*", "k", "+", "1", "+", "alpha", ")", "/", "sqrt", "(", "(", "k", "+", "1", ")", "*", "(", "k", "+", "1", "+", "alpha", ")", ")", "for", "k", "in", "range", "(", "n", ")", "]", "c", "=", "[", "sqrt", "(", "k", "*", "S", "(", "k", "+", "alpha", ")", "/", "(", "(", "k", "+", "1", ")", "*", "(", "k", "+", "1", "+", "alpha", ")", ")", ")", "for", "k", "in", "range", "(", "n", ")", "]", "c", "[", "0", "]", "=", "numpy", ".", "nan", "return", "p0", ",", "numpy", ".", "array", "(", "a", ")", ",", "numpy", ".", "array", "(", "b", ")", ",", "numpy", ".", "array", "(", "c", ")" ]
Recurrence coefficients for generalized Laguerre polynomials. vals_k = vals_{k-1} * (t*a_k - b_k) - vals{k-2} * c_k
[ "Recurrence", "coefficients", "for", "generalized", "Laguerre", "polynomials", "." ]
train
https://github.com/nschloe/orthopy/blob/64713d0533b0af042810a7535fff411b8e0aea9e/orthopy/e1r/orth.py#L20-L53
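A sketch that spells out the monic coefficients for alpha=0, where the recurrence reduces to pi_{k+1}(x) = (x - (2k+1)) pi_k(x) - k^2 pi_{k-1}(x); the import path follows the file location shown above.

# Sketch of the monic coefficients for alpha = 0.
from orthopy.e1r.orth import recurrence_coefficients

p0, a, b, c = recurrence_coefficients(4, alpha=0, standardization="monic")

assert p0 == 1
assert list(a) == [1, 1, 1, 1]
assert list(b) == [1, 3, 5, 7]    # 2k + 1
assert list(c)[1:] == [1, 4, 9]   # k^2; c[0] holds the weight integral gamma(alpha+1)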
nschloe/orthopy
orthopy/line_segment/recurrence_coefficients.py
jacobi
def jacobi(n, alpha, beta, standardization, symbolic=False): """Generate the recurrence coefficients a_k, b_k, c_k in P_{k+1}(x) = (a_k x - b_k)*P_{k}(x) - c_k P_{k-1}(x) for the Jacobi polynomials which are orthogonal on [-1, 1] with respect to the weight w(x)=[(1-x)^alpha]*[(1+x)^beta]; see <https://en.wikipedia.org/wiki/Jacobi_polynomials#Recurrence_relations>. """ gamma = sympy.gamma if symbolic else lambda x: scipy.special.gamma(float(x)) def rational(x, y): # <https://github.com/sympy/sympy/pull/13670> return ( sympy.Rational(x, y) if all([isinstance(val, int) for val in [x, y]]) else x / y ) frac = rational if symbolic else lambda x, y: x / y sqrt = sympy.sqrt if symbolic else numpy.sqrt int_1 = ( 2 ** (alpha + beta + 1) * gamma(alpha + 1) * gamma(beta + 1) / gamma(alpha + beta + 2) ) if standardization == "monic": p0 = 1 a = numpy.ones(n, dtype=int) # work around bug <https://github.com/sympy/sympy/issues/13618> if isinstance(alpha, numpy.int64): alpha = int(alpha) if isinstance(beta, numpy.int64): beta = int(beta) b = [ frac(beta - alpha, alpha + beta + 2) if N == 0 else frac( beta ** 2 - alpha ** 2, (2 * N + alpha + beta) * (2 * N + alpha + beta + 2), ) for N in range(n) ] # c[0] is not used in the actual recurrence, but is often defined # as the integral of the weight function of the domain, i.e., # ``` # int_{-1}^{+1} (1-x)^a * (1+x)^b dx = # 2^(a+b+1) * Gamma(a+1) * Gamma(b+1) / Gamma(a+b+2). # ``` # Note also that we have the treat the case N==1 separately to avoid # division by 0 for alpha=beta=-1/2. c = [ int_1 if N == 0 else frac( 4 * (1 + alpha) * (1 + beta), (2 + alpha + beta) ** 2 * (3 + alpha + beta), ) if N == 1 else frac( 4 * (N + alpha) * (N + beta) * N * (N + alpha + beta), (2 * N + alpha + beta) ** 2 * (2 * N + alpha + beta + 1) * (2 * N + alpha + beta - 1), ) for N in range(n) ] elif standardization == "p(1)=(n+alpha over n)" or ( alpha == 0 and standardization == "p(1)=1" ): p0 = 1 # work around bug <https://github.com/sympy/sympy/issues/13618> if isinstance(alpha, numpy.int64): alpha = int(alpha) if isinstance(beta, numpy.int64): beta = int(beta) # Treat N==0 separately to avoid division by 0 for alpha=beta=-1/2. a = [ frac(alpha + beta + 2, 2) if N == 0 else frac( (2 * N + alpha + beta + 1) * (2 * N + alpha + beta + 2), 2 * (N + 1) * (N + alpha + beta + 1), ) for N in range(n) ] b = [ frac(beta - alpha, 2) if N == 0 else frac( (beta ** 2 - alpha ** 2) * (2 * N + alpha + beta + 1), 2 * (N + 1) * (N + alpha + beta + 1) * (2 * N + alpha + beta), ) for N in range(n) ] c = [ int_1 if N == 0 else frac( (N + alpha) * (N + beta) * (2 * N + alpha + beta + 2), (N + 1) * (N + alpha + beta + 1) * (2 * N + alpha + beta), ) for N in range(n) ] else: assert standardization == "normal", "Unknown standardization '{}'.".format( standardization ) p0 = sqrt(1 / int_1) # Treat N==0 separately to avoid division by 0 for alpha=beta=-1/2. 
a = [ frac(alpha + beta + 2, 2) * sqrt(frac(alpha + beta + 3, (alpha + 1) * (beta + 1))) if N == 0 else frac(2 * N + alpha + beta + 2, 2) * sqrt( frac( (2 * N + alpha + beta + 1) * (2 * N + alpha + beta + 3), (N + 1) * (N + alpha + 1) * (N + beta + 1) * (N + alpha + beta + 1), ) ) for N in range(n) ] b = [ ( frac(beta - alpha, 2) if N == 0 else frac(beta ** 2 - alpha ** 2, 2 * (2 * N + alpha + beta)) ) * sqrt( frac( (2 * N + alpha + beta + 3) * (2 * N + alpha + beta + 1), (N + 1) * (N + alpha + 1) * (N + beta + 1) * (N + alpha + beta + 1), ) ) for N in range(n) ] c = [ int_1 if N == 0 else frac(4 + alpha + beta, 2 + alpha + beta) * sqrt( frac( (1 + alpha) * (1 + beta) * (5 + alpha + beta), 2 * (2 + alpha) * (2 + beta) * (2 + alpha + beta), ) ) if N == 1 else frac(2 * N + alpha + beta + 2, 2 * N + alpha + beta) * sqrt( frac( N * (N + alpha) * (N + beta) * (N + alpha + beta) * (2 * N + alpha + beta + 3), (N + 1) * (N + alpha + 1) * (N + beta + 1) * (N + alpha + beta + 1) * (2 * N + alpha + beta - 1), ) ) for N in range(n) ] return p0, numpy.array(a), numpy.array(b), numpy.array(c)
python
def jacobi(n, alpha, beta, standardization, symbolic=False): """Generate the recurrence coefficients a_k, b_k, c_k in P_{k+1}(x) = (a_k x - b_k)*P_{k}(x) - c_k P_{k-1}(x) for the Jacobi polynomials which are orthogonal on [-1, 1] with respect to the weight w(x)=[(1-x)^alpha]*[(1+x)^beta]; see <https://en.wikipedia.org/wiki/Jacobi_polynomials#Recurrence_relations>. """ gamma = sympy.gamma if symbolic else lambda x: scipy.special.gamma(float(x)) def rational(x, y): # <https://github.com/sympy/sympy/pull/13670> return ( sympy.Rational(x, y) if all([isinstance(val, int) for val in [x, y]]) else x / y ) frac = rational if symbolic else lambda x, y: x / y sqrt = sympy.sqrt if symbolic else numpy.sqrt int_1 = ( 2 ** (alpha + beta + 1) * gamma(alpha + 1) * gamma(beta + 1) / gamma(alpha + beta + 2) ) if standardization == "monic": p0 = 1 a = numpy.ones(n, dtype=int) # work around bug <https://github.com/sympy/sympy/issues/13618> if isinstance(alpha, numpy.int64): alpha = int(alpha) if isinstance(beta, numpy.int64): beta = int(beta) b = [ frac(beta - alpha, alpha + beta + 2) if N == 0 else frac( beta ** 2 - alpha ** 2, (2 * N + alpha + beta) * (2 * N + alpha + beta + 2), ) for N in range(n) ] # c[0] is not used in the actual recurrence, but is often defined # as the integral of the weight function of the domain, i.e., # ``` # int_{-1}^{+1} (1-x)^a * (1+x)^b dx = # 2^(a+b+1) * Gamma(a+1) * Gamma(b+1) / Gamma(a+b+2). # ``` # Note also that we have the treat the case N==1 separately to avoid # division by 0 for alpha=beta=-1/2. c = [ int_1 if N == 0 else frac( 4 * (1 + alpha) * (1 + beta), (2 + alpha + beta) ** 2 * (3 + alpha + beta), ) if N == 1 else frac( 4 * (N + alpha) * (N + beta) * N * (N + alpha + beta), (2 * N + alpha + beta) ** 2 * (2 * N + alpha + beta + 1) * (2 * N + alpha + beta - 1), ) for N in range(n) ] elif standardization == "p(1)=(n+alpha over n)" or ( alpha == 0 and standardization == "p(1)=1" ): p0 = 1 # work around bug <https://github.com/sympy/sympy/issues/13618> if isinstance(alpha, numpy.int64): alpha = int(alpha) if isinstance(beta, numpy.int64): beta = int(beta) # Treat N==0 separately to avoid division by 0 for alpha=beta=-1/2. a = [ frac(alpha + beta + 2, 2) if N == 0 else frac( (2 * N + alpha + beta + 1) * (2 * N + alpha + beta + 2), 2 * (N + 1) * (N + alpha + beta + 1), ) for N in range(n) ] b = [ frac(beta - alpha, 2) if N == 0 else frac( (beta ** 2 - alpha ** 2) * (2 * N + alpha + beta + 1), 2 * (N + 1) * (N + alpha + beta + 1) * (2 * N + alpha + beta), ) for N in range(n) ] c = [ int_1 if N == 0 else frac( (N + alpha) * (N + beta) * (2 * N + alpha + beta + 2), (N + 1) * (N + alpha + beta + 1) * (2 * N + alpha + beta), ) for N in range(n) ] else: assert standardization == "normal", "Unknown standardization '{}'.".format( standardization ) p0 = sqrt(1 / int_1) # Treat N==0 separately to avoid division by 0 for alpha=beta=-1/2. 
a = [ frac(alpha + beta + 2, 2) * sqrt(frac(alpha + beta + 3, (alpha + 1) * (beta + 1))) if N == 0 else frac(2 * N + alpha + beta + 2, 2) * sqrt( frac( (2 * N + alpha + beta + 1) * (2 * N + alpha + beta + 3), (N + 1) * (N + alpha + 1) * (N + beta + 1) * (N + alpha + beta + 1), ) ) for N in range(n) ] b = [ ( frac(beta - alpha, 2) if N == 0 else frac(beta ** 2 - alpha ** 2, 2 * (2 * N + alpha + beta)) ) * sqrt( frac( (2 * N + alpha + beta + 3) * (2 * N + alpha + beta + 1), (N + 1) * (N + alpha + 1) * (N + beta + 1) * (N + alpha + beta + 1), ) ) for N in range(n) ] c = [ int_1 if N == 0 else frac(4 + alpha + beta, 2 + alpha + beta) * sqrt( frac( (1 + alpha) * (1 + beta) * (5 + alpha + beta), 2 * (2 + alpha) * (2 + beta) * (2 + alpha + beta), ) ) if N == 1 else frac(2 * N + alpha + beta + 2, 2 * N + alpha + beta) * sqrt( frac( N * (N + alpha) * (N + beta) * (N + alpha + beta) * (2 * N + alpha + beta + 3), (N + 1) * (N + alpha + 1) * (N + beta + 1) * (N + alpha + beta + 1) * (2 * N + alpha + beta - 1), ) ) for N in range(n) ] return p0, numpy.array(a), numpy.array(b), numpy.array(c)
[ "def", "jacobi", "(", "n", ",", "alpha", ",", "beta", ",", "standardization", ",", "symbolic", "=", "False", ")", ":", "gamma", "=", "sympy", ".", "gamma", "if", "symbolic", "else", "lambda", "x", ":", "scipy", ".", "special", ".", "gamma", "(", "float", "(", "x", ")", ")", "def", "rational", "(", "x", ",", "y", ")", ":", "# <https://github.com/sympy/sympy/pull/13670>", "return", "(", "sympy", ".", "Rational", "(", "x", ",", "y", ")", "if", "all", "(", "[", "isinstance", "(", "val", ",", "int", ")", "for", "val", "in", "[", "x", ",", "y", "]", "]", ")", "else", "x", "/", "y", ")", "frac", "=", "rational", "if", "symbolic", "else", "lambda", "x", ",", "y", ":", "x", "/", "y", "sqrt", "=", "sympy", ".", "sqrt", "if", "symbolic", "else", "numpy", ".", "sqrt", "int_1", "=", "(", "2", "**", "(", "alpha", "+", "beta", "+", "1", ")", "*", "gamma", "(", "alpha", "+", "1", ")", "*", "gamma", "(", "beta", "+", "1", ")", "/", "gamma", "(", "alpha", "+", "beta", "+", "2", ")", ")", "if", "standardization", "==", "\"monic\"", ":", "p0", "=", "1", "a", "=", "numpy", ".", "ones", "(", "n", ",", "dtype", "=", "int", ")", "# work around bug <https://github.com/sympy/sympy/issues/13618>", "if", "isinstance", "(", "alpha", ",", "numpy", ".", "int64", ")", ":", "alpha", "=", "int", "(", "alpha", ")", "if", "isinstance", "(", "beta", ",", "numpy", ".", "int64", ")", ":", "beta", "=", "int", "(", "beta", ")", "b", "=", "[", "frac", "(", "beta", "-", "alpha", ",", "alpha", "+", "beta", "+", "2", ")", "if", "N", "==", "0", "else", "frac", "(", "beta", "**", "2", "-", "alpha", "**", "2", ",", "(", "2", "*", "N", "+", "alpha", "+", "beta", ")", "*", "(", "2", "*", "N", "+", "alpha", "+", "beta", "+", "2", ")", ",", ")", "for", "N", "in", "range", "(", "n", ")", "]", "# c[0] is not used in the actual recurrence, but is often defined", "# as the integral of the weight function of the domain, i.e.,", "# ```", "# int_{-1}^{+1} (1-x)^a * (1+x)^b dx =", "# 2^(a+b+1) * Gamma(a+1) * Gamma(b+1) / Gamma(a+b+2).", "# ```", "# Note also that we have the treat the case N==1 separately to avoid", "# division by 0 for alpha=beta=-1/2.", "c", "=", "[", "int_1", "if", "N", "==", "0", "else", "frac", "(", "4", "*", "(", "1", "+", "alpha", ")", "*", "(", "1", "+", "beta", ")", ",", "(", "2", "+", "alpha", "+", "beta", ")", "**", "2", "*", "(", "3", "+", "alpha", "+", "beta", ")", ",", ")", "if", "N", "==", "1", "else", "frac", "(", "4", "*", "(", "N", "+", "alpha", ")", "*", "(", "N", "+", "beta", ")", "*", "N", "*", "(", "N", "+", "alpha", "+", "beta", ")", ",", "(", "2", "*", "N", "+", "alpha", "+", "beta", ")", "**", "2", "*", "(", "2", "*", "N", "+", "alpha", "+", "beta", "+", "1", ")", "*", "(", "2", "*", "N", "+", "alpha", "+", "beta", "-", "1", ")", ",", ")", "for", "N", "in", "range", "(", "n", ")", "]", "elif", "standardization", "==", "\"p(1)=(n+alpha over n)\"", "or", "(", "alpha", "==", "0", "and", "standardization", "==", "\"p(1)=1\"", ")", ":", "p0", "=", "1", "# work around bug <https://github.com/sympy/sympy/issues/13618>", "if", "isinstance", "(", "alpha", ",", "numpy", ".", "int64", ")", ":", "alpha", "=", "int", "(", "alpha", ")", "if", "isinstance", "(", "beta", ",", "numpy", ".", "int64", ")", ":", "beta", "=", "int", "(", "beta", ")", "# Treat N==0 separately to avoid division by 0 for alpha=beta=-1/2.", "a", "=", "[", "frac", "(", "alpha", "+", "beta", "+", "2", ",", "2", ")", "if", "N", "==", "0", "else", "frac", "(", "(", "2", "*", "N", "+", "alpha", "+", "beta", "+", "1", ")", "*", "(", "2", "*", "N", "+", 
"alpha", "+", "beta", "+", "2", ")", ",", "2", "*", "(", "N", "+", "1", ")", "*", "(", "N", "+", "alpha", "+", "beta", "+", "1", ")", ",", ")", "for", "N", "in", "range", "(", "n", ")", "]", "b", "=", "[", "frac", "(", "beta", "-", "alpha", ",", "2", ")", "if", "N", "==", "0", "else", "frac", "(", "(", "beta", "**", "2", "-", "alpha", "**", "2", ")", "*", "(", "2", "*", "N", "+", "alpha", "+", "beta", "+", "1", ")", ",", "2", "*", "(", "N", "+", "1", ")", "*", "(", "N", "+", "alpha", "+", "beta", "+", "1", ")", "*", "(", "2", "*", "N", "+", "alpha", "+", "beta", ")", ",", ")", "for", "N", "in", "range", "(", "n", ")", "]", "c", "=", "[", "int_1", "if", "N", "==", "0", "else", "frac", "(", "(", "N", "+", "alpha", ")", "*", "(", "N", "+", "beta", ")", "*", "(", "2", "*", "N", "+", "alpha", "+", "beta", "+", "2", ")", ",", "(", "N", "+", "1", ")", "*", "(", "N", "+", "alpha", "+", "beta", "+", "1", ")", "*", "(", "2", "*", "N", "+", "alpha", "+", "beta", ")", ",", ")", "for", "N", "in", "range", "(", "n", ")", "]", "else", ":", "assert", "standardization", "==", "\"normal\"", ",", "\"Unknown standardization '{}'.\"", ".", "format", "(", "standardization", ")", "p0", "=", "sqrt", "(", "1", "/", "int_1", ")", "# Treat N==0 separately to avoid division by 0 for alpha=beta=-1/2.", "a", "=", "[", "frac", "(", "alpha", "+", "beta", "+", "2", ",", "2", ")", "*", "sqrt", "(", "frac", "(", "alpha", "+", "beta", "+", "3", ",", "(", "alpha", "+", "1", ")", "*", "(", "beta", "+", "1", ")", ")", ")", "if", "N", "==", "0", "else", "frac", "(", "2", "*", "N", "+", "alpha", "+", "beta", "+", "2", ",", "2", ")", "*", "sqrt", "(", "frac", "(", "(", "2", "*", "N", "+", "alpha", "+", "beta", "+", "1", ")", "*", "(", "2", "*", "N", "+", "alpha", "+", "beta", "+", "3", ")", ",", "(", "N", "+", "1", ")", "*", "(", "N", "+", "alpha", "+", "1", ")", "*", "(", "N", "+", "beta", "+", "1", ")", "*", "(", "N", "+", "alpha", "+", "beta", "+", "1", ")", ",", ")", ")", "for", "N", "in", "range", "(", "n", ")", "]", "b", "=", "[", "(", "frac", "(", "beta", "-", "alpha", ",", "2", ")", "if", "N", "==", "0", "else", "frac", "(", "beta", "**", "2", "-", "alpha", "**", "2", ",", "2", "*", "(", "2", "*", "N", "+", "alpha", "+", "beta", ")", ")", ")", "*", "sqrt", "(", "frac", "(", "(", "2", "*", "N", "+", "alpha", "+", "beta", "+", "3", ")", "*", "(", "2", "*", "N", "+", "alpha", "+", "beta", "+", "1", ")", ",", "(", "N", "+", "1", ")", "*", "(", "N", "+", "alpha", "+", "1", ")", "*", "(", "N", "+", "beta", "+", "1", ")", "*", "(", "N", "+", "alpha", "+", "beta", "+", "1", ")", ",", ")", ")", "for", "N", "in", "range", "(", "n", ")", "]", "c", "=", "[", "int_1", "if", "N", "==", "0", "else", "frac", "(", "4", "+", "alpha", "+", "beta", ",", "2", "+", "alpha", "+", "beta", ")", "*", "sqrt", "(", "frac", "(", "(", "1", "+", "alpha", ")", "*", "(", "1", "+", "beta", ")", "*", "(", "5", "+", "alpha", "+", "beta", ")", ",", "2", "*", "(", "2", "+", "alpha", ")", "*", "(", "2", "+", "beta", ")", "*", "(", "2", "+", "alpha", "+", "beta", ")", ",", ")", ")", "if", "N", "==", "1", "else", "frac", "(", "2", "*", "N", "+", "alpha", "+", "beta", "+", "2", ",", "2", "*", "N", "+", "alpha", "+", "beta", ")", "*", "sqrt", "(", "frac", "(", "N", "*", "(", "N", "+", "alpha", ")", "*", "(", "N", "+", "beta", ")", "*", "(", "N", "+", "alpha", "+", "beta", ")", "*", "(", "2", "*", "N", "+", "alpha", "+", "beta", "+", "3", ")", ",", "(", "N", "+", "1", ")", "*", "(", "N", "+", "alpha", "+", "1", ")", "*", "(", "N", "+", "beta", "+", "1", ")", "*", 
"(", "N", "+", "alpha", "+", "beta", "+", "1", ")", "*", "(", "2", "*", "N", "+", "alpha", "+", "beta", "-", "1", ")", ",", ")", ")", "for", "N", "in", "range", "(", "n", ")", "]", "return", "p0", ",", "numpy", ".", "array", "(", "a", ")", ",", "numpy", ".", "array", "(", "b", ")", ",", "numpy", ".", "array", "(", "c", ")" ]
Generate the recurrence coefficients a_k, b_k, c_k in P_{k+1}(x) = (a_k x - b_k)*P_{k}(x) - c_k P_{k-1}(x) for the Jacobi polynomials which are orthogonal on [-1, 1] with respect to the weight w(x)=[(1-x)^alpha]*[(1+x)^beta]; see <https://en.wikipedia.org/wiki/Jacobi_polynomials#Recurrence_relations>.
[ "Generate", "the", "recurrence", "coefficients", "a_k", "b_k", "c_k", "in" ]
train
https://github.com/nschloe/orthopy/blob/64713d0533b0af042810a7535fff411b8e0aea9e/orthopy/line_segment/recurrence_coefficients.py#L28-L211
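The record above defines jacobi(n, alpha, beta, standardization, symbolic=False), which returns p0 and the coefficient arrays a, b, c of the three-term recurrence. A minimal usage sketch follows; the import path is inferred from the file location in the record and is an assumption, not something the record states.

import numpy
# Assumed import path, derived from orthopy/line_segment/recurrence_coefficients.py.
from orthopy.line_segment.recurrence_coefficients import jacobi

# Legendre case (alpha = beta = 0), monic standardization, first four coefficients.
p0, a, b, c = jacobi(4, 0, 0, "monic")

# Unroll P_{k+1}(x) = (a_k*x - b_k)*P_k(x) - c_k*P_{k-1}(x) at a sample point.
# c[0] multiplies P_{-1} = 0 and so drops out, as the comments in the record note.
x = 0.5
p_prev, p_cur = 0.0, p0
for a_k, b_k, c_k in zip(a, b, c):
    p_prev, p_cur = p_cur, (a_k * x - b_k) * p_cur - c_k * p_prev

print(p_cur)  # value of the degree-4 monic Legendre polynomial at x = 0.5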
nschloe/orthopy
orthopy/disk/tools.py
plot
def plot(f, lcar=1.0e-1): """Plot function over a disk. """ import matplotlib import matplotlib.pyplot as plt import pygmsh geom = pygmsh.built_in.Geometry() geom.add_circle([0.0, 0.0, 0.0], 1.0, lcar, num_sections=4, compound=True) points, cells, _, _, _ = pygmsh.generate_mesh(geom, verbose=True) x = points[:, 0] y = points[:, 1] triang = matplotlib.tri.Triangulation(x, y, cells["triangle"]) plt.tripcolor(triang, f(points.T), shading="flat") plt.colorbar() # Choose a diverging colormap such that the zeros are clearly # distinguishable. plt.set_cmap("coolwarm") # Make sure the color map limits are symmetric around 0. clim = plt.gci().get_clim() mx = max(abs(clim[0]), abs(clim[1])) plt.clim(-mx, mx) # circle outline circle = plt.Circle((0, 0), 1.0, edgecolor="k", fill=False) plt.gca().add_artist(circle) plt.gca().set_aspect("equal") plt.axis("off") return
python
def plot(f, lcar=1.0e-1): """Plot function over a disk. """ import matplotlib import matplotlib.pyplot as plt import pygmsh geom = pygmsh.built_in.Geometry() geom.add_circle([0.0, 0.0, 0.0], 1.0, lcar, num_sections=4, compound=True) points, cells, _, _, _ = pygmsh.generate_mesh(geom, verbose=True) x = points[:, 0] y = points[:, 1] triang = matplotlib.tri.Triangulation(x, y, cells["triangle"]) plt.tripcolor(triang, f(points.T), shading="flat") plt.colorbar() # Choose a diverging colormap such that the zeros are clearly # distinguishable. plt.set_cmap("coolwarm") # Make sure the color map limits are symmetric around 0. clim = plt.gci().get_clim() mx = max(abs(clim[0]), abs(clim[1])) plt.clim(-mx, mx) # circle outline circle = plt.Circle((0, 0), 1.0, edgecolor="k", fill=False) plt.gca().add_artist(circle) plt.gca().set_aspect("equal") plt.axis("off") return
[ "def", "plot", "(", "f", ",", "lcar", "=", "1.0e-1", ")", ":", "import", "matplotlib", "import", "matplotlib", ".", "pyplot", "as", "plt", "import", "pygmsh", "geom", "=", "pygmsh", ".", "built_in", ".", "Geometry", "(", ")", "geom", ".", "add_circle", "(", "[", "0.0", ",", "0.0", ",", "0.0", "]", ",", "1.0", ",", "lcar", ",", "num_sections", "=", "4", ",", "compound", "=", "True", ")", "points", ",", "cells", ",", "_", ",", "_", ",", "_", "=", "pygmsh", ".", "generate_mesh", "(", "geom", ",", "verbose", "=", "True", ")", "x", "=", "points", "[", ":", ",", "0", "]", "y", "=", "points", "[", ":", ",", "1", "]", "triang", "=", "matplotlib", ".", "tri", ".", "Triangulation", "(", "x", ",", "y", ",", "cells", "[", "\"triangle\"", "]", ")", "plt", ".", "tripcolor", "(", "triang", ",", "f", "(", "points", ".", "T", ")", ",", "shading", "=", "\"flat\"", ")", "plt", ".", "colorbar", "(", ")", "# Choose a diverging colormap such that the zeros are clearly", "# distinguishable.", "plt", ".", "set_cmap", "(", "\"coolwarm\"", ")", "# Make sure the color map limits are symmetric around 0.", "clim", "=", "plt", ".", "gci", "(", ")", ".", "get_clim", "(", ")", "mx", "=", "max", "(", "abs", "(", "clim", "[", "0", "]", ")", ",", "abs", "(", "clim", "[", "1", "]", ")", ")", "plt", ".", "clim", "(", "-", "mx", ",", "mx", ")", "# circle outline", "circle", "=", "plt", ".", "Circle", "(", "(", "0", ",", "0", ")", ",", "1.0", ",", "edgecolor", "=", "\"k\"", ",", "fill", "=", "False", ")", "plt", ".", "gca", "(", ")", ".", "add_artist", "(", "circle", ")", "plt", ".", "gca", "(", ")", ".", "set_aspect", "(", "\"equal\"", ")", "plt", ".", "axis", "(", "\"off\"", ")", "return" ]
Plot function over a disk.
[ "Plot", "function", "over", "a", "disk", "." ]
train
https://github.com/nschloe/orthopy/blob/64713d0533b0af042810a7535fff411b8e0aea9e/orthopy/disk/tools.py#L13-L45
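A short usage sketch for the disk plot helper above. It assumes the function is importable from orthopy.disk.tools (inferred from the file path) and that matplotlib plus pygmsh with a working gmsh backend are installed; note the record relies on the older pygmsh API (pygmsh.built_in, five-value generate_mesh).

import matplotlib.pyplot as plt
from orthopy.disk.tools import plot  # assumed import path

# f receives the transposed mesh points (shape (3, n_points)) and returns one value per point.
plot(lambda x: x[0] ** 2 - x[1] ** 2, lcar=5.0e-2)
plt.show()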
nschloe/orthopy
orthopy/ncube/orth.py
tree
def tree(X, n, symbolic=False): """Evaluates the entire tree of orthogonal polynomials for the n-cube The computation is organized such that tree returns a list of arrays, L={0, ..., dim}, where each level corresponds to the polynomial degree L. Further, each level is organized like a discrete (dim-1)-dimensional simplex. Let's demonstrate this for 3D: L = 1: (0, 0, 0) L = 2: (1, 0, 0) (0, 1, 0) (0, 0, 1) L = 3: (2, 0, 0) (1, 1, 0) (1, 0, 1) (0, 2, 0) (0, 1, 1) (0, 0, 2) The main insight here that makes computation for n dimensions easy is that the next level is composed by: * Taking the whole previous level and adding +1 to the first entry. * Taking the last row of the previous level and adding +1 to the second entry. * Taking the last entry of the last row of the previous and adding +1 to the third entry. In the same manner this can be repeated for `dim` dimensions. """ p0, a, b, c = legendre(n + 1, "normal", symbolic=symbolic) dim = X.shape[0] p0n = p0 ** dim out = [] level = numpy.array([numpy.ones(X.shape[1:], dtype=int) * p0n]) out.append(level) # TODO use a simpler binom implementation for L in range(n): level = [] for i in range(dim - 1): m1 = int(scipy.special.binom(L + dim - i - 1, dim - i - 1)) if L > 0: m2 = int(scipy.special.binom(L + dim - i - 2, dim - i - 1)) r = 0 for k in range(L + 1): m = int(scipy.special.binom(k + dim - i - 2, dim - i - 2)) val = out[L][-m1:][r : r + m] * (a[L - k] * X[i] - b[L - k]) if L - k > 0: val -= out[L - 1][-m2:][r : r + m] * c[L - k] r += m level.append(val) # treat the last one separately val = out[L][-1] * (a[L] * X[-1] - b[L]) if L > 0: val -= out[L - 1][-1] * c[L] level.append([val]) out.append(numpy.concatenate(level)) return out
python
def tree(X, n, symbolic=False): """Evaluates the entire tree of orthogonal polynomials for the n-cube The computation is organized such that tree returns a list of arrays, L={0, ..., dim}, where each level corresponds to the polynomial degree L. Further, each level is organized like a discrete (dim-1)-dimensional simplex. Let's demonstrate this for 3D: L = 1: (0, 0, 0) L = 2: (1, 0, 0) (0, 1, 0) (0, 0, 1) L = 3: (2, 0, 0) (1, 1, 0) (1, 0, 1) (0, 2, 0) (0, 1, 1) (0, 0, 2) The main insight here that makes computation for n dimensions easy is that the next level is composed by: * Taking the whole previous level and adding +1 to the first entry. * Taking the last row of the previous level and adding +1 to the second entry. * Taking the last entry of the last row of the previous and adding +1 to the third entry. In the same manner this can be repeated for `dim` dimensions. """ p0, a, b, c = legendre(n + 1, "normal", symbolic=symbolic) dim = X.shape[0] p0n = p0 ** dim out = [] level = numpy.array([numpy.ones(X.shape[1:], dtype=int) * p0n]) out.append(level) # TODO use a simpler binom implementation for L in range(n): level = [] for i in range(dim - 1): m1 = int(scipy.special.binom(L + dim - i - 1, dim - i - 1)) if L > 0: m2 = int(scipy.special.binom(L + dim - i - 2, dim - i - 1)) r = 0 for k in range(L + 1): m = int(scipy.special.binom(k + dim - i - 2, dim - i - 2)) val = out[L][-m1:][r : r + m] * (a[L - k] * X[i] - b[L - k]) if L - k > 0: val -= out[L - 1][-m2:][r : r + m] * c[L - k] r += m level.append(val) # treat the last one separately val = out[L][-1] * (a[L] * X[-1] - b[L]) if L > 0: val -= out[L - 1][-1] * c[L] level.append([val]) out.append(numpy.concatenate(level)) return out
[ "def", "tree", "(", "X", ",", "n", ",", "symbolic", "=", "False", ")", ":", "p0", ",", "a", ",", "b", ",", "c", "=", "legendre", "(", "n", "+", "1", ",", "\"normal\"", ",", "symbolic", "=", "symbolic", ")", "dim", "=", "X", ".", "shape", "[", "0", "]", "p0n", "=", "p0", "**", "dim", "out", "=", "[", "]", "level", "=", "numpy", ".", "array", "(", "[", "numpy", ".", "ones", "(", "X", ".", "shape", "[", "1", ":", "]", ",", "dtype", "=", "int", ")", "*", "p0n", "]", ")", "out", ".", "append", "(", "level", ")", "# TODO use a simpler binom implementation", "for", "L", "in", "range", "(", "n", ")", ":", "level", "=", "[", "]", "for", "i", "in", "range", "(", "dim", "-", "1", ")", ":", "m1", "=", "int", "(", "scipy", ".", "special", ".", "binom", "(", "L", "+", "dim", "-", "i", "-", "1", ",", "dim", "-", "i", "-", "1", ")", ")", "if", "L", ">", "0", ":", "m2", "=", "int", "(", "scipy", ".", "special", ".", "binom", "(", "L", "+", "dim", "-", "i", "-", "2", ",", "dim", "-", "i", "-", "1", ")", ")", "r", "=", "0", "for", "k", "in", "range", "(", "L", "+", "1", ")", ":", "m", "=", "int", "(", "scipy", ".", "special", ".", "binom", "(", "k", "+", "dim", "-", "i", "-", "2", ",", "dim", "-", "i", "-", "2", ")", ")", "val", "=", "out", "[", "L", "]", "[", "-", "m1", ":", "]", "[", "r", ":", "r", "+", "m", "]", "*", "(", "a", "[", "L", "-", "k", "]", "*", "X", "[", "i", "]", "-", "b", "[", "L", "-", "k", "]", ")", "if", "L", "-", "k", ">", "0", ":", "val", "-=", "out", "[", "L", "-", "1", "]", "[", "-", "m2", ":", "]", "[", "r", ":", "r", "+", "m", "]", "*", "c", "[", "L", "-", "k", "]", "r", "+=", "m", "level", ".", "append", "(", "val", ")", "# treat the last one separately", "val", "=", "out", "[", "L", "]", "[", "-", "1", "]", "*", "(", "a", "[", "L", "]", "*", "X", "[", "-", "1", "]", "-", "b", "[", "L", "]", ")", "if", "L", ">", "0", ":", "val", "-=", "out", "[", "L", "-", "1", "]", "[", "-", "1", "]", "*", "c", "[", "L", "]", "level", ".", "append", "(", "[", "val", "]", ")", "out", ".", "append", "(", "numpy", ".", "concatenate", "(", "level", ")", ")", "return", "out" ]
Evaluates the entire tree of orthogonal polynomials for the n-cube The computation is organized such that tree returns a list of arrays, L={0, ..., dim}, where each level corresponds to the polynomial degree L. Further, each level is organized like a discrete (dim-1)-dimensional simplex. Let's demonstrate this for 3D: L = 1: (0, 0, 0) L = 2: (1, 0, 0) (0, 1, 0) (0, 0, 1) L = 3: (2, 0, 0) (1, 1, 0) (1, 0, 1) (0, 2, 0) (0, 1, 1) (0, 0, 2) The main insight here that makes computation for n dimensions easy is that the next level is composed by: * Taking the whole previous level and adding +1 to the first entry. * Taking the last row of the previous level and adding +1 to the second entry. * Taking the last entry of the last row of the previous and adding +1 to the third entry. In the same manner this can be repeated for `dim` dimensions.
[ "Evaluates", "the", "entire", "tree", "of", "orthogonal", "polynomials", "for", "the", "n", "-", "cube" ]
train
https://github.com/nschloe/orthopy/blob/64713d0533b0af042810a7535fff411b8e0aea9e/orthopy/ncube/orth.py#L11-L76
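To make the level layout described in the docstring concrete, here is a small sketch that evaluates the n-cube tree at random points of the 3-cube; the import path is inferred from orthopy/ncube/orth.py and is an assumption.

import numpy
from orthopy.ncube.orth import tree  # assumed import path

# Points of [-1, 1]^3 as an array of shape (dim, n_points).
X = 2 * numpy.random.rand(3, 50) - 1

levels = tree(X, 2)
for L, vals in enumerate(levels):
    # levels[L] stacks all basis polynomials of total degree L, one row per polynomial.
    print(L, numpy.asarray(vals).shape)  # (1, 50), (3, 50), (6, 50)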
nschloe/orthopy
orthopy/tools.py
line_evaluate
def line_evaluate(t, p0, a, b, c): """Evaluate the orthogonal polynomial defined by its recurrence coefficients a, b, and c at the point(s) t. """ vals1 = numpy.zeros_like(t, dtype=int) # The order is important here; see # <https://github.com/sympy/sympy/issues/13637>. vals2 = numpy.ones_like(t) * p0 for a_k, b_k, c_k in zip(a, b, c): vals0, vals1 = vals1, vals2 vals2 = vals1 * (t * a_k - b_k) - vals0 * c_k return vals2
python
def line_evaluate(t, p0, a, b, c): """Evaluate the orthogonal polynomial defined by its recurrence coefficients a, b, and c at the point(s) t. """ vals1 = numpy.zeros_like(t, dtype=int) # The order is important here; see # <https://github.com/sympy/sympy/issues/13637>. vals2 = numpy.ones_like(t) * p0 for a_k, b_k, c_k in zip(a, b, c): vals0, vals1 = vals1, vals2 vals2 = vals1 * (t * a_k - b_k) - vals0 * c_k return vals2
[ "def", "line_evaluate", "(", "t", ",", "p0", ",", "a", ",", "b", ",", "c", ")", ":", "vals1", "=", "numpy", ".", "zeros_like", "(", "t", ",", "dtype", "=", "int", ")", "# The order is important here; see", "# <https://github.com/sympy/sympy/issues/13637>.", "vals2", "=", "numpy", ".", "ones_like", "(", "t", ")", "*", "p0", "for", "a_k", ",", "b_k", ",", "c_k", "in", "zip", "(", "a", ",", "b", ",", "c", ")", ":", "vals0", ",", "vals1", "=", "vals1", ",", "vals2", "vals2", "=", "vals1", "*", "(", "t", "*", "a_k", "-", "b_k", ")", "-", "vals0", "*", "c_k", "return", "vals2" ]
Evaluate the orthogonal polynomial defined by its recurrence coefficients a, b, and c at the point(s) t.
[ "Evaluate", "the", "orthogonal", "polynomial", "defined", "by", "its", "recurrence", "coefficients", "a", "b", "and", "c", "at", "the", "point", "(", "s", ")", "t", "." ]
train
https://github.com/nschloe/orthopy/blob/64713d0533b0af042810a7535fff411b8e0aea9e/orthopy/tools.py#L21-L33
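line_evaluate pairs naturally with the recurrence-coefficient generator earlier in this listing. A sketch, with import paths inferred from the file locations (an assumption):

import numpy
from orthopy.tools import line_evaluate                          # assumed path
from orthopy.line_segment.recurrence_coefficients import jacobi  # assumed path

# Orthonormal ("normal") Legendre polynomial of degree 3: alpha = beta = 0.
p0, a, b, c = jacobi(3, 0, 0, "normal")
t = numpy.linspace(-1.0, 1.0, 5)
print(line_evaluate(t, p0, a, b, c))  # one value of the degree-3 polynomial per entry of t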
nschloe/orthopy
orthopy/triangle/tools.py
plot
def plot(corners, f, n=100): """Plot function over a triangle. """ import matplotlib.tri import matplotlib.pyplot as plt # discretization points def partition(boxes, balls): # <https://stackoverflow.com/a/36748940/353337> def rec(boxes, balls, parent=tuple()): if boxes > 1: for i in range(balls + 1): for x in rec(boxes - 1, i, parent + (balls - i,)): yield x else: yield parent + (balls,) return list(rec(boxes, balls)) bary = numpy.array(partition(3, n)).T / n X = numpy.sum([numpy.outer(bary[k], corners[:, k]) for k in range(3)], axis=0).T # plot the points # plt.plot(X[0], X[1], 'xk') x = numpy.array(X[0]) y = numpy.array(X[1]) z = numpy.array(f(bary), dtype=float) triang = matplotlib.tri.Triangulation(x, y) plt.tripcolor(triang, z, shading="flat") plt.colorbar() # Choose a diverging colormap such that the zeros are clearly # distinguishable. plt.set_cmap("coolwarm") # Make sure the color map limits are symmetric around 0. clim = plt.gci().get_clim() mx = max(abs(clim[0]), abs(clim[1])) plt.clim(-mx, mx) # triangle outlines X = numpy.column_stack([corners, corners[:, 0]]) plt.plot(X[0], X[1], "-k") plt.gca().set_aspect("equal") plt.axis("off") return
python
def plot(corners, f, n=100): """Plot function over a triangle. """ import matplotlib.tri import matplotlib.pyplot as plt # discretization points def partition(boxes, balls): # <https://stackoverflow.com/a/36748940/353337> def rec(boxes, balls, parent=tuple()): if boxes > 1: for i in range(balls + 1): for x in rec(boxes - 1, i, parent + (balls - i,)): yield x else: yield parent + (balls,) return list(rec(boxes, balls)) bary = numpy.array(partition(3, n)).T / n X = numpy.sum([numpy.outer(bary[k], corners[:, k]) for k in range(3)], axis=0).T # plot the points # plt.plot(X[0], X[1], 'xk') x = numpy.array(X[0]) y = numpy.array(X[1]) z = numpy.array(f(bary), dtype=float) triang = matplotlib.tri.Triangulation(x, y) plt.tripcolor(triang, z, shading="flat") plt.colorbar() # Choose a diverging colormap such that the zeros are clearly # distinguishable. plt.set_cmap("coolwarm") # Make sure the color map limits are symmetric around 0. clim = plt.gci().get_clim() mx = max(abs(clim[0]), abs(clim[1])) plt.clim(-mx, mx) # triangle outlines X = numpy.column_stack([corners, corners[:, 0]]) plt.plot(X[0], X[1], "-k") plt.gca().set_aspect("equal") plt.axis("off") return
[ "def", "plot", "(", "corners", ",", "f", ",", "n", "=", "100", ")", ":", "import", "matplotlib", ".", "tri", "import", "matplotlib", ".", "pyplot", "as", "plt", "# discretization points", "def", "partition", "(", "boxes", ",", "balls", ")", ":", "# <https://stackoverflow.com/a/36748940/353337>", "def", "rec", "(", "boxes", ",", "balls", ",", "parent", "=", "tuple", "(", ")", ")", ":", "if", "boxes", ">", "1", ":", "for", "i", "in", "range", "(", "balls", "+", "1", ")", ":", "for", "x", "in", "rec", "(", "boxes", "-", "1", ",", "i", ",", "parent", "+", "(", "balls", "-", "i", ",", ")", ")", ":", "yield", "x", "else", ":", "yield", "parent", "+", "(", "balls", ",", ")", "return", "list", "(", "rec", "(", "boxes", ",", "balls", ")", ")", "bary", "=", "numpy", ".", "array", "(", "partition", "(", "3", ",", "n", ")", ")", ".", "T", "/", "n", "X", "=", "numpy", ".", "sum", "(", "[", "numpy", ".", "outer", "(", "bary", "[", "k", "]", ",", "corners", "[", ":", ",", "k", "]", ")", "for", "k", "in", "range", "(", "3", ")", "]", ",", "axis", "=", "0", ")", ".", "T", "# plot the points", "# plt.plot(X[0], X[1], 'xk')", "x", "=", "numpy", ".", "array", "(", "X", "[", "0", "]", ")", "y", "=", "numpy", ".", "array", "(", "X", "[", "1", "]", ")", "z", "=", "numpy", ".", "array", "(", "f", "(", "bary", ")", ",", "dtype", "=", "float", ")", "triang", "=", "matplotlib", ".", "tri", ".", "Triangulation", "(", "x", ",", "y", ")", "plt", ".", "tripcolor", "(", "triang", ",", "z", ",", "shading", "=", "\"flat\"", ")", "plt", ".", "colorbar", "(", ")", "# Choose a diverging colormap such that the zeros are clearly", "# distinguishable.", "plt", ".", "set_cmap", "(", "\"coolwarm\"", ")", "# Make sure the color map limits are symmetric around 0.", "clim", "=", "plt", ".", "gci", "(", ")", ".", "get_clim", "(", ")", "mx", "=", "max", "(", "abs", "(", "clim", "[", "0", "]", ")", ",", "abs", "(", "clim", "[", "1", "]", ")", ")", "plt", ".", "clim", "(", "-", "mx", ",", "mx", ")", "# triangle outlines", "X", "=", "numpy", ".", "column_stack", "(", "[", "corners", ",", "corners", "[", ":", ",", "0", "]", "]", ")", "plt", ".", "plot", "(", "X", "[", "0", "]", ",", "X", "[", "1", "]", ",", "\"-k\"", ")", "plt", ".", "gca", "(", ")", ".", "set_aspect", "(", "\"equal\"", ")", "plt", ".", "axis", "(", "\"off\"", ")", "return" ]
Plot function over a triangle.
[ "Plot", "function", "over", "a", "triangle", "." ]
train
https://github.com/nschloe/orthopy/blob/64713d0533b0af042810a7535fff411b8e0aea9e/orthopy/triangle/tools.py#L16-L63
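Usage sketch for the triangle plot helper: corners holds the triangle vertices as columns of a 2x3 array, and f receives barycentric coordinates of shape (3, n_points). The import path is inferred from the file location and is an assumption.

import numpy
import matplotlib.pyplot as plt
from orthopy.triangle.tools import plot  # assumed import path

corners = numpy.array([[0.0, 1.0, 0.0],
                       [0.0, 0.0, 1.0]])
plot(corners, lambda bary: bary[0] * bary[1] - bary[2] ** 2, n=50)
plt.show()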
learningequality/le-utils
le_utils/constants/languages.py
_iget
def _iget(key, lookup_dict): """ Case-insensitive search for `key` within keys of `lookup_dict`. """ for k, v in lookup_dict.items(): if k.lower() == key.lower(): return v return None
python
def _iget(key, lookup_dict): """ Case-insensitive search for `key` within keys of `lookup_dict`. """ for k, v in lookup_dict.items(): if k.lower() == key.lower(): return v return None
[ "def", "_iget", "(", "key", ",", "lookup_dict", ")", ":", "for", "k", ",", "v", "in", "lookup_dict", ".", "items", "(", ")", ":", "if", "k", ".", "lower", "(", ")", "==", "key", ".", "lower", "(", ")", ":", "return", "v", "return", "None" ]
Case-insensitive search for `key` within keys of `lookup_dict`.
[ "Case", "-", "insensitive", "search", "for", "key", "within", "keys", "of", "lookup_dict", "." ]
train
https://github.com/learningequality/le-utils/blob/07a776e3d5c288818764a25170de2e9ec5850c0d/le_utils/constants/languages.py#L83-L90
learningequality/le-utils
le_utils/constants/languages.py
getlang_by_name
def getlang_by_name(name): """ Try to lookup a Language object by name, e.g. 'English', in internal language list. Returns None if lookup by language name fails in resources/languagelookup.json. """ direct_match = _iget(name, _LANGUAGE_NAME_LOOKUP) if direct_match: return direct_match else: simple_name = name.split(',')[0] # take part before comma simple_name = simple_name.split('(')[0].strip() # and before any bracket return _LANGUAGE_NAME_LOOKUP.get(simple_name, None)
python
def getlang_by_name(name): """ Try to lookup a Language object by name, e.g. 'English', in internal language list. Returns None if lookup by language name fails in resources/languagelookup.json. """ direct_match = _iget(name, _LANGUAGE_NAME_LOOKUP) if direct_match: return direct_match else: simple_name = name.split(',')[0] # take part before comma simple_name = simple_name.split('(')[0].strip() # and before any bracket return _LANGUAGE_NAME_LOOKUP.get(simple_name, None)
[ "def", "getlang_by_name", "(", "name", ")", ":", "direct_match", "=", "_iget", "(", "name", ",", "_LANGUAGE_NAME_LOOKUP", ")", "if", "direct_match", ":", "return", "direct_match", "else", ":", "simple_name", "=", "name", ".", "split", "(", "','", ")", "[", "0", "]", "# take part before comma", "simple_name", "=", "simple_name", ".", "split", "(", "'('", ")", "[", "0", "]", ".", "strip", "(", ")", "# and before any bracket", "return", "_LANGUAGE_NAME_LOOKUP", ".", "get", "(", "simple_name", ",", "None", ")" ]
Try to lookup a Language object by name, e.g. 'English', in internal language list. Returns None if lookup by language name fails in resources/languagelookup.json.
[ "Try", "to", "lookup", "a", "Language", "object", "by", "name", "e", ".", "g", ".", "English", "in", "internal", "language", "list", ".", "Returns", "None", "if", "lookup", "by", "language", "name", "fails", "in", "resources", "/", "languagelookup", ".", "json", "." ]
train
https://github.com/learningequality/le-utils/blob/07a776e3d5c288818764a25170de2e9ec5850c0d/le_utils/constants/languages.py#L146-L157
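A brief usage sketch; "from le_utils.constants import languages" is inferred from the file path and is an assumption, and whether a given name resolves depends on the contents of resources/languagelookup.json.

from le_utils.constants import languages  # assumed import path

print(languages.getlang_by_name("English"))
# Names with a comma or bracket fall back to the part before it ("Spanish" here):
print(languages.getlang_by_name("Spanish, Latin America"))
# Unknown names return None rather than raising.
print(languages.getlang_by_name("Not a language"))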
learningequality/le-utils
le_utils/constants/languages.py
getlang_by_native_name
def getlang_by_native_name(native_name): """ Try to lookup a Language object by native_name, e.g. 'English', in internal language list. Returns None if lookup by language name fails in resources/languagelookup.json. """ direct_match = _iget(native_name, _LANGUAGE_NATIVE_NAME_LOOKUP) if direct_match: return direct_match else: simple_native_name = native_name.split(',')[0] # take part before comma simple_native_name = simple_native_name.split('(')[0].strip() # and before any bracket return _LANGUAGE_NATIVE_NAME_LOOKUP.get(simple_native_name, None)
python
def getlang_by_native_name(native_name): """ Try to lookup a Language object by native_name, e.g. 'English', in internal language list. Returns None if lookup by language name fails in resources/languagelookup.json. """ direct_match = _iget(native_name, _LANGUAGE_NATIVE_NAME_LOOKUP) if direct_match: return direct_match else: simple_native_name = native_name.split(',')[0] # take part before comma simple_native_name = simple_native_name.split('(')[0].strip() # and before any bracket return _LANGUAGE_NATIVE_NAME_LOOKUP.get(simple_native_name, None)
[ "def", "getlang_by_native_name", "(", "native_name", ")", ":", "direct_match", "=", "_iget", "(", "native_name", ",", "_LANGUAGE_NATIVE_NAME_LOOKUP", ")", "if", "direct_match", ":", "return", "direct_match", "else", ":", "simple_native_name", "=", "native_name", ".", "split", "(", "','", ")", "[", "0", "]", "# take part before comma", "simple_native_name", "=", "simple_native_name", ".", "split", "(", "'('", ")", "[", "0", "]", ".", "strip", "(", ")", "# and before any bracket", "return", "_LANGUAGE_NATIVE_NAME_LOOKUP", ".", "get", "(", "simple_native_name", ",", "None", ")" ]
Try to lookup a Language object by native_name, e.g. 'English', in internal language list. Returns None if lookup by language name fails in resources/languagelookup.json.
[ "Try", "to", "lookup", "a", "Language", "object", "by", "native_name", "e", ".", "g", ".", "English", "in", "internal", "language", "list", ".", "Returns", "None", "if", "lookup", "by", "language", "name", "fails", "in", "resources", "/", "languagelookup", ".", "json", "." ]
train
https://github.com/learningequality/le-utils/blob/07a776e3d5c288818764a25170de2e9ec5850c0d/le_utils/constants/languages.py#L185-L196
learningequality/le-utils
le_utils/constants/languages.py
getlang_by_alpha2
def getlang_by_alpha2(code): """ Lookup a Language object for language code `code` based on these strategies: - Special case rules for Hebrew and Chinese Hans/Hant scripts - Using `alpha_2` lookup in `pycountry.languages` followed by lookup for a language with the same `name` in the internal representaion Returns `None` if no matching language is found. """ # Handle special cases for language codes returned by YouTube API if code == 'iw': # handle old Hebrew code 'iw' and return modern code 'he' return getlang('he') elif 'zh-Hans' in code: return getlang('zh-CN') # use code `zh-CN` for all simplified Chinese elif 'zh-Hant' in code or re.match('zh(.*)?-HK', code): return getlang('zh-TW') # use code `zh-TW` for all traditional Chinese # extract prefix only if specified with subcode: e.g. zh-Hans --> zh first_part = code.split('-')[0] # See if pycountry can find this language try: pyc_lang = pycountry.languages.get(alpha_2=first_part) if pyc_lang: if hasattr(pyc_lang, 'inverted_name'): lang_name = pyc_lang.inverted_name else: lang_name = pyc_lang.name return getlang_by_name(lang_name) else: return None except KeyError: return None
python
def getlang_by_alpha2(code): """ Lookup a Language object for language code `code` based on these strategies: - Special case rules for Hebrew and Chinese Hans/Hant scripts - Using `alpha_2` lookup in `pycountry.languages` followed by lookup for a language with the same `name` in the internal representaion Returns `None` if no matching language is found. """ # Handle special cases for language codes returned by YouTube API if code == 'iw': # handle old Hebrew code 'iw' and return modern code 'he' return getlang('he') elif 'zh-Hans' in code: return getlang('zh-CN') # use code `zh-CN` for all simplified Chinese elif 'zh-Hant' in code or re.match('zh(.*)?-HK', code): return getlang('zh-TW') # use code `zh-TW` for all traditional Chinese # extract prefix only if specified with subcode: e.g. zh-Hans --> zh first_part = code.split('-')[0] # See if pycountry can find this language try: pyc_lang = pycountry.languages.get(alpha_2=first_part) if pyc_lang: if hasattr(pyc_lang, 'inverted_name'): lang_name = pyc_lang.inverted_name else: lang_name = pyc_lang.name return getlang_by_name(lang_name) else: return None except KeyError: return None
[ "def", "getlang_by_alpha2", "(", "code", ")", ":", "# Handle special cases for language codes returned by YouTube API", "if", "code", "==", "'iw'", ":", "# handle old Hebrew code 'iw' and return modern code 'he'", "return", "getlang", "(", "'he'", ")", "elif", "'zh-Hans'", "in", "code", ":", "return", "getlang", "(", "'zh-CN'", ")", "# use code `zh-CN` for all simplified Chinese", "elif", "'zh-Hant'", "in", "code", "or", "re", ".", "match", "(", "'zh(.*)?-HK'", ",", "code", ")", ":", "return", "getlang", "(", "'zh-TW'", ")", "# use code `zh-TW` for all traditional Chinese", "# extract prefix only if specified with subcode: e.g. zh-Hans --> zh", "first_part", "=", "code", ".", "split", "(", "'-'", ")", "[", "0", "]", "# See if pycountry can find this language", "try", ":", "pyc_lang", "=", "pycountry", ".", "languages", ".", "get", "(", "alpha_2", "=", "first_part", ")", "if", "pyc_lang", ":", "if", "hasattr", "(", "pyc_lang", ",", "'inverted_name'", ")", ":", "lang_name", "=", "pyc_lang", ".", "inverted_name", "else", ":", "lang_name", "=", "pyc_lang", ".", "name", "return", "getlang_by_name", "(", "lang_name", ")", "else", ":", "return", "None", "except", "KeyError", ":", "return", "None" ]
Lookup a Language object for language code `code` based on these strategies: - Special case rules for Hebrew and Chinese Hans/Hant scripts - Using `alpha_2` lookup in `pycountry.languages` followed by lookup for a language with the same `name` in the internal representaion Returns `None` if no matching language is found.
[ "Lookup", "a", "Language", "object", "for", "language", "code", "code", "based", "on", "these", "strategies", ":", "-", "Special", "case", "rules", "for", "Hebrew", "and", "Chinese", "Hans", "/", "Hant", "scripts", "-", "Using", "alpha_2", "lookup", "in", "pycountry", ".", "languages", "followed", "by", "lookup", "for", "a", "language", "with", "the", "same", "name", "in", "the", "internal", "representaion", "Returns", "None", "if", "no", "matching", "language", "is", "found", "." ]
train
https://github.com/learningequality/le-utils/blob/07a776e3d5c288818764a25170de2e9ec5850c0d/le_utils/constants/languages.py#L200-L231
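The special-case branches above can be exercised directly. The import path is an assumption as before, and the regular path additionally requires pycountry.

from le_utils.constants import languages  # assumed import path

print(languages.getlang_by_alpha2("iw"))       # old Hebrew code, mapped to getlang('he')
print(languages.getlang_by_alpha2("zh-Hans"))  # simplified Chinese, mapped to getlang('zh-CN')
print(languages.getlang_by_alpha2("fr"))       # plain alpha-2 codes go through pycountry + name lookup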
nschloe/orthopy
orthopy/sphere/tools.py
write
def write(filename, f): """Write a function `f` defined in terms of spherical coordinates to a file. """ import meshio import meshzoo points, cells = meshzoo.iso_sphere(5) # get spherical coordinates from points polar = numpy.arccos(points[:, 2]) azimuthal = numpy.arctan2(points[:, 1], points[:, 0]) vals = f(polar, azimuthal) meshio.write(filename, points, {"triangle": cells}, point_data={"f": vals}) return
python
def write(filename, f): """Write a function `f` defined in terms of spherical coordinates to a file. """ import meshio import meshzoo points, cells = meshzoo.iso_sphere(5) # get spherical coordinates from points polar = numpy.arccos(points[:, 2]) azimuthal = numpy.arctan2(points[:, 1], points[:, 0]) vals = f(polar, azimuthal) meshio.write(filename, points, {"triangle": cells}, point_data={"f": vals}) return
[ "def", "write", "(", "filename", ",", "f", ")", ":", "import", "meshio", "import", "meshzoo", "points", ",", "cells", "=", "meshzoo", ".", "iso_sphere", "(", "5", ")", "# get spherical coordinates from points", "polar", "=", "numpy", ".", "arccos", "(", "points", "[", ":", ",", "2", "]", ")", "azimuthal", "=", "numpy", ".", "arctan2", "(", "points", "[", ":", ",", "1", "]", ",", "points", "[", ":", ",", "0", "]", ")", "vals", "=", "f", "(", "polar", ",", "azimuthal", ")", "meshio", ".", "write", "(", "filename", ",", "points", ",", "{", "\"triangle\"", ":", "cells", "}", ",", "point_data", "=", "{", "\"f\"", ":", "vals", "}", ")", "return" ]
Write a function `f` defined in terms of spherical coordinates to a file.
[ "Write", "a", "function", "f", "defined", "in", "terms", "of", "spherical", "coordinates", "to", "a", "file", "." ]
train
https://github.com/nschloe/orthopy/blob/64713d0533b0af042810a7535fff411b8e0aea9e/orthopy/sphere/tools.py#L6-L18
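Usage sketch for the sphere write helper: f is called with arrays of polar and azimuthal angles, one entry per mesh point. The import path is inferred from the file location, and meshzoo plus an older meshio (the points/cells-dict write signature used in the record) are assumed to be installed.

import numpy
from orthopy.sphere.tools import write  # assumed import path

write("sphere.vtu", lambda polar, azimuthal: numpy.cos(polar) * numpy.sin(azimuthal))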
nschloe/orthopy
orthopy/sphere/orth.py
tree_sph
def tree_sph(polar, azimuthal, n, standardization, symbolic=False): """Evaluate all spherical harmonics of degree at most `n` at angles `polar`, `azimuthal`. """ cos = numpy.vectorize(sympy.cos) if symbolic else numpy.cos # Conventions from # <https://en.wikipedia.org/wiki/Spherical_harmonics#Orthogonality_and_normalization>. config = { "acoustic": ("complex spherical", False), "quantum mechanic": ("complex spherical", True), "geodetic": ("complex spherical 1", False), "schmidt": ("schmidt", False), } standard, cs_phase = config[standardization] return tree_alp( cos(polar), n, phi=azimuthal, standardization=standard, with_condon_shortley_phase=cs_phase, symbolic=symbolic, )
python
def tree_sph(polar, azimuthal, n, standardization, symbolic=False): """Evaluate all spherical harmonics of degree at most `n` at angles `polar`, `azimuthal`. """ cos = numpy.vectorize(sympy.cos) if symbolic else numpy.cos # Conventions from # <https://en.wikipedia.org/wiki/Spherical_harmonics#Orthogonality_and_normalization>. config = { "acoustic": ("complex spherical", False), "quantum mechanic": ("complex spherical", True), "geodetic": ("complex spherical 1", False), "schmidt": ("schmidt", False), } standard, cs_phase = config[standardization] return tree_alp( cos(polar), n, phi=azimuthal, standardization=standard, with_condon_shortley_phase=cs_phase, symbolic=symbolic, )
[ "def", "tree_sph", "(", "polar", ",", "azimuthal", ",", "n", ",", "standardization", ",", "symbolic", "=", "False", ")", ":", "cos", "=", "numpy", ".", "vectorize", "(", "sympy", ".", "cos", ")", "if", "symbolic", "else", "numpy", ".", "cos", "# Conventions from", "# <https://en.wikipedia.org/wiki/Spherical_harmonics#Orthogonality_and_normalization>.", "config", "=", "{", "\"acoustic\"", ":", "(", "\"complex spherical\"", ",", "False", ")", ",", "\"quantum mechanic\"", ":", "(", "\"complex spherical\"", ",", "True", ")", ",", "\"geodetic\"", ":", "(", "\"complex spherical 1\"", ",", "False", ")", ",", "\"schmidt\"", ":", "(", "\"schmidt\"", ",", "False", ")", ",", "}", "standard", ",", "cs_phase", "=", "config", "[", "standardization", "]", "return", "tree_alp", "(", "cos", "(", "polar", ")", ",", "n", ",", "phi", "=", "azimuthal", ",", "standardization", "=", "standard", ",", "with_condon_shortley_phase", "=", "cs_phase", ",", "symbolic", "=", "symbolic", ",", ")" ]
Evaluate all spherical harmonics of degree at most `n` at angles `polar`, `azimuthal`.
[ "Evaluate", "all", "spherical", "harmonics", "of", "degree", "at", "most", "n", "at", "angles", "polar", "azimuthal", "." ]
train
https://github.com/nschloe/orthopy/blob/64713d0533b0af042810a7535fff411b8e0aea9e/orthopy/sphere/orth.py#L9-L33
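Sketch of evaluating spherical harmonics with tree_sph; the import path is inferred from the file location and is an assumption. For the complex-valued standardizations the returned arrays are complex.

import numpy
from orthopy.sphere.orth import tree_sph  # assumed import path

polar = numpy.array([0.5, 1.0])      # angles in radians
azimuthal = numpy.array([0.3, 2.0])

sph = tree_sph(polar, azimuthal, 2, standardization="quantum mechanic")
for L, level in enumerate(sph):
    # Level L holds the 2*L + 1 harmonics of degree L, evaluated at both points.
    print(L, numpy.asarray(level).shape)  # (1, 2), (3, 2), (5, 2)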
nschloe/orthopy
orthopy/line_segment/orth.py
tree_alp
def tree_alp( x, n, standardization, phi=None, with_condon_shortley_phase=True, symbolic=False ): """Evaluates the entire tree of associated Legendre polynomials up to depth n. There are many recurrence relations that can be used to construct the associated Legendre polynomials. However, only few are numerically stable. Many implementations (including this one) use the classical Legendre recurrence relation with increasing L. Useful references are Taweetham Limpanuparb, Josh Milthorpe, Associated Legendre Polynomials and Spherical Harmonics Computation for Chemistry Applications, Proceedings of The 40th Congress on Science and Technology of Thailand; 2014 Dec 2-4, Khon Kaen, Thailand. P. 233-241. <https://arxiv.org/abs/1410.1748> and Schneider et al., A new Fortran 90 program to compute regular and irregular associated Legendre functions, Computer Physics Communications, Volume 181, Issue 12, December 2010, Pages 2091-2097, <https://doi.org/10.1016/j.cpc.2010.08.038>. The return value is a list of arrays, where `out[k]` hosts the `2*k+1` values of the `k`th level of the tree (0, 0) (-1, 1) (0, 1) (1, 1) (-2, 2) (-1, 2) (0, 2) (1, 2) (2, 2) ... ... ... ... ... """ # assert numpy.all(numpy.abs(x) <= 1.0) d = { "natural": (_Natural, [x, symbolic]), "spherical": (_Spherical, [x, symbolic]), "complex spherical": (_ComplexSpherical, [x, phi, symbolic, False]), "complex spherical 1": (_ComplexSpherical, [x, phi, symbolic, True]), "normal": (_Normal, [x, symbolic]), "schmidt": (_Schmidt, [x, phi, symbolic]), } fun, args = d[standardization] c = fun(*args) if with_condon_shortley_phase: def z1_factor_CSP(L): return -1 * c.z1_factor(L) else: z1_factor_CSP = c.z1_factor # Here comes the actual loop. e = numpy.ones_like(x, dtype=int) out = [[e * c.p0]] for L in range(1, n + 1): out.append( numpy.concatenate( [ [out[L - 1][0] * c.z0_factor(L)], out[L - 1] * numpy.multiply.outer(c.C0(L), x), [out[L - 1][-1] * z1_factor_CSP(L)], ] ) ) if L > 1: out[-1][2:-2] -= numpy.multiply.outer(c.C1(L), e) * out[L - 2] return out
python
def tree_alp( x, n, standardization, phi=None, with_condon_shortley_phase=True, symbolic=False ): """Evaluates the entire tree of associated Legendre polynomials up to depth n. There are many recurrence relations that can be used to construct the associated Legendre polynomials. However, only few are numerically stable. Many implementations (including this one) use the classical Legendre recurrence relation with increasing L. Useful references are Taweetham Limpanuparb, Josh Milthorpe, Associated Legendre Polynomials and Spherical Harmonics Computation for Chemistry Applications, Proceedings of The 40th Congress on Science and Technology of Thailand; 2014 Dec 2-4, Khon Kaen, Thailand. P. 233-241. <https://arxiv.org/abs/1410.1748> and Schneider et al., A new Fortran 90 program to compute regular and irregular associated Legendre functions, Computer Physics Communications, Volume 181, Issue 12, December 2010, Pages 2091-2097, <https://doi.org/10.1016/j.cpc.2010.08.038>. The return value is a list of arrays, where `out[k]` hosts the `2*k+1` values of the `k`th level of the tree (0, 0) (-1, 1) (0, 1) (1, 1) (-2, 2) (-1, 2) (0, 2) (1, 2) (2, 2) ... ... ... ... ... """ # assert numpy.all(numpy.abs(x) <= 1.0) d = { "natural": (_Natural, [x, symbolic]), "spherical": (_Spherical, [x, symbolic]), "complex spherical": (_ComplexSpherical, [x, phi, symbolic, False]), "complex spherical 1": (_ComplexSpherical, [x, phi, symbolic, True]), "normal": (_Normal, [x, symbolic]), "schmidt": (_Schmidt, [x, phi, symbolic]), } fun, args = d[standardization] c = fun(*args) if with_condon_shortley_phase: def z1_factor_CSP(L): return -1 * c.z1_factor(L) else: z1_factor_CSP = c.z1_factor # Here comes the actual loop. e = numpy.ones_like(x, dtype=int) out = [[e * c.p0]] for L in range(1, n + 1): out.append( numpy.concatenate( [ [out[L - 1][0] * c.z0_factor(L)], out[L - 1] * numpy.multiply.outer(c.C0(L), x), [out[L - 1][-1] * z1_factor_CSP(L)], ] ) ) if L > 1: out[-1][2:-2] -= numpy.multiply.outer(c.C1(L), e) * out[L - 2] return out
[ "def", "tree_alp", "(", "x", ",", "n", ",", "standardization", ",", "phi", "=", "None", ",", "with_condon_shortley_phase", "=", "True", ",", "symbolic", "=", "False", ")", ":", "# assert numpy.all(numpy.abs(x) <= 1.0)", "d", "=", "{", "\"natural\"", ":", "(", "_Natural", ",", "[", "x", ",", "symbolic", "]", ")", ",", "\"spherical\"", ":", "(", "_Spherical", ",", "[", "x", ",", "symbolic", "]", ")", ",", "\"complex spherical\"", ":", "(", "_ComplexSpherical", ",", "[", "x", ",", "phi", ",", "symbolic", ",", "False", "]", ")", ",", "\"complex spherical 1\"", ":", "(", "_ComplexSpherical", ",", "[", "x", ",", "phi", ",", "symbolic", ",", "True", "]", ")", ",", "\"normal\"", ":", "(", "_Normal", ",", "[", "x", ",", "symbolic", "]", ")", ",", "\"schmidt\"", ":", "(", "_Schmidt", ",", "[", "x", ",", "phi", ",", "symbolic", "]", ")", ",", "}", "fun", ",", "args", "=", "d", "[", "standardization", "]", "c", "=", "fun", "(", "*", "args", ")", "if", "with_condon_shortley_phase", ":", "def", "z1_factor_CSP", "(", "L", ")", ":", "return", "-", "1", "*", "c", ".", "z1_factor", "(", "L", ")", "else", ":", "z1_factor_CSP", "=", "c", ".", "z1_factor", "# Here comes the actual loop.", "e", "=", "numpy", ".", "ones_like", "(", "x", ",", "dtype", "=", "int", ")", "out", "=", "[", "[", "e", "*", "c", ".", "p0", "]", "]", "for", "L", "in", "range", "(", "1", ",", "n", "+", "1", ")", ":", "out", ".", "append", "(", "numpy", ".", "concatenate", "(", "[", "[", "out", "[", "L", "-", "1", "]", "[", "0", "]", "*", "c", ".", "z0_factor", "(", "L", ")", "]", ",", "out", "[", "L", "-", "1", "]", "*", "numpy", ".", "multiply", ".", "outer", "(", "c", ".", "C0", "(", "L", ")", ",", "x", ")", ",", "[", "out", "[", "L", "-", "1", "]", "[", "-", "1", "]", "*", "z1_factor_CSP", "(", "L", ")", "]", ",", "]", ")", ")", "if", "L", ">", "1", ":", "out", "[", "-", "1", "]", "[", "2", ":", "-", "2", "]", "-=", "numpy", ".", "multiply", ".", "outer", "(", "c", ".", "C1", "(", "L", ")", ",", "e", ")", "*", "out", "[", "L", "-", "2", "]", "return", "out" ]
Evaluates the entire tree of associated Legendre polynomials up to depth n. There are many recurrence relations that can be used to construct the associated Legendre polynomials. However, only few are numerically stable. Many implementations (including this one) use the classical Legendre recurrence relation with increasing L. Useful references are Taweetham Limpanuparb, Josh Milthorpe, Associated Legendre Polynomials and Spherical Harmonics Computation for Chemistry Applications, Proceedings of The 40th Congress on Science and Technology of Thailand; 2014 Dec 2-4, Khon Kaen, Thailand. P. 233-241. <https://arxiv.org/abs/1410.1748> and Schneider et al., A new Fortran 90 program to compute regular and irregular associated Legendre functions, Computer Physics Communications, Volume 181, Issue 12, December 2010, Pages 2091-2097, <https://doi.org/10.1016/j.cpc.2010.08.038>. The return value is a list of arrays, where `out[k]` hosts the `2*k+1` values of the `k`th level of the tree (0, 0) (-1, 1) (0, 1) (1, 1) (-2, 2) (-1, 2) (0, 2) (1, 2) (2, 2) ... ... ... ... ...
[ "Evaluates", "the", "entire", "tree", "of", "associated", "Legendre", "polynomials", "up", "to", "depth", "n", ".", "There", "are", "many", "recurrence", "relations", "that", "can", "be", "used", "to", "construct", "the", "associated", "Legendre", "polynomials", ".", "However", "only", "few", "are", "numerically", "stable", ".", "Many", "implementations", "(", "including", "this", "one", ")", "use", "the", "classical", "Legendre", "recurrence", "relation", "with", "increasing", "L", "." ]
train
https://github.com/nschloe/orthopy/blob/64713d0533b0af042810a7535fff411b8e0aea9e/orthopy/line_segment/orth.py#L186-L260
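A sketch of the real-valued "natural" standardization of tree_alp; the import path is inferred from the file location (an assumption).

import numpy
from orthopy.line_segment.orth import tree_alp  # assumed import path

x = numpy.linspace(-0.9, 0.9, 4)
alp = tree_alp(x, 3, "natural", with_condon_shortley_phase=False)

# Following the tree layout in the docstring, alp[L][L + m] is the order-m function of degree L.
print(numpy.asarray(alp[2]).shape)  # (5, 4): the 2*2 + 1 degree-2 functions at the 4 points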
nschloe/orthopy
orthopy/disk/orth.py
tree
def tree(X, n, symbolic=False): """Evaluates the entire tree of orthogonal polynomials on the unit disk. The return value is a list of arrays, where `out[k]` hosts the `2*k+1` values of the `k`th level of the tree (0, 0) (0, 1) (1, 1) (0, 2) (1, 2) (2, 2) ... ... ... """ frac = sympy.Rational if symbolic else lambda x, y: x / y sqrt = sympy.sqrt if symbolic else numpy.sqrt pi = sympy.pi if symbolic else numpy.pi mu = frac(1, 2) p0 = 1 / sqrt(pi) def alpha(n): return numpy.array( [ 2 * sqrt( frac( (n + mu + frac(1, 2)) * (n + mu - frac(1, 2)), (n - k) * (n + k + 2 * mu), ) ) for k in range(n) ] ) def beta(n): return 2 * sqrt( frac((n + mu - 1) * (n + mu + frac(1, 2)), (n + 2 * mu - 1) * n) ) def gamma(n): return numpy.array( [ sqrt( frac( (n - 1 - k) * (n + mu + frac(1, 2)) * (n + k + 2 * mu - 1), (n - k) * (n + mu - frac(3, 2)) * (n + k + 2 * mu), ) ) for k in range(n - 1) ] ) def delta(n): return sqrt( frac( (n - 1) * (n + 2 * mu - 2) * (n + mu - frac(1, 2)) * (n + mu + frac(1, 2)), n * (n + 2 * mu - 1) * (n + mu - 1) * (n + mu - 2), ) ) out = [numpy.array([0 * X[0] + p0])] one_min_x2 = 1 - X[0] ** 2 for L in range(1, n + 1): out.append( numpy.concatenate( [ out[L - 1] * numpy.multiply.outer(alpha(L), X[0]), [out[L - 1][L - 1] * beta(L) * X[1]], ] ) ) if L > 1: out[-1][: L - 1] -= (out[L - 2].T * gamma(L)).T out[-1][-1] -= out[L - 2][L - 2] * delta(L) * one_min_x2 return out
python
def tree(X, n, symbolic=False): """Evaluates the entire tree of orthogonal polynomials on the unit disk. The return value is a list of arrays, where `out[k]` hosts the `2*k+1` values of the `k`th level of the tree (0, 0) (0, 1) (1, 1) (0, 2) (1, 2) (2, 2) ... ... ... """ frac = sympy.Rational if symbolic else lambda x, y: x / y sqrt = sympy.sqrt if symbolic else numpy.sqrt pi = sympy.pi if symbolic else numpy.pi mu = frac(1, 2) p0 = 1 / sqrt(pi) def alpha(n): return numpy.array( [ 2 * sqrt( frac( (n + mu + frac(1, 2)) * (n + mu - frac(1, 2)), (n - k) * (n + k + 2 * mu), ) ) for k in range(n) ] ) def beta(n): return 2 * sqrt( frac((n + mu - 1) * (n + mu + frac(1, 2)), (n + 2 * mu - 1) * n) ) def gamma(n): return numpy.array( [ sqrt( frac( (n - 1 - k) * (n + mu + frac(1, 2)) * (n + k + 2 * mu - 1), (n - k) * (n + mu - frac(3, 2)) * (n + k + 2 * mu), ) ) for k in range(n - 1) ] ) def delta(n): return sqrt( frac( (n - 1) * (n + 2 * mu - 2) * (n + mu - frac(1, 2)) * (n + mu + frac(1, 2)), n * (n + 2 * mu - 1) * (n + mu - 1) * (n + mu - 2), ) ) out = [numpy.array([0 * X[0] + p0])] one_min_x2 = 1 - X[0] ** 2 for L in range(1, n + 1): out.append( numpy.concatenate( [ out[L - 1] * numpy.multiply.outer(alpha(L), X[0]), [out[L - 1][L - 1] * beta(L) * X[1]], ] ) ) if L > 1: out[-1][: L - 1] -= (out[L - 2].T * gamma(L)).T out[-1][-1] -= out[L - 2][L - 2] * delta(L) * one_min_x2 return out
[ "def", "tree", "(", "X", ",", "n", ",", "symbolic", "=", "False", ")", ":", "frac", "=", "sympy", ".", "Rational", "if", "symbolic", "else", "lambda", "x", ",", "y", ":", "x", "/", "y", "sqrt", "=", "sympy", ".", "sqrt", "if", "symbolic", "else", "numpy", ".", "sqrt", "pi", "=", "sympy", ".", "pi", "if", "symbolic", "else", "numpy", ".", "pi", "mu", "=", "frac", "(", "1", ",", "2", ")", "p0", "=", "1", "/", "sqrt", "(", "pi", ")", "def", "alpha", "(", "n", ")", ":", "return", "numpy", ".", "array", "(", "[", "2", "*", "sqrt", "(", "frac", "(", "(", "n", "+", "mu", "+", "frac", "(", "1", ",", "2", ")", ")", "*", "(", "n", "+", "mu", "-", "frac", "(", "1", ",", "2", ")", ")", ",", "(", "n", "-", "k", ")", "*", "(", "n", "+", "k", "+", "2", "*", "mu", ")", ",", ")", ")", "for", "k", "in", "range", "(", "n", ")", "]", ")", "def", "beta", "(", "n", ")", ":", "return", "2", "*", "sqrt", "(", "frac", "(", "(", "n", "+", "mu", "-", "1", ")", "*", "(", "n", "+", "mu", "+", "frac", "(", "1", ",", "2", ")", ")", ",", "(", "n", "+", "2", "*", "mu", "-", "1", ")", "*", "n", ")", ")", "def", "gamma", "(", "n", ")", ":", "return", "numpy", ".", "array", "(", "[", "sqrt", "(", "frac", "(", "(", "n", "-", "1", "-", "k", ")", "*", "(", "n", "+", "mu", "+", "frac", "(", "1", ",", "2", ")", ")", "*", "(", "n", "+", "k", "+", "2", "*", "mu", "-", "1", ")", ",", "(", "n", "-", "k", ")", "*", "(", "n", "+", "mu", "-", "frac", "(", "3", ",", "2", ")", ")", "*", "(", "n", "+", "k", "+", "2", "*", "mu", ")", ",", ")", ")", "for", "k", "in", "range", "(", "n", "-", "1", ")", "]", ")", "def", "delta", "(", "n", ")", ":", "return", "sqrt", "(", "frac", "(", "(", "n", "-", "1", ")", "*", "(", "n", "+", "2", "*", "mu", "-", "2", ")", "*", "(", "n", "+", "mu", "-", "frac", "(", "1", ",", "2", ")", ")", "*", "(", "n", "+", "mu", "+", "frac", "(", "1", ",", "2", ")", ")", ",", "n", "*", "(", "n", "+", "2", "*", "mu", "-", "1", ")", "*", "(", "n", "+", "mu", "-", "1", ")", "*", "(", "n", "+", "mu", "-", "2", ")", ",", ")", ")", "out", "=", "[", "numpy", ".", "array", "(", "[", "0", "*", "X", "[", "0", "]", "+", "p0", "]", ")", "]", "one_min_x2", "=", "1", "-", "X", "[", "0", "]", "**", "2", "for", "L", "in", "range", "(", "1", ",", "n", "+", "1", ")", ":", "out", ".", "append", "(", "numpy", ".", "concatenate", "(", "[", "out", "[", "L", "-", "1", "]", "*", "numpy", ".", "multiply", ".", "outer", "(", "alpha", "(", "L", ")", ",", "X", "[", "0", "]", ")", ",", "[", "out", "[", "L", "-", "1", "]", "[", "L", "-", "1", "]", "*", "beta", "(", "L", ")", "*", "X", "[", "1", "]", "]", ",", "]", ")", ")", "if", "L", ">", "1", ":", "out", "[", "-", "1", "]", "[", ":", "L", "-", "1", "]", "-=", "(", "out", "[", "L", "-", "2", "]", ".", "T", "*", "gamma", "(", "L", ")", ")", ".", "T", "out", "[", "-", "1", "]", "[", "-", "1", "]", "-=", "out", "[", "L", "-", "2", "]", "[", "L", "-", "2", "]", "*", "delta", "(", "L", ")", "*", "one_min_x2", "return", "out" ]
Evaluates the entire tree of orthogonal polynomials on the unit disk. The return value is a list of arrays, where `out[k]` hosts the `2*k+1` values of the `k`th level of the tree (0, 0) (0, 1) (1, 1) (0, 2) (1, 2) (2, 2) ... ... ...
[ "Evaluates", "the", "entire", "tree", "of", "orthogonal", "polynomials", "on", "the", "unit", "disk", "." ]
train
https://github.com/nschloe/orthopy/blob/64713d0533b0af042810a7535fff411b8e0aea9e/orthopy/disk/orth.py#L9-L89
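Sketch for the unit-disk tree: X holds the evaluation points as an array of shape (2, n_points). The import path is inferred from the file location (an assumption).

import numpy
from orthopy.disk.orth import tree  # assumed import path

X = numpy.array([[0.1, -0.4, 0.3],
                 [0.2,  0.0, -0.5]])

levels = tree(X, 2)
for L, vals in enumerate(levels):
    # Level L holds the L + 1 degree-L orthonormal polynomials at every point.
    print(L, numpy.asarray(vals).shape)  # (1, 3), (2, 3), (3, 3)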
nschloe/orthopy
orthopy/triangle/orth.py
tree
def tree(bary, n, standardization, symbolic=False): """Evaluates the entire tree of orthogonal triangle polynomials. The return value is a list of arrays, where `out[k]` hosts the `2*k+1` values of the `k`th level of the tree (0, 0) (0, 1) (1, 1) (0, 2) (1, 2) (2, 2) ... ... ... For reference, see Abedallah Rababah, Recurrence Relations for Orthogonal Polynomials on Triangular Domains, Mathematics 2016, 4(2), 25, <https://doi.org/10.3390/math4020025>. """ S = numpy.vectorize(sympy.S) if symbolic else lambda x: x sqrt = numpy.vectorize(sympy.sqrt) if symbolic else numpy.sqrt if standardization == "1": p0 = 1 def alpha(n): r = numpy.arange(n) return S(n * (2 * n + 1)) / ((n - r) * (n + r + 1)) def beta(n): r = numpy.arange(n) return S(n * (2 * r + 1) ** 2) / ((n - r) * (n + r + 1) * (2 * n - 1)) def gamma(n): r = numpy.arange(n - 1) return S((n - r - 1) * (n + r) * (2 * n + 1)) / ( (n - r) * (n + r + 1) * (2 * n - 1) ) def delta(n): return S(2 * n - 1) / n def epsilon(n): return S(n - 1) / n else: # The coefficients here are based on the insight that # # int_T P_{n, r}^2 = # int_0^1 L_r^2(t) dt * int_0^1 q_{n,r}(w)^2 (1-w)^(r+s+1) dw. # # For reference, see # page 219 (and the reference to Gould, 1972) in # # Farouki, Goodman, Sauer, # Construction of orthogonal bases for polynomials in Bernstein form # on triangular and simplex domains, # Computer Aided Geometric Design 20 (2003) 209–230. # # The Legendre integral is 1/(2*r+1), and one gets # # int_T P_{n, r}^2 = 1 / (2*r+1) / (2*n+2) # sum_{i=0}^{n-r} sum_{j=0}^{n-r} # (-1)**(i+j) * binom(n+r+1, i) * binom(n-r, i) # * binom(n+r+1, j) * binom(n-r, j) # / binom(2*n+1, i+j) # # Astonishingly, the double sum is always 1, hence # # int_T P_{n, r}^2 = 1 / (2*r+1) / (2*n+2). # assert standardization == "normal" p0 = sqrt(2) def alpha(n): r = numpy.arange(n) return sqrt((n + 1) * n) * (S(2 * n + 1) / ((n - r) * (n + r + 1))) def beta(n): r = numpy.arange(n) return ( sqrt((n + 1) * n) * S((2 * r + 1) ** 2) / ((n - r) * (n + r + 1) * (2 * n - 1)) ) def gamma(n): r = numpy.arange(n - 1) return sqrt(S(n + 1) / (n - 1)) * ( S((n - r - 1) * (n + r) * (2 * n + 1)) / ((n - r) * (n + r + 1) * (2 * n - 1)) ) def delta(n): return sqrt(S((2 * n + 1) * (n + 1) * (2 * n - 1)) / n ** 3) def epsilon(n): return sqrt(S((2 * n + 1) * (n + 1) * (n - 1)) / ((2 * n - 3) * n ** 2)) u, v, w = bary out = [numpy.array([numpy.zeros_like(u) + p0])] for L in range(1, n + 1): out.append( numpy.concatenate( [ out[L - 1] * (numpy.multiply.outer(alpha(L), 1 - 2 * w).T - beta(L)).T, [delta(L) * out[L - 1][L - 1] * (u - v)], ] ) ) if L > 1: out[-1][: L - 1] -= (out[L - 2].T * gamma(L)).T out[-1][-1] -= epsilon(L) * out[L - 2][L - 2] * (u + v) ** 2 return out
python
def tree(bary, n, standardization, symbolic=False): """Evaluates the entire tree of orthogonal triangle polynomials. The return value is a list of arrays, where `out[k]` hosts the `2*k+1` values of the `k`th level of the tree (0, 0) (0, 1) (1, 1) (0, 2) (1, 2) (2, 2) ... ... ... For reference, see Abedallah Rababah, Recurrence Relations for Orthogonal Polynomials on Triangular Domains, Mathematics 2016, 4(2), 25, <https://doi.org/10.3390/math4020025>. """ S = numpy.vectorize(sympy.S) if symbolic else lambda x: x sqrt = numpy.vectorize(sympy.sqrt) if symbolic else numpy.sqrt if standardization == "1": p0 = 1 def alpha(n): r = numpy.arange(n) return S(n * (2 * n + 1)) / ((n - r) * (n + r + 1)) def beta(n): r = numpy.arange(n) return S(n * (2 * r + 1) ** 2) / ((n - r) * (n + r + 1) * (2 * n - 1)) def gamma(n): r = numpy.arange(n - 1) return S((n - r - 1) * (n + r) * (2 * n + 1)) / ( (n - r) * (n + r + 1) * (2 * n - 1) ) def delta(n): return S(2 * n - 1) / n def epsilon(n): return S(n - 1) / n else: # The coefficients here are based on the insight that # # int_T P_{n, r}^2 = # int_0^1 L_r^2(t) dt * int_0^1 q_{n,r}(w)^2 (1-w)^(r+s+1) dw. # # For reference, see # page 219 (and the reference to Gould, 1972) in # # Farouki, Goodman, Sauer, # Construction of orthogonal bases for polynomials in Bernstein form # on triangular and simplex domains, # Computer Aided Geometric Design 20 (2003) 209–230. # # The Legendre integral is 1/(2*r+1), and one gets # # int_T P_{n, r}^2 = 1 / (2*r+1) / (2*n+2) # sum_{i=0}^{n-r} sum_{j=0}^{n-r} # (-1)**(i+j) * binom(n+r+1, i) * binom(n-r, i) # * binom(n+r+1, j) * binom(n-r, j) # / binom(2*n+1, i+j) # # Astonishingly, the double sum is always 1, hence # # int_T P_{n, r}^2 = 1 / (2*r+1) / (2*n+2). # assert standardization == "normal" p0 = sqrt(2) def alpha(n): r = numpy.arange(n) return sqrt((n + 1) * n) * (S(2 * n + 1) / ((n - r) * (n + r + 1))) def beta(n): r = numpy.arange(n) return ( sqrt((n + 1) * n) * S((2 * r + 1) ** 2) / ((n - r) * (n + r + 1) * (2 * n - 1)) ) def gamma(n): r = numpy.arange(n - 1) return sqrt(S(n + 1) / (n - 1)) * ( S((n - r - 1) * (n + r) * (2 * n + 1)) / ((n - r) * (n + r + 1) * (2 * n - 1)) ) def delta(n): return sqrt(S((2 * n + 1) * (n + 1) * (2 * n - 1)) / n ** 3) def epsilon(n): return sqrt(S((2 * n + 1) * (n + 1) * (n - 1)) / ((2 * n - 3) * n ** 2)) u, v, w = bary out = [numpy.array([numpy.zeros_like(u) + p0])] for L in range(1, n + 1): out.append( numpy.concatenate( [ out[L - 1] * (numpy.multiply.outer(alpha(L), 1 - 2 * w).T - beta(L)).T, [delta(L) * out[L - 1][L - 1] * (u - v)], ] ) ) if L > 1: out[-1][: L - 1] -= (out[L - 2].T * gamma(L)).T out[-1][-1] -= epsilon(L) * out[L - 2][L - 2] * (u + v) ** 2 return out
[ "def", "tree", "(", "bary", ",", "n", ",", "standardization", ",", "symbolic", "=", "False", ")", ":", "S", "=", "numpy", ".", "vectorize", "(", "sympy", ".", "S", ")", "if", "symbolic", "else", "lambda", "x", ":", "x", "sqrt", "=", "numpy", ".", "vectorize", "(", "sympy", ".", "sqrt", ")", "if", "symbolic", "else", "numpy", ".", "sqrt", "if", "standardization", "==", "\"1\"", ":", "p0", "=", "1", "def", "alpha", "(", "n", ")", ":", "r", "=", "numpy", ".", "arange", "(", "n", ")", "return", "S", "(", "n", "*", "(", "2", "*", "n", "+", "1", ")", ")", "/", "(", "(", "n", "-", "r", ")", "*", "(", "n", "+", "r", "+", "1", ")", ")", "def", "beta", "(", "n", ")", ":", "r", "=", "numpy", ".", "arange", "(", "n", ")", "return", "S", "(", "n", "*", "(", "2", "*", "r", "+", "1", ")", "**", "2", ")", "/", "(", "(", "n", "-", "r", ")", "*", "(", "n", "+", "r", "+", "1", ")", "*", "(", "2", "*", "n", "-", "1", ")", ")", "def", "gamma", "(", "n", ")", ":", "r", "=", "numpy", ".", "arange", "(", "n", "-", "1", ")", "return", "S", "(", "(", "n", "-", "r", "-", "1", ")", "*", "(", "n", "+", "r", ")", "*", "(", "2", "*", "n", "+", "1", ")", ")", "/", "(", "(", "n", "-", "r", ")", "*", "(", "n", "+", "r", "+", "1", ")", "*", "(", "2", "*", "n", "-", "1", ")", ")", "def", "delta", "(", "n", ")", ":", "return", "S", "(", "2", "*", "n", "-", "1", ")", "/", "n", "def", "epsilon", "(", "n", ")", ":", "return", "S", "(", "n", "-", "1", ")", "/", "n", "else", ":", "# The coefficients here are based on the insight that", "#", "# int_T P_{n, r}^2 =", "# int_0^1 L_r^2(t) dt * int_0^1 q_{n,r}(w)^2 (1-w)^(r+s+1) dw.", "#", "# For reference, see", "# page 219 (and the reference to Gould, 1972) in", "#", "# Farouki, Goodman, Sauer,", "# Construction of orthogonal bases for polynomials in Bernstein form", "# on triangular and simplex domains,", "# Computer Aided Geometric Design 20 (2003) 209–230.", "#", "# The Legendre integral is 1/(2*r+1), and one gets", "#", "# int_T P_{n, r}^2 = 1 / (2*r+1) / (2*n+2)", "# sum_{i=0}^{n-r} sum_{j=0}^{n-r}", "# (-1)**(i+j) * binom(n+r+1, i) * binom(n-r, i)", "# * binom(n+r+1, j) * binom(n-r, j)", "# / binom(2*n+1, i+j)", "#", "# Astonishingly, the double sum is always 1, hence", "#", "# int_T P_{n, r}^2 = 1 / (2*r+1) / (2*n+2).", "#", "assert", "standardization", "==", "\"normal\"", "p0", "=", "sqrt", "(", "2", ")", "def", "alpha", "(", "n", ")", ":", "r", "=", "numpy", ".", "arange", "(", "n", ")", "return", "sqrt", "(", "(", "n", "+", "1", ")", "*", "n", ")", "*", "(", "S", "(", "2", "*", "n", "+", "1", ")", "/", "(", "(", "n", "-", "r", ")", "*", "(", "n", "+", "r", "+", "1", ")", ")", ")", "def", "beta", "(", "n", ")", ":", "r", "=", "numpy", ".", "arange", "(", "n", ")", "return", "(", "sqrt", "(", "(", "n", "+", "1", ")", "*", "n", ")", "*", "S", "(", "(", "2", "*", "r", "+", "1", ")", "**", "2", ")", "/", "(", "(", "n", "-", "r", ")", "*", "(", "n", "+", "r", "+", "1", ")", "*", "(", "2", "*", "n", "-", "1", ")", ")", ")", "def", "gamma", "(", "n", ")", ":", "r", "=", "numpy", ".", "arange", "(", "n", "-", "1", ")", "return", "sqrt", "(", "S", "(", "n", "+", "1", ")", "/", "(", "n", "-", "1", ")", ")", "*", "(", "S", "(", "(", "n", "-", "r", "-", "1", ")", "*", "(", "n", "+", "r", ")", "*", "(", "2", "*", "n", "+", "1", ")", ")", "/", "(", "(", "n", "-", "r", ")", "*", "(", "n", "+", "r", "+", "1", ")", "*", "(", "2", "*", "n", "-", "1", ")", ")", ")", "def", "delta", "(", "n", ")", ":", "return", "sqrt", "(", "S", "(", "(", "2", "*", "n", "+", "1", ")", "*", "(", "n", "+", "1", 
")", "*", "(", "2", "*", "n", "-", "1", ")", ")", "/", "n", "**", "3", ")", "def", "epsilon", "(", "n", ")", ":", "return", "sqrt", "(", "S", "(", "(", "2", "*", "n", "+", "1", ")", "*", "(", "n", "+", "1", ")", "*", "(", "n", "-", "1", ")", ")", "/", "(", "(", "2", "*", "n", "-", "3", ")", "*", "n", "**", "2", ")", ")", "u", ",", "v", ",", "w", "=", "bary", "out", "=", "[", "numpy", ".", "array", "(", "[", "numpy", ".", "zeros_like", "(", "u", ")", "+", "p0", "]", ")", "]", "for", "L", "in", "range", "(", "1", ",", "n", "+", "1", ")", ":", "out", ".", "append", "(", "numpy", ".", "concatenate", "(", "[", "out", "[", "L", "-", "1", "]", "*", "(", "numpy", ".", "multiply", ".", "outer", "(", "alpha", "(", "L", ")", ",", "1", "-", "2", "*", "w", ")", ".", "T", "-", "beta", "(", "L", ")", ")", ".", "T", ",", "[", "delta", "(", "L", ")", "*", "out", "[", "L", "-", "1", "]", "[", "L", "-", "1", "]", "*", "(", "u", "-", "v", ")", "]", ",", "]", ")", ")", "if", "L", ">", "1", ":", "out", "[", "-", "1", "]", "[", ":", "L", "-", "1", "]", "-=", "(", "out", "[", "L", "-", "2", "]", ".", "T", "*", "gamma", "(", "L", ")", ")", ".", "T", "out", "[", "-", "1", "]", "[", "-", "1", "]", "-=", "epsilon", "(", "L", ")", "*", "out", "[", "L", "-", "2", "]", "[", "L", "-", "2", "]", "*", "(", "u", "+", "v", ")", "**", "2", "return", "out" ]
Evaluates the entire tree of orthogonal triangle polynomials. The return value is a list of arrays, where `out[k]` hosts the `2*k+1` values of the `k`th level of the tree (0, 0) (0, 1) (1, 1) (0, 2) (1, 2) (2, 2) ... ... ... For reference, see Abedallah Rababah, Recurrence Relations for Orthogonal Polynomials on Triangular Domains, Mathematics 2016, 4(2), 25, <https://doi.org/10.3390/math4020025>.
[ "Evaluates", "the", "entire", "tree", "of", "orthogonal", "triangle", "polynomials", "." ]
train
https://github.com/nschloe/orthopy/blob/64713d0533b0af042810a7535fff411b8e0aea9e/orthopy/triangle/orth.py#L9-L126
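A minimal sketch of evaluating the tree above, assuming the function is in scope (pasted from this record or imported from orthopy's triangle module); the barycentric points and the degree are illustrative, not taken from the source:

    import numpy

    # Three points inside the reference triangle, given as barycentric
    # coordinates: row 0 is u, row 1 is v, row 2 is w, and every column sums to 1.
    bary = numpy.array([
        [0.2, 0.5, 0.1],
        [0.3, 0.25, 0.6],
        [0.5, 0.25, 0.3],
    ])

    # Evaluate the orthonormal ("normal") variant up to degree 3.
    levels = tree(bary, 3, "normal")

    # levels[k] stacks the degree-k polynomials of the tree, each evaluated
    # at the three points above.
    for k, vals in enumerate(levels):
        print(k, vals.shape)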
guinslym/pyexifinfo
pyexifinfo/pyexifinfo.py
check_if_this_file_exist
def check_if_this_file_exist(filename): """Check if this file exist and if it's a directory This function will check if the given filename actually exists and if it's not a Directory Arguments: filename {string} -- filename Return: True : if it's not a directory and if this file exist False : If it's not a file and if it's a directory """ #get the absolute path filename = os.path.abspath(filename) #Boolean this_file_exist = os.path.exists(filename) a_directory = os.path.isdir(filename) result = this_file_exist and not a_directory if result == False: raise ValueError('The filename given was either non existent or was a directory') else: return result
python
def check_if_this_file_exist(filename): """Check if this file exist and if it's a directory This function will check if the given filename actually exists and if it's not a Directory Arguments: filename {string} -- filename Return: True : if it's not a directory and if this file exist False : If it's not a file and if it's a directory """ #get the absolute path filename = os.path.abspath(filename) #Boolean this_file_exist = os.path.exists(filename) a_directory = os.path.isdir(filename) result = this_file_exist and not a_directory if result == False: raise ValueError('The filename given was either non existent or was a directory') else: return result
[ "def", "check_if_this_file_exist", "(", "filename", ")", ":", "#get the absolute path", "filename", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", "#Boolean", "this_file_exist", "=", "os", ".", "path", ".", "exists", "(", "filename", ")", "a_directory", "=", "os", ".", "path", ".", "isdir", "(", "filename", ")", "result", "=", "this_file_exist", "and", "not", "a_directory", "if", "result", "==", "False", ":", "raise", "ValueError", "(", "'The filename given was either non existent or was a directory'", ")", "else", ":", "return", "result" ]
Check if this file exist and if it's a directory This function will check if the given filename actually exists and if it's not a Directory Arguments: filename {string} -- filename Return: True : if it's not a directory and if this file exist False : If it's not a file and if it's a directory
[ "Check", "if", "this", "file", "exist", "and", "if", "it", "s", "a", "directory" ]
train
https://github.com/guinslym/pyexifinfo/blob/56e5b44e77ee17b018a530ec858f19a9c6c07018/pyexifinfo/pyexifinfo.py#L17-L41
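Note that despite the docstring, the function above never actually returns False: a missing path or a directory raises ValueError, and only True is ever returned. A minimal sketch of that behaviour, with a hypothetical path and assuming the helper is in scope (it lives in pyexifinfo/pyexifinfo.py per this record):

    try:
        check_if_this_file_exist('/tmp/example-photo.jpg')   # hypothetical path
        print('regular file; exiftool can be run on it')
    except ValueError:
        print('path is missing or points to a directory')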
guinslym/pyexifinfo
pyexifinfo/pyexifinfo.py
command_line
def command_line(cmd): """Handle the command line call keyword arguments: cmd = a list return 0 if error or a string for the command line output """ try: s = subprocess.Popen(cmd, stdout=subprocess.PIPE) s = s.stdout.read() return s.strip() except subprocess.CalledProcessError: return 0
python
def command_line(cmd): """Handle the command line call keyword arguments: cmd = a list return 0 if error or a string for the command line output """ try: s = subprocess.Popen(cmd, stdout=subprocess.PIPE) s = s.stdout.read() return s.strip() except subprocess.CalledProcessError: return 0
[ "def", "command_line", "(", "cmd", ")", ":", "try", ":", "s", "=", "subprocess", ".", "Popen", "(", "cmd", ",", "stdout", "=", "subprocess", ".", "PIPE", ")", "s", "=", "s", ".", "stdout", ".", "read", "(", ")", "return", "s", ".", "strip", "(", ")", "except", "subprocess", ".", "CalledProcessError", ":", "return", "0" ]
Handle the command line call keyword arguments: cmd = a list return 0 if error or a string for the command line output
[ "Handle", "the", "command", "line", "call" ]
train
https://github.com/guinslym/pyexifinfo/blob/56e5b44e77ee17b018a530ec858f19a9c6c07018/pyexifinfo/pyexifinfo.py#L45-L62
guinslym/pyexifinfo
pyexifinfo/pyexifinfo.py
information
def information(filename): """Returns the file exif""" check_if_this_file_exist(filename) filename = os.path.abspath(filename) result = get_json(filename) result = result[0] return result
python
def information(filename): """Returns the file exif""" check_if_this_file_exist(filename) filename = os.path.abspath(filename) result = get_json(filename) result = result[0] return result
[ "def", "information", "(", "filename", ")", ":", "check_if_this_file_exist", "(", "filename", ")", "filename", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", "result", "=", "get_json", "(", "filename", ")", "result", "=", "result", "[", "0", "]", "return", "result" ]
Returns the file exif
[ "Returns", "the", "file", "exif" ]
train
https://github.com/guinslym/pyexifinfo/blob/56e5b44e77ee17b018a530ec858f19a9c6c07018/pyexifinfo/pyexifinfo.py#L64-L70
guinslym/pyexifinfo
pyexifinfo/pyexifinfo.py
get_json
def get_json(filename): """ Return a json value of the exif Get a filename and return a JSON object Arguments: filename {string} -- your filename Returns: [JSON] -- Return a JSON object """ check_if_this_file_exist(filename) #Process this function filename = os.path.abspath(filename) s = command_line(['exiftool', '-G', '-j', '-sort', filename]) if s: #convert bytes to string s = s.decode('utf-8').rstrip('\r\n') return json.loads(s) else: return s
python
def get_json(filename): """ Return a json value of the exif Get a filename and return a JSON object Arguments: filename {string} -- your filename Returns: [JSON] -- Return a JSON object """ check_if_this_file_exist(filename) #Process this function filename = os.path.abspath(filename) s = command_line(['exiftool', '-G', '-j', '-sort', filename]) if s: #convert bytes to string s = s.decode('utf-8').rstrip('\r\n') return json.loads(s) else: return s
[ "def", "get_json", "(", "filename", ")", ":", "check_if_this_file_exist", "(", "filename", ")", "#Process this function", "filename", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", "s", "=", "command_line", "(", "[", "'exiftool'", ",", "'-G'", ",", "'-j'", ",", "'-sort'", ",", "filename", "]", ")", "if", "s", ":", "#convert bytes to string", "s", "=", "s", ".", "decode", "(", "'utf-8'", ")", ".", "rstrip", "(", "'\\r\\n'", ")", "return", "json", ".", "loads", "(", "s", ")", "else", ":", "return", "s" ]
Return a json value of the exif Get a filename and return a JSON object Arguments: filename {string} -- your filename Returns: [JSON] -- Return a JSON object
[ "Return", "a", "json", "value", "of", "the", "exif" ]
train
https://github.com/guinslym/pyexifinfo/blob/56e5b44e77ee17b018a530ec858f19a9c6c07018/pyexifinfo/pyexifinfo.py#L100-L121
guinslym/pyexifinfo
pyexifinfo/pyexifinfo.py
get_csv
def get_csv(filename): """ Return a csv representation of the exif get a filename and returns a unicode string with a CSV format Arguments: filename {string} -- your filename Returns: [unicode] -- unicode string """ check_if_this_file_exist(filename) #Process this function filename = os.path.abspath(filename) s = command_line(['exiftool', '-G', '-csv', '-sort', filename]) if s: #convert bytes to string s = s.decode('utf-8') return s else: return 0
python
def get_csv(filename): """ Return a csv representation of the exif get a filename and returns a unicode string with a CSV format Arguments: filename {string} -- your filename Returns: [unicode] -- unicode string """ check_if_this_file_exist(filename) #Process this function filename = os.path.abspath(filename) s = command_line(['exiftool', '-G', '-csv', '-sort', filename]) if s: #convert bytes to string s = s.decode('utf-8') return s else: return 0
[ "def", "get_csv", "(", "filename", ")", ":", "check_if_this_file_exist", "(", "filename", ")", "#Process this function", "filename", "=", "os", ".", "path", ".", "abspath", "(", "filename", ")", "s", "=", "command_line", "(", "[", "'exiftool'", ",", "'-G'", ",", "'-csv'", ",", "'-sort'", ",", "filename", "]", ")", "if", "s", ":", "#convert bytes to string", "s", "=", "s", ".", "decode", "(", "'utf-8'", ")", "return", "s", "else", ":", "return", "0" ]
Return a csv representation of the exif get a filename and returns a unicode string with a CSV format Arguments: filename {string} -- your filename Returns: [unicode] -- unicode string
[ "Return", "a", "csv", "representation", "of", "the", "exif" ]
train
https://github.com/guinslym/pyexifinfo/blob/56e5b44e77ee17b018a530ec858f19a9c6c07018/pyexifinfo/pyexifinfo.py#L123-L144
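A hedged usage sketch for the get_json and get_csv helpers above. It assumes the exiftool binary is on the PATH (both functions shell out to it), that the module is importable as shown, and that the file name and tag key are purely illustrative:

    from pyexifinfo.pyexifinfo import get_json, get_csv   # import path assumed from this record

    data = get_json('holiday.jpg')                 # hypothetical image file
    if data:
        tags = data[0]                             # exiftool -j yields a one-element list per file
        print(tags.get('EXIF:DateTimeOriginal', 'no EXIF date found'))

    print(get_csv('holiday.jpg'))                  # same metadata as a CSV-formatted string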
guinslym/pyexifinfo
setup.py
print_a_header
def print_a_header(message="-"): """Header This function will output a message in a header Keyword Arguments: message {str} -- [the message string] (default: {"-"}) """ print("-".center(60,'-')) print(message.center(60,'-')) print("-".center(60,'-')) print()
python
def print_a_header(message="-"): """Header This function will output a message in a header Keyword Arguments: message {str} -- [the message string] (default: {"-"}) """ print("-".center(60,'-')) print(message.center(60,'-')) print("-".center(60,'-')) print()
[ "def", "print_a_header", "(", "message", "=", "\"-\"", ")", ":", "print", "(", "\"-\"", ".", "center", "(", "60", ",", "'-'", ")", ")", "print", "(", "message", ".", "center", "(", "60", ",", "'-'", ")", ")", "print", "(", "\"-\"", ".", "center", "(", "60", ",", "'-'", ")", ")", "print", "(", ")" ]
Header This function will output a message in a header Keyword Arguments: message {str} -- [the message string] (default: {"-"})
[ "Header" ]
train
https://github.com/guinslym/pyexifinfo/blob/56e5b44e77ee17b018a530ec858f19a9c6c07018/setup.py#L22-L33
guinslym/pyexifinfo
setup.py
check_if_exiftool_is_already_installed
def check_if_exiftool_is_already_installed(): """Requirements This function will check if Exiftool is installed on your system Return: True if Exiftool is Installed False if not """ result = 1; command = ["exiftool", "-ver"] with open(os.devnull, "w") as fnull: result = subprocess.call( command, stdout = fnull, stderr = fnull ) #Exiftool is not installed if result != 0: print_a_header('Exiftool needs to be installed on your system') print_a_header('Visit http://www.sno.phy.queensu.ca/~phil/exiftool/') return False else: return True
python
def check_if_exiftool_is_already_installed(): """Requirements This function will check if Exiftool is installed on your system Return: True if Exiftool is Installed False if not """ result = 1; command = ["exiftool", "-ver"] with open(os.devnull, "w") as fnull: result = subprocess.call( command, stdout = fnull, stderr = fnull ) #Exiftool is not installed if result != 0: print_a_header('Exiftool needs to be installed on your system') print_a_header('Visit http://www.sno.phy.queensu.ca/~phil/exiftool/') return False else: return True
[ "def", "check_if_exiftool_is_already_installed", "(", ")", ":", "result", "=", "1", "command", "=", "[", "\"exiftool\"", ",", "\"-ver\"", "]", "with", "open", "(", "os", ".", "devnull", ",", "\"w\"", ")", "as", "fnull", ":", "result", "=", "subprocess", ".", "call", "(", "command", ",", "stdout", "=", "fnull", ",", "stderr", "=", "fnull", ")", "#Exiftool is not installed", "if", "result", "!=", "0", ":", "print_a_header", "(", "'Exiftool needs to be installed on your system'", ")", "print_a_header", "(", "'Visit http://www.sno.phy.queensu.ca/~phil/exiftool/'", ")", "return", "False", "else", ":", "return", "True" ]
Requirements This function will check if Exiftool is installed on your system Return: True if Exiftool is Installed False if not
[ "Requirements" ]
train
https://github.com/guinslym/pyexifinfo/blob/56e5b44e77ee17b018a530ec858f19a9c6c07018/setup.py#L35-L59
artemrizhov/django-mail-templated
mail_templated/message.py
EmailMessage.render
def render(self, context=None, clean=False): """ Render email with provided context Arguments --------- context : dict |context| If not specified then the :attr:`~mail_templated.EmailMessage.context` property is used. Keyword Arguments ----------------- clean : bool If ``True``, remove any template specific properties from the message object. Default is ``False``. """ # Load template if it is not loaded yet. if not self.template: self.load_template(self.template_name) # The signature of the `render()` method was changed in Django 1.7. # https://docs.djangoproject.com/en/1.8/ref/templates/upgrading/#get-template-and-select-template if hasattr(self.template, 'template'): context = (context or self.context).copy() else: context = Context(context or self.context) # Add tag strings to the context. context.update(self.extra_context) result = self.template.render(context) # Don't overwrite default value with empty one. subject = self._get_block(result, 'subject') if subject: self.subject = self._get_block(result, 'subject') body = self._get_block(result, 'body') is_html_body = False # The html block is optional, and it also may be set manually. html = self._get_block(result, 'html') if html: if not body: # This is an html message without plain text part. body = html is_html_body = True else: # Add alternative content. self.attach_alternative(html, 'text/html') # Don't overwrite default value with empty one. if body: self.body = body if is_html_body: self.content_subtype = 'html' self._is_rendered = True if clean: self.clean()
python
def render(self, context=None, clean=False): """ Render email with provided context Arguments --------- context : dict |context| If not specified then the :attr:`~mail_templated.EmailMessage.context` property is used. Keyword Arguments ----------------- clean : bool If ``True``, remove any template specific properties from the message object. Default is ``False``. """ # Load template if it is not loaded yet. if not self.template: self.load_template(self.template_name) # The signature of the `render()` method was changed in Django 1.7. # https://docs.djangoproject.com/en/1.8/ref/templates/upgrading/#get-template-and-select-template if hasattr(self.template, 'template'): context = (context or self.context).copy() else: context = Context(context or self.context) # Add tag strings to the context. context.update(self.extra_context) result = self.template.render(context) # Don't overwrite default value with empty one. subject = self._get_block(result, 'subject') if subject: self.subject = self._get_block(result, 'subject') body = self._get_block(result, 'body') is_html_body = False # The html block is optional, and it also may be set manually. html = self._get_block(result, 'html') if html: if not body: # This is an html message without plain text part. body = html is_html_body = True else: # Add alternative content. self.attach_alternative(html, 'text/html') # Don't overwrite default value with empty one. if body: self.body = body if is_html_body: self.content_subtype = 'html' self._is_rendered = True if clean: self.clean()
[ "def", "render", "(", "self", ",", "context", "=", "None", ",", "clean", "=", "False", ")", ":", "# Load template if it is not loaded yet.", "if", "not", "self", ".", "template", ":", "self", ".", "load_template", "(", "self", ".", "template_name", ")", "# The signature of the `render()` method was changed in Django 1.7.", "# https://docs.djangoproject.com/en/1.8/ref/templates/upgrading/#get-template-and-select-template", "if", "hasattr", "(", "self", ".", "template", ",", "'template'", ")", ":", "context", "=", "(", "context", "or", "self", ".", "context", ")", ".", "copy", "(", ")", "else", ":", "context", "=", "Context", "(", "context", "or", "self", ".", "context", ")", "# Add tag strings to the context.", "context", ".", "update", "(", "self", ".", "extra_context", ")", "result", "=", "self", ".", "template", ".", "render", "(", "context", ")", "# Don't overwrite default value with empty one.", "subject", "=", "self", ".", "_get_block", "(", "result", ",", "'subject'", ")", "if", "subject", ":", "self", ".", "subject", "=", "self", ".", "_get_block", "(", "result", ",", "'subject'", ")", "body", "=", "self", ".", "_get_block", "(", "result", ",", "'body'", ")", "is_html_body", "=", "False", "# The html block is optional, and it also may be set manually.", "html", "=", "self", ".", "_get_block", "(", "result", ",", "'html'", ")", "if", "html", ":", "if", "not", "body", ":", "# This is an html message without plain text part.", "body", "=", "html", "is_html_body", "=", "True", "else", ":", "# Add alternative content.", "self", ".", "attach_alternative", "(", "html", ",", "'text/html'", ")", "# Don't overwrite default value with empty one.", "if", "body", ":", "self", ".", "body", "=", "body", "if", "is_html_body", ":", "self", ".", "content_subtype", "=", "'html'", "self", ".", "_is_rendered", "=", "True", "if", "clean", ":", "self", ".", "clean", "(", ")" ]
Render email with provided context Arguments --------- context : dict |context| If not specified then the :attr:`~mail_templated.EmailMessage.context` property is used. Keyword Arguments ----------------- clean : bool If ``True``, remove any template specific properties from the message object. Default is ``False``.
[ "Render", "email", "with", "provided", "context" ]
train
https://github.com/artemrizhov/django-mail-templated/blob/1b428e7b6e02a5cf775bc83d6f5fd8c5f56d7932/mail_templated/message.py#L153-L205
artemrizhov/django-mail-templated
mail_templated/message.py
EmailMessage.send
def send(self, *args, **kwargs): """ Send email message, render if it is not rendered yet. Note ---- Any extra arguments are passed to :class:`EmailMultiAlternatives.send() <django.core.mail.EmailMessage>`. Keyword Arguments ----------------- clean : bool If ``True``, remove any template specific properties from the message object. Default is ``False``. """ clean = kwargs.pop('clean', False) if not self._is_rendered: self.render() if clean: self.clean() return super(EmailMessage, self).send(*args, **kwargs)
python
def send(self, *args, **kwargs): """ Send email message, render if it is not rendered yet. Note ---- Any extra arguments are passed to :class:`EmailMultiAlternatives.send() <django.core.mail.EmailMessage>`. Keyword Arguments ----------------- clean : bool If ``True``, remove any template specific properties from the message object. Default is ``False``. """ clean = kwargs.pop('clean', False) if not self._is_rendered: self.render() if clean: self.clean() return super(EmailMessage, self).send(*args, **kwargs)
[ "def", "send", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "clean", "=", "kwargs", ".", "pop", "(", "'clean'", ",", "False", ")", "if", "not", "self", ".", "_is_rendered", ":", "self", ".", "render", "(", ")", "if", "clean", ":", "self", ".", "clean", "(", ")", "return", "super", "(", "EmailMessage", ",", "self", ")", ".", "send", "(", "*", "args", ",", "*", "*", "kwargs", ")" ]
Send email message, render if it is not rendered yet. Note ---- Any extra arguments are passed to :class:`EmailMultiAlternatives.send() <django.core.mail.EmailMessage>`. Keyword Arguments ----------------- clean : bool If ``True``, remove any template specific properties from the message object. Default is ``False``.
[ "Send", "email", "message", "render", "if", "it", "is", "not", "rendered", "yet", "." ]
train
https://github.com/artemrizhov/django-mail-templated/blob/1b428e7b6e02a5cf775bc83d6f5fd8c5f56d7932/mail_templated/message.py#L207-L227
artemrizhov/django-mail-templated
mail_templated/utils.py
send_mail
def send_mail(template_name, context, from_email, recipient_list, fail_silently=False, auth_user=None, auth_password=None, connection=None, **kwargs): """ Easy wrapper for sending a single email message to a recipient list using django template system. It works almost the same way as the standard :func:`send_mail()<django.core.mail.send_mail>` function. .. |main_difference| replace:: The main difference is that two first arguments ``subject`` and ``body`` are replaced with ``template_name`` and ``context``. However you still can pass subject or body as keyword arguments to provide static content if needed. |main_difference| The ``template_name``, ``context``, ``from_email`` and ``recipient_list`` parameters are required. Note ---- |args_note| Arguments --------- template_name : str |template_name| context : dict |context| from_email : str |from_email| recipient_list : list |recipient_list| Keyword Arguments ----------------- fail_silently : bool If it's False, send_mail will raise an :exc:`smtplib.SMTPException`. See the :mod:`smtplib` docs for a list of possible exceptions, all of which are subclasses of :exc:`smtplib.SMTPException`. auth_user | str The optional username to use to authenticate to the SMTP server. If this isn't provided, Django will use the value of the :django:setting:`EMAIL_HOST_USER` setting. auth_password | str The optional password to use to authenticate to the SMTP server. If this isn't provided, Django will use the value of the :django:setting:`EMAIL_HOST_PASSWORD` setting. connection : EmailBackend The optional email backend to use to send the mail. If unspecified, an instance of the default backend will be used. See the documentation on :ref:`Email backends<django:topic-email-backends>` for more details. subject : str |subject| body : str |body| render : bool |render| Returns ------- int The number of successfully delivered messages (which can be 0 or 1 since it can only send one message). See Also -------- :func:`django.core.mail.send_mail` Documentation for the standard ``send_mail()`` function. """ connection = connection or mail.get_connection(username=auth_user, password=auth_password, fail_silently=fail_silently) clean = kwargs.pop('clean', True) return EmailMessage( template_name, context, from_email, recipient_list, connection=connection, **kwargs).send(clean=clean)
python
def send_mail(template_name, context, from_email, recipient_list, fail_silently=False, auth_user=None, auth_password=None, connection=None, **kwargs): """ Easy wrapper for sending a single email message to a recipient list using django template system. It works almost the same way as the standard :func:`send_mail()<django.core.mail.send_mail>` function. .. |main_difference| replace:: The main difference is that two first arguments ``subject`` and ``body`` are replaced with ``template_name`` and ``context``. However you still can pass subject or body as keyword arguments to provide static content if needed. |main_difference| The ``template_name``, ``context``, ``from_email`` and ``recipient_list`` parameters are required. Note ---- |args_note| Arguments --------- template_name : str |template_name| context : dict |context| from_email : str |from_email| recipient_list : list |recipient_list| Keyword Arguments ----------------- fail_silently : bool If it's False, send_mail will raise an :exc:`smtplib.SMTPException`. See the :mod:`smtplib` docs for a list of possible exceptions, all of which are subclasses of :exc:`smtplib.SMTPException`. auth_user | str The optional username to use to authenticate to the SMTP server. If this isn't provided, Django will use the value of the :django:setting:`EMAIL_HOST_USER` setting. auth_password | str The optional password to use to authenticate to the SMTP server. If this isn't provided, Django will use the value of the :django:setting:`EMAIL_HOST_PASSWORD` setting. connection : EmailBackend The optional email backend to use to send the mail. If unspecified, an instance of the default backend will be used. See the documentation on :ref:`Email backends<django:topic-email-backends>` for more details. subject : str |subject| body : str |body| render : bool |render| Returns ------- int The number of successfully delivered messages (which can be 0 or 1 since it can only send one message). See Also -------- :func:`django.core.mail.send_mail` Documentation for the standard ``send_mail()`` function. """ connection = connection or mail.get_connection(username=auth_user, password=auth_password, fail_silently=fail_silently) clean = kwargs.pop('clean', True) return EmailMessage( template_name, context, from_email, recipient_list, connection=connection, **kwargs).send(clean=clean)
[ "def", "send_mail", "(", "template_name", ",", "context", ",", "from_email", ",", "recipient_list", ",", "fail_silently", "=", "False", ",", "auth_user", "=", "None", ",", "auth_password", "=", "None", ",", "connection", "=", "None", ",", "*", "*", "kwargs", ")", ":", "connection", "=", "connection", "or", "mail", ".", "get_connection", "(", "username", "=", "auth_user", ",", "password", "=", "auth_password", ",", "fail_silently", "=", "fail_silently", ")", "clean", "=", "kwargs", ".", "pop", "(", "'clean'", ",", "True", ")", "return", "EmailMessage", "(", "template_name", ",", "context", ",", "from_email", ",", "recipient_list", ",", "connection", "=", "connection", ",", "*", "*", "kwargs", ")", ".", "send", "(", "clean", "=", "clean", ")" ]
Easy wrapper for sending a single email message to a recipient list using django template system. It works almost the same way as the standard :func:`send_mail()<django.core.mail.send_mail>` function. .. |main_difference| replace:: The main difference is that two first arguments ``subject`` and ``body`` are replaced with ``template_name`` and ``context``. However you still can pass subject or body as keyword arguments to provide static content if needed. |main_difference| The ``template_name``, ``context``, ``from_email`` and ``recipient_list`` parameters are required. Note ---- |args_note| Arguments --------- template_name : str |template_name| context : dict |context| from_email : str |from_email| recipient_list : list |recipient_list| Keyword Arguments ----------------- fail_silently : bool If it's False, send_mail will raise an :exc:`smtplib.SMTPException`. See the :mod:`smtplib` docs for a list of possible exceptions, all of which are subclasses of :exc:`smtplib.SMTPException`. auth_user | str The optional username to use to authenticate to the SMTP server. If this isn't provided, Django will use the value of the :django:setting:`EMAIL_HOST_USER` setting. auth_password | str The optional password to use to authenticate to the SMTP server. If this isn't provided, Django will use the value of the :django:setting:`EMAIL_HOST_PASSWORD` setting. connection : EmailBackend The optional email backend to use to send the mail. If unspecified, an instance of the default backend will be used. See the documentation on :ref:`Email backends<django:topic-email-backends>` for more details. subject : str |subject| body : str |body| render : bool |render| Returns ------- int The number of successfully delivered messages (which can be 0 or 1 since it can only send one message). See Also -------- :func:`django.core.mail.send_mail` Documentation for the standard ``send_mail()`` function.
[ "Easy", "wrapper", "for", "sending", "a", "single", "email", "message", "to", "a", "recipient", "list", "using", "django", "template", "system", "." ]
train
https://github.com/artemrizhov/django-mail-templated/blob/1b428e7b6e02a5cf775bc83d6f5fd8c5f56d7932/mail_templated/utils.py#L13-L93
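A sketch of how the template blocks and the send_mail wrapper above fit together, assuming a configured Django project with django-mail-templated installed; the template path, context keys, and addresses are illustrative. The block names come directly from the render() code in the EmailMessage record above:

    from mail_templated import send_mail   # public import path assumed

    # The template (e.g. 'email/hello.tpl', name illustrative) is expected to define
    # {% block subject %} and {% block body %}, plus an optional {% block html %};
    # render() extracts exactly those three blocks.
    send_mail(
        'email/hello.tpl',
        {'user_name': 'Alice'},        # context passed to the template
        'from@example.com',
        ['to@example.com'],
        fail_silently=False,
    )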
brianhie/scanorama
bin/unsupervised.py
silhouette_score
def silhouette_score(X, labels, metric='euclidean', sample_size=None, random_state=None, **kwds): """Compute the mean Silhouette Coefficient of all samples. The Silhouette Coefficient is calculated using the mean intra-cluster distance (``a``) and the mean nearest-cluster distance (``b``) for each sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a, b)``. To clarify, ``b`` is the distance between a sample and the nearest cluster that the sample is not a part of. Note that Silhouette Coefficient is only defined if number of labels is 2 <= n_labels <= n_samples - 1. This function returns the mean Silhouette Coefficient over all samples. To obtain the values for each sample, use :func:`silhouette_samples`. The best value is 1 and the worst value is -1. Values near 0 indicate overlapping clusters. Negative values generally indicate that a sample has been assigned to the wrong cluster, as a different cluster is more similar. Read more in the :ref:`User Guide <silhouette_coefficient>`. Parameters ---------- X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \ [n_samples_a, n_features] otherwise Array of pairwise distances between samples, or a feature array. labels : array, shape = [n_samples] Predicted labels for each sample. metric : string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options allowed by :func:`metrics.pairwise.pairwise_distances <sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance array itself, use ``metric="precomputed"``. sample_size : int or None The size of the sample to use when computing the Silhouette Coefficient on a random subset of the data. If ``sample_size is None``, no sampling is used. random_state : int, RandomState instance or None, optional (default=None) The generator used to randomly select a subset of samples. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Used when ``sample_size is not None``. **kwds : optional keyword parameters Any further parameters are passed directly to the distance function. If using a scipy.spatial.distance metric, the parameters are still metric dependent. See the scipy docs for usage examples. Returns ------- silhouette : float Mean Silhouette Coefficient for all samples. References ---------- .. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the Interpretation and Validation of Cluster Analysis". Computational and Applied Mathematics 20: 53-65. <http://www.sciencedirect.com/science/article/pii/0377042787901257>`_ .. [2] `Wikipedia entry on the Silhouette Coefficient <https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_ """ if sample_size is not None: X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr']) random_state = check_random_state(random_state) indices = random_state.permutation(X.shape[0])[:sample_size] if metric == "precomputed": X, labels = X[indices].T[indices].T, labels[indices] else: X, labels = X[indices], labels[indices] return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
python
def silhouette_score(X, labels, metric='euclidean', sample_size=None, random_state=None, **kwds): """Compute the mean Silhouette Coefficient of all samples. The Silhouette Coefficient is calculated using the mean intra-cluster distance (``a``) and the mean nearest-cluster distance (``b``) for each sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a, b)``. To clarify, ``b`` is the distance between a sample and the nearest cluster that the sample is not a part of. Note that Silhouette Coefficient is only defined if number of labels is 2 <= n_labels <= n_samples - 1. This function returns the mean Silhouette Coefficient over all samples. To obtain the values for each sample, use :func:`silhouette_samples`. The best value is 1 and the worst value is -1. Values near 0 indicate overlapping clusters. Negative values generally indicate that a sample has been assigned to the wrong cluster, as a different cluster is more similar. Read more in the :ref:`User Guide <silhouette_coefficient>`. Parameters ---------- X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \ [n_samples_a, n_features] otherwise Array of pairwise distances between samples, or a feature array. labels : array, shape = [n_samples] Predicted labels for each sample. metric : string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options allowed by :func:`metrics.pairwise.pairwise_distances <sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance array itself, use ``metric="precomputed"``. sample_size : int or None The size of the sample to use when computing the Silhouette Coefficient on a random subset of the data. If ``sample_size is None``, no sampling is used. random_state : int, RandomState instance or None, optional (default=None) The generator used to randomly select a subset of samples. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Used when ``sample_size is not None``. **kwds : optional keyword parameters Any further parameters are passed directly to the distance function. If using a scipy.spatial.distance metric, the parameters are still metric dependent. See the scipy docs for usage examples. Returns ------- silhouette : float Mean Silhouette Coefficient for all samples. References ---------- .. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the Interpretation and Validation of Cluster Analysis". Computational and Applied Mathematics 20: 53-65. <http://www.sciencedirect.com/science/article/pii/0377042787901257>`_ .. [2] `Wikipedia entry on the Silhouette Coefficient <https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_ """ if sample_size is not None: X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr']) random_state = check_random_state(random_state) indices = random_state.permutation(X.shape[0])[:sample_size] if metric == "precomputed": X, labels = X[indices].T[indices].T, labels[indices] else: X, labels = X[indices], labels[indices] return np.mean(silhouette_samples(X, labels, metric=metric, **kwds))
[ "def", "silhouette_score", "(", "X", ",", "labels", ",", "metric", "=", "'euclidean'", ",", "sample_size", "=", "None", ",", "random_state", "=", "None", ",", "*", "*", "kwds", ")", ":", "if", "sample_size", "is", "not", "None", ":", "X", ",", "labels", "=", "check_X_y", "(", "X", ",", "labels", ",", "accept_sparse", "=", "[", "'csc'", ",", "'csr'", "]", ")", "random_state", "=", "check_random_state", "(", "random_state", ")", "indices", "=", "random_state", ".", "permutation", "(", "X", ".", "shape", "[", "0", "]", ")", "[", ":", "sample_size", "]", "if", "metric", "==", "\"precomputed\"", ":", "X", ",", "labels", "=", "X", "[", "indices", "]", ".", "T", "[", "indices", "]", ".", "T", ",", "labels", "[", "indices", "]", "else", ":", "X", ",", "labels", "=", "X", "[", "indices", "]", ",", "labels", "[", "indices", "]", "return", "np", ".", "mean", "(", "silhouette_samples", "(", "X", ",", "labels", ",", "metric", "=", "metric", ",", "*", "*", "kwds", ")", ")" ]
Compute the mean Silhouette Coefficient of all samples. The Silhouette Coefficient is calculated using the mean intra-cluster distance (``a``) and the mean nearest-cluster distance (``b``) for each sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a, b)``. To clarify, ``b`` is the distance between a sample and the nearest cluster that the sample is not a part of. Note that Silhouette Coefficient is only defined if number of labels is 2 <= n_labels <= n_samples - 1. This function returns the mean Silhouette Coefficient over all samples. To obtain the values for each sample, use :func:`silhouette_samples`. The best value is 1 and the worst value is -1. Values near 0 indicate overlapping clusters. Negative values generally indicate that a sample has been assigned to the wrong cluster, as a different cluster is more similar. Read more in the :ref:`User Guide <silhouette_coefficient>`. Parameters ---------- X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \ [n_samples_a, n_features] otherwise Array of pairwise distances between samples, or a feature array. labels : array, shape = [n_samples] Predicted labels for each sample. metric : string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options allowed by :func:`metrics.pairwise.pairwise_distances <sklearn.metrics.pairwise.pairwise_distances>`. If X is the distance array itself, use ``metric="precomputed"``. sample_size : int or None The size of the sample to use when computing the Silhouette Coefficient on a random subset of the data. If ``sample_size is None``, no sampling is used. random_state : int, RandomState instance or None, optional (default=None) The generator used to randomly select a subset of samples. If int, random_state is the seed used by the random number generator; If RandomState instance, random_state is the random number generator; If None, the random number generator is the RandomState instance used by `np.random`. Used when ``sample_size is not None``. **kwds : optional keyword parameters Any further parameters are passed directly to the distance function. If using a scipy.spatial.distance metric, the parameters are still metric dependent. See the scipy docs for usage examples. Returns ------- silhouette : float Mean Silhouette Coefficient for all samples. References ---------- .. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the Interpretation and Validation of Cluster Analysis". Computational and Applied Mathematics 20: 53-65. <http://www.sciencedirect.com/science/article/pii/0377042787901257>`_ .. [2] `Wikipedia entry on the Silhouette Coefficient <https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
[ "Compute", "the", "mean", "Silhouette", "Coefficient", "of", "all", "samples", "." ]
train
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/bin/unsupervised.py#L27-L106
brianhie/scanorama
bin/unsupervised.py
silhouette_samples
def silhouette_samples(X, labels, metric='euclidean', **kwds): """Compute the Silhouette Coefficient for each sample. The Silhouette Coefficient is a measure of how well samples are clustered with samples that are similar to themselves. Clustering models with a high Silhouette Coefficient are said to be dense, where samples in the same cluster are similar to each other, and well separated, where samples in different clusters are not very similar to each other. The Silhouette Coefficient is calculated using the mean intra-cluster distance (``a``) and the mean nearest-cluster distance (``b``) for each sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a, b)``. Note that Silhouette Coefficient is only defined if number of labels is 2 <= n_labels <= n_samples - 1. This function returns the Silhouette Coefficient for each sample. The best value is 1 and the worst value is -1. Values near 0 indicate overlapping clusters. Read more in the :ref:`User Guide <silhouette_coefficient>`. Parameters ---------- X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \ [n_samples_a, n_features] otherwise Array of pairwise distances between samples, or a feature array. labels : array, shape = [n_samples] label values for each sample metric : string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is the distance array itself, use "precomputed" as the metric. **kwds : optional keyword parameters Any further parameters are passed directly to the distance function. If using a ``scipy.spatial.distance`` metric, the parameters are still metric dependent. See the scipy docs for usage examples. Returns ------- silhouette : array, shape = [n_samples] Silhouette Coefficient for each samples. References ---------- .. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the Interpretation and Validation of Cluster Analysis". Computational and Applied Mathematics 20: 53-65. <http://www.sciencedirect.com/science/article/pii/0377042787901257>`_ .. [2] `Wikipedia entry on the Silhouette Coefficient <https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_ """ X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr']) le = LabelEncoder() labels = le.fit_transform(labels) check_number_of_labels(len(le.classes_), X.shape[0]) distances = pairwise_distances(X, metric=metric, **kwds) unique_labels = le.classes_ n_samples_per_label = np.bincount(labels, minlength=len(unique_labels)) # For sample i, store the mean distance of the cluster to which # it belongs in intra_clust_dists[i] intra_clust_dists = np.zeros(distances.shape[0], dtype=distances.dtype) # For sample i, store the mean distance of the second closest # cluster in inter_clust_dists[i] inter_clust_dists = np.inf + intra_clust_dists for curr_label in range(len(unique_labels)): # Find inter_clust_dist for all samples belonging to the same # label. mask = labels == curr_label current_distances = distances[mask] # Leave out current sample. n_samples_curr_lab = n_samples_per_label[curr_label] - 1 if n_samples_curr_lab != 0: intra_clust_dists[mask] = np.sum( current_distances[:, mask], axis=1) / n_samples_curr_lab # Now iterate over all other labels, finding the mean # cluster distance that is closest to every sample. 
for other_label in range(len(unique_labels)): if other_label != curr_label: other_mask = labels == other_label other_distances = np.mean( current_distances[:, other_mask], axis=1) inter_clust_dists[mask] = np.minimum( inter_clust_dists[mask], other_distances) sil_samples = inter_clust_dists - intra_clust_dists sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists) # score 0 for clusters of size 1, according to the paper sil_samples[n_samples_per_label.take(labels) == 1] = 0 return sil_samples
python
def silhouette_samples(X, labels, metric='euclidean', **kwds): """Compute the Silhouette Coefficient for each sample. The Silhouette Coefficient is a measure of how well samples are clustered with samples that are similar to themselves. Clustering models with a high Silhouette Coefficient are said to be dense, where samples in the same cluster are similar to each other, and well separated, where samples in different clusters are not very similar to each other. The Silhouette Coefficient is calculated using the mean intra-cluster distance (``a``) and the mean nearest-cluster distance (``b``) for each sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a, b)``. Note that Silhouette Coefficient is only defined if number of labels is 2 <= n_labels <= n_samples - 1. This function returns the Silhouette Coefficient for each sample. The best value is 1 and the worst value is -1. Values near 0 indicate overlapping clusters. Read more in the :ref:`User Guide <silhouette_coefficient>`. Parameters ---------- X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \ [n_samples_a, n_features] otherwise Array of pairwise distances between samples, or a feature array. labels : array, shape = [n_samples] label values for each sample metric : string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is the distance array itself, use "precomputed" as the metric. **kwds : optional keyword parameters Any further parameters are passed directly to the distance function. If using a ``scipy.spatial.distance`` metric, the parameters are still metric dependent. See the scipy docs for usage examples. Returns ------- silhouette : array, shape = [n_samples] Silhouette Coefficient for each samples. References ---------- .. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the Interpretation and Validation of Cluster Analysis". Computational and Applied Mathematics 20: 53-65. <http://www.sciencedirect.com/science/article/pii/0377042787901257>`_ .. [2] `Wikipedia entry on the Silhouette Coefficient <https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_ """ X, labels = check_X_y(X, labels, accept_sparse=['csc', 'csr']) le = LabelEncoder() labels = le.fit_transform(labels) check_number_of_labels(len(le.classes_), X.shape[0]) distances = pairwise_distances(X, metric=metric, **kwds) unique_labels = le.classes_ n_samples_per_label = np.bincount(labels, minlength=len(unique_labels)) # For sample i, store the mean distance of the cluster to which # it belongs in intra_clust_dists[i] intra_clust_dists = np.zeros(distances.shape[0], dtype=distances.dtype) # For sample i, store the mean distance of the second closest # cluster in inter_clust_dists[i] inter_clust_dists = np.inf + intra_clust_dists for curr_label in range(len(unique_labels)): # Find inter_clust_dist for all samples belonging to the same # label. mask = labels == curr_label current_distances = distances[mask] # Leave out current sample. n_samples_curr_lab = n_samples_per_label[curr_label] - 1 if n_samples_curr_lab != 0: intra_clust_dists[mask] = np.sum( current_distances[:, mask], axis=1) / n_samples_curr_lab # Now iterate over all other labels, finding the mean # cluster distance that is closest to every sample. 
for other_label in range(len(unique_labels)): if other_label != curr_label: other_mask = labels == other_label other_distances = np.mean( current_distances[:, other_mask], axis=1) inter_clust_dists[mask] = np.minimum( inter_clust_dists[mask], other_distances) sil_samples = inter_clust_dists - intra_clust_dists sil_samples /= np.maximum(intra_clust_dists, inter_clust_dists) # score 0 for clusters of size 1, according to the paper sil_samples[n_samples_per_label.take(labels) == 1] = 0 return sil_samples
[ "def", "silhouette_samples", "(", "X", ",", "labels", ",", "metric", "=", "'euclidean'", ",", "*", "*", "kwds", ")", ":", "X", ",", "labels", "=", "check_X_y", "(", "X", ",", "labels", ",", "accept_sparse", "=", "[", "'csc'", ",", "'csr'", "]", ")", "le", "=", "LabelEncoder", "(", ")", "labels", "=", "le", ".", "fit_transform", "(", "labels", ")", "check_number_of_labels", "(", "len", "(", "le", ".", "classes_", ")", ",", "X", ".", "shape", "[", "0", "]", ")", "distances", "=", "pairwise_distances", "(", "X", ",", "metric", "=", "metric", ",", "*", "*", "kwds", ")", "unique_labels", "=", "le", ".", "classes_", "n_samples_per_label", "=", "np", ".", "bincount", "(", "labels", ",", "minlength", "=", "len", "(", "unique_labels", ")", ")", "# For sample i, store the mean distance of the cluster to which", "# it belongs in intra_clust_dists[i]", "intra_clust_dists", "=", "np", ".", "zeros", "(", "distances", ".", "shape", "[", "0", "]", ",", "dtype", "=", "distances", ".", "dtype", ")", "# For sample i, store the mean distance of the second closest", "# cluster in inter_clust_dists[i]", "inter_clust_dists", "=", "np", ".", "inf", "+", "intra_clust_dists", "for", "curr_label", "in", "range", "(", "len", "(", "unique_labels", ")", ")", ":", "# Find inter_clust_dist for all samples belonging to the same", "# label.", "mask", "=", "labels", "==", "curr_label", "current_distances", "=", "distances", "[", "mask", "]", "# Leave out current sample.", "n_samples_curr_lab", "=", "n_samples_per_label", "[", "curr_label", "]", "-", "1", "if", "n_samples_curr_lab", "!=", "0", ":", "intra_clust_dists", "[", "mask", "]", "=", "np", ".", "sum", "(", "current_distances", "[", ":", ",", "mask", "]", ",", "axis", "=", "1", ")", "/", "n_samples_curr_lab", "# Now iterate over all other labels, finding the mean", "# cluster distance that is closest to every sample.", "for", "other_label", "in", "range", "(", "len", "(", "unique_labels", ")", ")", ":", "if", "other_label", "!=", "curr_label", ":", "other_mask", "=", "labels", "==", "other_label", "other_distances", "=", "np", ".", "mean", "(", "current_distances", "[", ":", ",", "other_mask", "]", ",", "axis", "=", "1", ")", "inter_clust_dists", "[", "mask", "]", "=", "np", ".", "minimum", "(", "inter_clust_dists", "[", "mask", "]", ",", "other_distances", ")", "sil_samples", "=", "inter_clust_dists", "-", "intra_clust_dists", "sil_samples", "/=", "np", ".", "maximum", "(", "intra_clust_dists", ",", "inter_clust_dists", ")", "# score 0 for clusters of size 1, according to the paper", "sil_samples", "[", "n_samples_per_label", ".", "take", "(", "labels", ")", "==", "1", "]", "=", "0", "return", "sil_samples" ]
Compute the Silhouette Coefficient for each sample. The Silhouette Coefficient is a measure of how well samples are clustered with samples that are similar to themselves. Clustering models with a high Silhouette Coefficient are said to be dense, where samples in the same cluster are similar to each other, and well separated, where samples in different clusters are not very similar to each other. The Silhouette Coefficient is calculated using the mean intra-cluster distance (``a``) and the mean nearest-cluster distance (``b``) for each sample. The Silhouette Coefficient for a sample is ``(b - a) / max(a, b)``. Note that Silhouette Coefficient is only defined if number of labels is 2 <= n_labels <= n_samples - 1. This function returns the Silhouette Coefficient for each sample. The best value is 1 and the worst value is -1. Values near 0 indicate overlapping clusters. Read more in the :ref:`User Guide <silhouette_coefficient>`. Parameters ---------- X : array [n_samples_a, n_samples_a] if metric == "precomputed", or, \ [n_samples_a, n_features] otherwise Array of pairwise distances between samples, or a feature array. labels : array, shape = [n_samples] label values for each sample metric : string, or callable The metric to use when calculating distance between instances in a feature array. If metric is a string, it must be one of the options allowed by :func:`sklearn.metrics.pairwise.pairwise_distances`. If X is the distance array itself, use "precomputed" as the metric. **kwds : optional keyword parameters Any further parameters are passed directly to the distance function. If using a ``scipy.spatial.distance`` metric, the parameters are still metric dependent. See the scipy docs for usage examples. Returns ------- silhouette : array, shape = [n_samples] Silhouette Coefficient for each samples. References ---------- .. [1] `Peter J. Rousseeuw (1987). "Silhouettes: a Graphical Aid to the Interpretation and Validation of Cluster Analysis". Computational and Applied Mathematics 20: 53-65. <http://www.sciencedirect.com/science/article/pii/0377042787901257>`_ .. [2] `Wikipedia entry on the Silhouette Coefficient <https://en.wikipedia.org/wiki/Silhouette_(clustering)>`_
[ "Compute", "the", "Silhouette", "Coefficient", "for", "each", "sample", "." ]
train
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/bin/unsupervised.py#L109-L213
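An illustrative call to the silhouette_score and silhouette_samples copies above, assuming both functions and their sklearn-style validators are importable from this module; the toy data is made up:

    import numpy as np

    # Two tight, well-separated blobs with integer labels.
    rng = np.random.RandomState(0)
    X = np.vstack([rng.normal(0.0, 0.1, size=(20, 2)),
                   rng.normal(5.0, 0.1, size=(20, 2))])
    labels = np.array([0] * 20 + [1] * 20)

    print(silhouette_score(X, labels))         # close to 1 for well-separated clusters
    print(silhouette_samples(X, labels)[:5])   # per-sample coefficients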
brianhie/scanorama
bin/unsupervised.py
calinski_harabaz_score
def calinski_harabaz_score(X, labels): """Compute the Calinski and Harabaz score. The score is defined as ratio between the within-cluster dispersion and the between-cluster dispersion. Read more in the :ref:`User Guide <calinski_harabaz_index>`. Parameters ---------- X : array-like, shape (``n_samples``, ``n_features``) List of ``n_features``-dimensional data points. Each row corresponds to a single data point. labels : array-like, shape (``n_samples``,) Predicted labels for each sample. Returns ------- score : float The resulting Calinski-Harabaz score. References ---------- .. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster analysis". Communications in Statistics <http://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_ """ X, labels = check_X_y(X, labels) le = LabelEncoder() labels = le.fit_transform(labels) n_samples, _ = X.shape n_labels = len(le.classes_) check_number_of_labels(n_labels, n_samples) extra_disp, intra_disp = 0., 0. mean = np.mean(X, axis=0) for k in range(n_labels): cluster_k = X[labels == k] mean_k = np.mean(cluster_k, axis=0) extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2) intra_disp += np.sum((cluster_k - mean_k) ** 2) return (1. if intra_disp == 0. else extra_disp * (n_samples - n_labels) / (intra_disp * (n_labels - 1.)))
python
def calinski_harabaz_score(X, labels): """Compute the Calinski and Harabaz score. The score is defined as ratio between the within-cluster dispersion and the between-cluster dispersion. Read more in the :ref:`User Guide <calinski_harabaz_index>`. Parameters ---------- X : array-like, shape (``n_samples``, ``n_features``) List of ``n_features``-dimensional data points. Each row corresponds to a single data point. labels : array-like, shape (``n_samples``,) Predicted labels for each sample. Returns ------- score : float The resulting Calinski-Harabaz score. References ---------- .. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster analysis". Communications in Statistics <http://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_ """ X, labels = check_X_y(X, labels) le = LabelEncoder() labels = le.fit_transform(labels) n_samples, _ = X.shape n_labels = len(le.classes_) check_number_of_labels(n_labels, n_samples) extra_disp, intra_disp = 0., 0. mean = np.mean(X, axis=0) for k in range(n_labels): cluster_k = X[labels == k] mean_k = np.mean(cluster_k, axis=0) extra_disp += len(cluster_k) * np.sum((mean_k - mean) ** 2) intra_disp += np.sum((cluster_k - mean_k) ** 2) return (1. if intra_disp == 0. else extra_disp * (n_samples - n_labels) / (intra_disp * (n_labels - 1.)))
[ "def", "calinski_harabaz_score", "(", "X", ",", "labels", ")", ":", "X", ",", "labels", "=", "check_X_y", "(", "X", ",", "labels", ")", "le", "=", "LabelEncoder", "(", ")", "labels", "=", "le", ".", "fit_transform", "(", "labels", ")", "n_samples", ",", "_", "=", "X", ".", "shape", "n_labels", "=", "len", "(", "le", ".", "classes_", ")", "check_number_of_labels", "(", "n_labels", ",", "n_samples", ")", "extra_disp", ",", "intra_disp", "=", "0.", ",", "0.", "mean", "=", "np", ".", "mean", "(", "X", ",", "axis", "=", "0", ")", "for", "k", "in", "range", "(", "n_labels", ")", ":", "cluster_k", "=", "X", "[", "labels", "==", "k", "]", "mean_k", "=", "np", ".", "mean", "(", "cluster_k", ",", "axis", "=", "0", ")", "extra_disp", "+=", "len", "(", "cluster_k", ")", "*", "np", ".", "sum", "(", "(", "mean_k", "-", "mean", ")", "**", "2", ")", "intra_disp", "+=", "np", ".", "sum", "(", "(", "cluster_k", "-", "mean_k", ")", "**", "2", ")", "return", "(", "1.", "if", "intra_disp", "==", "0.", "else", "extra_disp", "*", "(", "n_samples", "-", "n_labels", ")", "/", "(", "intra_disp", "*", "(", "n_labels", "-", "1.", ")", ")", ")" ]
Compute the Calinski and Harabaz score. The score is defined as ratio between the within-cluster dispersion and the between-cluster dispersion. Read more in the :ref:`User Guide <calinski_harabaz_index>`. Parameters ---------- X : array-like, shape (``n_samples``, ``n_features``) List of ``n_features``-dimensional data points. Each row corresponds to a single data point. labels : array-like, shape (``n_samples``,) Predicted labels for each sample. Returns ------- score : float The resulting Calinski-Harabaz score. References ---------- .. [1] `T. Calinski and J. Harabasz, 1974. "A dendrite method for cluster analysis". Communications in Statistics <http://www.tandfonline.com/doi/abs/10.1080/03610927408827101>`_
[ "Compute", "the", "Calinski", "and", "Harabaz", "score", "." ]
train
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/bin/unsupervised.py#L216-L263
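A self-contained toy call for the Calinski-Harabasz helper above, again assuming the function and its validators are in scope; the data is made up and chosen so the within-cluster dispersion is tiny:

    import numpy as np

    X = np.array([[0.0, 0.0], [0.1, 0.0], [5.0, 5.0], [5.1, 5.0]])
    labels = np.array([0, 0, 1, 1])
    print(calinski_harabaz_score(X, labels))   # large value: compact, well-separated clusters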
brianhie/scanorama
scanorama/utils.py
handle_zeros_in_scale
def handle_zeros_in_scale(scale, copy=True): ''' Makes sure that whenever scale is zero, we handle it correctly. This happens in most scalers when we have constant features. Adapted from sklearn.preprocessing.data''' # if we are fitting on 1D arrays, scale might be a scalar if np.isscalar(scale): if scale == .0: scale = 1. return scale elif isinstance(scale, np.ndarray): if copy: # New array to avoid side-effects scale = scale.copy() scale[scale == 0.0] = 1.0 return scale
python
def handle_zeros_in_scale(scale, copy=True): ''' Makes sure that whenever scale is zero, we handle it correctly. This happens in most scalers when we have constant features. Adapted from sklearn.preprocessing.data''' # if we are fitting on 1D arrays, scale might be a scalar if np.isscalar(scale): if scale == .0: scale = 1. return scale elif isinstance(scale, np.ndarray): if copy: # New array to avoid side-effects scale = scale.copy() scale[scale == 0.0] = 1.0 return scale
[ "def", "handle_zeros_in_scale", "(", "scale", ",", "copy", "=", "True", ")", ":", "# if we are fitting on 1D arrays, scale might be a scalar", "if", "np", ".", "isscalar", "(", "scale", ")", ":", "if", "scale", "==", ".0", ":", "scale", "=", "1.", "return", "scale", "elif", "isinstance", "(", "scale", ",", "np", ".", "ndarray", ")", ":", "if", "copy", ":", "# New array to avoid side-effects", "scale", "=", "scale", ".", "copy", "(", ")", "scale", "[", "scale", "==", "0.0", "]", "=", "1.0", "return", "scale" ]
Makes sure that whenever scale is zero, we handle it correctly. This happens in most scalers when we have constant features. Adapted from sklearn.preprocessing.data
[ "Makes", "sure", "that", "whenever", "scale", "is", "zero", "we", "handle", "it", "correctly", ".", "This", "happens", "in", "most", "scalers", "when", "we", "have", "constant", "features", ".", "Adapted", "from", "sklearn", ".", "preprocessing", ".", "data" ]
train
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/utils.py#L124-L139
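A small sketch of why the guard above exists: when standardizing a hypothetical feature matrix with a constant column, the zero standard deviation would otherwise cause a division by zero (the data below is made up for illustration):

import numpy as np

X = np.array([[1.0, 7.0],
              [2.0, 7.0],
              [3.0, 7.0]])        # second feature is constant
scale = X.std(axis=0)             # -> [0.816..., 0.0]

# Same guard as in the record: a zero scale becomes 1.0, so the division is a
# no-op for the constant column instead of producing NaN/inf.
safe_scale = scale.copy()
safe_scale[safe_scale == 0.0] = 1.0
X_scaled = (X - X.mean(axis=0)) / safe_scale
print(X_scaled)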
brianhie/scanorama
scanorama/t_sne_approx.py
_joint_probabilities
def _joint_probabilities(distances, desired_perplexity, verbose): """Compute joint probabilities p_ij from distances. Parameters ---------- distances : array, shape (n_samples * (n_samples-1) / 2,) Distances of samples are stored as condensed matrices, i.e. we omit the diagonal and duplicate entries and store everything in a one-dimensional array. desired_perplexity : float Desired perplexity of the joint probability distributions. verbose : int Verbosity level. Returns ------- P : array, shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. """ # Compute conditional probabilities such that they approximately match # the desired perplexity distances = distances.astype(np.float32, copy=False) conditional_P = _utils._binary_search_perplexity( distances, None, desired_perplexity, verbose) P = conditional_P + conditional_P.T sum_P = np.maximum(np.sum(P), MACHINE_EPSILON) P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON) return P
python
def _joint_probabilities(distances, desired_perplexity, verbose): """Compute joint probabilities p_ij from distances. Parameters ---------- distances : array, shape (n_samples * (n_samples-1) / 2,) Distances of samples are stored as condensed matrices, i.e. we omit the diagonal and duplicate entries and store everything in a one-dimensional array. desired_perplexity : float Desired perplexity of the joint probability distributions. verbose : int Verbosity level. Returns ------- P : array, shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. """ # Compute conditional probabilities such that they approximately match # the desired perplexity distances = distances.astype(np.float32, copy=False) conditional_P = _utils._binary_search_perplexity( distances, None, desired_perplexity, verbose) P = conditional_P + conditional_P.T sum_P = np.maximum(np.sum(P), MACHINE_EPSILON) P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON) return P
[ "def", "_joint_probabilities", "(", "distances", ",", "desired_perplexity", ",", "verbose", ")", ":", "# Compute conditional probabilities such that they approximately match", "# the desired perplexity", "distances", "=", "distances", ".", "astype", "(", "np", ".", "float32", ",", "copy", "=", "False", ")", "conditional_P", "=", "_utils", ".", "_binary_search_perplexity", "(", "distances", ",", "None", ",", "desired_perplexity", ",", "verbose", ")", "P", "=", "conditional_P", "+", "conditional_P", ".", "T", "sum_P", "=", "np", ".", "maximum", "(", "np", ".", "sum", "(", "P", ")", ",", "MACHINE_EPSILON", ")", "P", "=", "np", ".", "maximum", "(", "squareform", "(", "P", ")", "/", "sum_P", ",", "MACHINE_EPSILON", ")", "return", "P" ]
Compute joint probabilities p_ij from distances. Parameters ---------- distances : array, shape (n_samples * (n_samples-1) / 2,) Distances of samples are stored as condensed matrices, i.e. we omit the diagonal and duplicate entries and store everything in a one-dimensional array. desired_perplexity : float Desired perplexity of the joint probability distributions. verbose : int Verbosity level. Returns ------- P : array, shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix.
[ "Compute", "joint", "probabilities", "p_ij", "from", "distances", "." ]
train
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/t_sne_approx.py#L39-L68
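The per-point bandwidth search relies on a private helper, but the symmetrization and normalization that follow it are plain NumPy/SciPy and can be sketched on a tiny, made-up conditional-probability matrix (MACHINE_EPSILON here mirrors the module-level constant the record refers to):

import numpy as np
from scipy.spatial.distance import squareform

MACHINE_EPSILON = np.finfo(np.double).eps

# Hypothetical conditional probabilities p_{j|i} for 3 points (rows sum to 1, zero diagonal).
conditional_P = np.array([[0.0, 0.7, 0.3],
                          [0.6, 0.0, 0.4],
                          [0.5, 0.5, 0.0]])

P = conditional_P + conditional_P.T                      # p_ij proportional to p_{j|i} + p_{i|j}
sum_P = np.maximum(np.sum(P), MACHINE_EPSILON)
P = np.maximum(squareform(P) / sum_P, MACHINE_EPSILON)   # condensed: pairs (0,1), (0,2), (1,2)
print(P, P.sum())                                        # each pair stored once, so the sum is 0.5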
brianhie/scanorama
scanorama/t_sne_approx.py
_joint_probabilities_nn
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose): """Compute joint probabilities p_ij from distances using just nearest neighbors. This method is approximately equal to _joint_probabilities. The latter is O(N), but limiting the joint probability to nearest neighbors improves this substantially to O(uN). Parameters ---------- distances : array, shape (n_samples, k) Distances of samples to its k nearest neighbors. neighbors : array, shape (n_samples, k) Indices of the k nearest-neighbors for each samples. desired_perplexity : float Desired perplexity of the joint probability distributions. verbose : int Verbosity level. Returns ------- P : csr sparse matrix, shape (n_samples, n_samples) Condensed joint probability matrix with only nearest neighbors. """ t0 = time() # Compute conditional probabilities such that they approximately match # the desired perplexity n_samples, k = neighbors.shape distances = distances.astype(np.float32, copy=False) neighbors = neighbors.astype(np.int64, copy=False) conditional_P = _utils._binary_search_perplexity( distances, neighbors, desired_perplexity, verbose) assert np.all(np.isfinite(conditional_P)), \ "All probabilities should be finite" # Symmetrize the joint probability distribution using sparse operations P = csr_matrix((conditional_P.ravel(), neighbors.ravel(), range(0, n_samples * k + 1, k)), shape=(n_samples, n_samples)) P = P + P.T # Normalize the joint probability distribution sum_P = np.maximum(P.sum(), MACHINE_EPSILON) P /= sum_P assert np.all(np.abs(P.data) <= 1.0) if verbose >= 2: duration = time() - t0 print("[t-SNE] Computed conditional probabilities in {:.3f}s" .format(duration)) return P
python
def _joint_probabilities_nn(distances, neighbors, desired_perplexity, verbose): """Compute joint probabilities p_ij from distances using just nearest neighbors. This method is approximately equal to _joint_probabilities. The latter is O(N), but limiting the joint probability to nearest neighbors improves this substantially to O(uN). Parameters ---------- distances : array, shape (n_samples, k) Distances of samples to its k nearest neighbors. neighbors : array, shape (n_samples, k) Indices of the k nearest-neighbors for each samples. desired_perplexity : float Desired perplexity of the joint probability distributions. verbose : int Verbosity level. Returns ------- P : csr sparse matrix, shape (n_samples, n_samples) Condensed joint probability matrix with only nearest neighbors. """ t0 = time() # Compute conditional probabilities such that they approximately match # the desired perplexity n_samples, k = neighbors.shape distances = distances.astype(np.float32, copy=False) neighbors = neighbors.astype(np.int64, copy=False) conditional_P = _utils._binary_search_perplexity( distances, neighbors, desired_perplexity, verbose) assert np.all(np.isfinite(conditional_P)), \ "All probabilities should be finite" # Symmetrize the joint probability distribution using sparse operations P = csr_matrix((conditional_P.ravel(), neighbors.ravel(), range(0, n_samples * k + 1, k)), shape=(n_samples, n_samples)) P = P + P.T # Normalize the joint probability distribution sum_P = np.maximum(P.sum(), MACHINE_EPSILON) P /= sum_P assert np.all(np.abs(P.data) <= 1.0) if verbose >= 2: duration = time() - t0 print("[t-SNE] Computed conditional probabilities in {:.3f}s" .format(duration)) return P
[ "def", "_joint_probabilities_nn", "(", "distances", ",", "neighbors", ",", "desired_perplexity", ",", "verbose", ")", ":", "t0", "=", "time", "(", ")", "# Compute conditional probabilities such that they approximately match", "# the desired perplexity", "n_samples", ",", "k", "=", "neighbors", ".", "shape", "distances", "=", "distances", ".", "astype", "(", "np", ".", "float32", ",", "copy", "=", "False", ")", "neighbors", "=", "neighbors", ".", "astype", "(", "np", ".", "int64", ",", "copy", "=", "False", ")", "conditional_P", "=", "_utils", ".", "_binary_search_perplexity", "(", "distances", ",", "neighbors", ",", "desired_perplexity", ",", "verbose", ")", "assert", "np", ".", "all", "(", "np", ".", "isfinite", "(", "conditional_P", ")", ")", ",", "\"All probabilities should be finite\"", "# Symmetrize the joint probability distribution using sparse operations", "P", "=", "csr_matrix", "(", "(", "conditional_P", ".", "ravel", "(", ")", ",", "neighbors", ".", "ravel", "(", ")", ",", "range", "(", "0", ",", "n_samples", "*", "k", "+", "1", ",", "k", ")", ")", ",", "shape", "=", "(", "n_samples", ",", "n_samples", ")", ")", "P", "=", "P", "+", "P", ".", "T", "# Normalize the joint probability distribution", "sum_P", "=", "np", ".", "maximum", "(", "P", ".", "sum", "(", ")", ",", "MACHINE_EPSILON", ")", "P", "/=", "sum_P", "assert", "np", ".", "all", "(", "np", ".", "abs", "(", "P", ".", "data", ")", "<=", "1.0", ")", "if", "verbose", ">=", "2", ":", "duration", "=", "time", "(", ")", "-", "t0", "print", "(", "\"[t-SNE] Computed conditional probabilities in {:.3f}s\"", ".", "format", "(", "duration", ")", ")", "return", "P" ]
Compute joint probabilities p_ij from distances using just nearest neighbors. This method is approximately equal to _joint_probabilities. The latter is O(N), but limiting the joint probability to nearest neighbors improves this substantially to O(uN). Parameters ---------- distances : array, shape (n_samples, k) Distances of samples to its k nearest neighbors. neighbors : array, shape (n_samples, k) Indices of the k nearest-neighbors for each samples. desired_perplexity : float Desired perplexity of the joint probability distributions. verbose : int Verbosity level. Returns ------- P : csr sparse matrix, shape (n_samples, n_samples) Condensed joint probability matrix with only nearest neighbors.
[ "Compute", "joint", "probabilities", "p_ij", "from", "distances", "using", "just", "nearest", "neighbors", "." ]
train
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/t_sne_approx.py#L71-L124
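For the nearest-neighbour variant the same symmetrization is done sparsely; a minimal sketch with made-up k-NN conditional probabilities (4 points, k = 2) shows how the CSR matrix is assembled from the flattened neighbour lists:

import numpy as np
from scipy.sparse import csr_matrix

n_samples, k = 4, 2
neighbors = np.array([[1, 2], [0, 3], [0, 3], [1, 2]])   # indices of each point's k neighbours
conditional_P = np.full((n_samples, k), 0.5)              # each row sums to 1

# data / indices / indptr construction, as in the record.
P = csr_matrix((conditional_P.ravel(), neighbors.ravel(),
                range(0, n_samples * k + 1, k)),
               shape=(n_samples, n_samples))
P = P + P.T                                               # symmetrize without densifying
P = P / np.maximum(P.sum(), np.finfo(np.double).eps)      # normalize to a joint distribution
print(P.toarray())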
brianhie/scanorama
scanorama/t_sne_approx.py
_kl_divergence
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components, skip_num_points=0): """t-SNE objective function: gradient of the KL divergence of p_ijs and q_ijs and the absolute error. Parameters ---------- params : array, shape (n_params,) Unraveled embedding. P : array, shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. degrees_of_freedom : float Degrees of freedom of the Student's-t distribution. n_samples : int Number of samples. n_components : int Dimension of the embedded space. skip_num_points : int (optional, default:0) This does not compute the gradient for points with indices below `skip_num_points`. This is useful when computing transforms of new data where you'd like to keep the old data fixed. Returns ------- kl_divergence : float Kullback-Leibler divergence of p_ij and q_ij. grad : array, shape (n_params,) Unraveled gradient of the Kullback-Leibler divergence with respect to the embedding. """ X_embedded = params.reshape(n_samples, n_components) # Q is a heavy-tailed distribution: Student's t-distribution dist = pdist(X_embedded, "sqeuclidean") dist += 1. dist /= degrees_of_freedom dist **= (degrees_of_freedom + 1.0) / -2.0 Q = np.maximum(dist / (2.0 * np.sum(dist)), MACHINE_EPSILON) # Optimization trick below: np.dot(x, y) is faster than # np.sum(x * y) because it calls BLAS # Objective: C (Kullback-Leibler divergence of P and Q) kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q)) # Gradient: dC/dY # pdist always returns double precision distances. Thus we need to take grad = np.ndarray((n_samples, n_components), dtype=params.dtype) PQd = squareform((P - Q) * dist) for i in range(skip_num_points, n_samples): grad[i] = np.dot(np.ravel(PQd[i], order='K'), X_embedded[i] - X_embedded) grad = grad.ravel() c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom grad *= c return kl_divergence, grad
python
def _kl_divergence(params, P, degrees_of_freedom, n_samples, n_components, skip_num_points=0): """t-SNE objective function: gradient of the KL divergence of p_ijs and q_ijs and the absolute error. Parameters ---------- params : array, shape (n_params,) Unraveled embedding. P : array, shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. degrees_of_freedom : float Degrees of freedom of the Student's-t distribution. n_samples : int Number of samples. n_components : int Dimension of the embedded space. skip_num_points : int (optional, default:0) This does not compute the gradient for points with indices below `skip_num_points`. This is useful when computing transforms of new data where you'd like to keep the old data fixed. Returns ------- kl_divergence : float Kullback-Leibler divergence of p_ij and q_ij. grad : array, shape (n_params,) Unraveled gradient of the Kullback-Leibler divergence with respect to the embedding. """ X_embedded = params.reshape(n_samples, n_components) # Q is a heavy-tailed distribution: Student's t-distribution dist = pdist(X_embedded, "sqeuclidean") dist += 1. dist /= degrees_of_freedom dist **= (degrees_of_freedom + 1.0) / -2.0 Q = np.maximum(dist / (2.0 * np.sum(dist)), MACHINE_EPSILON) # Optimization trick below: np.dot(x, y) is faster than # np.sum(x * y) because it calls BLAS # Objective: C (Kullback-Leibler divergence of P and Q) kl_divergence = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q)) # Gradient: dC/dY # pdist always returns double precision distances. Thus we need to take grad = np.ndarray((n_samples, n_components), dtype=params.dtype) PQd = squareform((P - Q) * dist) for i in range(skip_num_points, n_samples): grad[i] = np.dot(np.ravel(PQd[i], order='K'), X_embedded[i] - X_embedded) grad = grad.ravel() c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom grad *= c return kl_divergence, grad
[ "def", "_kl_divergence", "(", "params", ",", "P", ",", "degrees_of_freedom", ",", "n_samples", ",", "n_components", ",", "skip_num_points", "=", "0", ")", ":", "X_embedded", "=", "params", ".", "reshape", "(", "n_samples", ",", "n_components", ")", "# Q is a heavy-tailed distribution: Student's t-distribution", "dist", "=", "pdist", "(", "X_embedded", ",", "\"sqeuclidean\"", ")", "dist", "+=", "1.", "dist", "/=", "degrees_of_freedom", "dist", "**=", "(", "degrees_of_freedom", "+", "1.0", ")", "/", "-", "2.0", "Q", "=", "np", ".", "maximum", "(", "dist", "/", "(", "2.0", "*", "np", ".", "sum", "(", "dist", ")", ")", ",", "MACHINE_EPSILON", ")", "# Optimization trick below: np.dot(x, y) is faster than", "# np.sum(x * y) because it calls BLAS", "# Objective: C (Kullback-Leibler divergence of P and Q)", "kl_divergence", "=", "2.0", "*", "np", ".", "dot", "(", "P", ",", "np", ".", "log", "(", "np", ".", "maximum", "(", "P", ",", "MACHINE_EPSILON", ")", "/", "Q", ")", ")", "# Gradient: dC/dY", "# pdist always returns double precision distances. Thus we need to take", "grad", "=", "np", ".", "ndarray", "(", "(", "n_samples", ",", "n_components", ")", ",", "dtype", "=", "params", ".", "dtype", ")", "PQd", "=", "squareform", "(", "(", "P", "-", "Q", ")", "*", "dist", ")", "for", "i", "in", "range", "(", "skip_num_points", ",", "n_samples", ")", ":", "grad", "[", "i", "]", "=", "np", ".", "dot", "(", "np", ".", "ravel", "(", "PQd", "[", "i", "]", ",", "order", "=", "'K'", ")", ",", "X_embedded", "[", "i", "]", "-", "X_embedded", ")", "grad", "=", "grad", ".", "ravel", "(", ")", "c", "=", "2.0", "*", "(", "degrees_of_freedom", "+", "1.0", ")", "/", "degrees_of_freedom", "grad", "*=", "c", "return", "kl_divergence", ",", "grad" ]
t-SNE objective function: gradient of the KL divergence of p_ijs and q_ijs and the absolute error. Parameters ---------- params : array, shape (n_params,) Unraveled embedding. P : array, shape (n_samples * (n_samples-1) / 2,) Condensed joint probability matrix. degrees_of_freedom : float Degrees of freedom of the Student's-t distribution. n_samples : int Number of samples. n_components : int Dimension of the embedded space. skip_num_points : int (optional, default:0) This does not compute the gradient for points with indices below `skip_num_points`. This is useful when computing transforms of new data where you'd like to keep the old data fixed. Returns ------- kl_divergence : float Kullback-Leibler divergence of p_ij and q_ij. grad : array, shape (n_params,) Unraveled gradient of the Kullback-Leibler divergence with respect to the embedding.
[ "t", "-", "SNE", "objective", "function", ":", "gradient", "of", "the", "KL", "divergence", "of", "p_ijs", "and", "q_ijs", "and", "the", "absolute", "error", "." ]
train
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/t_sne_approx.py#L127-L189
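The low-dimensional side of the objective is easy to reproduce by hand; a toy check with a made-up 2-D embedding and a matching condensed P (values are illustrative only) gives the same Q and KL value the function above would compute:

import numpy as np
from scipy.spatial.distance import pdist

MACHINE_EPSILON = np.finfo(np.double).eps

X_embedded = np.array([[0.0, 0.0], [1.0, 0.0], [0.0, 2.0]])
P = np.array([0.30, 0.15, 0.05])        # condensed pairs (0,1), (0,2), (1,2); sums to 0.5
degrees_of_freedom = 1.0                # n_components - 1 for a 2-D embedding

# Student-t kernel on pairwise squared distances, exactly as in the record.
dist = pdist(X_embedded, "sqeuclidean")
dist += 1.0
dist /= degrees_of_freedom
dist **= (degrees_of_freedom + 1.0) / -2.0
Q = np.maximum(dist / (2.0 * np.sum(dist)), MACHINE_EPSILON)

kl = 2.0 * np.dot(P, np.log(np.maximum(P, MACHINE_EPSILON) / Q))
print(Q, kl)                            # kl is non-negative and 0 only when Q equals P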
brianhie/scanorama
scanorama/t_sne_approx.py
_kl_divergence_bh
def _kl_divergence_bh(params, P, degrees_of_freedom, n_samples, n_components, angle=0.5, skip_num_points=0, verbose=False): """t-SNE objective function: KL divergence of p_ijs and q_ijs. Uses Barnes-Hut tree methods to calculate the gradient that runs in O(NlogN) instead of O(N^2) Parameters ---------- params : array, shape (n_params,) Unraveled embedding. P : csr sparse matrix, shape (n_samples, n_sample) Sparse approximate joint probability matrix, computed only for the k nearest-neighbors and symmetrized. degrees_of_freedom : float Degrees of freedom of the Student's-t distribution. n_samples : int Number of samples. n_components : int Dimension of the embedded space. angle : float (default: 0.5) This is the trade-off between speed and accuracy for Barnes-Hut T-SNE. 'angle' is the angular size (referred to as theta in [3]) of a distant node as measured from a point. If this size is below 'angle' then it is used as a summary node of all points contained within it. This method is not very sensitive to changes in this parameter in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing computation time and angle greater 0.8 has quickly increasing error. skip_num_points : int (optional, default:0) This does not compute the gradient for points with indices below `skip_num_points`. This is useful when computing transforms of new data where you'd like to keep the old data fixed. verbose : int Verbosity level. Returns ------- kl_divergence : float Kullback-Leibler divergence of p_ij and q_ij. grad : array, shape (n_params,) Unraveled gradient of the Kullback-Leibler divergence with respect to the embedding. """ params = params.astype(np.float32, copy=False) X_embedded = params.reshape(n_samples, n_components) val_P = P.data.astype(np.float32, copy=False) neighbors = P.indices.astype(np.int64, copy=False) indptr = P.indptr.astype(np.int64, copy=False) grad = np.zeros(X_embedded.shape, dtype=np.float32) error = _barnes_hut_tsne.gradient(val_P, X_embedded, neighbors, indptr, grad, angle, n_components, verbose, dof=degrees_of_freedom) c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom grad = grad.ravel() grad *= c return error, grad
python
def _kl_divergence_bh(params, P, degrees_of_freedom, n_samples, n_components, angle=0.5, skip_num_points=0, verbose=False): """t-SNE objective function: KL divergence of p_ijs and q_ijs. Uses Barnes-Hut tree methods to calculate the gradient that runs in O(NlogN) instead of O(N^2) Parameters ---------- params : array, shape (n_params,) Unraveled embedding. P : csr sparse matrix, shape (n_samples, n_sample) Sparse approximate joint probability matrix, computed only for the k nearest-neighbors and symmetrized. degrees_of_freedom : float Degrees of freedom of the Student's-t distribution. n_samples : int Number of samples. n_components : int Dimension of the embedded space. angle : float (default: 0.5) This is the trade-off between speed and accuracy for Barnes-Hut T-SNE. 'angle' is the angular size (referred to as theta in [3]) of a distant node as measured from a point. If this size is below 'angle' then it is used as a summary node of all points contained within it. This method is not very sensitive to changes in this parameter in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing computation time and angle greater 0.8 has quickly increasing error. skip_num_points : int (optional, default:0) This does not compute the gradient for points with indices below `skip_num_points`. This is useful when computing transforms of new data where you'd like to keep the old data fixed. verbose : int Verbosity level. Returns ------- kl_divergence : float Kullback-Leibler divergence of p_ij and q_ij. grad : array, shape (n_params,) Unraveled gradient of the Kullback-Leibler divergence with respect to the embedding. """ params = params.astype(np.float32, copy=False) X_embedded = params.reshape(n_samples, n_components) val_P = P.data.astype(np.float32, copy=False) neighbors = P.indices.astype(np.int64, copy=False) indptr = P.indptr.astype(np.int64, copy=False) grad = np.zeros(X_embedded.shape, dtype=np.float32) error = _barnes_hut_tsne.gradient(val_P, X_embedded, neighbors, indptr, grad, angle, n_components, verbose, dof=degrees_of_freedom) c = 2.0 * (degrees_of_freedom + 1.0) / degrees_of_freedom grad = grad.ravel() grad *= c return error, grad
[ "def", "_kl_divergence_bh", "(", "params", ",", "P", ",", "degrees_of_freedom", ",", "n_samples", ",", "n_components", ",", "angle", "=", "0.5", ",", "skip_num_points", "=", "0", ",", "verbose", "=", "False", ")", ":", "params", "=", "params", ".", "astype", "(", "np", ".", "float32", ",", "copy", "=", "False", ")", "X_embedded", "=", "params", ".", "reshape", "(", "n_samples", ",", "n_components", ")", "val_P", "=", "P", ".", "data", ".", "astype", "(", "np", ".", "float32", ",", "copy", "=", "False", ")", "neighbors", "=", "P", ".", "indices", ".", "astype", "(", "np", ".", "int64", ",", "copy", "=", "False", ")", "indptr", "=", "P", ".", "indptr", ".", "astype", "(", "np", ".", "int64", ",", "copy", "=", "False", ")", "grad", "=", "np", ".", "zeros", "(", "X_embedded", ".", "shape", ",", "dtype", "=", "np", ".", "float32", ")", "error", "=", "_barnes_hut_tsne", ".", "gradient", "(", "val_P", ",", "X_embedded", ",", "neighbors", ",", "indptr", ",", "grad", ",", "angle", ",", "n_components", ",", "verbose", ",", "dof", "=", "degrees_of_freedom", ")", "c", "=", "2.0", "*", "(", "degrees_of_freedom", "+", "1.0", ")", "/", "degrees_of_freedom", "grad", "=", "grad", ".", "ravel", "(", ")", "grad", "*=", "c", "return", "error", ",", "grad" ]
t-SNE objective function: KL divergence of p_ijs and q_ijs. Uses Barnes-Hut tree methods to calculate the gradient that runs in O(NlogN) instead of O(N^2) Parameters ---------- params : array, shape (n_params,) Unraveled embedding. P : csr sparse matrix, shape (n_samples, n_sample) Sparse approximate joint probability matrix, computed only for the k nearest-neighbors and symmetrized. degrees_of_freedom : float Degrees of freedom of the Student's-t distribution. n_samples : int Number of samples. n_components : int Dimension of the embedded space. angle : float (default: 0.5) This is the trade-off between speed and accuracy for Barnes-Hut T-SNE. 'angle' is the angular size (referred to as theta in [3]) of a distant node as measured from a point. If this size is below 'angle' then it is used as a summary node of all points contained within it. This method is not very sensitive to changes in this parameter in the range of 0.2 - 0.8. Angle less than 0.2 has quickly increasing computation time and angle greater 0.8 has quickly increasing error. skip_num_points : int (optional, default:0) This does not compute the gradient for points with indices below `skip_num_points`. This is useful when computing transforms of new data where you'd like to keep the old data fixed. verbose : int Verbosity level. Returns ------- kl_divergence : float Kullback-Leibler divergence of p_ij and q_ij. grad : array, shape (n_params,) Unraveled gradient of the Kullback-Leibler divergence with respect to the embedding.
[ "t", "-", "SNE", "objective", "function", ":", "KL", "divergence", "of", "p_ijs", "and", "q_ijs", "." ]
train
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/t_sne_approx.py#L192-L258
brianhie/scanorama
scanorama/t_sne_approx.py
_gradient_descent
def _gradient_descent(objective, p0, it, n_iter, n_iter_check=1, n_iter_without_progress=300, momentum=0.8, learning_rate=200.0, min_gain=0.01, min_grad_norm=1e-7, verbose=0, args=None, kwargs=None): """Batch gradient descent with momentum and individual gains. Parameters ---------- objective : function or callable Should return a tuple of cost and gradient for a given parameter vector. When expensive to compute, the cost can optionally be None and can be computed every n_iter_check steps using the objective_error function. p0 : array-like, shape (n_params,) Initial parameter vector. it : int Current number of iterations (this function will be called more than once during the optimization). n_iter : int Maximum number of gradient descent iterations. n_iter_check : int Number of iterations before evaluating the global error. If the error is sufficiently low, we abort the optimization. n_iter_without_progress : int, optional (default: 300) Maximum number of iterations without progress before we abort the optimization. momentum : float, within (0.0, 1.0), optional (default: 0.8) The momentum generates a weight for previous gradients that decays exponentially. learning_rate : float, optional (default: 200.0) The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If the learning rate is too high, the data may look like a 'ball' with any point approximately equidistant from its nearest neighbours. If the learning rate is too low, most points may look compressed in a dense cloud with few outliers. min_gain : float, optional (default: 0.01) Minimum individual gain for each parameter. min_grad_norm : float, optional (default: 1e-7) If the gradient norm is below this threshold, the optimization will be aborted. verbose : int, optional (default: 0) Verbosity level. args : sequence Arguments to pass to objective function. kwargs : dict Keyword arguments to pass to objective function. Returns ------- p : array, shape (n_params,) Optimum parameters. error : float Optimum. i : int Last iteration. """ if args is None: args = [] if kwargs is None: kwargs = {} p = p0.copy().ravel() update = np.zeros_like(p) gains = np.ones_like(p) error = np.finfo(np.float).max best_error = np.finfo(np.float).max best_iter = i = it tic = time() for i in range(it, n_iter): error, grad = objective(p, *args, **kwargs) grad_norm = linalg.norm(grad) inc = update * grad < 0.0 dec = np.invert(inc) gains[inc] += 0.2 gains[dec] *= 0.8 np.clip(gains, min_gain, np.inf, out=gains) grad *= gains update = momentum * update - learning_rate * grad p += update if (i + 1) % n_iter_check == 0: toc = time() duration = toc - tic tic = toc if verbose >= 2: print("[t-SNE] Iteration %d: error = %.7f," " gradient norm = %.7f" " (%s iterations in %0.3fs)" % (i + 1, error, grad_norm, n_iter_check, duration)) if error < best_error: best_error = error best_iter = i elif i - best_iter > n_iter_without_progress: if verbose >= 2: print("[t-SNE] Iteration %d: did not make any progress " "during the last %d episodes. Finished." % (i + 1, n_iter_without_progress)) break if grad_norm <= min_grad_norm: if verbose >= 2: print("[t-SNE] Iteration %d: gradient norm %f. Finished." % (i + 1, grad_norm)) break return p, error, i
python
def _gradient_descent(objective, p0, it, n_iter, n_iter_check=1, n_iter_without_progress=300, momentum=0.8, learning_rate=200.0, min_gain=0.01, min_grad_norm=1e-7, verbose=0, args=None, kwargs=None): """Batch gradient descent with momentum and individual gains. Parameters ---------- objective : function or callable Should return a tuple of cost and gradient for a given parameter vector. When expensive to compute, the cost can optionally be None and can be computed every n_iter_check steps using the objective_error function. p0 : array-like, shape (n_params,) Initial parameter vector. it : int Current number of iterations (this function will be called more than once during the optimization). n_iter : int Maximum number of gradient descent iterations. n_iter_check : int Number of iterations before evaluating the global error. If the error is sufficiently low, we abort the optimization. n_iter_without_progress : int, optional (default: 300) Maximum number of iterations without progress before we abort the optimization. momentum : float, within (0.0, 1.0), optional (default: 0.8) The momentum generates a weight for previous gradients that decays exponentially. learning_rate : float, optional (default: 200.0) The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If the learning rate is too high, the data may look like a 'ball' with any point approximately equidistant from its nearest neighbours. If the learning rate is too low, most points may look compressed in a dense cloud with few outliers. min_gain : float, optional (default: 0.01) Minimum individual gain for each parameter. min_grad_norm : float, optional (default: 1e-7) If the gradient norm is below this threshold, the optimization will be aborted. verbose : int, optional (default: 0) Verbosity level. args : sequence Arguments to pass to objective function. kwargs : dict Keyword arguments to pass to objective function. Returns ------- p : array, shape (n_params,) Optimum parameters. error : float Optimum. i : int Last iteration. """ if args is None: args = [] if kwargs is None: kwargs = {} p = p0.copy().ravel() update = np.zeros_like(p) gains = np.ones_like(p) error = np.finfo(np.float).max best_error = np.finfo(np.float).max best_iter = i = it tic = time() for i in range(it, n_iter): error, grad = objective(p, *args, **kwargs) grad_norm = linalg.norm(grad) inc = update * grad < 0.0 dec = np.invert(inc) gains[inc] += 0.2 gains[dec] *= 0.8 np.clip(gains, min_gain, np.inf, out=gains) grad *= gains update = momentum * update - learning_rate * grad p += update if (i + 1) % n_iter_check == 0: toc = time() duration = toc - tic tic = toc if verbose >= 2: print("[t-SNE] Iteration %d: error = %.7f," " gradient norm = %.7f" " (%s iterations in %0.3fs)" % (i + 1, error, grad_norm, n_iter_check, duration)) if error < best_error: best_error = error best_iter = i elif i - best_iter > n_iter_without_progress: if verbose >= 2: print("[t-SNE] Iteration %d: did not make any progress " "during the last %d episodes. Finished." % (i + 1, n_iter_without_progress)) break if grad_norm <= min_grad_norm: if verbose >= 2: print("[t-SNE] Iteration %d: gradient norm %f. Finished." % (i + 1, grad_norm)) break return p, error, i
[ "def", "_gradient_descent", "(", "objective", ",", "p0", ",", "it", ",", "n_iter", ",", "n_iter_check", "=", "1", ",", "n_iter_without_progress", "=", "300", ",", "momentum", "=", "0.8", ",", "learning_rate", "=", "200.0", ",", "min_gain", "=", "0.01", ",", "min_grad_norm", "=", "1e-7", ",", "verbose", "=", "0", ",", "args", "=", "None", ",", "kwargs", "=", "None", ")", ":", "if", "args", "is", "None", ":", "args", "=", "[", "]", "if", "kwargs", "is", "None", ":", "kwargs", "=", "{", "}", "p", "=", "p0", ".", "copy", "(", ")", ".", "ravel", "(", ")", "update", "=", "np", ".", "zeros_like", "(", "p", ")", "gains", "=", "np", ".", "ones_like", "(", "p", ")", "error", "=", "np", ".", "finfo", "(", "np", ".", "float", ")", ".", "max", "best_error", "=", "np", ".", "finfo", "(", "np", ".", "float", ")", ".", "max", "best_iter", "=", "i", "=", "it", "tic", "=", "time", "(", ")", "for", "i", "in", "range", "(", "it", ",", "n_iter", ")", ":", "error", ",", "grad", "=", "objective", "(", "p", ",", "*", "args", ",", "*", "*", "kwargs", ")", "grad_norm", "=", "linalg", ".", "norm", "(", "grad", ")", "inc", "=", "update", "*", "grad", "<", "0.0", "dec", "=", "np", ".", "invert", "(", "inc", ")", "gains", "[", "inc", "]", "+=", "0.2", "gains", "[", "dec", "]", "*=", "0.8", "np", ".", "clip", "(", "gains", ",", "min_gain", ",", "np", ".", "inf", ",", "out", "=", "gains", ")", "grad", "*=", "gains", "update", "=", "momentum", "*", "update", "-", "learning_rate", "*", "grad", "p", "+=", "update", "if", "(", "i", "+", "1", ")", "%", "n_iter_check", "==", "0", ":", "toc", "=", "time", "(", ")", "duration", "=", "toc", "-", "tic", "tic", "=", "toc", "if", "verbose", ">=", "2", ":", "print", "(", "\"[t-SNE] Iteration %d: error = %.7f,\"", "\" gradient norm = %.7f\"", "\" (%s iterations in %0.3fs)\"", "%", "(", "i", "+", "1", ",", "error", ",", "grad_norm", ",", "n_iter_check", ",", "duration", ")", ")", "if", "error", "<", "best_error", ":", "best_error", "=", "error", "best_iter", "=", "i", "elif", "i", "-", "best_iter", ">", "n_iter_without_progress", ":", "if", "verbose", ">=", "2", ":", "print", "(", "\"[t-SNE] Iteration %d: did not make any progress \"", "\"during the last %d episodes. Finished.\"", "%", "(", "i", "+", "1", ",", "n_iter_without_progress", ")", ")", "break", "if", "grad_norm", "<=", "min_grad_norm", ":", "if", "verbose", ">=", "2", ":", "print", "(", "\"[t-SNE] Iteration %d: gradient norm %f. Finished.\"", "%", "(", "i", "+", "1", ",", "grad_norm", ")", ")", "break", "return", "p", ",", "error", ",", "i" ]
Batch gradient descent with momentum and individual gains. Parameters ---------- objective : function or callable Should return a tuple of cost and gradient for a given parameter vector. When expensive to compute, the cost can optionally be None and can be computed every n_iter_check steps using the objective_error function. p0 : array-like, shape (n_params,) Initial parameter vector. it : int Current number of iterations (this function will be called more than once during the optimization). n_iter : int Maximum number of gradient descent iterations. n_iter_check : int Number of iterations before evaluating the global error. If the error is sufficiently low, we abort the optimization. n_iter_without_progress : int, optional (default: 300) Maximum number of iterations without progress before we abort the optimization. momentum : float, within (0.0, 1.0), optional (default: 0.8) The momentum generates a weight for previous gradients that decays exponentially. learning_rate : float, optional (default: 200.0) The learning rate for t-SNE is usually in the range [10.0, 1000.0]. If the learning rate is too high, the data may look like a 'ball' with any point approximately equidistant from its nearest neighbours. If the learning rate is too low, most points may look compressed in a dense cloud with few outliers. min_gain : float, optional (default: 0.01) Minimum individual gain for each parameter. min_grad_norm : float, optional (default: 1e-7) If the gradient norm is below this threshold, the optimization will be aborted. verbose : int, optional (default: 0) Verbosity level. args : sequence Arguments to pass to objective function. kwargs : dict Keyword arguments to pass to objective function. Returns ------- p : array, shape (n_params,) Optimum parameters. error : float Optimum. i : int Last iteration.
[ "Batch", "gradient", "descent", "with", "momentum", "and", "individual", "gains", "." ]
train
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/t_sne_approx.py#L261-L383
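The distinctive part of the optimizer above is the per-parameter adaptive gain; a self-contained toy run of that update rule on f(p) = 0.5 * ||p||^2 (whose gradient is p) shows the mechanics without calling the routine itself, with step sizes chosen arbitrarily for the illustration:

import numpy as np

p = np.array([3.0, -2.0])
update = np.zeros_like(p)
gains = np.ones_like(p)
learning_rate, momentum, min_gain = 0.1, 0.5, 0.01

for _ in range(200):
    grad = p.copy()                     # gradient of 0.5 * ||p||^2
    inc = update * grad < 0.0           # still descending: grad opposes the last update
    gains[inc] += 0.2                   # grow the gain for those parameters
    gains[~inc] *= 0.8                  # sign flip suggests overshoot: shrink the gain
    np.clip(gains, min_gain, np.inf, out=gains)
    update = momentum * update - learning_rate * gains * grad
    p += update

print(p)   # far closer to the origin than the starting point [3, -2]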
brianhie/scanorama
scanorama/t_sne_approx.py
trustworthiness
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False): """Expresses to what extent the local structure is retained. The trustworthiness is within [0, 1]. It is defined as .. math:: T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1} \sum_{j \in U^{(k)}_i} (r(i, j) - k) where :math:`r(i, j)` is the rank of the embedded datapoint j according to the pairwise distances between the embedded datapoints, :math:`U^{(k)}_i` is the set of points that are in the k nearest neighbors in the embedded space but not in the original space. * "Neighborhood Preservation in Nonlinear Projection Methods: An Experimental Study" J. Venna, S. Kaski * "Learning a Parametric Embedding by Preserving Local Structure" L.J.P. van der Maaten Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. X_embedded : array, shape (n_samples, n_components) Embedding of the training data in low-dimensional space. n_neighbors : int, optional (default: 5) Number of neighbors k that will be considered. precomputed : bool, optional (default: False) Set this flag if X is a precomputed square distance matrix. Returns ------- trustworthiness : float Trustworthiness of the low-dimensional embedding. """ if precomputed: dist_X = X else: dist_X = pairwise_distances(X, squared=True) dist_X_embedded = pairwise_distances(X_embedded, squared=True) ind_X = np.argsort(dist_X, axis=1) ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1] n_samples = X.shape[0] t = 0.0 ranks = np.zeros(n_neighbors) for i in range(n_samples): for j in range(n_neighbors): ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0] ranks -= n_neighbors t += np.sum(ranks[ranks > 0]) t = 1.0 - t * (2.0 / (n_samples * n_neighbors * (2.0 * n_samples - 3.0 * n_neighbors - 1.0))) return t
python
def trustworthiness(X, X_embedded, n_neighbors=5, precomputed=False): """Expresses to what extent the local structure is retained. The trustworthiness is within [0, 1]. It is defined as .. math:: T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1} \sum_{j \in U^{(k)}_i} (r(i, j) - k) where :math:`r(i, j)` is the rank of the embedded datapoint j according to the pairwise distances between the embedded datapoints, :math:`U^{(k)}_i` is the set of points that are in the k nearest neighbors in the embedded space but not in the original space. * "Neighborhood Preservation in Nonlinear Projection Methods: An Experimental Study" J. Venna, S. Kaski * "Learning a Parametric Embedding by Preserving Local Structure" L.J.P. van der Maaten Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. X_embedded : array, shape (n_samples, n_components) Embedding of the training data in low-dimensional space. n_neighbors : int, optional (default: 5) Number of neighbors k that will be considered. precomputed : bool, optional (default: False) Set this flag if X is a precomputed square distance matrix. Returns ------- trustworthiness : float Trustworthiness of the low-dimensional embedding. """ if precomputed: dist_X = X else: dist_X = pairwise_distances(X, squared=True) dist_X_embedded = pairwise_distances(X_embedded, squared=True) ind_X = np.argsort(dist_X, axis=1) ind_X_embedded = np.argsort(dist_X_embedded, axis=1)[:, 1:n_neighbors + 1] n_samples = X.shape[0] t = 0.0 ranks = np.zeros(n_neighbors) for i in range(n_samples): for j in range(n_neighbors): ranks[j] = np.where(ind_X[i] == ind_X_embedded[i, j])[0][0] ranks -= n_neighbors t += np.sum(ranks[ranks > 0]) t = 1.0 - t * (2.0 / (n_samples * n_neighbors * (2.0 * n_samples - 3.0 * n_neighbors - 1.0))) return t
[ "def", "trustworthiness", "(", "X", ",", "X_embedded", ",", "n_neighbors", "=", "5", ",", "precomputed", "=", "False", ")", ":", "if", "precomputed", ":", "dist_X", "=", "X", "else", ":", "dist_X", "=", "pairwise_distances", "(", "X", ",", "squared", "=", "True", ")", "dist_X_embedded", "=", "pairwise_distances", "(", "X_embedded", ",", "squared", "=", "True", ")", "ind_X", "=", "np", ".", "argsort", "(", "dist_X", ",", "axis", "=", "1", ")", "ind_X_embedded", "=", "np", ".", "argsort", "(", "dist_X_embedded", ",", "axis", "=", "1", ")", "[", ":", ",", "1", ":", "n_neighbors", "+", "1", "]", "n_samples", "=", "X", ".", "shape", "[", "0", "]", "t", "=", "0.0", "ranks", "=", "np", ".", "zeros", "(", "n_neighbors", ")", "for", "i", "in", "range", "(", "n_samples", ")", ":", "for", "j", "in", "range", "(", "n_neighbors", ")", ":", "ranks", "[", "j", "]", "=", "np", ".", "where", "(", "ind_X", "[", "i", "]", "==", "ind_X_embedded", "[", "i", ",", "j", "]", ")", "[", "0", "]", "[", "0", "]", "ranks", "-=", "n_neighbors", "t", "+=", "np", ".", "sum", "(", "ranks", "[", "ranks", ">", "0", "]", ")", "t", "=", "1.0", "-", "t", "*", "(", "2.0", "/", "(", "n_samples", "*", "n_neighbors", "*", "(", "2.0", "*", "n_samples", "-", "3.0", "*", "n_neighbors", "-", "1.0", ")", ")", ")", "return", "t" ]
Expresses to what extent the local structure is retained. The trustworthiness is within [0, 1]. It is defined as .. math:: T(k) = 1 - \frac{2}{nk (2n - 3k - 1)} \sum^n_{i=1} \sum_{j \in U^{(k)}_i} (r(i, j) - k) where :math:`r(i, j)` is the rank of the embedded datapoint j according to the pairwise distances between the embedded datapoints, :math:`U^{(k)}_i` is the set of points that are in the k nearest neighbors in the embedded space but not in the original space. * "Neighborhood Preservation in Nonlinear Projection Methods: An Experimental Study" J. Venna, S. Kaski * "Learning a Parametric Embedding by Preserving Local Structure" L.J.P. van der Maaten Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. X_embedded : array, shape (n_samples, n_components) Embedding of the training data in low-dimensional space. n_neighbors : int, optional (default: 5) Number of neighbors k that will be considered. precomputed : bool, optional (default: False) Set this flag if X is a precomputed square distance matrix. Returns ------- trustworthiness : float Trustworthiness of the low-dimensional embedding.
[ "Expresses", "to", "what", "extent", "the", "local", "structure", "is", "retained", "." ]
train
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/t_sne_approx.py#L386-L445
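A usage sketch for the metric above: embed some synthetic 10-D data with PCA and score how well local neighbourhoods survive. The snippet calls scikit-learn's equivalent public helper, sklearn.manifold.trustworthiness, rather than the copy in this record, and the data is made up for illustration:

import numpy as np
from sklearn.decomposition import PCA
from sklearn.manifold import trustworthiness

rng = np.random.RandomState(0)
X = rng.randn(100, 10)                                  # synthetic high-dimensional data
X_embedded = PCA(n_components=2).fit_transform(X)

# 1.0 would mean every embedded k-neighbourhood contains only true input-space neighbours.
print(trustworthiness(X, X_embedded, n_neighbors=5))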
brianhie/scanorama
scanorama/t_sne_approx.py
TSNEApprox._fit
def _fit(self, X, skip_num_points=0): """Fit the model using X as training data. Note that sparse arrays can only be handled by method='exact'. It is recommended that you convert your sparse array to dense (e.g. `X.toarray()`) if it fits in memory, or otherwise using a dimensionality reduction technique (e.g. TruncatedSVD). Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. Note that this when method='barnes_hut', X cannot be a sparse array and if need be will be converted to a 32 bit float array. Method='exact' allows sparse arrays and 64bit floating point inputs. skip_num_points : int (optional, default:0) This does not compute the gradient for points with indices below `skip_num_points`. This is useful when computing transforms of new data where you'd like to keep the old data fixed. """ if self.method not in ['barnes_hut', 'exact']: raise ValueError("'method' must be 'barnes_hut' or 'exact'") if self.angle < 0.0 or self.angle > 1.0: raise ValueError("'angle' must be between 0.0 - 1.0") if self.metric == "precomputed": if isinstance(self.init, string_types) and self.init == 'pca': raise ValueError("The parameter init=\"pca\" cannot be " "used with metric=\"precomputed\".") if X.shape[0] != X.shape[1]: raise ValueError("X should be a square distance matrix") if np.any(X < 0): raise ValueError("All distances should be positive, the " "precomputed distances given as X is not " "correct") if self.method == 'barnes_hut' and sp.issparse(X): raise TypeError('A sparse matrix was passed, but dense ' 'data is required for method="barnes_hut". Use ' 'X.toarray() to convert to a dense numpy array if ' 'the array is small enough for it to fit in ' 'memory. Otherwise consider dimensionality ' 'reduction techniques (e.g. TruncatedSVD)') else: X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=[np.float32, np.float64]) if self.method == 'barnes_hut' and self.n_components > 3: raise ValueError("'n_components' should be inferior to 4 for the " "barnes_hut algorithm as it relies on " "quad-tree or oct-tree.") random_state = check_random_state(self.random_state) if self.early_exaggeration < 1.0: raise ValueError("early_exaggeration must be at least 1, but is {}" .format(self.early_exaggeration)) if self.n_iter < 250: raise ValueError("n_iter should be at least 250") n_samples = X.shape[0] neighbors_nn = None if self.method == "exact": # Retrieve the distance matrix, either using the precomputed one or # computing it. if self.metric == "precomputed": distances = X else: if self.verbose: print("[t-SNE] Computing pairwise distances...") if self.metric == "euclidean": distances = pairwise_distances(X, metric=self.metric, squared=True) else: distances = pairwise_distances(X, metric=self.metric) if np.any(distances < 0): raise ValueError("All distances should be positive, the " "metric given is not correct") # compute the joint probability distribution for the input space P = _joint_probabilities(distances, self.perplexity, self.verbose) assert np.all(np.isfinite(P)), "All probabilities should be finite" assert np.all(P >= 0), "All probabilities should be non-negative" assert np.all(P <= 1), ("All probabilities should be less " "or then equal to one") else: # Cpmpute the number of nearest neighbors to find. # LvdM uses 3 * perplexity as the number of neighbors. # In the event that we have very small # of points # set the neighbors to n - 1. 
k = min(n_samples - 1, int(3. * self.perplexity + 1)) if self.verbose: print("[t-SNE] Computing {} nearest neighbors...".format(k)) # Find the nearest neighbors for every point neighbors_method = 'ball_tree' if (self.metric == 'precomputed'): neighbors_method = 'brute' knn = AnnoyIndex(X.shape[1], metric='euclidean') t0 = time() for i in range(n_samples): knn.add_item(i, X[i, :]) knn.build(50) duration = time() - t0 if self.verbose: print("[t-SNE] Indexed {} samples in {:.3f}s...".format( n_samples, duration)) t0 = time() neighbors_nn = np.zeros((n_samples, k), dtype=int) distances_nn = np.zeros((n_samples, k)) for i in range(n_samples): (neighbors_nn[i, :], distances_nn[i, :]) = knn.get_nns_by_vector( X[i, :], k, include_distances=True ) duration = time() - t0 if self.verbose: print("[t-SNE] Computed neighbors for {} samples in {:.3f}s..." .format(n_samples, duration)) # Free the memory used by the ball_tree del knn if self.metric == "euclidean": # knn return the euclidean distance but we need it squared # to be consistent with the 'exact' method. Note that the # the method was derived using the euclidean method as in the # input space. Not sure of the implication of using a different # metric. distances_nn **= 2 # compute the joint probability distribution for the input space P = _joint_probabilities_nn(distances_nn, neighbors_nn, self.perplexity, self.verbose) if isinstance(self.init, np.ndarray): X_embedded = self.init elif self.init == 'pca': pca = PCA(n_components=self.n_components, svd_solver='randomized', random_state=random_state) X_embedded = pca.fit_transform(X).astype(np.float32, copy=False) elif self.init == 'random': # The embedding is initialized with iid samples from Gaussians with # standard deviation 1e-4. X_embedded = 1e-4 * random_state.randn( n_samples, self.n_components).astype(np.float32) else: raise ValueError("'init' must be 'pca', 'random', or " "a numpy array") # Degrees of freedom of the Student's t-distribution. The suggestion # degrees_of_freedom = n_components - 1 comes from # "Learning a Parametric Embedding by Preserving Local Structure" # Laurens van der Maaten, 2009. degrees_of_freedom = max(self.n_components - 1.0, 1) return self._tsne(P, degrees_of_freedom, n_samples, random_state, X_embedded=X_embedded, neighbors=neighbors_nn, skip_num_points=skip_num_points)
python
def _fit(self, X, skip_num_points=0): """Fit the model using X as training data. Note that sparse arrays can only be handled by method='exact'. It is recommended that you convert your sparse array to dense (e.g. `X.toarray()`) if it fits in memory, or otherwise using a dimensionality reduction technique (e.g. TruncatedSVD). Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. Note that this when method='barnes_hut', X cannot be a sparse array and if need be will be converted to a 32 bit float array. Method='exact' allows sparse arrays and 64bit floating point inputs. skip_num_points : int (optional, default:0) This does not compute the gradient for points with indices below `skip_num_points`. This is useful when computing transforms of new data where you'd like to keep the old data fixed. """ if self.method not in ['barnes_hut', 'exact']: raise ValueError("'method' must be 'barnes_hut' or 'exact'") if self.angle < 0.0 or self.angle > 1.0: raise ValueError("'angle' must be between 0.0 - 1.0") if self.metric == "precomputed": if isinstance(self.init, string_types) and self.init == 'pca': raise ValueError("The parameter init=\"pca\" cannot be " "used with metric=\"precomputed\".") if X.shape[0] != X.shape[1]: raise ValueError("X should be a square distance matrix") if np.any(X < 0): raise ValueError("All distances should be positive, the " "precomputed distances given as X is not " "correct") if self.method == 'barnes_hut' and sp.issparse(X): raise TypeError('A sparse matrix was passed, but dense ' 'data is required for method="barnes_hut". Use ' 'X.toarray() to convert to a dense numpy array if ' 'the array is small enough for it to fit in ' 'memory. Otherwise consider dimensionality ' 'reduction techniques (e.g. TruncatedSVD)') else: X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=[np.float32, np.float64]) if self.method == 'barnes_hut' and self.n_components > 3: raise ValueError("'n_components' should be inferior to 4 for the " "barnes_hut algorithm as it relies on " "quad-tree or oct-tree.") random_state = check_random_state(self.random_state) if self.early_exaggeration < 1.0: raise ValueError("early_exaggeration must be at least 1, but is {}" .format(self.early_exaggeration)) if self.n_iter < 250: raise ValueError("n_iter should be at least 250") n_samples = X.shape[0] neighbors_nn = None if self.method == "exact": # Retrieve the distance matrix, either using the precomputed one or # computing it. if self.metric == "precomputed": distances = X else: if self.verbose: print("[t-SNE] Computing pairwise distances...") if self.metric == "euclidean": distances = pairwise_distances(X, metric=self.metric, squared=True) else: distances = pairwise_distances(X, metric=self.metric) if np.any(distances < 0): raise ValueError("All distances should be positive, the " "metric given is not correct") # compute the joint probability distribution for the input space P = _joint_probabilities(distances, self.perplexity, self.verbose) assert np.all(np.isfinite(P)), "All probabilities should be finite" assert np.all(P >= 0), "All probabilities should be non-negative" assert np.all(P <= 1), ("All probabilities should be less " "or then equal to one") else: # Cpmpute the number of nearest neighbors to find. # LvdM uses 3 * perplexity as the number of neighbors. # In the event that we have very small # of points # set the neighbors to n - 1. 
k = min(n_samples - 1, int(3. * self.perplexity + 1)) if self.verbose: print("[t-SNE] Computing {} nearest neighbors...".format(k)) # Find the nearest neighbors for every point neighbors_method = 'ball_tree' if (self.metric == 'precomputed'): neighbors_method = 'brute' knn = AnnoyIndex(X.shape[1], metric='euclidean') t0 = time() for i in range(n_samples): knn.add_item(i, X[i, :]) knn.build(50) duration = time() - t0 if self.verbose: print("[t-SNE] Indexed {} samples in {:.3f}s...".format( n_samples, duration)) t0 = time() neighbors_nn = np.zeros((n_samples, k), dtype=int) distances_nn = np.zeros((n_samples, k)) for i in range(n_samples): (neighbors_nn[i, :], distances_nn[i, :]) = knn.get_nns_by_vector( X[i, :], k, include_distances=True ) duration = time() - t0 if self.verbose: print("[t-SNE] Computed neighbors for {} samples in {:.3f}s..." .format(n_samples, duration)) # Free the memory used by the ball_tree del knn if self.metric == "euclidean": # knn return the euclidean distance but we need it squared # to be consistent with the 'exact' method. Note that the # the method was derived using the euclidean method as in the # input space. Not sure of the implication of using a different # metric. distances_nn **= 2 # compute the joint probability distribution for the input space P = _joint_probabilities_nn(distances_nn, neighbors_nn, self.perplexity, self.verbose) if isinstance(self.init, np.ndarray): X_embedded = self.init elif self.init == 'pca': pca = PCA(n_components=self.n_components, svd_solver='randomized', random_state=random_state) X_embedded = pca.fit_transform(X).astype(np.float32, copy=False) elif self.init == 'random': # The embedding is initialized with iid samples from Gaussians with # standard deviation 1e-4. X_embedded = 1e-4 * random_state.randn( n_samples, self.n_components).astype(np.float32) else: raise ValueError("'init' must be 'pca', 'random', or " "a numpy array") # Degrees of freedom of the Student's t-distribution. The suggestion # degrees_of_freedom = n_components - 1 comes from # "Learning a Parametric Embedding by Preserving Local Structure" # Laurens van der Maaten, 2009. degrees_of_freedom = max(self.n_components - 1.0, 1) return self._tsne(P, degrees_of_freedom, n_samples, random_state, X_embedded=X_embedded, neighbors=neighbors_nn, skip_num_points=skip_num_points)
[ "def", "_fit", "(", "self", ",", "X", ",", "skip_num_points", "=", "0", ")", ":", "if", "self", ".", "method", "not", "in", "[", "'barnes_hut'", ",", "'exact'", "]", ":", "raise", "ValueError", "(", "\"'method' must be 'barnes_hut' or 'exact'\"", ")", "if", "self", ".", "angle", "<", "0.0", "or", "self", ".", "angle", ">", "1.0", ":", "raise", "ValueError", "(", "\"'angle' must be between 0.0 - 1.0\"", ")", "if", "self", ".", "metric", "==", "\"precomputed\"", ":", "if", "isinstance", "(", "self", ".", "init", ",", "string_types", ")", "and", "self", ".", "init", "==", "'pca'", ":", "raise", "ValueError", "(", "\"The parameter init=\\\"pca\\\" cannot be \"", "\"used with metric=\\\"precomputed\\\".\"", ")", "if", "X", ".", "shape", "[", "0", "]", "!=", "X", ".", "shape", "[", "1", "]", ":", "raise", "ValueError", "(", "\"X should be a square distance matrix\"", ")", "if", "np", ".", "any", "(", "X", "<", "0", ")", ":", "raise", "ValueError", "(", "\"All distances should be positive, the \"", "\"precomputed distances given as X is not \"", "\"correct\"", ")", "if", "self", ".", "method", "==", "'barnes_hut'", "and", "sp", ".", "issparse", "(", "X", ")", ":", "raise", "TypeError", "(", "'A sparse matrix was passed, but dense '", "'data is required for method=\"barnes_hut\". Use '", "'X.toarray() to convert to a dense numpy array if '", "'the array is small enough for it to fit in '", "'memory. Otherwise consider dimensionality '", "'reduction techniques (e.g. TruncatedSVD)'", ")", "else", ":", "X", "=", "check_array", "(", "X", ",", "accept_sparse", "=", "[", "'csr'", ",", "'csc'", ",", "'coo'", "]", ",", "dtype", "=", "[", "np", ".", "float32", ",", "np", ".", "float64", "]", ")", "if", "self", ".", "method", "==", "'barnes_hut'", "and", "self", ".", "n_components", ">", "3", ":", "raise", "ValueError", "(", "\"'n_components' should be inferior to 4 for the \"", "\"barnes_hut algorithm as it relies on \"", "\"quad-tree or oct-tree.\"", ")", "random_state", "=", "check_random_state", "(", "self", ".", "random_state", ")", "if", "self", ".", "early_exaggeration", "<", "1.0", ":", "raise", "ValueError", "(", "\"early_exaggeration must be at least 1, but is {}\"", ".", "format", "(", "self", ".", "early_exaggeration", ")", ")", "if", "self", ".", "n_iter", "<", "250", ":", "raise", "ValueError", "(", "\"n_iter should be at least 250\"", ")", "n_samples", "=", "X", ".", "shape", "[", "0", "]", "neighbors_nn", "=", "None", "if", "self", ".", "method", "==", "\"exact\"", ":", "# Retrieve the distance matrix, either using the precomputed one or", "# computing it.", "if", "self", ".", "metric", "==", "\"precomputed\"", ":", "distances", "=", "X", "else", ":", "if", "self", ".", "verbose", ":", "print", "(", "\"[t-SNE] Computing pairwise distances...\"", ")", "if", "self", ".", "metric", "==", "\"euclidean\"", ":", "distances", "=", "pairwise_distances", "(", "X", ",", "metric", "=", "self", ".", "metric", ",", "squared", "=", "True", ")", "else", ":", "distances", "=", "pairwise_distances", "(", "X", ",", "metric", "=", "self", ".", "metric", ")", "if", "np", ".", "any", "(", "distances", "<", "0", ")", ":", "raise", "ValueError", "(", "\"All distances should be positive, the \"", "\"metric given is not correct\"", ")", "# compute the joint probability distribution for the input space", "P", "=", "_joint_probabilities", "(", "distances", ",", "self", ".", "perplexity", ",", "self", ".", "verbose", ")", "assert", "np", ".", "all", "(", "np", ".", "isfinite", "(", "P", ")", ")", ",", "\"All 
probabilities should be finite\"", "assert", "np", ".", "all", "(", "P", ">=", "0", ")", ",", "\"All probabilities should be non-negative\"", "assert", "np", ".", "all", "(", "P", "<=", "1", ")", ",", "(", "\"All probabilities should be less \"", "\"or then equal to one\"", ")", "else", ":", "# Cpmpute the number of nearest neighbors to find.", "# LvdM uses 3 * perplexity as the number of neighbors.", "# In the event that we have very small # of points", "# set the neighbors to n - 1.", "k", "=", "min", "(", "n_samples", "-", "1", ",", "int", "(", "3.", "*", "self", ".", "perplexity", "+", "1", ")", ")", "if", "self", ".", "verbose", ":", "print", "(", "\"[t-SNE] Computing {} nearest neighbors...\"", ".", "format", "(", "k", ")", ")", "# Find the nearest neighbors for every point", "neighbors_method", "=", "'ball_tree'", "if", "(", "self", ".", "metric", "==", "'precomputed'", ")", ":", "neighbors_method", "=", "'brute'", "knn", "=", "AnnoyIndex", "(", "X", ".", "shape", "[", "1", "]", ",", "metric", "=", "'euclidean'", ")", "t0", "=", "time", "(", ")", "for", "i", "in", "range", "(", "n_samples", ")", ":", "knn", ".", "add_item", "(", "i", ",", "X", "[", "i", ",", ":", "]", ")", "knn", ".", "build", "(", "50", ")", "duration", "=", "time", "(", ")", "-", "t0", "if", "self", ".", "verbose", ":", "print", "(", "\"[t-SNE] Indexed {} samples in {:.3f}s...\"", ".", "format", "(", "n_samples", ",", "duration", ")", ")", "t0", "=", "time", "(", ")", "neighbors_nn", "=", "np", ".", "zeros", "(", "(", "n_samples", ",", "k", ")", ",", "dtype", "=", "int", ")", "distances_nn", "=", "np", ".", "zeros", "(", "(", "n_samples", ",", "k", ")", ")", "for", "i", "in", "range", "(", "n_samples", ")", ":", "(", "neighbors_nn", "[", "i", ",", ":", "]", ",", "distances_nn", "[", "i", ",", ":", "]", ")", "=", "knn", ".", "get_nns_by_vector", "(", "X", "[", "i", ",", ":", "]", ",", "k", ",", "include_distances", "=", "True", ")", "duration", "=", "time", "(", ")", "-", "t0", "if", "self", ".", "verbose", ":", "print", "(", "\"[t-SNE] Computed neighbors for {} samples in {:.3f}s...\"", ".", "format", "(", "n_samples", ",", "duration", ")", ")", "# Free the memory used by the ball_tree", "del", "knn", "if", "self", ".", "metric", "==", "\"euclidean\"", ":", "# knn return the euclidean distance but we need it squared", "# to be consistent with the 'exact' method. Note that the", "# the method was derived using the euclidean method as in the", "# input space. 
Not sure of the implication of using a different", "# metric.", "distances_nn", "**=", "2", "# compute the joint probability distribution for the input space", "P", "=", "_joint_probabilities_nn", "(", "distances_nn", ",", "neighbors_nn", ",", "self", ".", "perplexity", ",", "self", ".", "verbose", ")", "if", "isinstance", "(", "self", ".", "init", ",", "np", ".", "ndarray", ")", ":", "X_embedded", "=", "self", ".", "init", "elif", "self", ".", "init", "==", "'pca'", ":", "pca", "=", "PCA", "(", "n_components", "=", "self", ".", "n_components", ",", "svd_solver", "=", "'randomized'", ",", "random_state", "=", "random_state", ")", "X_embedded", "=", "pca", ".", "fit_transform", "(", "X", ")", ".", "astype", "(", "np", ".", "float32", ",", "copy", "=", "False", ")", "elif", "self", ".", "init", "==", "'random'", ":", "# The embedding is initialized with iid samples from Gaussians with", "# standard deviation 1e-4.", "X_embedded", "=", "1e-4", "*", "random_state", ".", "randn", "(", "n_samples", ",", "self", ".", "n_components", ")", ".", "astype", "(", "np", ".", "float32", ")", "else", ":", "raise", "ValueError", "(", "\"'init' must be 'pca', 'random', or \"", "\"a numpy array\"", ")", "# Degrees of freedom of the Student's t-distribution. The suggestion", "# degrees_of_freedom = n_components - 1 comes from", "# \"Learning a Parametric Embedding by Preserving Local Structure\"", "# Laurens van der Maaten, 2009.", "degrees_of_freedom", "=", "max", "(", "self", ".", "n_components", "-", "1.0", ",", "1", ")", "return", "self", ".", "_tsne", "(", "P", ",", "degrees_of_freedom", ",", "n_samples", ",", "random_state", ",", "X_embedded", "=", "X_embedded", ",", "neighbors", "=", "neighbors_nn", ",", "skip_num_points", "=", "skip_num_points", ")" ]
Fit the model using X as training data. Note that sparse arrays can only be handled by method='exact'. It is recommended that you convert your sparse array to dense (e.g. `X.toarray()`) if it fits in memory, or otherwise use a dimensionality reduction technique (e.g. TruncatedSVD). Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. Note that when method='barnes_hut', X cannot be a sparse array and if need be will be converted to a 32 bit float array. Method='exact' allows sparse arrays and 64bit floating point inputs. skip_num_points : int (optional, default:0) This does not compute the gradient for points with indices below `skip_num_points`. This is useful when computing transforms of new data where you'd like to keep the old data fixed.
[ "Fit", "the", "model", "using", "X", "as", "training", "data", "." ]
train
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/t_sne_approx.py#L621-L784
brianhie/scanorama
scanorama/t_sne_approx.py
TSNEApprox._tsne
def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded, neighbors=None, skip_num_points=0): """Runs t-SNE.""" # t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P # and the Student's t-distributions Q. The optimization algorithm that # we use is batch gradient descent with two stages: # * initial optimization with early exaggeration and momentum at 0.5 # * final optimization with momentum at 0.8 params = X_embedded.ravel() opt_args = { "it": 0, "n_iter_check": self._N_ITER_CHECK, "min_grad_norm": self.min_grad_norm, "learning_rate": self.learning_rate, "verbose": self.verbose, "kwargs": dict(skip_num_points=skip_num_points), "args": [P, degrees_of_freedom, n_samples, self.n_components], "n_iter_without_progress": self._EXPLORATION_N_ITER, "n_iter": self._EXPLORATION_N_ITER, "momentum": 0.5, } if self.method == 'barnes_hut': obj_func = _kl_divergence_bh opt_args['kwargs']['angle'] = self.angle # Repeat verbose argument for _kl_divergence_bh opt_args['kwargs']['verbose'] = self.verbose else: obj_func = _kl_divergence # Learning schedule (part 1): do 250 iteration with lower momentum but # higher learning rate controlled via the early exageration parameter P *= self.early_exaggeration params, kl_divergence, it = _gradient_descent(obj_func, params, **opt_args) if self.verbose: print("[t-SNE] KL divergence after %d iterations with early " "exaggeration: %f" % (it + 1, kl_divergence)) # Learning schedule (part 2): disable early exaggeration and finish # optimization with a higher momentum at 0.8 P /= self.early_exaggeration remaining = self.n_iter - self._EXPLORATION_N_ITER if it < self._EXPLORATION_N_ITER or remaining > 0: opt_args['n_iter'] = self.n_iter opt_args['it'] = it + 1 opt_args['momentum'] = 0.8 opt_args['n_iter_without_progress'] = self.n_iter_without_progress params, kl_divergence, it = _gradient_descent(obj_func, params, **opt_args) # Save the final number of iterations self.n_iter_ = it if self.verbose: print("[t-SNE] Error after %d iterations: %f" % (it + 1, kl_divergence)) X_embedded = params.reshape(n_samples, self.n_components) self.kl_divergence_ = kl_divergence return X_embedded
python
def _tsne(self, P, degrees_of_freedom, n_samples, random_state, X_embedded, neighbors=None, skip_num_points=0): """Runs t-SNE.""" # t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P # and the Student's t-distributions Q. The optimization algorithm that # we use is batch gradient descent with two stages: # * initial optimization with early exaggeration and momentum at 0.5 # * final optimization with momentum at 0.8 params = X_embedded.ravel() opt_args = { "it": 0, "n_iter_check": self._N_ITER_CHECK, "min_grad_norm": self.min_grad_norm, "learning_rate": self.learning_rate, "verbose": self.verbose, "kwargs": dict(skip_num_points=skip_num_points), "args": [P, degrees_of_freedom, n_samples, self.n_components], "n_iter_without_progress": self._EXPLORATION_N_ITER, "n_iter": self._EXPLORATION_N_ITER, "momentum": 0.5, } if self.method == 'barnes_hut': obj_func = _kl_divergence_bh opt_args['kwargs']['angle'] = self.angle # Repeat verbose argument for _kl_divergence_bh opt_args['kwargs']['verbose'] = self.verbose else: obj_func = _kl_divergence # Learning schedule (part 1): do 250 iteration with lower momentum but # higher learning rate controlled via the early exageration parameter P *= self.early_exaggeration params, kl_divergence, it = _gradient_descent(obj_func, params, **opt_args) if self.verbose: print("[t-SNE] KL divergence after %d iterations with early " "exaggeration: %f" % (it + 1, kl_divergence)) # Learning schedule (part 2): disable early exaggeration and finish # optimization with a higher momentum at 0.8 P /= self.early_exaggeration remaining = self.n_iter - self._EXPLORATION_N_ITER if it < self._EXPLORATION_N_ITER or remaining > 0: opt_args['n_iter'] = self.n_iter opt_args['it'] = it + 1 opt_args['momentum'] = 0.8 opt_args['n_iter_without_progress'] = self.n_iter_without_progress params, kl_divergence, it = _gradient_descent(obj_func, params, **opt_args) # Save the final number of iterations self.n_iter_ = it if self.verbose: print("[t-SNE] Error after %d iterations: %f" % (it + 1, kl_divergence)) X_embedded = params.reshape(n_samples, self.n_components) self.kl_divergence_ = kl_divergence return X_embedded
[ "def", "_tsne", "(", "self", ",", "P", ",", "degrees_of_freedom", ",", "n_samples", ",", "random_state", ",", "X_embedded", ",", "neighbors", "=", "None", ",", "skip_num_points", "=", "0", ")", ":", "# t-SNE minimizes the Kullback-Leiber divergence of the Gaussians P", "# and the Student's t-distributions Q. The optimization algorithm that", "# we use is batch gradient descent with two stages:", "# * initial optimization with early exaggeration and momentum at 0.5", "# * final optimization with momentum at 0.8", "params", "=", "X_embedded", ".", "ravel", "(", ")", "opt_args", "=", "{", "\"it\"", ":", "0", ",", "\"n_iter_check\"", ":", "self", ".", "_N_ITER_CHECK", ",", "\"min_grad_norm\"", ":", "self", ".", "min_grad_norm", ",", "\"learning_rate\"", ":", "self", ".", "learning_rate", ",", "\"verbose\"", ":", "self", ".", "verbose", ",", "\"kwargs\"", ":", "dict", "(", "skip_num_points", "=", "skip_num_points", ")", ",", "\"args\"", ":", "[", "P", ",", "degrees_of_freedom", ",", "n_samples", ",", "self", ".", "n_components", "]", ",", "\"n_iter_without_progress\"", ":", "self", ".", "_EXPLORATION_N_ITER", ",", "\"n_iter\"", ":", "self", ".", "_EXPLORATION_N_ITER", ",", "\"momentum\"", ":", "0.5", ",", "}", "if", "self", ".", "method", "==", "'barnes_hut'", ":", "obj_func", "=", "_kl_divergence_bh", "opt_args", "[", "'kwargs'", "]", "[", "'angle'", "]", "=", "self", ".", "angle", "# Repeat verbose argument for _kl_divergence_bh", "opt_args", "[", "'kwargs'", "]", "[", "'verbose'", "]", "=", "self", ".", "verbose", "else", ":", "obj_func", "=", "_kl_divergence", "# Learning schedule (part 1): do 250 iteration with lower momentum but", "# higher learning rate controlled via the early exageration parameter", "P", "*=", "self", ".", "early_exaggeration", "params", ",", "kl_divergence", ",", "it", "=", "_gradient_descent", "(", "obj_func", ",", "params", ",", "*", "*", "opt_args", ")", "if", "self", ".", "verbose", ":", "print", "(", "\"[t-SNE] KL divergence after %d iterations with early \"", "\"exaggeration: %f\"", "%", "(", "it", "+", "1", ",", "kl_divergence", ")", ")", "# Learning schedule (part 2): disable early exaggeration and finish", "# optimization with a higher momentum at 0.8", "P", "/=", "self", ".", "early_exaggeration", "remaining", "=", "self", ".", "n_iter", "-", "self", ".", "_EXPLORATION_N_ITER", "if", "it", "<", "self", ".", "_EXPLORATION_N_ITER", "or", "remaining", ">", "0", ":", "opt_args", "[", "'n_iter'", "]", "=", "self", ".", "n_iter", "opt_args", "[", "'it'", "]", "=", "it", "+", "1", "opt_args", "[", "'momentum'", "]", "=", "0.8", "opt_args", "[", "'n_iter_without_progress'", "]", "=", "self", ".", "n_iter_without_progress", "params", ",", "kl_divergence", ",", "it", "=", "_gradient_descent", "(", "obj_func", ",", "params", ",", "*", "*", "opt_args", ")", "# Save the final number of iterations", "self", ".", "n_iter_", "=", "it", "if", "self", ".", "verbose", ":", "print", "(", "\"[t-SNE] Error after %d iterations: %f\"", "%", "(", "it", "+", "1", ",", "kl_divergence", ")", ")", "X_embedded", "=", "params", ".", "reshape", "(", "n_samples", ",", "self", ".", "n_components", ")", "self", ".", "kl_divergence_", "=", "kl_divergence", "return", "X_embedded" ]
Runs t-SNE.
[ "Runs", "t", "-", "SNE", "." ]
train
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/t_sne_approx.py#L792-L853
brianhie/scanorama
scanorama/t_sne_approx.py
TSNEApprox.fit_transform
def fit_transform(self, X, y=None): """Fit X into an embedded space and return that transformed output. Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. Returns ------- X_new : array, shape (n_samples, n_components) Embedding of the training data in low-dimensional space. """ embedding = self._fit(X) self.embedding_ = embedding return self.embedding_
python
def fit_transform(self, X, y=None): """Fit X into an embedded space and return that transformed output. Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. Returns ------- X_new : array, shape (n_samples, n_components) Embedding of the training data in low-dimensional space. """ embedding = self._fit(X) self.embedding_ = embedding return self.embedding_
[ "def", "fit_transform", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "embedding", "=", "self", ".", "_fit", "(", "X", ")", "self", ".", "embedding_", "=", "embedding", "return", "self", ".", "embedding_" ]
Fit X into an embedded space and return that transformed output. Parameters ---------- X : array, shape (n_samples, n_features) or (n_samples, n_samples) If the metric is 'precomputed' X must be a square distance matrix. Otherwise it contains a sample per row. Returns ------- X_new : array, shape (n_samples, n_components) Embedding of the training data in low-dimensional space.
[ "Fit", "X", "into", "an", "embedded", "space", "and", "return", "that", "transformed", "output", "." ]
train
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/t_sne_approx.py#L855-L872
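A minimal usage sketch for the fit_transform method above. The import path is taken from the record's file path; the constructor keyword arguments are assumed to mirror scikit-learn's TSNE, and the input matrix is random data invented for illustration.
import numpy as np
from scanorama.t_sne_approx import TSNEApprox  # import path assumed from the module above

# Stand-in data: 500 samples in 50 dimensions (purely illustrative).
X = np.random.RandomState(0).randn(500, 50).astype(np.float32)

# Keyword arguments assumed to follow the scikit-learn TSNE convention.
tsne = TSNEApprox(n_components=2, perplexity=30.0, random_state=0)
embedding = tsne.fit_transform(X)
print(embedding.shape)  # expected: (500, 2)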
brianhie/scanorama
scanorama/scanorama.py
correct
def correct(datasets_full, genes_list, return_dimred=False, batch_size=BATCH_SIZE, verbose=VERBOSE, ds_names=None, dimred=DIMRED, approx=APPROX, sigma=SIGMA, alpha=ALPHA, knn=KNN, return_dense=False, hvg=None, union=False, geosketch=False, geosketch_max=20000): """Integrate and batch correct a list of data sets. Parameters ---------- datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray` Data sets to integrate and correct. genes_list: `list` of `list` of `string` List of genes for each data set. return_dimred: `bool`, optional (default: `False`) In addition to returning batch corrected matrices, also returns integrated low-dimesional embeddings. batch_size: `int`, optional (default: `5000`) The batch size used in the alignment vector computation. Useful when correcting very large (>100k samples) data sets. Set to large value that runs within available memory. verbose: `bool` or `int`, optional (default: 2) When `True` or not equal to 0, prints logging output. ds_names: `list` of `string`, optional When `verbose=True`, reports data set names in logging output. dimred: `int`, optional (default: 100) Dimensionality of integrated embedding. approx: `bool`, optional (default: `True`) Use approximate nearest neighbors, greatly speeds up matching runtime. sigma: `float`, optional (default: 15) Correction smoothing parameter on Gaussian kernel. alpha: `float`, optional (default: 0.10) Alignment score minimum cutoff. knn: `int`, optional (default: 20) Number of nearest neighbors to use for matching. return_dense: `bool`, optional (default: `False`) Return `numpy.ndarray` matrices instead of `scipy.sparse.csr_matrix`. hvg: `int`, optional (default: None) Use this number of top highly variable genes based on dispersion. Returns ------- corrected, genes By default (`return_dimred=False`), returns a two-tuple containing a list of `scipy.sparse.csr_matrix` each with batch corrected values, and a single list of genes containing the intersection of inputted genes. integrated, corrected, genes When `return_dimred=False`, returns a three-tuple containing a list of `numpy.ndarray` with integrated low dimensional embeddings, a list of `scipy.sparse.csr_matrix` each with batch corrected values, and a a single list of genes containing the intersection of inputted genes. """ datasets_full = check_datasets(datasets_full) datasets, genes = merge_datasets(datasets_full, genes_list, ds_names=ds_names, union=union) datasets_dimred, genes = process_data(datasets, genes, hvg=hvg, dimred=dimred) datasets_dimred = assemble( datasets_dimred, # Assemble in low dimensional space. expr_datasets=datasets, # Modified in place. verbose=verbose, knn=knn, sigma=sigma, approx=approx, alpha=alpha, ds_names=ds_names, batch_size=batch_size, geosketch=geosketch, geosketch_max=geosketch_max, ) if return_dense: datasets = [ ds.toarray() for ds in datasets ] if return_dimred: return datasets_dimred, datasets, genes return datasets, genes
python
def correct(datasets_full, genes_list, return_dimred=False, batch_size=BATCH_SIZE, verbose=VERBOSE, ds_names=None, dimred=DIMRED, approx=APPROX, sigma=SIGMA, alpha=ALPHA, knn=KNN, return_dense=False, hvg=None, union=False, geosketch=False, geosketch_max=20000): """Integrate and batch correct a list of data sets. Parameters ---------- datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray` Data sets to integrate and correct. genes_list: `list` of `list` of `string` List of genes for each data set. return_dimred: `bool`, optional (default: `False`) In addition to returning batch corrected matrices, also returns integrated low-dimesional embeddings. batch_size: `int`, optional (default: `5000`) The batch size used in the alignment vector computation. Useful when correcting very large (>100k samples) data sets. Set to large value that runs within available memory. verbose: `bool` or `int`, optional (default: 2) When `True` or not equal to 0, prints logging output. ds_names: `list` of `string`, optional When `verbose=True`, reports data set names in logging output. dimred: `int`, optional (default: 100) Dimensionality of integrated embedding. approx: `bool`, optional (default: `True`) Use approximate nearest neighbors, greatly speeds up matching runtime. sigma: `float`, optional (default: 15) Correction smoothing parameter on Gaussian kernel. alpha: `float`, optional (default: 0.10) Alignment score minimum cutoff. knn: `int`, optional (default: 20) Number of nearest neighbors to use for matching. return_dense: `bool`, optional (default: `False`) Return `numpy.ndarray` matrices instead of `scipy.sparse.csr_matrix`. hvg: `int`, optional (default: None) Use this number of top highly variable genes based on dispersion. Returns ------- corrected, genes By default (`return_dimred=False`), returns a two-tuple containing a list of `scipy.sparse.csr_matrix` each with batch corrected values, and a single list of genes containing the intersection of inputted genes. integrated, corrected, genes When `return_dimred=False`, returns a three-tuple containing a list of `numpy.ndarray` with integrated low dimensional embeddings, a list of `scipy.sparse.csr_matrix` each with batch corrected values, and a a single list of genes containing the intersection of inputted genes. """ datasets_full = check_datasets(datasets_full) datasets, genes = merge_datasets(datasets_full, genes_list, ds_names=ds_names, union=union) datasets_dimred, genes = process_data(datasets, genes, hvg=hvg, dimred=dimred) datasets_dimred = assemble( datasets_dimred, # Assemble in low dimensional space. expr_datasets=datasets, # Modified in place. verbose=verbose, knn=knn, sigma=sigma, approx=approx, alpha=alpha, ds_names=ds_names, batch_size=batch_size, geosketch=geosketch, geosketch_max=geosketch_max, ) if return_dense: datasets = [ ds.toarray() for ds in datasets ] if return_dimred: return datasets_dimred, datasets, genes return datasets, genes
[ "def", "correct", "(", "datasets_full", ",", "genes_list", ",", "return_dimred", "=", "False", ",", "batch_size", "=", "BATCH_SIZE", ",", "verbose", "=", "VERBOSE", ",", "ds_names", "=", "None", ",", "dimred", "=", "DIMRED", ",", "approx", "=", "APPROX", ",", "sigma", "=", "SIGMA", ",", "alpha", "=", "ALPHA", ",", "knn", "=", "KNN", ",", "return_dense", "=", "False", ",", "hvg", "=", "None", ",", "union", "=", "False", ",", "geosketch", "=", "False", ",", "geosketch_max", "=", "20000", ")", ":", "datasets_full", "=", "check_datasets", "(", "datasets_full", ")", "datasets", ",", "genes", "=", "merge_datasets", "(", "datasets_full", ",", "genes_list", ",", "ds_names", "=", "ds_names", ",", "union", "=", "union", ")", "datasets_dimred", ",", "genes", "=", "process_data", "(", "datasets", ",", "genes", ",", "hvg", "=", "hvg", ",", "dimred", "=", "dimred", ")", "datasets_dimred", "=", "assemble", "(", "datasets_dimred", ",", "# Assemble in low dimensional space.", "expr_datasets", "=", "datasets", ",", "# Modified in place.", "verbose", "=", "verbose", ",", "knn", "=", "knn", ",", "sigma", "=", "sigma", ",", "approx", "=", "approx", ",", "alpha", "=", "alpha", ",", "ds_names", "=", "ds_names", ",", "batch_size", "=", "batch_size", ",", "geosketch", "=", "geosketch", ",", "geosketch_max", "=", "geosketch_max", ",", ")", "if", "return_dense", ":", "datasets", "=", "[", "ds", ".", "toarray", "(", ")", "for", "ds", "in", "datasets", "]", "if", "return_dimred", ":", "return", "datasets_dimred", ",", "datasets", ",", "genes", "return", "datasets", ",", "genes" ]
Integrate and batch correct a list of data sets. Parameters ---------- datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray` Data sets to integrate and correct. genes_list: `list` of `list` of `string` List of genes for each data set. return_dimred: `bool`, optional (default: `False`) In addition to returning batch corrected matrices, also returns integrated low-dimensional embeddings. batch_size: `int`, optional (default: `5000`) The batch size used in the alignment vector computation. Useful when correcting very large (>100k samples) data sets. Set to large value that runs within available memory. verbose: `bool` or `int`, optional (default: 2) When `True` or not equal to 0, prints logging output. ds_names: `list` of `string`, optional When `verbose=True`, reports data set names in logging output. dimred: `int`, optional (default: 100) Dimensionality of integrated embedding. approx: `bool`, optional (default: `True`) Use approximate nearest neighbors, greatly speeds up matching runtime. sigma: `float`, optional (default: 15) Correction smoothing parameter on Gaussian kernel. alpha: `float`, optional (default: 0.10) Alignment score minimum cutoff. knn: `int`, optional (default: 20) Number of nearest neighbors to use for matching. return_dense: `bool`, optional (default: `False`) Return `numpy.ndarray` matrices instead of `scipy.sparse.csr_matrix`. hvg: `int`, optional (default: None) Use this number of top highly variable genes based on dispersion. Returns ------- corrected, genes By default (`return_dimred=False`), returns a two-tuple containing a list of `scipy.sparse.csr_matrix` each with batch corrected values, and a single list of genes containing the intersection of inputted genes. integrated, corrected, genes When `return_dimred=True`, returns a three-tuple containing a list of `numpy.ndarray` with integrated low dimensional embeddings, a list of `scipy.sparse.csr_matrix` each with batch corrected values, and a single list of genes containing the intersection of inputted genes.
[ "Integrate", "and", "batch", "correct", "a", "list", "of", "data", "sets", "." ]
train
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/scanorama.py#L37-L111
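A hedged end-to-end sketch of correct() based only on the signature and docstring above; the gene names and Poisson count matrices are made up, and `from scanorama import correct` assumes the package re-exports the function at the top level.
import numpy as np
from scipy.sparse import csr_matrix
from scanorama import correct  # assumed top-level export

# Two toy batches measuring the same 100 made-up genes.
genes = ['GENE%d' % i for i in range(100)]
ds1 = csr_matrix(np.random.poisson(1.0, size=(200, 100)).astype(float))
ds2 = csr_matrix(np.random.poisson(1.0, size=(300, 100)).astype(float))

# Default call: batch-corrected sparse matrices plus the shared gene list.
corrected, kept_genes = correct([ds1, ds2], [genes, genes], dimred=20)

# With return_dimred=True the integrated embeddings are returned as well.
integrated, corrected, kept_genes = correct(
    [ds1, ds2], [genes, genes], return_dimred=True, dimred=20)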
brianhie/scanorama
scanorama/scanorama.py
integrate
def integrate(datasets_full, genes_list, batch_size=BATCH_SIZE, verbose=VERBOSE, ds_names=None, dimred=DIMRED, approx=APPROX, sigma=SIGMA, alpha=ALPHA, knn=KNN, geosketch=False, geosketch_max=20000, n_iter=1, union=False, hvg=None): """Integrate a list of data sets. Parameters ---------- datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray` Data sets to integrate and correct. genes_list: `list` of `list` of `string` List of genes for each data set. batch_size: `int`, optional (default: `5000`) The batch size used in the alignment vector computation. Useful when correcting very large (>100k samples) data sets. Set to large value that runs within available memory. verbose: `bool` or `int`, optional (default: 2) When `True` or not equal to 0, prints logging output. ds_names: `list` of `string`, optional When `verbose=True`, reports data set names in logging output. dimred: `int`, optional (default: 100) Dimensionality of integrated embedding. approx: `bool`, optional (default: `True`) Use approximate nearest neighbors, greatly speeds up matching runtime. sigma: `float`, optional (default: 15) Correction smoothing parameter on Gaussian kernel. alpha: `float`, optional (default: 0.10) Alignment score minimum cutoff. knn: `int`, optional (default: 20) Number of nearest neighbors to use for matching. hvg: `int`, optional (default: None) Use this number of top highly variable genes based on dispersion. Returns ------- integrated, genes Returns a two-tuple containing a list of `numpy.ndarray` with integrated low dimensional embeddings and a single list of genes containing the intersection of inputted genes. """ datasets_full = check_datasets(datasets_full) datasets, genes = merge_datasets(datasets_full, genes_list, ds_names=ds_names, union=union) datasets_dimred, genes = process_data(datasets, genes, hvg=hvg, dimred=dimred) for _ in range(n_iter): datasets_dimred = assemble( datasets_dimred, # Assemble in low dimensional space. verbose=verbose, knn=knn, sigma=sigma, approx=approx, alpha=alpha, ds_names=ds_names, batch_size=batch_size, geosketch=geosketch, geosketch_max=geosketch_max, ) return datasets_dimred, genes
python
def integrate(datasets_full, genes_list, batch_size=BATCH_SIZE, verbose=VERBOSE, ds_names=None, dimred=DIMRED, approx=APPROX, sigma=SIGMA, alpha=ALPHA, knn=KNN, geosketch=False, geosketch_max=20000, n_iter=1, union=False, hvg=None): """Integrate a list of data sets. Parameters ---------- datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray` Data sets to integrate and correct. genes_list: `list` of `list` of `string` List of genes for each data set. batch_size: `int`, optional (default: `5000`) The batch size used in the alignment vector computation. Useful when correcting very large (>100k samples) data sets. Set to large value that runs within available memory. verbose: `bool` or `int`, optional (default: 2) When `True` or not equal to 0, prints logging output. ds_names: `list` of `string`, optional When `verbose=True`, reports data set names in logging output. dimred: `int`, optional (default: 100) Dimensionality of integrated embedding. approx: `bool`, optional (default: `True`) Use approximate nearest neighbors, greatly speeds up matching runtime. sigma: `float`, optional (default: 15) Correction smoothing parameter on Gaussian kernel. alpha: `float`, optional (default: 0.10) Alignment score minimum cutoff. knn: `int`, optional (default: 20) Number of nearest neighbors to use for matching. hvg: `int`, optional (default: None) Use this number of top highly variable genes based on dispersion. Returns ------- integrated, genes Returns a two-tuple containing a list of `numpy.ndarray` with integrated low dimensional embeddings and a single list of genes containing the intersection of inputted genes. """ datasets_full = check_datasets(datasets_full) datasets, genes = merge_datasets(datasets_full, genes_list, ds_names=ds_names, union=union) datasets_dimred, genes = process_data(datasets, genes, hvg=hvg, dimred=dimred) for _ in range(n_iter): datasets_dimred = assemble( datasets_dimred, # Assemble in low dimensional space. verbose=verbose, knn=knn, sigma=sigma, approx=approx, alpha=alpha, ds_names=ds_names, batch_size=batch_size, geosketch=geosketch, geosketch_max=geosketch_max, ) return datasets_dimred, genes
[ "def", "integrate", "(", "datasets_full", ",", "genes_list", ",", "batch_size", "=", "BATCH_SIZE", ",", "verbose", "=", "VERBOSE", ",", "ds_names", "=", "None", ",", "dimred", "=", "DIMRED", ",", "approx", "=", "APPROX", ",", "sigma", "=", "SIGMA", ",", "alpha", "=", "ALPHA", ",", "knn", "=", "KNN", ",", "geosketch", "=", "False", ",", "geosketch_max", "=", "20000", ",", "n_iter", "=", "1", ",", "union", "=", "False", ",", "hvg", "=", "None", ")", ":", "datasets_full", "=", "check_datasets", "(", "datasets_full", ")", "datasets", ",", "genes", "=", "merge_datasets", "(", "datasets_full", ",", "genes_list", ",", "ds_names", "=", "ds_names", ",", "union", "=", "union", ")", "datasets_dimred", ",", "genes", "=", "process_data", "(", "datasets", ",", "genes", ",", "hvg", "=", "hvg", ",", "dimred", "=", "dimred", ")", "for", "_", "in", "range", "(", "n_iter", ")", ":", "datasets_dimred", "=", "assemble", "(", "datasets_dimred", ",", "# Assemble in low dimensional space.", "verbose", "=", "verbose", ",", "knn", "=", "knn", ",", "sigma", "=", "sigma", ",", "approx", "=", "approx", ",", "alpha", "=", "alpha", ",", "ds_names", "=", "ds_names", ",", "batch_size", "=", "batch_size", ",", "geosketch", "=", "geosketch", ",", "geosketch_max", "=", "geosketch_max", ",", ")", "return", "datasets_dimred", ",", "genes" ]
Integrate a list of data sets. Parameters ---------- datasets_full : `list` of `scipy.sparse.csr_matrix` or of `numpy.ndarray` Data sets to integrate and correct. genes_list: `list` of `list` of `string` List of genes for each data set. batch_size: `int`, optional (default: `5000`) The batch size used in the alignment vector computation. Useful when correcting very large (>100k samples) data sets. Set to large value that runs within available memory. verbose: `bool` or `int`, optional (default: 2) When `True` or not equal to 0, prints logging output. ds_names: `list` of `string`, optional When `verbose=True`, reports data set names in logging output. dimred: `int`, optional (default: 100) Dimensionality of integrated embedding. approx: `bool`, optional (default: `True`) Use approximate nearest neighbors, greatly speeds up matching runtime. sigma: `float`, optional (default: 15) Correction smoothing parameter on Gaussian kernel. alpha: `float`, optional (default: 0.10) Alignment score minimum cutoff. knn: `int`, optional (default: 20) Number of nearest neighbors to use for matching. hvg: `int`, optional (default: None) Use this number of top highly variable genes based on dispersion. Returns ------- integrated, genes Returns a two-tuple containing a list of `numpy.ndarray` with integrated low dimensional embeddings and a single list of genes containing the intersection of inputted genes.
[ "Integrate", "a", "list", "of", "data", "sets", "." ]
train
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/scanorama.py#L114-L169
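The same toy setup works for integrate(), which only returns the low-dimensional embeddings; the data and import path are assumptions as in the previous sketch.
import numpy as np
from scanorama import integrate  # assumed top-level export

genes = ['GENE%d' % i for i in range(100)]
batches = [np.random.poisson(1.0, size=(n, 100)).astype(float) for n in (150, 250)]

embeddings, kept_genes = integrate(batches, [genes, genes], dimred=20)
for emb in embeddings:
    print(emb.shape)  # one (n_cells_in_batch, 20) array per input batch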
brianhie/scanorama
scanorama/scanorama.py
correct_scanpy
def correct_scanpy(adatas, **kwargs): """Batch correct a list of `scanpy.api.AnnData`. Parameters ---------- adatas : `list` of `scanpy.api.AnnData` Data sets to integrate and/or correct. kwargs : `dict` See documentation for the `correct()` method for a full list of parameters to use for batch correction. Returns ------- corrected By default (`return_dimred=False`), returns a list of `scanpy.api.AnnData` with batch corrected values in the `.X` field. corrected, integrated When `return_dimred=False`, returns a two-tuple containing a list of `np.ndarray` with integrated low-dimensional embeddings and a list of `scanpy.api.AnnData` with batch corrected values in the `.X` field. """ if 'return_dimred' in kwargs and kwargs['return_dimred']: datasets_dimred, datasets, genes = correct( [adata.X for adata in adatas], [adata.var_names.values for adata in adatas], **kwargs ) else: datasets, genes = correct( [adata.X for adata in adatas], [adata.var_names.values for adata in adatas], **kwargs ) new_adatas = [] for i, adata in enumerate(adatas): adata.X = datasets[i] new_adatas.append(adata) if 'return_dimred' in kwargs and kwargs['return_dimred']: return datasets_dimred, new_adatas else: return new_adatas
python
def correct_scanpy(adatas, **kwargs): """Batch correct a list of `scanpy.api.AnnData`. Parameters ---------- adatas : `list` of `scanpy.api.AnnData` Data sets to integrate and/or correct. kwargs : `dict` See documentation for the `correct()` method for a full list of parameters to use for batch correction. Returns ------- corrected By default (`return_dimred=False`), returns a list of `scanpy.api.AnnData` with batch corrected values in the `.X` field. corrected, integrated When `return_dimred=False`, returns a two-tuple containing a list of `np.ndarray` with integrated low-dimensional embeddings and a list of `scanpy.api.AnnData` with batch corrected values in the `.X` field. """ if 'return_dimred' in kwargs and kwargs['return_dimred']: datasets_dimred, datasets, genes = correct( [adata.X for adata in adatas], [adata.var_names.values for adata in adatas], **kwargs ) else: datasets, genes = correct( [adata.X for adata in adatas], [adata.var_names.values for adata in adatas], **kwargs ) new_adatas = [] for i, adata in enumerate(adatas): adata.X = datasets[i] new_adatas.append(adata) if 'return_dimred' in kwargs and kwargs['return_dimred']: return datasets_dimred, new_adatas else: return new_adatas
[ "def", "correct_scanpy", "(", "adatas", ",", "*", "*", "kwargs", ")", ":", "if", "'return_dimred'", "in", "kwargs", "and", "kwargs", "[", "'return_dimred'", "]", ":", "datasets_dimred", ",", "datasets", ",", "genes", "=", "correct", "(", "[", "adata", ".", "X", "for", "adata", "in", "adatas", "]", ",", "[", "adata", ".", "var_names", ".", "values", "for", "adata", "in", "adatas", "]", ",", "*", "*", "kwargs", ")", "else", ":", "datasets", ",", "genes", "=", "correct", "(", "[", "adata", ".", "X", "for", "adata", "in", "adatas", "]", ",", "[", "adata", ".", "var_names", ".", "values", "for", "adata", "in", "adatas", "]", ",", "*", "*", "kwargs", ")", "new_adatas", "=", "[", "]", "for", "i", ",", "adata", "in", "enumerate", "(", "adatas", ")", ":", "adata", ".", "X", "=", "datasets", "[", "i", "]", "new_adatas", ".", "append", "(", "adata", ")", "if", "'return_dimred'", "in", "kwargs", "and", "kwargs", "[", "'return_dimred'", "]", ":", "return", "datasets_dimred", ",", "new_adatas", "else", ":", "return", "new_adatas" ]
Batch correct a list of `scanpy.api.AnnData`. Parameters ---------- adatas : `list` of `scanpy.api.AnnData` Data sets to integrate and/or correct. kwargs : `dict` See documentation for the `correct()` method for a full list of parameters to use for batch correction. Returns ------- corrected By default (`return_dimred=False`), returns a list of `scanpy.api.AnnData` with batch corrected values in the `.X` field. integrated, corrected When `return_dimred=True`, returns a two-tuple containing a list of `np.ndarray` with integrated low-dimensional embeddings and a list of `scanpy.api.AnnData` with batch corrected values in the `.X` field.
[ "Batch", "correct", "a", "list", "of", "scanpy", ".", "api", ".", "AnnData", "." ]
train
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/scanorama.py#L172-L216
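A sketch of the AnnData wrapper above; it assumes `anndata.AnnData` as the object type (the docstring names scanpy.api.AnnData, which wraps the same class) and uses invented expression data.
import numpy as np
from anndata import AnnData  # assumed; scanpy.api.AnnData wraps the same class
from scanorama import correct_scanpy  # assumed top-level export

genes = ['GENE%d' % i for i in range(100)]
adatas = []
for n_cells in (120, 180):
    adata = AnnData(np.random.poisson(1.0, size=(n_cells, 100)).astype(float))
    adata.var_names = genes
    adatas.append(adata)

# Batch-corrected values are written back into each AnnData's .X field.
corrected_adatas = correct_scanpy(adatas, dimred=20)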
brianhie/scanorama
scanorama/scanorama.py
integrate_scanpy
def integrate_scanpy(adatas, **kwargs): """Integrate a list of `scanpy.api.AnnData`. Parameters ---------- adatas : `list` of `scanpy.api.AnnData` Data sets to integrate. kwargs : `dict` See documentation for the `integrate()` method for a full list of parameters to use for batch correction. Returns ------- integrated Returns a list of `np.ndarray` with integrated low-dimensional embeddings. """ datasets_dimred, genes = integrate( [adata.X for adata in adatas], [adata.var_names.values for adata in adatas], **kwargs ) return datasets_dimred
python
def integrate_scanpy(adatas, **kwargs): """Integrate a list of `scanpy.api.AnnData`. Parameters ---------- adatas : `list` of `scanpy.api.AnnData` Data sets to integrate. kwargs : `dict` See documentation for the `integrate()` method for a full list of parameters to use for batch correction. Returns ------- integrated Returns a list of `np.ndarray` with integrated low-dimensional embeddings. """ datasets_dimred, genes = integrate( [adata.X for adata in adatas], [adata.var_names.values for adata in adatas], **kwargs ) return datasets_dimred
[ "def", "integrate_scanpy", "(", "adatas", ",", "*", "*", "kwargs", ")", ":", "datasets_dimred", ",", "genes", "=", "integrate", "(", "[", "adata", ".", "X", "for", "adata", "in", "adatas", "]", ",", "[", "adata", ".", "var_names", ".", "values", "for", "adata", "in", "adatas", "]", ",", "*", "*", "kwargs", ")", "return", "datasets_dimred" ]
Integrate a list of `scanpy.api.AnnData`. Parameters ---------- adatas : `list` of `scanpy.api.AnnData` Data sets to integrate. kwargs : `dict` See documentation for the `integrate()` method for a full list of parameters to use for batch correction. Returns ------- integrated Returns a list of `np.ndarray` with integrated low-dimensional embeddings.
[ "Integrate", "a", "list", "of", "scanpy", ".", "api", ".", "AnnData", "." ]
train
https://github.com/brianhie/scanorama/blob/57aafac87d07a8d682f57450165dd07f066ebb3c/scanorama/scanorama.py#L219-L242
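And the embedding-only AnnData variant, under the same assumptions about AnnData construction and the top-level import:
import numpy as np
from anndata import AnnData  # assumed, as in the previous sketch
from scanorama import integrate_scanpy  # assumed top-level export

genes = ['GENE%d' % i for i in range(100)]
adatas = []
for n_cells in (100, 140):
    adata = AnnData(np.random.poisson(1.0, size=(n_cells, 100)).astype(float))
    adata.var_names = genes
    adatas.append(adata)

embeddings = integrate_scanpy(adatas, dimred=20)
print([emb.shape for emb in embeddings])  # one (n_cells, 20) array per batch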
johntfoster/bspline
bspline/splinelab.py
augknt
def augknt(knots, order): """Augment a knot vector. Parameters: knots: Python list or rank-1 array, the original knot vector (without endpoint repeats) order: int, >= 0, order of spline Returns: list_of_knots: rank-1 array that has (`order` + 1) copies of ``knots[0]``, then ``knots[1:-1]``, and finally (`order` + 1) copies of ``knots[-1]``. Caveats: `order` is the spline order `p`, not `p` + 1, and existing knots are never deleted. The knot vector always becomes longer by calling this function. """ if isinstance(knots, np.ndarray) and knots.ndim > 1: raise ValueError("knots must be a list or a rank-1 array") knots = list(knots) # ensure Python list # One copy of knots[0] and knots[-1] will come from "knots" itself, # so we only need to prepend/append "order" copies. # return np.array( [knots[0]] * order + knots + [knots[-1]] * order )
python
def augknt(knots, order): """Augment a knot vector. Parameters: knots: Python list or rank-1 array, the original knot vector (without endpoint repeats) order: int, >= 0, order of spline Returns: list_of_knots: rank-1 array that has (`order` + 1) copies of ``knots[0]``, then ``knots[1:-1]``, and finally (`order` + 1) copies of ``knots[-1]``. Caveats: `order` is the spline order `p`, not `p` + 1, and existing knots are never deleted. The knot vector always becomes longer by calling this function. """ if isinstance(knots, np.ndarray) and knots.ndim > 1: raise ValueError("knots must be a list or a rank-1 array") knots = list(knots) # ensure Python list # One copy of knots[0] and knots[-1] will come from "knots" itself, # so we only need to prepend/append "order" copies. # return np.array( [knots[0]] * order + knots + [knots[-1]] * order )
[ "def", "augknt", "(", "knots", ",", "order", ")", ":", "if", "isinstance", "(", "knots", ",", "np", ".", "ndarray", ")", "and", "knots", ".", "ndim", ">", "1", ":", "raise", "ValueError", "(", "\"knots must be a list or a rank-1 array\"", ")", "knots", "=", "list", "(", "knots", ")", "# ensure Python list", "# One copy of knots[0] and knots[-1] will come from \"knots\" itself,", "# so we only need to prepend/append \"order\" copies.", "#", "return", "np", ".", "array", "(", "[", "knots", "[", "0", "]", "]", "*", "order", "+", "knots", "+", "[", "knots", "[", "-", "1", "]", "]", "*", "order", ")" ]
Augment a knot vector. Parameters: knots: Python list or rank-1 array, the original knot vector (without endpoint repeats) order: int, >= 0, order of spline Returns: list_of_knots: rank-1 array that has (`order` + 1) copies of ``knots[0]``, then ``knots[1:-1]``, and finally (`order` + 1) copies of ``knots[-1]``. Caveats: `order` is the spline order `p`, not `p` + 1, and existing knots are never deleted. The knot vector always becomes longer by calling this function.
[ "Augment", "a", "knot", "vector", "." ]
train
https://github.com/johntfoster/bspline/blob/366085a665da6fe907258eafcc8032c58a0601e0/bspline/splinelab.py#L23-L47
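A small sketch of augknt in action; the import path `bspline.splinelab` is inferred from the file path above, and the knot values are invented.
import numpy as np
from bspline import splinelab  # import path assumed from bspline/splinelab.py

knots = [0.0, 0.25, 0.5, 0.75, 1.0]
order = 3  # cubic spline
augmented = splinelab.augknt(knots, order)
print(augmented)
# -> [0. 0. 0. 0. 0.25 0.5 0.75 1. 1. 1. 1.]
# order extra copies are added at each end, so each endpoint appears order+1 times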
johntfoster/bspline
bspline/splinelab.py
aveknt
def aveknt(t, k): """Compute the running average of `k` successive elements of `t`. Return the averaged array. Parameters: t: Python list or rank-1 array k: int, >= 2, how many successive elements to average Returns: rank-1 array, averaged data. If k > len(t), returns a zero-length array. Caveat: This is slightly different from MATLAB's aveknt, which returns the running average of `k`-1 successive elements of ``t[1:-1]`` (and the empty vector if ``len(t) - 2 < k - 1``). """ t = np.atleast_1d(t) if t.ndim > 1: raise ValueError("t must be a list or a rank-1 array") n = t.shape[0] u = max(0, n - (k-1)) # number of elements in the output array out = np.empty( (u,), dtype=t.dtype ) for j in range(u): out[j] = sum( t[j:(j+k)] ) / k return out
python
def aveknt(t, k): """Compute the running average of `k` successive elements of `t`. Return the averaged array. Parameters: t: Python list or rank-1 array k: int, >= 2, how many successive elements to average Returns: rank-1 array, averaged data. If k > len(t), returns a zero-length array. Caveat: This is slightly different from MATLAB's aveknt, which returns the running average of `k`-1 successive elements of ``t[1:-1]`` (and the empty vector if ``len(t) - 2 < k - 1``). """ t = np.atleast_1d(t) if t.ndim > 1: raise ValueError("t must be a list or a rank-1 array") n = t.shape[0] u = max(0, n - (k-1)) # number of elements in the output array out = np.empty( (u,), dtype=t.dtype ) for j in range(u): out[j] = sum( t[j:(j+k)] ) / k return out
[ "def", "aveknt", "(", "t", ",", "k", ")", ":", "t", "=", "np", ".", "atleast_1d", "(", "t", ")", "if", "t", ".", "ndim", ">", "1", ":", "raise", "ValueError", "(", "\"t must be a list or a rank-1 array\"", ")", "n", "=", "t", ".", "shape", "[", "0", "]", "u", "=", "max", "(", "0", ",", "n", "-", "(", "k", "-", "1", ")", ")", "# number of elements in the output array", "out", "=", "np", ".", "empty", "(", "(", "u", ",", ")", ",", "dtype", "=", "t", ".", "dtype", ")", "for", "j", "in", "range", "(", "u", ")", ":", "out", "[", "j", "]", "=", "sum", "(", "t", "[", "j", ":", "(", "j", "+", "k", ")", "]", ")", "/", "k", "return", "out" ]
Compute the running average of `k` successive elements of `t`. Return the averaged array. Parameters: t: Python list or rank-1 array k: int, >= 2, how many successive elements to average Returns: rank-1 array, averaged data. If k > len(t), returns a zero-length array. Caveat: This is slightly different from MATLAB's aveknt, which returns the running average of `k`-1 successive elements of ``t[1:-1]`` (and the empty vector if ``len(t) - 2 < k - 1``).
[ "Compute", "the", "running", "average", "of", "k", "successive", "elements", "of", "t", ".", "Return", "the", "averaged", "array", "." ]
train
https://github.com/johntfoster/bspline/blob/366085a665da6fe907258eafcc8032c58a0601e0/bspline/splinelab.py#L50-L78
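A quick sketch of the running average computed by aveknt (same assumed import path, made-up knot values):
import numpy as np
from bspline import splinelab  # assumed import path

t = np.array([0.0, 1.0, 2.0, 4.0, 8.0])
print(splinelab.aveknt(t, 3))
# windows of 3 successive knots: (0+1+2)/3, (1+2+4)/3, (2+4+8)/3
# -> [1.         2.33333333 4.66666667]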
johntfoster/bspline
bspline/splinelab.py
aptknt
def aptknt(tau, order): """Create an acceptable knot vector. Minimal emulation of MATLAB's ``aptknt``. The returned knot vector can be used to generate splines of desired `order` that are suitable for interpolation to the collocation sites `tau`. Note that this is only possible when ``len(tau)`` >= `order` + 1. When this condition does not hold, a valid knot vector is returned, but using it to generate a spline basis will not have the desired effect (the spline will return a length-zero array upon evaluation). Parameters: tau: Python list or rank-1 array, collocation sites order: int, >= 0, order of spline Returns: rank-1 array, `k` copies of ``tau[0]``, then ``aveknt(tau[1:-1], k-1)``, and finally `k` copies of ``tau[-1]``, where ``k = min(order+1, len(tau))``. """ tau = np.atleast_1d(tau) k = order + 1 if tau.ndim > 1: raise ValueError("tau must be a list or a rank-1 array") # emulate MATLAB behavior for the "k" parameter # # See # https://se.mathworks.com/help/curvefit/aptknt.html # if len(tau) < k: k = len(tau) if not (tau == sorted(tau)).all(): raise ValueError("tau must be nondecreasing") # last processed element needs to be: # i + k - 1 = len(tau)- 1 # => i + k = len(tau) # => i = len(tau) - k # u = len(tau) - k for i in range(u): if tau[i+k-1] == tau[i]: raise ValueError("k-fold (or higher) repeated sites not allowed, but tau[i+k-1] == tau[i] for i = %d, k = %d" % (i,k)) # form the output sequence # prefix = [ tau[0] ] * k suffix = [ tau[-1] ] * k # https://se.mathworks.com/help/curvefit/aveknt.html # MATLAB's aveknt(): # - averages successive k-1 entries, but ours averages k # - seems to ignore the endpoints # tmp = aveknt(tau[1:-1], k-1) middle = tmp.tolist() return np.array( prefix + middle + suffix, dtype=tmp.dtype )
python
def aptknt(tau, order): """Create an acceptable knot vector. Minimal emulation of MATLAB's ``aptknt``. The returned knot vector can be used to generate splines of desired `order` that are suitable for interpolation to the collocation sites `tau`. Note that this is only possible when ``len(tau)`` >= `order` + 1. When this condition does not hold, a valid knot vector is returned, but using it to generate a spline basis will not have the desired effect (the spline will return a length-zero array upon evaluation). Parameters: tau: Python list or rank-1 array, collocation sites order: int, >= 0, order of spline Returns: rank-1 array, `k` copies of ``tau[0]``, then ``aveknt(tau[1:-1], k-1)``, and finally `k` copies of ``tau[-1]``, where ``k = min(order+1, len(tau))``. """ tau = np.atleast_1d(tau) k = order + 1 if tau.ndim > 1: raise ValueError("tau must be a list or a rank-1 array") # emulate MATLAB behavior for the "k" parameter # # See # https://se.mathworks.com/help/curvefit/aptknt.html # if len(tau) < k: k = len(tau) if not (tau == sorted(tau)).all(): raise ValueError("tau must be nondecreasing") # last processed element needs to be: # i + k - 1 = len(tau)- 1 # => i + k = len(tau) # => i = len(tau) - k # u = len(tau) - k for i in range(u): if tau[i+k-1] == tau[i]: raise ValueError("k-fold (or higher) repeated sites not allowed, but tau[i+k-1] == tau[i] for i = %d, k = %d" % (i,k)) # form the output sequence # prefix = [ tau[0] ] * k suffix = [ tau[-1] ] * k # https://se.mathworks.com/help/curvefit/aveknt.html # MATLAB's aveknt(): # - averages successive k-1 entries, but ours averages k # - seems to ignore the endpoints # tmp = aveknt(tau[1:-1], k-1) middle = tmp.tolist() return np.array( prefix + middle + suffix, dtype=tmp.dtype )
[ "def", "aptknt", "(", "tau", ",", "order", ")", ":", "tau", "=", "np", ".", "atleast_1d", "(", "tau", ")", "k", "=", "order", "+", "1", "if", "tau", ".", "ndim", ">", "1", ":", "raise", "ValueError", "(", "\"tau must be a list or a rank-1 array\"", ")", "# emulate MATLAB behavior for the \"k\" parameter", "#", "# See", "# https://se.mathworks.com/help/curvefit/aptknt.html", "#", "if", "len", "(", "tau", ")", "<", "k", ":", "k", "=", "len", "(", "tau", ")", "if", "not", "(", "tau", "==", "sorted", "(", "tau", ")", ")", ".", "all", "(", ")", ":", "raise", "ValueError", "(", "\"tau must be nondecreasing\"", ")", "# last processed element needs to be:", "# i + k - 1 = len(tau)- 1", "# => i + k = len(tau)", "# => i = len(tau) - k", "#", "u", "=", "len", "(", "tau", ")", "-", "k", "for", "i", "in", "range", "(", "u", ")", ":", "if", "tau", "[", "i", "+", "k", "-", "1", "]", "==", "tau", "[", "i", "]", ":", "raise", "ValueError", "(", "\"k-fold (or higher) repeated sites not allowed, but tau[i+k-1] == tau[i] for i = %d, k = %d\"", "%", "(", "i", ",", "k", ")", ")", "# form the output sequence", "#", "prefix", "=", "[", "tau", "[", "0", "]", "]", "*", "k", "suffix", "=", "[", "tau", "[", "-", "1", "]", "]", "*", "k", "# https://se.mathworks.com/help/curvefit/aveknt.html", "# MATLAB's aveknt():", "# - averages successive k-1 entries, but ours averages k", "# - seems to ignore the endpoints", "#", "tmp", "=", "aveknt", "(", "tau", "[", "1", ":", "-", "1", "]", ",", "k", "-", "1", ")", "middle", "=", "tmp", ".", "tolist", "(", ")", "return", "np", ".", "array", "(", "prefix", "+", "middle", "+", "suffix", ",", "dtype", "=", "tmp", ".", "dtype", ")" ]
Create an acceptable knot vector. Minimal emulation of MATLAB's ``aptknt``. The returned knot vector can be used to generate splines of desired `order` that are suitable for interpolation to the collocation sites `tau`. Note that this is only possible when ``len(tau)`` >= `order` + 1. When this condition does not hold, a valid knot vector is returned, but using it to generate a spline basis will not have the desired effect (the spline will return a length-zero array upon evaluation). Parameters: tau: Python list or rank-1 array, collocation sites order: int, >= 0, order of spline Returns: rank-1 array, `k` copies of ``tau[0]``, then ``aveknt(tau[1:-1], k-1)``, and finally `k` copies of ``tau[-1]``, where ``k = min(order+1, len(tau))``.
[ "Create", "an", "acceptable", "knot", "vector", "." ]
train
https://github.com/johntfoster/bspline/blob/366085a665da6fe907258eafcc8032c58a0601e0/bspline/splinelab.py#L81-L145
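A sketch of building an acceptable knot vector for a set of collocation sites with aptknt (assumed import path, invented sites):
import numpy as np
from bspline import splinelab  # assumed import path

tau = np.array([0.0, 1.0, 2.0, 3.0, 4.0])  # collocation sites, nondecreasing
knots = splinelab.aptknt(tau, order=3)
print(knots)
# -> [0. 0. 0. 0. 2. 4. 4. 4. 4.]
# k = order+1 = 4 copies of each endpoint around the averaged interior sites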
johntfoster/bspline
bspline/splinelab.py
knt2mlt
def knt2mlt(t): """Count multiplicities of elements in a sorted list or rank-1 array. Minimal emulation of MATLAB's ``knt2mlt``. Parameters: t: Python list or rank-1 array. Must be sorted! Returns: out rank-1 array such that out[k] = #{ t[i] == t[k] for i < k } Example: If ``t = [1, 1, 2, 3, 3, 3]``, then ``out = [0, 1, 0, 0, 1, 2]``. Caveat: Requires input to be already sorted (this is not checked). """ t = np.atleast_1d(t) if t.ndim > 1: raise ValueError("t must be a list or a rank-1 array") out = [] e = None for k in range(t.shape[0]): if t[k] != e: e = t[k] count = 0 else: count += 1 out.append(count) return np.array( out )
python
def knt2mlt(t): """Count multiplicities of elements in a sorted list or rank-1 array. Minimal emulation of MATLAB's ``knt2mlt``. Parameters: t: Python list or rank-1 array. Must be sorted! Returns: out rank-1 array such that out[k] = #{ t[i] == t[k] for i < k } Example: If ``t = [1, 1, 2, 3, 3, 3]``, then ``out = [0, 1, 0, 0, 1, 2]``. Caveat: Requires input to be already sorted (this is not checked). """ t = np.atleast_1d(t) if t.ndim > 1: raise ValueError("t must be a list or a rank-1 array") out = [] e = None for k in range(t.shape[0]): if t[k] != e: e = t[k] count = 0 else: count += 1 out.append(count) return np.array( out )
[ "def", "knt2mlt", "(", "t", ")", ":", "t", "=", "np", ".", "atleast_1d", "(", "t", ")", "if", "t", ".", "ndim", ">", "1", ":", "raise", "ValueError", "(", "\"t must be a list or a rank-1 array\"", ")", "out", "=", "[", "]", "e", "=", "None", "for", "k", "in", "range", "(", "t", ".", "shape", "[", "0", "]", ")", ":", "if", "t", "[", "k", "]", "!=", "e", ":", "e", "=", "t", "[", "k", "]", "count", "=", "0", "else", ":", "count", "+=", "1", "out", ".", "append", "(", "count", ")", "return", "np", ".", "array", "(", "out", ")" ]
Count multiplicities of elements in a sorted list or rank-1 array. Minimal emulation of MATLAB's ``knt2mlt``. Parameters: t: Python list or rank-1 array. Must be sorted! Returns: out rank-1 array such that out[k] = #{ t[i] == t[k] for i < k } Example: If ``t = [1, 1, 2, 3, 3, 3]``, then ``out = [0, 1, 0, 0, 1, 2]``. Caveat: Requires input to be already sorted (this is not checked).
[ "Count", "multiplicities", "of", "elements", "in", "a", "sorted", "list", "or", "rank", "-", "1", "array", "." ]
train
https://github.com/johntfoster/bspline/blob/366085a665da6fe907258eafcc8032c58a0601e0/bspline/splinelab.py#L148-L182
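The multiplicity counter can be exercised directly on the docstring's own example (assumed import path):
from bspline import splinelab  # assumed import path

t = [1, 1, 2, 3, 3, 3]  # must already be sorted
print(splinelab.knt2mlt(t))
# -> [0 1 0 0 1 2]: each entry counts how many earlier entries equal the same value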
johntfoster/bspline
bspline/splinelab.py
spcol
def spcol(knots, order, tau): """Return collocation matrix. Minimal emulation of MATLAB's ``spcol``. Parameters: knots: rank-1 array, knot vector (with appropriately repeated endpoints; see `augknt`, `aptknt`) order: int, >= 0, order of spline tau: rank-1 array, collocation sites Returns: rank-2 array A such that A[i,j] = D**{m(i)} B_j(tau[i]) where m(i) = multiplicity of site tau[i] D**k = kth derivative (0 for function value itself) """ m = knt2mlt(tau) B = bspline.Bspline(knots, order) dummy = B(0.) nbasis = len(dummy) # perform dummy evaluation to get number of basis functions A = np.empty( (tau.shape[0], nbasis), dtype=dummy.dtype ) for i,item in enumerate(zip(tau,m)): taui,mi = item f = B.diff(order=mi) A[i,:] = f(taui) return A
python
def spcol(knots, order, tau): """Return collocation matrix. Minimal emulation of MATLAB's ``spcol``. Parameters: knots: rank-1 array, knot vector (with appropriately repeated endpoints; see `augknt`, `aptknt`) order: int, >= 0, order of spline tau: rank-1 array, collocation sites Returns: rank-2 array A such that A[i,j] = D**{m(i)} B_j(tau[i]) where m(i) = multiplicity of site tau[i] D**k = kth derivative (0 for function value itself) """ m = knt2mlt(tau) B = bspline.Bspline(knots, order) dummy = B(0.) nbasis = len(dummy) # perform dummy evaluation to get number of basis functions A = np.empty( (tau.shape[0], nbasis), dtype=dummy.dtype ) for i,item in enumerate(zip(tau,m)): taui,mi = item f = B.diff(order=mi) A[i,:] = f(taui) return A
[ "def", "spcol", "(", "knots", ",", "order", ",", "tau", ")", ":", "m", "=", "knt2mlt", "(", "tau", ")", "B", "=", "bspline", ".", "Bspline", "(", "knots", ",", "order", ")", "dummy", "=", "B", "(", "0.", ")", "nbasis", "=", "len", "(", "dummy", ")", "# perform dummy evaluation to get number of basis functions", "A", "=", "np", ".", "empty", "(", "(", "tau", ".", "shape", "[", "0", "]", ",", "nbasis", ")", ",", "dtype", "=", "dummy", ".", "dtype", ")", "for", "i", ",", "item", "in", "enumerate", "(", "zip", "(", "tau", ",", "m", ")", ")", ":", "taui", ",", "mi", "=", "item", "f", "=", "B", ".", "diff", "(", "order", "=", "mi", ")", "A", "[", "i", ",", ":", "]", "=", "f", "(", "taui", ")", "return", "A" ]
Return collocation matrix. Minimal emulation of MATLAB's ``spcol``. Parameters: knots: rank-1 array, knot vector (with appropriately repeated endpoints; see `augknt`, `aptknt`) order: int, >= 0, order of spline tau: rank-1 array, collocation sites Returns: rank-2 array A such that A[i,j] = D**{m(i)} B_j(tau[i]) where m(i) = multiplicity of site tau[i] D**k = kth derivative (0 for function value itself)
[ "Return", "collocation", "matrix", "." ]
train
https://github.com/johntfoster/bspline/blob/366085a665da6fe907258eafcc8032c58a0601e0/bspline/splinelab.py#L185-L220
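Putting the pieces together: a hedged sketch that builds a knot vector with aptknt and then the collocation matrix with spcol (import path and collocation sites are assumptions):
import numpy as np
from bspline import splinelab  # assumed import path

order = 3
tau = np.linspace(0.0, 1.0, 8)          # distinct collocation sites
knots = splinelab.aptknt(tau, order)    # acceptable knot vector for these sites
A = splinelab.spcol(knots, order, tau)  # one row of basis-function values per site
print(A.shape)                          # (8, number_of_basis_functions)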
johntfoster/bspline
bspline/bspline.py
Bspline.__basis0
def __basis0(self, xi): """Order zero basis (for internal use).""" return np.where(np.all([self.knot_vector[:-1] <= xi, xi < self.knot_vector[1:]],axis=0), 1.0, 0.0)
python
def __basis0(self, xi): """Order zero basis (for internal use).""" return np.where(np.all([self.knot_vector[:-1] <= xi, xi < self.knot_vector[1:]],axis=0), 1.0, 0.0)
[ "def", "__basis0", "(", "self", ",", "xi", ")", ":", "return", "np", ".", "where", "(", "np", ".", "all", "(", "[", "self", ".", "knot_vector", "[", ":", "-", "1", "]", "<=", "xi", ",", "xi", "<", "self", ".", "knot_vector", "[", "1", ":", "]", "]", ",", "axis", "=", "0", ")", ",", "1.0", ",", "0.0", ")" ]
Order zero basis (for internal use).
[ "Order", "zero", "basis", "(", "for", "internal", "use", ")", "." ]
train
https://github.com/johntfoster/bspline/blob/366085a665da6fe907258eafcc8032c58a0601e0/bspline/bspline.py#L83-L86
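Because __basis0 is name-mangled and private, the simplest way to illustrate it is to evaluate the same numpy expression standalone; the knot vector below is made up.
import numpy as np

knot_vector = np.array([0.0, 0.25, 0.5, 0.75, 1.0])
xi = 0.3
# 1.0 on the half-open knot span [t_i, t_{i+1}) containing xi, 0.0 elsewhere.
basis0 = np.where(
    np.all([knot_vector[:-1] <= xi, xi < knot_vector[1:]], axis=0), 1.0, 0.0)
print(basis0)  # -> [0. 1. 0. 0.]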
johntfoster/bspline
bspline/bspline.py
Bspline.__basis
def __basis(self, xi, p, compute_derivatives=False): """Recursive Cox - de Boor function (for internal use). Compute basis functions and optionally their first derivatives. """ if p == 0: return self.__basis0(xi) else: basis_p_minus_1 = self.__basis(xi, p - 1) first_term_numerator = xi - self.knot_vector[:-p] first_term_denominator = self.knot_vector[p:] - self.knot_vector[:-p] second_term_numerator = self.knot_vector[(p + 1):] - xi second_term_denominator = (self.knot_vector[(p + 1):] - self.knot_vector[1:-p]) #Change numerator in last recursion if derivatives are desired if compute_derivatives and p == self.p: first_term_numerator = p second_term_numerator = -p #Disable divide by zero error because we check for it with np.errstate(divide='ignore', invalid='ignore'): first_term = np.where(first_term_denominator != 0.0, (first_term_numerator / first_term_denominator), 0.0) second_term = np.where(second_term_denominator != 0.0, (second_term_numerator / second_term_denominator), 0.0) return (first_term[:-1] * basis_p_minus_1[:-1] + second_term * basis_p_minus_1[1:])
python
def __basis(self, xi, p, compute_derivatives=False): """Recursive Cox - de Boor function (for internal use). Compute basis functions and optionally their first derivatives. """ if p == 0: return self.__basis0(xi) else: basis_p_minus_1 = self.__basis(xi, p - 1) first_term_numerator = xi - self.knot_vector[:-p] first_term_denominator = self.knot_vector[p:] - self.knot_vector[:-p] second_term_numerator = self.knot_vector[(p + 1):] - xi second_term_denominator = (self.knot_vector[(p + 1):] - self.knot_vector[1:-p]) #Change numerator in last recursion if derivatives are desired if compute_derivatives and p == self.p: first_term_numerator = p second_term_numerator = -p #Disable divide by zero error because we check for it with np.errstate(divide='ignore', invalid='ignore'): first_term = np.where(first_term_denominator != 0.0, (first_term_numerator / first_term_denominator), 0.0) second_term = np.where(second_term_denominator != 0.0, (second_term_numerator / second_term_denominator), 0.0) return (first_term[:-1] * basis_p_minus_1[:-1] + second_term * basis_p_minus_1[1:])
[ "def", "__basis", "(", "self", ",", "xi", ",", "p", ",", "compute_derivatives", "=", "False", ")", ":", "if", "p", "==", "0", ":", "return", "self", ".", "__basis0", "(", "xi", ")", "else", ":", "basis_p_minus_1", "=", "self", ".", "__basis", "(", "xi", ",", "p", "-", "1", ")", "first_term_numerator", "=", "xi", "-", "self", ".", "knot_vector", "[", ":", "-", "p", "]", "first_term_denominator", "=", "self", ".", "knot_vector", "[", "p", ":", "]", "-", "self", ".", "knot_vector", "[", ":", "-", "p", "]", "second_term_numerator", "=", "self", ".", "knot_vector", "[", "(", "p", "+", "1", ")", ":", "]", "-", "xi", "second_term_denominator", "=", "(", "self", ".", "knot_vector", "[", "(", "p", "+", "1", ")", ":", "]", "-", "self", ".", "knot_vector", "[", "1", ":", "-", "p", "]", ")", "#Change numerator in last recursion if derivatives are desired", "if", "compute_derivatives", "and", "p", "==", "self", ".", "p", ":", "first_term_numerator", "=", "p", "second_term_numerator", "=", "-", "p", "#Disable divide by zero error because we check for it", "with", "np", ".", "errstate", "(", "divide", "=", "'ignore'", ",", "invalid", "=", "'ignore'", ")", ":", "first_term", "=", "np", ".", "where", "(", "first_term_denominator", "!=", "0.0", ",", "(", "first_term_numerator", "/", "first_term_denominator", ")", ",", "0.0", ")", "second_term", "=", "np", ".", "where", "(", "second_term_denominator", "!=", "0.0", ",", "(", "second_term_numerator", "/", "second_term_denominator", ")", ",", "0.0", ")", "return", "(", "first_term", "[", ":", "-", "1", "]", "*", "basis_p_minus_1", "[", ":", "-", "1", "]", "+", "second_term", "*", "basis_p_minus_1", "[", "1", ":", "]", ")" ]
Recursive Cox - de Boor function (for internal use). Compute basis functions and optionally their first derivatives.
[ "Recursive", "Cox", "-", "de", "Boor", "function", "(", "for", "internal", "use", ")", "." ]
train
https://github.com/johntfoster/bspline/blob/366085a665da6fe907258eafcc8032c58a0601e0/bspline/bspline.py#L88-L123
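A minimal standalone sketch of the Cox - de Boor recursion that Bspline.__basis above implements, written directly in numpy so it can be run without the class; the knot vector is illustrative and the function name here is not part of the package.

import numpy as np

def cox_de_boor(knots, p, xi):
    # values of all order-p basis functions at the scalar site xi
    knots = np.asarray(knots, dtype=float)
    if p == 0:
        return np.where((knots[:-1] <= xi) & (xi < knots[1:]), 1.0, 0.0)
    lower = cox_de_boor(knots, p - 1, xi)        # order p-1 basis values
    with np.errstate(divide='ignore', invalid='ignore'):
        left_den = knots[p:] - knots[:-p]
        right_den = knots[p + 1:] - knots[1:-p]
        left = np.where(left_den != 0.0, (xi - knots[:-p]) / left_den, 0.0)
        right = np.where(right_den != 0.0, (knots[p + 1:] - xi) / right_den, 0.0)
    return left[:-1] * lower[:-1] + right * lower[1:]

knots = [0., 0., 0., 0., 0.5, 1., 1., 1., 1.]    # cubic knot vector with repeated ends
print(cox_de_boor(knots, 3, 0.25))               # five basis values, summing to 1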
johntfoster/bspline
bspline/bspline.py
Bspline.d
def d(self, xi): """Convenience function to compute first derivative of basis functions. 'Memoized' for speed.""" return self.__basis(xi, self.p, compute_derivatives=True)
python
def d(self, xi): """Convenience function to compute first derivative of basis functions. 'Memoized' for speed.""" return self.__basis(xi, self.p, compute_derivatives=True)
[ "def", "d", "(", "self", ",", "xi", ")", ":", "return", "self", ".", "__basis", "(", "xi", ",", "self", ".", "p", ",", "compute_derivatives", "=", "True", ")" ]
Convenience function to compute first derivative of basis functions. 'Memoized' for speed.
[ "Convenience", "function", "to", "compute", "first", "derivative", "of", "basis", "functions", ".", "Memoized", "for", "speed", "." ]
train
https://github.com/johntfoster/bspline/blob/366085a665da6fe907258eafcc8032c58a0601e0/bspline/bspline.py#L131-L133
johntfoster/bspline
bspline/bspline.py
Bspline.plot
def plot(self): """Plot basis functions over full range of knots. Convenience function. Requires matplotlib. """ try: import matplotlib.pyplot as plt except ImportError: from sys import stderr print("ERROR: matplotlib.pyplot not found, matplotlib must be installed to use this function", file=stderr) raise x_min = np.min(self.knot_vector) x_max = np.max(self.knot_vector) x = np.linspace(x_min, x_max, num=1000) N = np.array([self(i) for i in x]).T for n in N: plt.plot(x,n) return plt.show()
python
def plot(self): """Plot basis functions over full range of knots. Convenience function. Requires matplotlib. """ try: import matplotlib.pyplot as plt except ImportError: from sys import stderr print("ERROR: matplotlib.pyplot not found, matplotlib must be installed to use this function", file=stderr) raise x_min = np.min(self.knot_vector) x_max = np.max(self.knot_vector) x = np.linspace(x_min, x_max, num=1000) N = np.array([self(i) for i in x]).T for n in N: plt.plot(x,n) return plt.show()
[ "def", "plot", "(", "self", ")", ":", "try", ":", "import", "matplotlib", ".", "pyplot", "as", "plt", "except", "ImportError", ":", "from", "sys", "import", "stderr", "print", "(", "\"ERROR: matplotlib.pyplot not found, matplotlib must be installed to use this function\"", ",", "file", "=", "stderr", ")", "raise", "x_min", "=", "np", ".", "min", "(", "self", ".", "knot_vector", ")", "x_max", "=", "np", ".", "max", "(", "self", ".", "knot_vector", ")", "x", "=", "np", ".", "linspace", "(", "x_min", ",", "x_max", ",", "num", "=", "1000", ")", "N", "=", "np", ".", "array", "(", "[", "self", "(", "i", ")", "for", "i", "in", "x", "]", ")", ".", "T", "for", "n", "in", "N", ":", "plt", ".", "plot", "(", "x", ",", "n", ")", "return", "plt", ".", "show", "(", ")" ]
Plot basis functions over full range of knots. Convenience function. Requires matplotlib.
[ "Plot", "basis", "functions", "over", "full", "range", "of", "knots", "." ]
train
https://github.com/johntfoster/bspline/blob/366085a665da6fe907258eafcc8032c58a0601e0/bspline/bspline.py#L135-L158
johntfoster/bspline
bspline/bspline.py
Bspline.__diff_internal
def __diff_internal(self): """Differentiate a B-spline once, and return the resulting coefficients and Bspline objects. This preserves the Bspline object nature of the data, enabling recursive implementation of higher-order differentiation (see `diff`). The value of the first derivative of `B` at a point `x` can be obtained as:: def diff1(B, x): terms = B.__diff_internal() return sum( ci*Bi(x) for ci,Bi in terms ) Returns: tuple of tuples, where each item is (coefficient, Bspline object). See: `diff`: differentiation of any order >= 0 """ assert self.p > 0, "order of Bspline must be > 0" # we already handle the other case in diff() # https://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/B-spline/bspline-derv.html # t = self.knot_vector p = self.p Bi = Bspline( t[:-1], p-1 ) Bip1 = Bspline( t[1:], p-1 ) numer1 = +p numer2 = -p denom1 = t[p:-1] - t[:-(p+1)] denom2 = t[(p+1):] - t[1:-p] with np.errstate(divide='ignore', invalid='ignore'): ci = np.where(denom1 != 0., (numer1 / denom1), 0.) cip1 = np.where(denom2 != 0., (numer2 / denom2), 0.) return ( (ci,Bi), (cip1,Bip1) )
python
def __diff_internal(self): """Differentiate a B-spline once, and return the resulting coefficients and Bspline objects. This preserves the Bspline object nature of the data, enabling recursive implementation of higher-order differentiation (see `diff`). The value of the first derivative of `B` at a point `x` can be obtained as:: def diff1(B, x): terms = B.__diff_internal() return sum( ci*Bi(x) for ci,Bi in terms ) Returns: tuple of tuples, where each item is (coefficient, Bspline object). See: `diff`: differentiation of any order >= 0 """ assert self.p > 0, "order of Bspline must be > 0" # we already handle the other case in diff() # https://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/B-spline/bspline-derv.html # t = self.knot_vector p = self.p Bi = Bspline( t[:-1], p-1 ) Bip1 = Bspline( t[1:], p-1 ) numer1 = +p numer2 = -p denom1 = t[p:-1] - t[:-(p+1)] denom2 = t[(p+1):] - t[1:-p] with np.errstate(divide='ignore', invalid='ignore'): ci = np.where(denom1 != 0., (numer1 / denom1), 0.) cip1 = np.where(denom2 != 0., (numer2 / denom2), 0.) return ( (ci,Bi), (cip1,Bip1) )
[ "def", "__diff_internal", "(", "self", ")", ":", "assert", "self", ".", "p", ">", "0", ",", "\"order of Bspline must be > 0\"", "# we already handle the other case in diff()", "# https://www.cs.mtu.edu/~shene/COURSES/cs3621/NOTES/spline/B-spline/bspline-derv.html", "#", "t", "=", "self", ".", "knot_vector", "p", "=", "self", ".", "p", "Bi", "=", "Bspline", "(", "t", "[", ":", "-", "1", "]", ",", "p", "-", "1", ")", "Bip1", "=", "Bspline", "(", "t", "[", "1", ":", "]", ",", "p", "-", "1", ")", "numer1", "=", "+", "p", "numer2", "=", "-", "p", "denom1", "=", "t", "[", "p", ":", "-", "1", "]", "-", "t", "[", ":", "-", "(", "p", "+", "1", ")", "]", "denom2", "=", "t", "[", "(", "p", "+", "1", ")", ":", "]", "-", "t", "[", "1", ":", "-", "p", "]", "with", "np", ".", "errstate", "(", "divide", "=", "'ignore'", ",", "invalid", "=", "'ignore'", ")", ":", "ci", "=", "np", ".", "where", "(", "denom1", "!=", "0.", ",", "(", "numer1", "/", "denom1", ")", ",", "0.", ")", "cip1", "=", "np", ".", "where", "(", "denom2", "!=", "0.", ",", "(", "numer2", "/", "denom2", ")", ",", "0.", ")", "return", "(", "(", "ci", ",", "Bi", ")", ",", "(", "cip1", ",", "Bip1", ")", ")" ]
Differentiate a B-spline once, and return the resulting coefficients and Bspline objects. This preserves the Bspline object nature of the data, enabling recursive implementation of higher-order differentiation (see `diff`). The value of the first derivative of `B` at a point `x` can be obtained as:: def diff1(B, x): terms = B.__diff_internal() return sum( ci*Bi(x) for ci,Bi in terms ) Returns: tuple of tuples, where each item is (coefficient, Bspline object). See: `diff`: differentiation of any order >= 0
[ "Differentiate", "a", "B", "-", "spline", "once", "and", "return", "the", "resulting", "coefficients", "and", "Bspline", "objects", "." ]
train
https://github.com/johntfoster/bspline/blob/366085a665da6fe907258eafcc8032c58a0601e0/bspline/bspline.py#L186-L222
johntfoster/bspline
bspline/bspline.py
Bspline.diff
def diff(self, order=1): """Differentiate a B-spline `order` number of times. Parameters: order: int, >= 0 Returns: **lambda** `x`: ... that evaluates the `order`-th derivative of `B` at the point `x`. The returned function internally uses __call__, which is 'memoized' for speed. """ order = int(order) if order < 0: raise ValueError("order must be >= 0, got %d" % (order)) if order == 0: return self.__call__ if order > self.p: # identically zero, but force the same output format as in the general case dummy = self.__call__(0.) # get number of basis functions and output dtype nbasis = dummy.shape[0] return lambda x: np.zeros( (nbasis,), dtype=dummy.dtype ) # accept but ignore input x # At each differentiation, each term maps into two new terms. # The number of terms in the result will be 2**order. # # This will cause an exponential explosion in the number of terms for high derivative orders, # but for the first few orders (practical usage; >3 is rarely needed) the approach works. # terms = [ (1.,self) ] for k in range(order): tmp = [] for Ci,Bi in terms: tmp.extend( (Ci*cn, Bn) for cn,Bn in Bi.__diff_internal() ) # NOTE: also propagate Ci terms = tmp # perform final summation at call time return lambda x: sum( ci*Bi(x) for ci,Bi in terms )
python
def diff(self, order=1): """Differentiate a B-spline `order` number of times. Parameters: order: int, >= 0 Returns: **lambda** `x`: ... that evaluates the `order`-th derivative of `B` at the point `x`. The returned function internally uses __call__, which is 'memoized' for speed. """ order = int(order) if order < 0: raise ValueError("order must be >= 0, got %d" % (order)) if order == 0: return self.__call__ if order > self.p: # identically zero, but force the same output format as in the general case dummy = self.__call__(0.) # get number of basis functions and output dtype nbasis = dummy.shape[0] return lambda x: np.zeros( (nbasis,), dtype=dummy.dtype ) # accept but ignore input x # At each differentiation, each term maps into two new terms. # The number of terms in the result will be 2**order. # # This will cause an exponential explosion in the number of terms for high derivative orders, # but for the first few orders (practical usage; >3 is rarely needed) the approach works. # terms = [ (1.,self) ] for k in range(order): tmp = [] for Ci,Bi in terms: tmp.extend( (Ci*cn, Bn) for cn,Bn in Bi.__diff_internal() ) # NOTE: also propagate Ci terms = tmp # perform final summation at call time return lambda x: sum( ci*Bi(x) for ci,Bi in terms )
[ "def", "diff", "(", "self", ",", "order", "=", "1", ")", ":", "order", "=", "int", "(", "order", ")", "if", "order", "<", "0", ":", "raise", "ValueError", "(", "\"order must be >= 0, got %d\"", "%", "(", "order", ")", ")", "if", "order", "==", "0", ":", "return", "self", ".", "__call__", "if", "order", ">", "self", ".", "p", ":", "# identically zero, but force the same output format as in the general case", "dummy", "=", "self", ".", "__call__", "(", "0.", ")", "# get number of basis functions and output dtype", "nbasis", "=", "dummy", ".", "shape", "[", "0", "]", "return", "lambda", "x", ":", "np", ".", "zeros", "(", "(", "nbasis", ",", ")", ",", "dtype", "=", "dummy", ".", "dtype", ")", "# accept but ignore input x", "# At each differentiation, each term maps into two new terms.", "# The number of terms in the result will be 2**order.", "#", "# This will cause an exponential explosion in the number of terms for high derivative orders,", "# but for the first few orders (practical usage; >3 is rarely needed) the approach works.", "#", "terms", "=", "[", "(", "1.", ",", "self", ")", "]", "for", "k", "in", "range", "(", "order", ")", ":", "tmp", "=", "[", "]", "for", "Ci", ",", "Bi", "in", "terms", ":", "tmp", ".", "extend", "(", "(", "Ci", "*", "cn", ",", "Bn", ")", "for", "cn", ",", "Bn", "in", "Bi", ".", "__diff_internal", "(", ")", ")", "# NOTE: also propagate Ci", "terms", "=", "tmp", "# perform final summation at call time", "return", "lambda", "x", ":", "sum", "(", "ci", "*", "Bi", "(", "x", ")", "for", "ci", ",", "Bi", "in", "terms", ")" ]
Differentiate a B-spline `order` number of times. Parameters: order: int, >= 0 Returns: **lambda** `x`: ... that evaluates the `order`-th derivative of `B` at the point `x`. The returned function internally uses __call__, which is 'memoized' for speed.
[ "Differentiate", "a", "B", "-", "spline", "order", "number", "of", "times", "." ]
train
https://github.com/johntfoster/bspline/blob/366085a665da6fe907258eafcc8032c58a0601e0/bspline/bspline.py#L225-L262
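A hedged usage sketch for Bspline.diff above; the import path and knot vector are illustrative assumptions, the constructor call follows the positional form used in the spcol record.

import numpy as np
from bspline import Bspline      # import path assumed

knots = np.array([0., 0., 0., 0., 0.5, 1., 1., 1., 1.])
B = Bspline(knots, 3)            # cubic basis functions on [0, 1]
d1 = B.diff(order=1)             # callable: first derivative of every basis function
d2 = B.diff(order=2)             # callable: second derivatives
x = 0.3
print(B(x))                      # basis function values at x
print(d1(x), d2(x))              # their first and second derivatives at x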
johntfoster/bspline
bspline/bspline.py
Bspline.collmat
def collmat(self, tau, deriv_order=0): """Compute collocation matrix. Parameters: tau: Python list or rank-1 array, collocation sites deriv_order: int, >=0, order of derivative for which to compute the collocation matrix. The default is 0, which means the function value itself. Returns: A: if len(tau) > 1, rank-2 array such that A[i,j] = D**deriv_order B_j(tau[i]) where D**k = kth derivative (0 for function value itself) if len(tau) == 1, rank-1 array such that A[j] = D**deriv_order B_j(tau) Example: If the coefficients of a spline function are given in the vector c, then:: np.sum( A*c, axis=-1 ) will give a rank-1 array of function values at the sites tau[i] that were supplied to `collmat`. Similarly for derivatives (if the supplied `deriv_order`> 0). """ # get number of basis functions and output dtype dummy = self.__call__(0.) nbasis = dummy.shape[0] tau = np.atleast_1d(tau) if tau.ndim > 1: raise ValueError("tau must be a list or a rank-1 array") A = np.empty( (tau.shape[0], nbasis), dtype=dummy.dtype ) f = self.diff(order=deriv_order) for i,taui in enumerate(tau): A[i,:] = f(taui) return np.squeeze(A)
python
def collmat(self, tau, deriv_order=0): """Compute collocation matrix. Parameters: tau: Python list or rank-1 array, collocation sites deriv_order: int, >=0, order of derivative for which to compute the collocation matrix. The default is 0, which means the function value itself. Returns: A: if len(tau) > 1, rank-2 array such that A[i,j] = D**deriv_order B_j(tau[i]) where D**k = kth derivative (0 for function value itself) if len(tau) == 1, rank-1 array such that A[j] = D**deriv_order B_j(tau) Example: If the coefficients of a spline function are given in the vector c, then:: np.sum( A*c, axis=-1 ) will give a rank-1 array of function values at the sites tau[i] that were supplied to `collmat`. Similarly for derivatives (if the supplied `deriv_order`> 0). """ # get number of basis functions and output dtype dummy = self.__call__(0.) nbasis = dummy.shape[0] tau = np.atleast_1d(tau) if tau.ndim > 1: raise ValueError("tau must be a list or a rank-1 array") A = np.empty( (tau.shape[0], nbasis), dtype=dummy.dtype ) f = self.diff(order=deriv_order) for i,taui in enumerate(tau): A[i,:] = f(taui) return np.squeeze(A)
[ "def", "collmat", "(", "self", ",", "tau", ",", "deriv_order", "=", "0", ")", ":", "# get number of basis functions and output dtype", "dummy", "=", "self", ".", "__call__", "(", "0.", ")", "nbasis", "=", "dummy", ".", "shape", "[", "0", "]", "tau", "=", "np", ".", "atleast_1d", "(", "tau", ")", "if", "tau", ".", "ndim", ">", "1", ":", "raise", "ValueError", "(", "\"tau must be a list or a rank-1 array\"", ")", "A", "=", "np", ".", "empty", "(", "(", "tau", ".", "shape", "[", "0", "]", ",", "nbasis", ")", ",", "dtype", "=", "dummy", ".", "dtype", ")", "f", "=", "self", ".", "diff", "(", "order", "=", "deriv_order", ")", "for", "i", ",", "taui", "in", "enumerate", "(", "tau", ")", ":", "A", "[", "i", ",", ":", "]", "=", "f", "(", "taui", ")", "return", "np", ".", "squeeze", "(", "A", ")" ]
Compute collocation matrix. Parameters: tau: Python list or rank-1 array, collocation sites deriv_order: int, >=0, order of derivative for which to compute the collocation matrix. The default is 0, which means the function value itself. Returns: A: if len(tau) > 1, rank-2 array such that A[i,j] = D**deriv_order B_j(tau[i]) where D**k = kth derivative (0 for function value itself) if len(tau) == 1, rank-1 array such that A[j] = D**deriv_order B_j(tau) Example: If the coefficients of a spline function are given in the vector c, then:: np.sum( A*c, axis=-1 ) will give a rank-1 array of function values at the sites tau[i] that were supplied to `collmat`. Similarly for derivatives (if the supplied `deriv_order`> 0).
[ "Compute", "collocation", "matrix", "." ]
train
https://github.com/johntfoster/bspline/blob/366085a665da6fe907258eafcc8032c58a0601e0/bspline/bspline.py#L265-L309
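A hedged sketch expanding the example given in the collmat docstring above; the knot vector, sites, and coefficients are illustrative and the import path is assumed.

import numpy as np
from bspline import Bspline      # import path assumed

knots = np.array([0., 0., 0., 0., 0.5, 1., 1., 1., 1.])
B = Bspline(knots, 3)
tau = np.linspace(0.0, 0.99, 11)          # collocation sites inside the knot span
A = B.collmat(tau)                        # A[i, j] = B_j(tau[i])
c = np.arange(A.shape[-1], dtype=float)   # some spline coefficients
values = np.sum(A * c, axis=-1)           # spline values at the sites, as in the docstring
dA = B.collmat(tau, deriv_order=1)        # first-derivative collocation matrix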
disqus/disqus-python
disqusapi/utils.py
build_interfaces_by_method
def build_interfaces_by_method(interfaces): """ Create new dictionary from INTERFACES hashed by method then the endpoints name. For use when using the disqusapi by the method interface instead of the endpoint interface. For instance: 'blacklists': { 'add': { 'formats': ['json', 'jsonp'], 'method': 'POST', 'required': ['forum'] } } is translated to: 'POST': { 'blacklists.add': { 'formats': ['json', 'jsonp'], 'method': 'POST', 'required': ['forum'] } """ def traverse(block, parts): try: method = block['method'].lower() except KeyError: for k, v in compat.iteritems(block): traverse(v, parts + [k]) else: path = '.'.join(parts) try: methods[method] except KeyError: methods[method] = {} methods[method][path] = block methods = {} for key, val in compat.iteritems(interfaces): traverse(val, [key]) return methods
python
def build_interfaces_by_method(interfaces): """ Create new dictionary from INTERFACES hashed by method then the endpoints name. For use when using the disqusapi by the method interface instead of the endpoint interface. For instance: 'blacklists': { 'add': { 'formats': ['json', 'jsonp'], 'method': 'POST', 'required': ['forum'] } } is translated to: 'POST': { 'blacklists.add': { 'formats': ['json', 'jsonp'], 'method': 'POST', 'required': ['forum'] } """ def traverse(block, parts): try: method = block['method'].lower() except KeyError: for k, v in compat.iteritems(block): traverse(v, parts + [k]) else: path = '.'.join(parts) try: methods[method] except KeyError: methods[method] = {} methods[method][path] = block methods = {} for key, val in compat.iteritems(interfaces): traverse(val, [key]) return methods
[ "def", "build_interfaces_by_method", "(", "interfaces", ")", ":", "def", "traverse", "(", "block", ",", "parts", ")", ":", "try", ":", "method", "=", "block", "[", "'method'", "]", ".", "lower", "(", ")", "except", "KeyError", ":", "for", "k", ",", "v", "in", "compat", ".", "iteritems", "(", "block", ")", ":", "traverse", "(", "v", ",", "parts", "+", "[", "k", "]", ")", "else", ":", "path", "=", "'.'", ".", "join", "(", "parts", ")", "try", ":", "methods", "[", "method", "]", "except", "KeyError", ":", "methods", "[", "method", "]", "=", "{", "}", "methods", "[", "method", "]", "[", "path", "]", "=", "block", "methods", "=", "{", "}", "for", "key", ",", "val", "in", "compat", ".", "iteritems", "(", "interfaces", ")", ":", "traverse", "(", "val", ",", "[", "key", "]", ")", "return", "methods" ]
Create new dictionary from INTERFACES hashed by method then the endpoints name. For use when using the disqusapi by the method interface instead of the endpoint interface. For instance: 'blacklists': { 'add': { 'formats': ['json', 'jsonp'], 'method': 'POST', 'required': ['forum'] } } is translated to: 'POST': { 'blacklists.add': { 'formats': ['json', 'jsonp'], 'method': 'POST', 'required': ['forum'] }
[ "Create", "new", "dictionary", "from", "INTERFACES", "hashed", "by", "method", "then", "the", "endpoints", "name", ".", "For", "use", "when", "using", "the", "disqusapi", "by", "the", "method", "interface", "instead", "of", "the", "endpoint", "interface", ".", "For", "instance", ":" ]
train
https://github.com/disqus/disqus-python/blob/605f33c7b735fcb85e16041c27658fbba49d7a7b/disqusapi/utils.py#L8-L48
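A hedged usage sketch for build_interfaces_by_method, fed the same fragment quoted in its docstring; note that the code lower-cases the HTTP method, so the resulting key is 'post' rather than 'POST'.

from disqusapi.utils import build_interfaces_by_method   # import path per the record above

interfaces = {
    'blacklists': {
        'add': {
            'formats': ['json', 'jsonp'],
            'method': 'POST',
            'required': ['forum'],
        },
    },
}
by_method = build_interfaces_by_method(interfaces)
# by_method['post']['blacklists.add'] is the original 'blacklists' -> 'add' block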
disqus/disqus-python
disqusapi/utils.py
get_normalized_request_string
def get_normalized_request_string(method, url, nonce, params, ext='', body_hash=None): """ Returns a normalized request string as described iN OAuth2 MAC spec. http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-00#section-3.3.1 """ urlparts = urlparse.urlparse(url) if urlparts.query: norm_url = '%s?%s' % (urlparts.path, urlparts.query) elif params: norm_url = '%s?%s' % (urlparts.path, get_normalized_params(params)) else: norm_url = urlparts.path if not body_hash: body_hash = get_body_hash(params) port = urlparts.port if not port: assert urlparts.scheme in ('http', 'https') if urlparts.scheme == 'http': port = 80 elif urlparts.scheme == 'https': port = 443 output = [nonce, method.upper(), norm_url, urlparts.hostname, port, body_hash, ext, ''] return '\n'.join(map(str, output))
python
def get_normalized_request_string(method, url, nonce, params, ext='', body_hash=None): """ Returns a normalized request string as described iN OAuth2 MAC spec. http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-00#section-3.3.1 """ urlparts = urlparse.urlparse(url) if urlparts.query: norm_url = '%s?%s' % (urlparts.path, urlparts.query) elif params: norm_url = '%s?%s' % (urlparts.path, get_normalized_params(params)) else: norm_url = urlparts.path if not body_hash: body_hash = get_body_hash(params) port = urlparts.port if not port: assert urlparts.scheme in ('http', 'https') if urlparts.scheme == 'http': port = 80 elif urlparts.scheme == 'https': port = 443 output = [nonce, method.upper(), norm_url, urlparts.hostname, port, body_hash, ext, ''] return '\n'.join(map(str, output))
[ "def", "get_normalized_request_string", "(", "method", ",", "url", ",", "nonce", ",", "params", ",", "ext", "=", "''", ",", "body_hash", "=", "None", ")", ":", "urlparts", "=", "urlparse", ".", "urlparse", "(", "url", ")", "if", "urlparts", ".", "query", ":", "norm_url", "=", "'%s?%s'", "%", "(", "urlparts", ".", "path", ",", "urlparts", ".", "query", ")", "elif", "params", ":", "norm_url", "=", "'%s?%s'", "%", "(", "urlparts", ".", "path", ",", "get_normalized_params", "(", "params", ")", ")", "else", ":", "norm_url", "=", "urlparts", ".", "path", "if", "not", "body_hash", ":", "body_hash", "=", "get_body_hash", "(", "params", ")", "port", "=", "urlparts", ".", "port", "if", "not", "port", ":", "assert", "urlparts", ".", "scheme", "in", "(", "'http'", ",", "'https'", ")", "if", "urlparts", ".", "scheme", "==", "'http'", ":", "port", "=", "80", "elif", "urlparts", ".", "scheme", "==", "'https'", ":", "port", "=", "443", "output", "=", "[", "nonce", ",", "method", ".", "upper", "(", ")", ",", "norm_url", ",", "urlparts", ".", "hostname", ",", "port", ",", "body_hash", ",", "ext", ",", "''", "]", "return", "'\\n'", ".", "join", "(", "map", "(", "str", ",", "output", ")", ")" ]
Returns a normalized request string as described iN OAuth2 MAC spec. http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-00#section-3.3.1
[ "Returns", "a", "normalized", "request", "string", "as", "described", "iN", "OAuth2", "MAC", "spec", "." ]
train
https://github.com/disqus/disqus-python/blob/605f33c7b735fcb85e16041c27658fbba49d7a7b/disqusapi/utils.py#L59-L87
disqus/disqus-python
disqusapi/utils.py
get_body_hash
def get_body_hash(params): """ Returns BASE64 ( HASH (text) ) as described in OAuth2 MAC spec. http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-00#section-3.2 """ norm_params = get_normalized_params(params) return binascii.b2a_base64(hashlib.sha1(norm_params).digest())[:-1]
python
def get_body_hash(params): """ Returns BASE64 ( HASH (text) ) as described in OAuth2 MAC spec. http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-00#section-3.2 """ norm_params = get_normalized_params(params) return binascii.b2a_base64(hashlib.sha1(norm_params).digest())[:-1]
[ "def", "get_body_hash", "(", "params", ")", ":", "norm_params", "=", "get_normalized_params", "(", "params", ")", "return", "binascii", ".", "b2a_base64", "(", "hashlib", ".", "sha1", "(", "norm_params", ")", ".", "digest", "(", ")", ")", "[", ":", "-", "1", "]" ]
Returns BASE64 ( HASH (text) ) as described in OAuth2 MAC spec. http://tools.ietf.org/html/draft-ietf-oauth-v2-http-mac-00#section-3.2
[ "Returns", "BASE64", "(", "HASH", "(", "text", ")", ")", "as", "described", "in", "OAuth2", "MAC", "spec", "." ]
train
https://github.com/disqus/disqus-python/blob/605f33c7b735fcb85e16041c27658fbba49d7a7b/disqusapi/utils.py#L90-L98
disqus/disqus-python
disqusapi/utils.py
get_mac_signature
def get_mac_signature(api_secret, norm_request_string): """ Returns HMAC-SHA1 (api secret, normalized request string) """ hashed = hmac.new(str(api_secret), norm_request_string, hashlib.sha1) return binascii.b2a_base64(hashed.digest())[:-1]
python
def get_mac_signature(api_secret, norm_request_string): """ Returns HMAC-SHA1 (api secret, normalized request string) """ hashed = hmac.new(str(api_secret), norm_request_string, hashlib.sha1) return binascii.b2a_base64(hashed.digest())[:-1]
[ "def", "get_mac_signature", "(", "api_secret", ",", "norm_request_string", ")", ":", "hashed", "=", "hmac", ".", "new", "(", "str", "(", "api_secret", ")", ",", "norm_request_string", ",", "hashlib", ".", "sha1", ")", "return", "binascii", ".", "b2a_base64", "(", "hashed", ".", "digest", "(", ")", ")", "[", ":", "-", "1", "]" ]
Returns HMAC-SHA1 (api secret, normalized request string)
[ "Returns", "HMAC", "-", "SHA1", "(", "api", "secret", "normalized", "request", "string", ")" ]
train
https://github.com/disqus/disqus-python/blob/605f33c7b735fcb85e16041c27658fbba49d7a7b/disqusapi/utils.py#L101-L106
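A hedged sketch tying the three MAC helpers above together, written against the Python 2 string semantics the module assumes; the URL, params, nonce, and secret are illustrative values, and get_normalized_params is the companion helper these functions call internally.

from disqusapi.utils import (get_body_hash, get_normalized_request_string,
                             get_mac_signature)   # import path per the records above

params = {'forum': 'example', 'limit': 25}
nonce = '264095:dj83hs9s'
body_hash = get_body_hash(params)
norm = get_normalized_request_string('GET', 'http://disqus.com/api/3.0/posts/list.json',
                                     nonce, params, body_hash=body_hash)
signature = get_mac_signature('my-api-secret', norm)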
codesardine/Jade-Application-Kit
j/Api.py
Fs.open_file
def open_file(self, access_mode="r"): """ input: filename and path. output: file contents. """ try: with open(self, access_mode, encoding='utf-8') as file: return file.read() except IOError: print(self + " File not found.") sys.exit(0)
python
def open_file(self, access_mode="r"): """ input: filename and path. output: file contents. """ try: with open(self, access_mode, encoding='utf-8') as file: return file.read() except IOError: print(self + " File not found.") sys.exit(0)
[ "def", "open_file", "(", "self", ",", "access_mode", "=", "\"r\"", ")", ":", "try", ":", "with", "open", "(", "self", ",", "access_mode", ",", "encoding", "=", "'utf-8'", ")", "as", "file", ":", "return", "file", ".", "read", "(", ")", "except", "IOError", ":", "print", "(", "self", "+", "\" File not found.\"", ")", "sys", ".", "exit", "(", "0", ")" ]
input: filename and path. output: file contents.
[ "input", ":", "filename", "and", "path", ".", "output", ":", "file", "contents", "." ]
train
https://github.com/codesardine/Jade-Application-Kit/blob/f94ba061f1e71c812f23ef4297473a5fce712367/j/Api.py#L8-L19
bhmm/bhmm
bhmm/estimators/maximum_likelihood.py
MaximumLikelihoodEstimator._forward_backward
def _forward_backward(self, itraj): """ Estimation step: Runs the forward-back algorithm on trajectory with index itraj Parameters ---------- itraj : int index of the observation trajectory to process Results ------- logprob : float The probability to observe the observation sequence given the HMM parameters gamma : ndarray(T,N, dtype=float) state probabilities for each t count_matrix : ndarray(N,N, dtype=float) the Baum-Welch transition count matrix from the hidden state trajectory """ # get parameters A = self._hmm.transition_matrix pi = self._hmm.initial_distribution obs = self._observations[itraj] T = len(obs) # compute output probability matrix # t1 = time.time() self._hmm.output_model.p_obs(obs, out=self._pobs) # t2 = time.time() # self._fbtimings[0] += t2-t1 # forward variables logprob = hidden.forward(A, self._pobs, pi, T=T, alpha_out=self._alpha)[0] # t3 = time.time() # self._fbtimings[1] += t3-t2 # backward variables hidden.backward(A, self._pobs, T=T, beta_out=self._beta) # t4 = time.time() # self._fbtimings[2] += t4-t3 # gamma hidden.state_probabilities(self._alpha, self._beta, T=T, gamma_out=self._gammas[itraj]) # t5 = time.time() # self._fbtimings[3] += t5-t4 # count matrix hidden.transition_counts(self._alpha, self._beta, A, self._pobs, T=T, out=self._Cs[itraj]) # t6 = time.time() # self._fbtimings[4] += t6-t5 # return results return logprob
python
def _forward_backward(self, itraj): """ Estimation step: Runs the forward-back algorithm on trajectory with index itraj Parameters ---------- itraj : int index of the observation trajectory to process Results ------- logprob : float The probability to observe the observation sequence given the HMM parameters gamma : ndarray(T,N, dtype=float) state probabilities for each t count_matrix : ndarray(N,N, dtype=float) the Baum-Welch transition count matrix from the hidden state trajectory """ # get parameters A = self._hmm.transition_matrix pi = self._hmm.initial_distribution obs = self._observations[itraj] T = len(obs) # compute output probability matrix # t1 = time.time() self._hmm.output_model.p_obs(obs, out=self._pobs) # t2 = time.time() # self._fbtimings[0] += t2-t1 # forward variables logprob = hidden.forward(A, self._pobs, pi, T=T, alpha_out=self._alpha)[0] # t3 = time.time() # self._fbtimings[1] += t3-t2 # backward variables hidden.backward(A, self._pobs, T=T, beta_out=self._beta) # t4 = time.time() # self._fbtimings[2] += t4-t3 # gamma hidden.state_probabilities(self._alpha, self._beta, T=T, gamma_out=self._gammas[itraj]) # t5 = time.time() # self._fbtimings[3] += t5-t4 # count matrix hidden.transition_counts(self._alpha, self._beta, A, self._pobs, T=T, out=self._Cs[itraj]) # t6 = time.time() # self._fbtimings[4] += t6-t5 # return results return logprob
[ "def", "_forward_backward", "(", "self", ",", "itraj", ")", ":", "# get parameters", "A", "=", "self", ".", "_hmm", ".", "transition_matrix", "pi", "=", "self", ".", "_hmm", ".", "initial_distribution", "obs", "=", "self", ".", "_observations", "[", "itraj", "]", "T", "=", "len", "(", "obs", ")", "# compute output probability matrix", "# t1 = time.time()", "self", ".", "_hmm", ".", "output_model", ".", "p_obs", "(", "obs", ",", "out", "=", "self", ".", "_pobs", ")", "# t2 = time.time()", "# self._fbtimings[0] += t2-t1", "# forward variables", "logprob", "=", "hidden", ".", "forward", "(", "A", ",", "self", ".", "_pobs", ",", "pi", ",", "T", "=", "T", ",", "alpha_out", "=", "self", ".", "_alpha", ")", "[", "0", "]", "# t3 = time.time()", "# self._fbtimings[1] += t3-t2", "# backward variables", "hidden", ".", "backward", "(", "A", ",", "self", ".", "_pobs", ",", "T", "=", "T", ",", "beta_out", "=", "self", ".", "_beta", ")", "# t4 = time.time()", "# self._fbtimings[2] += t4-t3", "# gamma", "hidden", ".", "state_probabilities", "(", "self", ".", "_alpha", ",", "self", ".", "_beta", ",", "T", "=", "T", ",", "gamma_out", "=", "self", ".", "_gammas", "[", "itraj", "]", ")", "# t5 = time.time()", "# self._fbtimings[3] += t5-t4", "# count matrix", "hidden", ".", "transition_counts", "(", "self", ".", "_alpha", ",", "self", ".", "_beta", ",", "A", ",", "self", ".", "_pobs", ",", "T", "=", "T", ",", "out", "=", "self", ".", "_Cs", "[", "itraj", "]", ")", "# t6 = time.time()", "# self._fbtimings[4] += t6-t5", "# return results", "return", "logprob" ]
Estimation step: Runs the forward-back algorithm on trajectory with index itraj Parameters ---------- itraj : int index of the observation trajectory to process Results ------- logprob : float The probability to observe the observation sequence given the HMM parameters gamma : ndarray(T,N, dtype=float) state probabilities for each t count_matrix : ndarray(N,N, dtype=float) the Baum-Welch transition count matrix from the hidden state trajectory
[ "Estimation", "step", ":", "Runs", "the", "forward", "-", "back", "algorithm", "on", "trajectory", "with", "index", "itraj" ]
train
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/estimators/maximum_likelihood.py#L221-L269
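A minimal standalone sketch of the forward-backward pass described in the docstring above, written in plain numpy rather than with bhmm.hidden; it is unscaled, so unlike the real implementation it will underflow on long trajectories, and all names below are illustrative.

import numpy as np

def forward_backward(A, pi, pobs):
    # A: (N, N) transition matrix, pi: (N,) initial distribution,
    # pobs: (T, N) output probabilities per time step and hidden state
    T, N = pobs.shape
    alpha = np.zeros((T, N))
    beta = np.zeros((T, N))
    alpha[0] = pi * pobs[0]
    for t in range(1, T):                        # forward recursion
        alpha[t] = (alpha[t - 1] @ A) * pobs[t]
    beta[-1] = 1.0
    for t in range(T - 2, -1, -1):               # backward recursion
        beta[t] = A @ (pobs[t + 1] * beta[t + 1])
    logprob = np.log(alpha[-1].sum())            # log P(observation sequence)
    gamma = alpha * beta
    gamma /= gamma.sum(axis=1, keepdims=True)    # state probabilities per time step
    return logprob, gamma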
bhmm/bhmm
bhmm/estimators/maximum_likelihood.py
MaximumLikelihoodEstimator._update_model
def _update_model(self, gammas, count_matrices, maxiter=10000000): """ Maximization step: Updates the HMM model given the hidden state assignment and count matrices Parameters ---------- gamma : [ ndarray(T,N, dtype=float) ] list of state probabilities for each trajectory count_matrix : [ ndarray(N,N, dtype=float) ] list of the Baum-Welch transition count matrices for each hidden state trajectory maxiter : int maximum number of iterations of the transition matrix estimation if an iterative method is used. """ gamma0_sum = self._init_counts(gammas) C = self._transition_counts(count_matrices) logger().info("Initial count = \n"+str(gamma0_sum)) logger().info("Count matrix = \n"+str(C)) # compute new transition matrix from bhmm.estimators._tmatrix_disconnected import estimate_P, stationary_distribution T = estimate_P(C, reversible=self._hmm.is_reversible, fixed_statdist=self._fixed_stationary_distribution, maxiter=maxiter, maxerr=1e-12, mincount_connectivity=1e-16) # print 'P:\n', T # estimate stationary or init distribution if self._stationary: if self._fixed_stationary_distribution is None: pi = stationary_distribution(T, C=C, mincount_connectivity=1e-16) else: pi = self._fixed_stationary_distribution else: if self._fixed_initial_distribution is None: pi = gamma0_sum / np.sum(gamma0_sum) else: pi = self._fixed_initial_distribution # print 'pi: ', pi, ' stationary = ', self._hmm.is_stationary # update model self._hmm.update(pi, T) logger().info("T: \n"+str(T)) logger().info("pi: \n"+str(pi)) # update output model self._hmm.output_model.estimate(self._observations, gammas)
python
def _update_model(self, gammas, count_matrices, maxiter=10000000): """ Maximization step: Updates the HMM model given the hidden state assignment and count matrices Parameters ---------- gamma : [ ndarray(T,N, dtype=float) ] list of state probabilities for each trajectory count_matrix : [ ndarray(N,N, dtype=float) ] list of the Baum-Welch transition count matrices for each hidden state trajectory maxiter : int maximum number of iterations of the transition matrix estimation if an iterative method is used. """ gamma0_sum = self._init_counts(gammas) C = self._transition_counts(count_matrices) logger().info("Initial count = \n"+str(gamma0_sum)) logger().info("Count matrix = \n"+str(C)) # compute new transition matrix from bhmm.estimators._tmatrix_disconnected import estimate_P, stationary_distribution T = estimate_P(C, reversible=self._hmm.is_reversible, fixed_statdist=self._fixed_stationary_distribution, maxiter=maxiter, maxerr=1e-12, mincount_connectivity=1e-16) # print 'P:\n', T # estimate stationary or init distribution if self._stationary: if self._fixed_stationary_distribution is None: pi = stationary_distribution(T, C=C, mincount_connectivity=1e-16) else: pi = self._fixed_stationary_distribution else: if self._fixed_initial_distribution is None: pi = gamma0_sum / np.sum(gamma0_sum) else: pi = self._fixed_initial_distribution # print 'pi: ', pi, ' stationary = ', self._hmm.is_stationary # update model self._hmm.update(pi, T) logger().info("T: \n"+str(T)) logger().info("pi: \n"+str(pi)) # update output model self._hmm.output_model.estimate(self._observations, gammas)
[ "def", "_update_model", "(", "self", ",", "gammas", ",", "count_matrices", ",", "maxiter", "=", "10000000", ")", ":", "gamma0_sum", "=", "self", ".", "_init_counts", "(", "gammas", ")", "C", "=", "self", ".", "_transition_counts", "(", "count_matrices", ")", "logger", "(", ")", ".", "info", "(", "\"Initial count = \\n\"", "+", "str", "(", "gamma0_sum", ")", ")", "logger", "(", ")", ".", "info", "(", "\"Count matrix = \\n\"", "+", "str", "(", "C", ")", ")", "# compute new transition matrix", "from", "bhmm", ".", "estimators", ".", "_tmatrix_disconnected", "import", "estimate_P", ",", "stationary_distribution", "T", "=", "estimate_P", "(", "C", ",", "reversible", "=", "self", ".", "_hmm", ".", "is_reversible", ",", "fixed_statdist", "=", "self", ".", "_fixed_stationary_distribution", ",", "maxiter", "=", "maxiter", ",", "maxerr", "=", "1e-12", ",", "mincount_connectivity", "=", "1e-16", ")", "# print 'P:\\n', T", "# estimate stationary or init distribution", "if", "self", ".", "_stationary", ":", "if", "self", ".", "_fixed_stationary_distribution", "is", "None", ":", "pi", "=", "stationary_distribution", "(", "T", ",", "C", "=", "C", ",", "mincount_connectivity", "=", "1e-16", ")", "else", ":", "pi", "=", "self", ".", "_fixed_stationary_distribution", "else", ":", "if", "self", ".", "_fixed_initial_distribution", "is", "None", ":", "pi", "=", "gamma0_sum", "/", "np", ".", "sum", "(", "gamma0_sum", ")", "else", ":", "pi", "=", "self", ".", "_fixed_initial_distribution", "# print 'pi: ', pi, ' stationary = ', self._hmm.is_stationary", "# update model", "self", ".", "_hmm", ".", "update", "(", "pi", ",", "T", ")", "logger", "(", ")", ".", "info", "(", "\"T: \\n\"", "+", "str", "(", "T", ")", ")", "logger", "(", ")", ".", "info", "(", "\"pi: \\n\"", "+", "str", "(", "pi", ")", ")", "# update output model", "self", ".", "_hmm", ".", "output_model", ".", "estimate", "(", "self", ".", "_observations", ",", "gammas", ")" ]
Maximization step: Updates the HMM model given the hidden state assignment and count matrices Parameters ---------- gamma : [ ndarray(T,N, dtype=float) ] list of state probabilities for each trajectory count_matrix : [ ndarray(N,N, dtype=float) ] list of the Baum-Welch transition count matrices for each hidden state trajectory maxiter : int maximum number of iterations of the transition matrix estimation if an iterative method is used.
[ "Maximization", "step", ":", "Updates", "the", "HMM", "model", "given", "the", "hidden", "state", "assignment", "and", "count", "matrices" ]
train
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/estimators/maximum_likelihood.py#L284-L330
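A hedged sketch of the Baum-Welch transition counts that, together with the gammas, feed _update_model above; it continues the standalone forward-backward sketch rather than using bhmm.hidden.transition_counts.

import numpy as np

def transition_counts(alpha, beta, A, pobs):
    # expected number of i -> j transitions, summed over time steps
    T, N = pobs.shape
    C = np.zeros((N, N))
    for t in range(T - 1):
        xi = alpha[t][:, None] * A * (pobs[t + 1] * beta[t + 1])[None, :]
        C += xi / xi.sum()                       # normalize by P(observation sequence)
    return C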
bhmm/bhmm
bhmm/estimators/maximum_likelihood.py
MaximumLikelihoodEstimator.compute_viterbi_paths
def compute_viterbi_paths(self): """ Computes the viterbi paths using the current HMM model """ # get parameters K = len(self._observations) A = self._hmm.transition_matrix pi = self._hmm.initial_distribution # compute viterbi path for each trajectory paths = np.empty(K, dtype=object) for itraj in range(K): obs = self._observations[itraj] # compute output probability matrix pobs = self._hmm.output_model.p_obs(obs) # hidden path paths[itraj] = hidden.viterbi(A, pobs, pi) # done return paths
python
def compute_viterbi_paths(self): """ Computes the viterbi paths using the current HMM model """ # get parameters K = len(self._observations) A = self._hmm.transition_matrix pi = self._hmm.initial_distribution # compute viterbi path for each trajectory paths = np.empty(K, dtype=object) for itraj in range(K): obs = self._observations[itraj] # compute output probability matrix pobs = self._hmm.output_model.p_obs(obs) # hidden path paths[itraj] = hidden.viterbi(A, pobs, pi) # done return paths
[ "def", "compute_viterbi_paths", "(", "self", ")", ":", "# get parameters", "K", "=", "len", "(", "self", ".", "_observations", ")", "A", "=", "self", ".", "_hmm", ".", "transition_matrix", "pi", "=", "self", ".", "_hmm", ".", "initial_distribution", "# compute viterbi path for each trajectory", "paths", "=", "np", ".", "empty", "(", "K", ",", "dtype", "=", "object", ")", "for", "itraj", "in", "range", "(", "K", ")", ":", "obs", "=", "self", ".", "_observations", "[", "itraj", "]", "# compute output probability matrix", "pobs", "=", "self", ".", "_hmm", ".", "output_model", ".", "p_obs", "(", "obs", ")", "# hidden path", "paths", "[", "itraj", "]", "=", "hidden", ".", "viterbi", "(", "A", ",", "pobs", ",", "pi", ")", "# done", "return", "paths" ]
Computes the viterbi paths using the current HMM model
[ "Computes", "the", "viterbi", "paths", "using", "the", "current", "HMM", "model" ]
train
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/estimators/maximum_likelihood.py#L332-L352
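A minimal standalone Viterbi sketch matching the description above, done in log space with plain numpy rather than bhmm.hidden.viterbi; it assumes strictly positive transition, initial, and output probabilities, and the names are illustrative.

import numpy as np

def viterbi(A, pi, pobs):
    T, N = pobs.shape
    logA = np.log(A)
    delta = np.log(pi * pobs[0])                 # best log-score ending in each state
    psi = np.zeros((T, N), dtype=int)            # backpointers
    for t in range(1, T):
        scores = delta[:, None] + logA           # scores[i, j]: come from i, go to j
        psi[t] = np.argmax(scores, axis=0)
        delta = scores[psi[t], np.arange(N)] + np.log(pobs[t])
    path = np.empty(T, dtype=int)
    path[-1] = np.argmax(delta)
    for t in range(T - 2, -1, -1):               # backtrack along the stored pointers
        path[t] = psi[t + 1, path[t + 1]]
    return path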
bhmm/bhmm
bhmm/estimators/maximum_likelihood.py
MaximumLikelihoodEstimator.fit
def fit(self): """ Maximum-likelihood estimation of the HMM using the Baum-Welch algorithm Returns ------- model : HMM The maximum likelihood HMM model. """ logger().info("=================================================================") logger().info("Running Baum-Welch:") logger().info(" input observations: "+str(self.nobservations)+" of lengths "+str(self.observation_lengths)) logger().info(" initial HMM guess:"+str(self._hmm)) initial_time = time.time() it = 0 self._likelihoods = np.zeros(self.maxit) loglik = 0.0 # flag if connectivity has changed (e.g. state lost) - in that case the likelihood # is discontinuous and can't be used as a convergence criterion in that iteration. tmatrix_nonzeros = self.hmm.transition_matrix.nonzero() converged = False while not converged and it < self.maxit: # self._fbtimings = np.zeros(5) t1 = time.time() loglik = 0.0 for k in range(self._nobs): loglik += self._forward_backward(k) assert np.isfinite(loglik), it t2 = time.time() # convergence check if it > 0: dL = loglik - self._likelihoods[it-1] # print 'dL ', dL, 'iter_P ', maxiter_P if dL < self._accuracy: # print "CONVERGED! Likelihood change = ",(loglik - self.likelihoods[it-1]) converged = True # update model self._update_model(self._gammas, self._Cs, maxiter=self._maxit_P) t3 = time.time() # connectivity change check tmatrix_nonzeros_new = self.hmm.transition_matrix.nonzero() if not np.array_equal(tmatrix_nonzeros, tmatrix_nonzeros_new): converged = False # unset converged tmatrix_nonzeros = tmatrix_nonzeros_new # print 't_fb: ', str(1000.0*(t2-t1)), 't_up: ', str(1000.0*(t3-t2)), 'L = ', loglik, 'dL = ', (loglik - self._likelihoods[it-1]) # print ' fb timings (ms): pobs', (1000.0*self._fbtimings).astype(int) logger().info(str(it) + " ll = " + str(loglik)) # print self.model.output_model # print "---------------------" # end of iteration self._likelihoods[it] = loglik it += 1 # final update with high precision # self._update_model(self._gammas, self._Cs, maxiter=10000000) # truncate likelihood history self._likelihoods = self._likelihoods[:it] # set final likelihood self._hmm.likelihood = loglik # set final count matrix self.count_matrix = self._transition_counts(self._Cs) self.initial_count = self._init_counts(self._gammas) final_time = time.time() elapsed_time = final_time - initial_time logger().info("maximum likelihood HMM:"+str(self._hmm)) logger().info("Elapsed time for Baum-Welch solution: %.3f s" % elapsed_time) logger().info("Computing Viterbi path:") initial_time = time.time() # Compute hidden state trajectories using the Viterbi algorithm. self._hmm.hidden_state_trajectories = self.compute_viterbi_paths() final_time = time.time() elapsed_time = final_time - initial_time logger().info("Elapsed time for Viterbi path computation: %.3f s" % elapsed_time) logger().info("=================================================================") return self._hmm
python
def fit(self): """ Maximum-likelihood estimation of the HMM using the Baum-Welch algorithm Returns ------- model : HMM The maximum likelihood HMM model. """ logger().info("=================================================================") logger().info("Running Baum-Welch:") logger().info(" input observations: "+str(self.nobservations)+" of lengths "+str(self.observation_lengths)) logger().info(" initial HMM guess:"+str(self._hmm)) initial_time = time.time() it = 0 self._likelihoods = np.zeros(self.maxit) loglik = 0.0 # flag if connectivity has changed (e.g. state lost) - in that case the likelihood # is discontinuous and can't be used as a convergence criterion in that iteration. tmatrix_nonzeros = self.hmm.transition_matrix.nonzero() converged = False while not converged and it < self.maxit: # self._fbtimings = np.zeros(5) t1 = time.time() loglik = 0.0 for k in range(self._nobs): loglik += self._forward_backward(k) assert np.isfinite(loglik), it t2 = time.time() # convergence check if it > 0: dL = loglik - self._likelihoods[it-1] # print 'dL ', dL, 'iter_P ', maxiter_P if dL < self._accuracy: # print "CONVERGED! Likelihood change = ",(loglik - self.likelihoods[it-1]) converged = True # update model self._update_model(self._gammas, self._Cs, maxiter=self._maxit_P) t3 = time.time() # connectivity change check tmatrix_nonzeros_new = self.hmm.transition_matrix.nonzero() if not np.array_equal(tmatrix_nonzeros, tmatrix_nonzeros_new): converged = False # unset converged tmatrix_nonzeros = tmatrix_nonzeros_new # print 't_fb: ', str(1000.0*(t2-t1)), 't_up: ', str(1000.0*(t3-t2)), 'L = ', loglik, 'dL = ', (loglik - self._likelihoods[it-1]) # print ' fb timings (ms): pobs', (1000.0*self._fbtimings).astype(int) logger().info(str(it) + " ll = " + str(loglik)) # print self.model.output_model # print "---------------------" # end of iteration self._likelihoods[it] = loglik it += 1 # final update with high precision # self._update_model(self._gammas, self._Cs, maxiter=10000000) # truncate likelihood history self._likelihoods = self._likelihoods[:it] # set final likelihood self._hmm.likelihood = loglik # set final count matrix self.count_matrix = self._transition_counts(self._Cs) self.initial_count = self._init_counts(self._gammas) final_time = time.time() elapsed_time = final_time - initial_time logger().info("maximum likelihood HMM:"+str(self._hmm)) logger().info("Elapsed time for Baum-Welch solution: %.3f s" % elapsed_time) logger().info("Computing Viterbi path:") initial_time = time.time() # Compute hidden state trajectories using the Viterbi algorithm. self._hmm.hidden_state_trajectories = self.compute_viterbi_paths() final_time = time.time() elapsed_time = final_time - initial_time logger().info("Elapsed time for Viterbi path computation: %.3f s" % elapsed_time) logger().info("=================================================================") return self._hmm
[ "def", "fit", "(", "self", ")", ":", "logger", "(", ")", ".", "info", "(", "\"=================================================================\"", ")", "logger", "(", ")", ".", "info", "(", "\"Running Baum-Welch:\"", ")", "logger", "(", ")", ".", "info", "(", "\" input observations: \"", "+", "str", "(", "self", ".", "nobservations", ")", "+", "\" of lengths \"", "+", "str", "(", "self", ".", "observation_lengths", ")", ")", "logger", "(", ")", ".", "info", "(", "\" initial HMM guess:\"", "+", "str", "(", "self", ".", "_hmm", ")", ")", "initial_time", "=", "time", ".", "time", "(", ")", "it", "=", "0", "self", ".", "_likelihoods", "=", "np", ".", "zeros", "(", "self", ".", "maxit", ")", "loglik", "=", "0.0", "# flag if connectivity has changed (e.g. state lost) - in that case the likelihood", "# is discontinuous and can't be used as a convergence criterion in that iteration.", "tmatrix_nonzeros", "=", "self", ".", "hmm", ".", "transition_matrix", ".", "nonzero", "(", ")", "converged", "=", "False", "while", "not", "converged", "and", "it", "<", "self", ".", "maxit", ":", "# self._fbtimings = np.zeros(5)", "t1", "=", "time", ".", "time", "(", ")", "loglik", "=", "0.0", "for", "k", "in", "range", "(", "self", ".", "_nobs", ")", ":", "loglik", "+=", "self", ".", "_forward_backward", "(", "k", ")", "assert", "np", ".", "isfinite", "(", "loglik", ")", ",", "it", "t2", "=", "time", ".", "time", "(", ")", "# convergence check", "if", "it", ">", "0", ":", "dL", "=", "loglik", "-", "self", ".", "_likelihoods", "[", "it", "-", "1", "]", "# print 'dL ', dL, 'iter_P ', maxiter_P", "if", "dL", "<", "self", ".", "_accuracy", ":", "# print \"CONVERGED! Likelihood change = \",(loglik - self.likelihoods[it-1])", "converged", "=", "True", "# update model", "self", ".", "_update_model", "(", "self", ".", "_gammas", ",", "self", ".", "_Cs", ",", "maxiter", "=", "self", ".", "_maxit_P", ")", "t3", "=", "time", ".", "time", "(", ")", "# connectivity change check", "tmatrix_nonzeros_new", "=", "self", ".", "hmm", ".", "transition_matrix", ".", "nonzero", "(", ")", "if", "not", "np", ".", "array_equal", "(", "tmatrix_nonzeros", ",", "tmatrix_nonzeros_new", ")", ":", "converged", "=", "False", "# unset converged", "tmatrix_nonzeros", "=", "tmatrix_nonzeros_new", "# print 't_fb: ', str(1000.0*(t2-t1)), 't_up: ', str(1000.0*(t3-t2)), 'L = ', loglik, 'dL = ', (loglik - self._likelihoods[it-1])", "# print ' fb timings (ms): pobs', (1000.0*self._fbtimings).astype(int)", "logger", "(", ")", ".", "info", "(", "str", "(", "it", ")", "+", "\" ll = \"", "+", "str", "(", "loglik", ")", ")", "# print self.model.output_model", "# print \"---------------------\"", "# end of iteration", "self", ".", "_likelihoods", "[", "it", "]", "=", "loglik", "it", "+=", "1", "# final update with high precision", "# self._update_model(self._gammas, self._Cs, maxiter=10000000)", "# truncate likelihood history", "self", ".", "_likelihoods", "=", "self", ".", "_likelihoods", "[", ":", "it", "]", "# set final likelihood", "self", ".", "_hmm", ".", "likelihood", "=", "loglik", "# set final count matrix", "self", ".", "count_matrix", "=", "self", ".", "_transition_counts", "(", "self", ".", "_Cs", ")", "self", ".", "initial_count", "=", "self", ".", "_init_counts", "(", "self", ".", "_gammas", ")", "final_time", "=", "time", ".", "time", "(", ")", "elapsed_time", "=", "final_time", "-", "initial_time", "logger", "(", ")", ".", "info", "(", "\"maximum likelihood HMM:\"", "+", "str", "(", "self", ".", "_hmm", ")", ")", "logger", "(", ")", ".", "info", "(", "\"Elapsed time for Baum-Welch solution: %.3f s\"", "%", "elapsed_time", ")", "logger", "(", ")", ".", "info", "(", "\"Computing Viterbi path:\"", ")", "initial_time", "=", "time", ".", "time", "(", ")", "# Compute hidden state trajectories using the Viterbi algorithm.", "self", ".", "_hmm", ".", "hidden_state_trajectories", "=", "self", ".", "compute_viterbi_paths", "(", ")", "final_time", "=", "time", ".", "time", "(", ")", "elapsed_time", "=", "final_time", "-", "initial_time", "logger", "(", ")", ".", "info", "(", "\"Elapsed time for Viterbi path computation: %.3f s\"", "%", "elapsed_time", ")", "logger", "(", ")", ".", "info", "(", "\"=================================================================\"", ")", "return", "self", ".", "_hmm" ]
Maximum-likelihood estimation of the HMM using the Baum-Welch algorithm Returns ------- model : HMM The maximum likelihood HMM model.
[ "Maximum", "-", "likelihood", "estimation", "of", "the", "HMM", "using", "the", "Baum", "-", "Welch", "algorithm" ]
train
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/estimators/maximum_likelihood.py#L354-L446
bhmm/bhmm
bhmm/_external/sklearn/mixture/gmm.py
sample_gaussian
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1, random_state=None): """Generate random samples from a Gaussian distribution. Parameters ---------- mean : array_like, shape (n_features,) Mean of the distribution. covar : array_like, optional Covariance of the distribution. The shape depends on `covariance_type`: scalar if 'spherical', (n_features) if 'diag', (n_features, n_features) if 'tied', or 'full' covariance_type : string, optional Type of the covariance parameters. Must be one of 'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'. n_samples : int, optional Number of samples to generate. Defaults to 1. Returns ------- X : array, shape (n_features, n_samples) Randomly generated sample """ rng = check_random_state(random_state) n_dim = len(mean) rand = rng.randn(n_dim, n_samples) if n_samples == 1: rand.shape = (n_dim,) if covariance_type == 'spherical': rand *= np.sqrt(covar) elif covariance_type == 'diag': rand = np.dot(np.diag(np.sqrt(covar)), rand) else: s, U = linalg.eigh(covar) s.clip(0, out=s) # get rid of tiny negatives np.sqrt(s, out=s) U *= s rand = np.dot(U, rand) return (rand.T + mean).T
python
def sample_gaussian(mean, covar, covariance_type='diag', n_samples=1, random_state=None): """Generate random samples from a Gaussian distribution. Parameters ---------- mean : array_like, shape (n_features,) Mean of the distribution. covar : array_like, optional Covariance of the distribution. The shape depends on `covariance_type`: scalar if 'spherical', (n_features) if 'diag', (n_features, n_features) if 'tied', or 'full' covariance_type : string, optional Type of the covariance parameters. Must be one of 'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'. n_samples : int, optional Number of samples to generate. Defaults to 1. Returns ------- X : array, shape (n_features, n_samples) Randomly generated sample """ rng = check_random_state(random_state) n_dim = len(mean) rand = rng.randn(n_dim, n_samples) if n_samples == 1: rand.shape = (n_dim,) if covariance_type == 'spherical': rand *= np.sqrt(covar) elif covariance_type == 'diag': rand = np.dot(np.diag(np.sqrt(covar)), rand) else: s, U = linalg.eigh(covar) s.clip(0, out=s) # get rid of tiny negatives np.sqrt(s, out=s) U *= s rand = np.dot(U, rand) return (rand.T + mean).T
[ "def", "sample_gaussian", "(", "mean", ",", "covar", ",", "covariance_type", "=", "'diag'", ",", "n_samples", "=", "1", ",", "random_state", "=", "None", ")", ":", "rng", "=", "check_random_state", "(", "random_state", ")", "n_dim", "=", "len", "(", "mean", ")", "rand", "=", "rng", ".", "randn", "(", "n_dim", ",", "n_samples", ")", "if", "n_samples", "==", "1", ":", "rand", ".", "shape", "=", "(", "n_dim", ",", ")", "if", "covariance_type", "==", "'spherical'", ":", "rand", "*=", "np", ".", "sqrt", "(", "covar", ")", "elif", "covariance_type", "==", "'diag'", ":", "rand", "=", "np", ".", "dot", "(", "np", ".", "diag", "(", "np", ".", "sqrt", "(", "covar", ")", ")", ",", "rand", ")", "else", ":", "s", ",", "U", "=", "linalg", ".", "eigh", "(", "covar", ")", "s", ".", "clip", "(", "0", ",", "out", "=", "s", ")", "# get rid of tiny negatives", "np", ".", "sqrt", "(", "s", ",", "out", "=", "s", ")", "U", "*=", "s", "rand", "=", "np", ".", "dot", "(", "U", ",", "rand", ")", "return", "(", "rand", ".", "T", "+", "mean", ")", ".", "T" ]
Generate random samples from a Gaussian distribution.

Parameters
----------
mean : array_like, shape (n_features,)
    Mean of the distribution.

covar : array_like, optional
    Covariance of the distribution. The shape depends on
    `covariance_type`:
        scalar if 'spherical',
        (n_features) if 'diag',
        (n_features, n_features) if 'tied', or 'full'

covariance_type : string, optional
    Type of the covariance parameters. Must be one of
    'spherical', 'tied', 'diag', 'full'. Defaults to 'diag'.

n_samples : int, optional
    Number of samples to generate. Defaults to 1.

Returns
-------
X : array, shape (n_features, n_samples)
    Randomly generated sample
[ "Generate", "random", "samples", "from", "a", "Gaussian", "distribution", "." ]
train
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/_external/sklearn/mixture/gmm.py#L63-L107
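A minimal usage sketch for this record, assuming the sample_gaussian function shown above (and its numpy/scipy dependencies) is already in scope; the mean and variance values below are made up for illustration:

import numpy as np

# hypothetical 2-D Gaussian: mean and per-feature variances ('diag' covariance)
mean = np.array([0.0, 5.0])
covar = np.array([1.0, 0.25])

# draw 4 samples; the result has shape (n_features, n_samples)
X = sample_gaussian(mean, covar, covariance_type='diag',
                    n_samples=4, random_state=42)
print(X.shape)  # (2, 4)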
bhmm/bhmm
bhmm/_external/sklearn/mixture/gmm.py
_covar_mstep_diag
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
                      min_covar):
    """Performing the covariance M step for diagonal cases"""
    avg_X2 = np.dot(responsibilities.T, X * X) * norm
    avg_means2 = gmm.means_ ** 2
    avg_X_means = gmm.means_ * weighted_X_sum * norm
    return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
python
def _covar_mstep_diag(gmm, X, responsibilities, weighted_X_sum, norm,
                      min_covar):
    """Performing the covariance M step for diagonal cases"""
    avg_X2 = np.dot(responsibilities.T, X * X) * norm
    avg_means2 = gmm.means_ ** 2
    avg_X_means = gmm.means_ * weighted_X_sum * norm
    return avg_X2 - 2 * avg_X_means + avg_means2 + min_covar
[ "def", "_covar_mstep_diag", "(", "gmm", ",", "X", ",", "responsibilities", ",", "weighted_X_sum", ",", "norm", ",", "min_covar", ")", ":", "avg_X2", "=", "np", ".", "dot", "(", "responsibilities", ".", "T", ",", "X", "*", "X", ")", "*", "norm", "avg_means2", "=", "gmm", ".", "means_", "**", "2", "avg_X_means", "=", "gmm", ".", "means_", "*", "weighted_X_sum", "*", "norm", "return", "avg_X2", "-", "2", "*", "avg_X_means", "+", "avg_means2", "+", "min_covar" ]
Performing the covariance M step for diagonal cases
[ "Performing", "the", "covariance", "M", "step", "for", "diagonal", "cases" ]
train
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/_external/sklearn/mixture/gmm.py#L684-L690
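The returned expression avg_X2 - 2*avg_X_means + avg_means2 is the responsibility-weighted form of E[x^2] - 2*E[x]*mu + mu^2, i.e. a per-component, per-feature variance. A minimal sketch of what the helper computes, assuming the function above is in scope; the toy data and the SimpleNamespace standing in for the gmm object are invented for illustration:

import numpy as np
from types import SimpleNamespace

X = np.array([[0.0], [1.0], [2.0]])            # 3 samples, 1 feature
resp = np.array([[1.0], [1.0], [1.0]])         # all mass on one component
weighted_X_sum = np.dot(resp.T, X)             # shape (1, 1)
norm = 1.0 / resp.sum(axis=0)[:, np.newaxis]   # inverse effective counts
gmm = SimpleNamespace(means_=weighted_X_sum * norm)

cv = _covar_mstep_diag(gmm, X, resp, weighted_X_sum, norm, min_covar=0.0)
print(cv)  # [[0.666...]] -- the (biased) variance of 0, 1, 2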
bhmm/bhmm
bhmm/_external/sklearn/mixture/gmm.py
_covar_mstep_spherical
def _covar_mstep_spherical(*args):
    """Performing the covariance M step for spherical cases"""
    cv = _covar_mstep_diag(*args)
    return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
python
def _covar_mstep_spherical(*args):
    """Performing the covariance M step for spherical cases"""
    cv = _covar_mstep_diag(*args)
    return np.tile(cv.mean(axis=1)[:, np.newaxis], (1, cv.shape[1]))
[ "def", "_covar_mstep_spherical", "(", "*", "args", ")", ":", "cv", "=", "_covar_mstep_diag", "(", "*", "args", ")", "return", "np", ".", "tile", "(", "cv", ".", "mean", "(", "axis", "=", "1", ")", "[", ":", ",", "np", ".", "newaxis", "]", ",", "(", "1", ",", "cv", ".", "shape", "[", "1", "]", ")", ")" ]
Performing the covariance M step for spherical cases
[ "Performing", "the", "covariance", "M", "step", "for", "spherical", "cases" ]
train
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/_external/sklearn/mixture/gmm.py#L693-L696
bhmm/bhmm
bhmm/_external/sklearn/mixture/gmm.py
_covar_mstep_full
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
                      min_covar):
    """Performing the covariance M step for full cases"""
    # Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
    # Distribution"
    n_features = X.shape[1]
    cv = np.empty((gmm.n_components, n_features, n_features))
    for c in range(gmm.n_components):
        post = responsibilities[:, c]
        mu = gmm.means_[c]
        diff = X - mu
        with np.errstate(under='ignore'):
            # Underflow Errors in doing post * X.T are not important
            avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
        cv[c] = avg_cv + min_covar * np.eye(n_features)
    return cv
python
def _covar_mstep_full(gmm, X, responsibilities, weighted_X_sum, norm,
                      min_covar):
    """Performing the covariance M step for full cases"""
    # Eq. 12 from K. Murphy, "Fitting a Conditional Linear Gaussian
    # Distribution"
    n_features = X.shape[1]
    cv = np.empty((gmm.n_components, n_features, n_features))
    for c in range(gmm.n_components):
        post = responsibilities[:, c]
        mu = gmm.means_[c]
        diff = X - mu
        with np.errstate(under='ignore'):
            # Underflow Errors in doing post * X.T are not important
            avg_cv = np.dot(post * diff.T, diff) / (post.sum() + 10 * EPS)
        cv[c] = avg_cv + min_covar * np.eye(n_features)
    return cv
[ "def", "_covar_mstep_full", "(", "gmm", ",", "X", ",", "responsibilities", ",", "weighted_X_sum", ",", "norm", ",", "min_covar", ")", ":", "# Eq. 12 from K. Murphy, \"Fitting a Conditional Linear Gaussian", "# Distribution\"", "n_features", "=", "X", ".", "shape", "[", "1", "]", "cv", "=", "np", ".", "empty", "(", "(", "gmm", ".", "n_components", ",", "n_features", ",", "n_features", ")", ")", "for", "c", "in", "range", "(", "gmm", ".", "n_components", ")", ":", "post", "=", "responsibilities", "[", ":", ",", "c", "]", "mu", "=", "gmm", ".", "means_", "[", "c", "]", "diff", "=", "X", "-", "mu", "with", "np", ".", "errstate", "(", "under", "=", "'ignore'", ")", ":", "# Underflow Errors in doing post * X.T are not important", "avg_cv", "=", "np", ".", "dot", "(", "post", "*", "diff", ".", "T", ",", "diff", ")", "/", "(", "post", ".", "sum", "(", ")", "+", "10", "*", "EPS", ")", "cv", "[", "c", "]", "=", "avg_cv", "+", "min_covar", "*", "np", ".", "eye", "(", "n_features", ")", "return", "cv" ]
Performing the covariance M step for full cases
[ "Performing", "the", "covariance", "M", "step", "for", "full", "cases" ]
train
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/_external/sklearn/mixture/gmm.py#L699-L714
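The loop is a responsibility-weighted outer product: for each component c it forms sum_i r_ic (x_i - mu_c)(x_i - mu_c)^T / sum_i r_ic, plus a small ridge. A rough stand-alone sketch of that update for a single component, written without the helper; the data, responsibilities, and ridge value are invented here:

import numpy as np

X = np.random.randn(100, 3)                # 100 samples, 3 features
post = np.random.rand(100)                 # responsibilities for one component
mu = np.average(X, axis=0, weights=post)   # weighted mean for that component

diff = X - mu
cov_c = np.dot(post * diff.T, diff) / post.sum()   # weighted covariance, (3, 3)
cov_c += 1e-3 * np.eye(3)                          # the min_covar-style ridge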
bhmm/bhmm
bhmm/_external/sklearn/mixture/gmm.py
GMM._get_covars
def _get_covars(self):
        """Covariance parameters for each mixture component.

        The shape depends on `cvtype`::

            (`n_states`, 'n_features') if 'spherical',
            (`n_features`, `n_features`) if 'tied',
            (`n_states`, `n_features`) if 'diag',
            (`n_states`, `n_features`, `n_features`) if 'full'

        """
        if self.covariance_type == 'full':
            return self.covars_
        elif self.covariance_type == 'diag':
            return [np.diag(cov) for cov in self.covars_]
        elif self.covariance_type == 'tied':
            return [self.covars_] * self.n_components
        elif self.covariance_type == 'spherical':
            return [np.diag(cov) for cov in self.covars_]
python
def _get_covars(self):
        """Covariance parameters for each mixture component.

        The shape depends on `cvtype`::

            (`n_states`, 'n_features') if 'spherical',
            (`n_features`, `n_features`) if 'tied',
            (`n_states`, `n_features`) if 'diag',
            (`n_states`, `n_features`, `n_features`) if 'full'

        """
        if self.covariance_type == 'full':
            return self.covars_
        elif self.covariance_type == 'diag':
            return [np.diag(cov) for cov in self.covars_]
        elif self.covariance_type == 'tied':
            return [self.covars_] * self.n_components
        elif self.covariance_type == 'spherical':
            return [np.diag(cov) for cov in self.covars_]
[ "def", "_get_covars", "(", "self", ")", ":", "if", "self", ".", "covariance_type", "==", "'full'", ":", "return", "self", ".", "covars_", "elif", "self", ".", "covariance_type", "==", "'diag'", ":", "return", "[", "np", ".", "diag", "(", "cov", ")", "for", "cov", "in", "self", ".", "covars_", "]", "elif", "self", ".", "covariance_type", "==", "'tied'", ":", "return", "[", "self", ".", "covars_", "]", "*", "self", ".", "n_components", "elif", "self", ".", "covariance_type", "==", "'spherical'", ":", "return", "[", "np", ".", "diag", "(", "cov", ")", "for", "cov", "in", "self", ".", "covars_", "]" ]
Covariance parameters for each mixture component.

The shape depends on `cvtype`::

    (`n_states`, 'n_features') if 'spherical',
    (`n_features`, `n_features`) if 'tied',
    (`n_states`, `n_features`) if 'diag',
    (`n_states`, `n_features`, `n_features`) if 'full'
[ "Covariance", "parameters", "for", "each", "mixture", "component", ".", "The", "shape", "depends", "on", "cvtype", "::" ]
train
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/_external/sklearn/mixture/gmm.py#L261-L277
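For the non-'full' storage formats this accessor re-expands each component's stored covariance into a full matrix. A small sketch for the 'diag' branch, with made-up per-feature variances:

import numpy as np

covars_diag = np.array([[1.0, 0.5],    # component 0: per-feature variances
                        [2.0, 0.1]])   # component 1
full = [np.diag(cov) for cov in covars_diag]
print(full[0])   # [[1.  0. ]
                 #  [0.  0.5]]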
bhmm/bhmm
bhmm/_external/sklearn/mixture/gmm.py
GMM._set_covars
def _set_covars(self, covars):
        """Provide values for covariance"""
        covars = np.asarray(covars)
        _validate_covars(covars, self.covariance_type, self.n_components)
        self.covars_ = covars
python
def _set_covars(self, covars):
        """Provide values for covariance"""
        covars = np.asarray(covars)
        _validate_covars(covars, self.covariance_type, self.n_components)
        self.covars_ = covars
[ "def", "_set_covars", "(", "self", ",", "covars", ")", ":", "covars", "=", "np", ".", "asarray", "(", "covars", ")", "_validate_covars", "(", "covars", ",", "self", ".", "covariance_type", ",", "self", ".", "n_components", ")", "self", ".", "covars_", "=", "covars" ]
Provide values for covariance
[ "Provide", "values", "for", "covariance" ]
train
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/_external/sklearn/mixture/gmm.py#L279-L283
bhmm/bhmm
bhmm/_external/sklearn/mixture/gmm.py
GMM.score_samples
def score_samples(self, X):
        """Return the per-sample likelihood of the data under the model.

        Compute the log probability of X under the model and
        return the posterior distribution (responsibilities) of each
        mixture component for each element of X.

        Parameters
        ----------
        X: array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        logprob : array_like, shape (n_samples,)
            Log probabilities of each data point in X.

        responsibilities : array_like, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation
        """
        check_is_fitted(self, 'means_')

        X = check_array(X)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        if X.size == 0:
            return np.array([]), np.empty((0, self.n_components))
        if X.shape[1] != self.means_.shape[1]:
            raise ValueError('The shape of X is not compatible with self')

        lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
                                               self.covariance_type) +
               np.log(self.weights_))
        logprob = logsumexp(lpr, axis=1)
        responsibilities = np.exp(lpr - logprob[:, np.newaxis])
        return logprob, responsibilities
python
def score_samples(self, X):
        """Return the per-sample likelihood of the data under the model.

        Compute the log probability of X under the model and
        return the posterior distribution (responsibilities) of each
        mixture component for each element of X.

        Parameters
        ----------
        X: array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        logprob : array_like, shape (n_samples,)
            Log probabilities of each data point in X.

        responsibilities : array_like, shape (n_samples, n_components)
            Posterior probabilities of each mixture component for each
            observation
        """
        check_is_fitted(self, 'means_')

        X = check_array(X)
        if X.ndim == 1:
            X = X[:, np.newaxis]
        if X.size == 0:
            return np.array([]), np.empty((0, self.n_components))
        if X.shape[1] != self.means_.shape[1]:
            raise ValueError('The shape of X is not compatible with self')

        lpr = (log_multivariate_normal_density(X, self.means_, self.covars_,
                                               self.covariance_type) +
               np.log(self.weights_))
        logprob = logsumexp(lpr, axis=1)
        responsibilities = np.exp(lpr - logprob[:, np.newaxis])
        return logprob, responsibilities
[ "def", "score_samples", "(", "self", ",", "X", ")", ":", "check_is_fitted", "(", "self", ",", "'means_'", ")", "X", "=", "check_array", "(", "X", ")", "if", "X", ".", "ndim", "==", "1", ":", "X", "=", "X", "[", ":", ",", "np", ".", "newaxis", "]", "if", "X", ".", "size", "==", "0", ":", "return", "np", ".", "array", "(", "[", "]", ")", ",", "np", ".", "empty", "(", "(", "0", ",", "self", ".", "n_components", ")", ")", "if", "X", ".", "shape", "[", "1", "]", "!=", "self", ".", "means_", ".", "shape", "[", "1", "]", ":", "raise", "ValueError", "(", "'The shape of X is not compatible with self'", ")", "lpr", "=", "(", "log_multivariate_normal_density", "(", "X", ",", "self", ".", "means_", ",", "self", ".", "covars_", ",", "self", ".", "covariance_type", ")", "+", "np", ".", "log", "(", "self", ".", "weights_", ")", ")", "logprob", "=", "logsumexp", "(", "lpr", ",", "axis", "=", "1", ")", "responsibilities", "=", "np", ".", "exp", "(", "lpr", "-", "logprob", "[", ":", ",", "np", ".", "newaxis", "]", ")", "return", "logprob", ",", "responsibilities" ]
Return the per-sample likelihood of the data under the model.

Compute the log probability of X under the model and
return the posterior distribution (responsibilities) of each
mixture component for each element of X.

Parameters
----------
X: array_like, shape (n_samples, n_features)
    List of n_features-dimensional data points. Each row
    corresponds to a single data point.

Returns
-------
logprob : array_like, shape (n_samples,)
    Log probabilities of each data point in X.

responsibilities : array_like, shape (n_samples, n_components)
    Posterior probabilities of each mixture component for each
    observation
[ "Return", "the", "per", "-", "sample", "likelihood", "of", "the", "data", "under", "the", "model", "." ]
train
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/_external/sklearn/mixture/gmm.py#L285-L322
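The last three statements of score_samples are the usual E-step bookkeeping: per-component log joint densities, a log-sum-exp over components for the total log likelihood, and normalized posteriors. A stand-alone sketch of just that step, with made-up lpr values and scipy's logsumexp substituted for the module-internal one:

import numpy as np
from scipy.special import logsumexp

# log p(x_i | component k) + log weight_k, for 2 samples and 3 components
lpr = np.array([[-1.0, -2.0, -3.0],
                [-0.5, -4.0, -1.5]])

logprob = logsumexp(lpr, axis=1)                    # per-sample log likelihood
responsibilities = np.exp(lpr - logprob[:, None])   # each row sums to 1
print(responsibilities.sum(axis=1))                 # [1. 1.]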
bhmm/bhmm
bhmm/_external/sklearn/mixture/gmm.py
GMM.score
def score(self, X, y=None):
        """Compute the log probability under the model.

        Parameters
        ----------
        X : array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        logprob : array_like, shape (n_samples,)
            Log probabilities of each data point in X
        """
        logprob, _ = self.score_samples(X)
        return logprob
python
def score(self, X, y=None):
        """Compute the log probability under the model.

        Parameters
        ----------
        X : array_like, shape (n_samples, n_features)
            List of n_features-dimensional data points. Each row
            corresponds to a single data point.

        Returns
        -------
        logprob : array_like, shape (n_samples,)
            Log probabilities of each data point in X
        """
        logprob, _ = self.score_samples(X)
        return logprob
[ "def", "score", "(", "self", ",", "X", ",", "y", "=", "None", ")", ":", "logprob", ",", "_", "=", "self", ".", "score_samples", "(", "X", ")", "return", "logprob" ]
Compute the log probability under the model.

Parameters
----------
X : array_like, shape (n_samples, n_features)
    List of n_features-dimensional data points. Each row
    corresponds to a single data point.

Returns
-------
logprob : array_like, shape (n_samples,)
    Log probabilities of each data point in X
[ "Compute", "the", "log", "probability", "under", "the", "model", "." ]
train
https://github.com/bhmm/bhmm/blob/9804d18c2ddb684fb4d90b544cc209617a89ca9a/bhmm/_external/sklearn/mixture/gmm.py#L324-L339
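score simply returns the first value produced by score_samples. A hypothetical call, assuming gmm is an already-fitted GMM instance and X is an (n_samples, n_features) array:

import numpy as np

logprob = gmm.score(X)                      # shape (n_samples,)
same_logprob, resp = gmm.score_samples(X)   # second return value: posteriors
assert np.allclose(logprob, same_logprob)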