| id (int32, values 0–252k) | repo (string, 7–55 chars) | path (string, 4–127 chars) | func_name (string, 1–88 chars) | original_string (string, 75–19.8k chars) | language (string, 1 class) | code (string, 75–19.8k chars) | code_tokens (list) | docstring (string, 3–17.3k chars) | docstring_tokens (list) | sha (string, 40 chars) | url (string, 87–242 chars) |
|---|---|---|---|---|---|---|---|---|---|---|---|
6,600
|
tableau/document-api-python
|
tableaudocumentapi/xfile.py
|
xml_open
|
def xml_open(filename, expected_root=None):
"""Opens the provided 'filename'. Handles detecting if the file is an archive,
detecting the document version, and validating the root tag."""
# Is the file a zip (.twbx or .tdsx)
if zipfile.is_zipfile(filename):
tree = get_xml_from_archive(filename)
else:
tree = ET.parse(filename)
# Is the file a supported version
tree_root = tree.getroot()
file_version = Version(tree_root.attrib.get('version', '0.0'))
if file_version < MIN_SUPPORTED_VERSION:
raise TableauVersionNotSupportedException(file_version)
# Does the root tag match the object type (workbook or data source)
if expected_root and (expected_root != tree_root.tag):
raise TableauInvalidFileException(
"'{}'' is not a valid '{}' file".format(filename, expected_root))
return tree
|
python
|
def xml_open(filename, expected_root=None):
"""Opens the provided 'filename'. Handles detecting if the file is an archive,
detecting the document version, and validating the root tag."""
# Is the file a zip (.twbx or .tdsx)
if zipfile.is_zipfile(filename):
tree = get_xml_from_archive(filename)
else:
tree = ET.parse(filename)
# Is the file a supported version
tree_root = tree.getroot()
file_version = Version(tree_root.attrib.get('version', '0.0'))
if file_version < MIN_SUPPORTED_VERSION:
raise TableauVersionNotSupportedException(file_version)
# Does the root tag match the object type (workbook or data source)
if expected_root and (expected_root != tree_root.tag):
raise TableauInvalidFileException(
"'{}'' is not a valid '{}' file".format(filename, expected_root))
return tree
|
[
"def",
"xml_open",
"(",
"filename",
",",
"expected_root",
"=",
"None",
")",
":",
"# Is the file a zip (.twbx or .tdsx)",
"if",
"zipfile",
".",
"is_zipfile",
"(",
"filename",
")",
":",
"tree",
"=",
"get_xml_from_archive",
"(",
"filename",
")",
"else",
":",
"tree",
"=",
"ET",
".",
"parse",
"(",
"filename",
")",
"# Is the file a supported version",
"tree_root",
"=",
"tree",
".",
"getroot",
"(",
")",
"file_version",
"=",
"Version",
"(",
"tree_root",
".",
"attrib",
".",
"get",
"(",
"'version'",
",",
"'0.0'",
")",
")",
"if",
"file_version",
"<",
"MIN_SUPPORTED_VERSION",
":",
"raise",
"TableauVersionNotSupportedException",
"(",
"file_version",
")",
"# Does the root tag match the object type (workbook or data source)",
"if",
"expected_root",
"and",
"(",
"expected_root",
"!=",
"tree_root",
".",
"tag",
")",
":",
"raise",
"TableauInvalidFileException",
"(",
"\"'{}'' is not a valid '{}' file\"",
".",
"format",
"(",
"filename",
",",
"expected_root",
")",
")",
"return",
"tree"
] |
Opens the provided 'filename'. Handles detecting if the file is an archive,
detecting the document version, and validating the root tag.
|
[
"Opens",
"the",
"provided",
"filename",
".",
"Handles",
"detecting",
"if",
"the",
"file",
"is",
"an",
"archive",
"detecting",
"the",
"document",
"version",
"and",
"validating",
"the",
"root",
"tag",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/xfile.py#L24-L46
|
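The `xml_open` record above handles both plain and zipped Tableau files; a minimal usage sketch (the file name is illustrative and the import path is assumed from the record's repo/path fields) could look like:

```python
from tableaudocumentapi.xfile import xml_open, TableauInvalidFileException

try:
    # Handles plain XML (.twb/.tds) as well as zipped archives (.twbx/.tdsx).
    tree = xml_open('World Indicators.twb', expected_root='workbook')
    print(tree.getroot().attrib.get('version'))
except TableauInvalidFileException as err:
    print('Not a workbook:', err)
```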
6,601
|
tableau/document-api-python
|
tableaudocumentapi/xfile.py
|
build_archive_file
|
def build_archive_file(archive_contents, zip_file):
"""Build a Tableau-compatible archive file."""
# This is tested against Desktop and Server, and reverse engineered by lots
# of trial and error. Do not change this logic.
for root_dir, _, files in os.walk(archive_contents):
relative_dir = os.path.relpath(root_dir, archive_contents)
for f in files:
temp_file_full_path = os.path.join(
archive_contents, relative_dir, f)
zipname = os.path.join(relative_dir, f)
zip_file.write(temp_file_full_path, arcname=zipname)
|
python
|
def build_archive_file(archive_contents, zip_file):
"""Build a Tableau-compatible archive file."""
# This is tested against Desktop and Server, and reverse engineered by lots
# of trial and error. Do not change this logic.
for root_dir, _, files in os.walk(archive_contents):
relative_dir = os.path.relpath(root_dir, archive_contents)
for f in files:
temp_file_full_path = os.path.join(
archive_contents, relative_dir, f)
zipname = os.path.join(relative_dir, f)
zip_file.write(temp_file_full_path, arcname=zipname)
|
[
"def",
"build_archive_file",
"(",
"archive_contents",
",",
"zip_file",
")",
":",
"# This is tested against Desktop and Server, and reverse engineered by lots",
"# of trial and error. Do not change this logic.",
"for",
"root_dir",
",",
"_",
",",
"files",
"in",
"os",
".",
"walk",
"(",
"archive_contents",
")",
":",
"relative_dir",
"=",
"os",
".",
"path",
".",
"relpath",
"(",
"root_dir",
",",
"archive_contents",
")",
"for",
"f",
"in",
"files",
":",
"temp_file_full_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"archive_contents",
",",
"relative_dir",
",",
"f",
")",
"zipname",
"=",
"os",
".",
"path",
".",
"join",
"(",
"relative_dir",
",",
"f",
")",
"zip_file",
".",
"write",
"(",
"temp_file_full_path",
",",
"arcname",
"=",
"zipname",
")"
] |
Build a Tableau-compatible archive file.
|
[
"Build",
"a",
"Tableau",
"-",
"compatible",
"archive",
"file",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/xfile.py#L85-L96
|
6,602
|
tableau/document-api-python
|
tableaudocumentapi/connection.py
|
Connection.from_attributes
|
def from_attributes(cls, server, dbname, username, dbclass, port=None, query_band=None,
initial_sql=None, authentication=''):
"""Creates a new connection that can be added into a Data Source.
defaults to `''` which will be treated as 'prompt' by Tableau."""
root = ET.Element('connection', authentication=authentication)
xml = cls(root)
xml.server = server
xml.dbname = dbname
xml.username = username
xml.dbclass = dbclass
xml.port = port
xml.query_band = query_band
xml.initial_sql = initial_sql
return xml
|
python
|
def from_attributes(cls, server, dbname, username, dbclass, port=None, query_band=None,
initial_sql=None, authentication=''):
"""Creates a new connection that can be added into a Data Source.
defaults to `''` which will be treated as 'prompt' by Tableau."""
root = ET.Element('connection', authentication=authentication)
xml = cls(root)
xml.server = server
xml.dbname = dbname
xml.username = username
xml.dbclass = dbclass
xml.port = port
xml.query_band = query_band
xml.initial_sql = initial_sql
return xml
|
[
"def",
"from_attributes",
"(",
"cls",
",",
"server",
",",
"dbname",
",",
"username",
",",
"dbclass",
",",
"port",
"=",
"None",
",",
"query_band",
"=",
"None",
",",
"initial_sql",
"=",
"None",
",",
"authentication",
"=",
"''",
")",
":",
"root",
"=",
"ET",
".",
"Element",
"(",
"'connection'",
",",
"authentication",
"=",
"authentication",
")",
"xml",
"=",
"cls",
"(",
"root",
")",
"xml",
".",
"server",
"=",
"server",
"xml",
".",
"dbname",
"=",
"dbname",
"xml",
".",
"username",
"=",
"username",
"xml",
".",
"dbclass",
"=",
"dbclass",
"xml",
".",
"port",
"=",
"port",
"xml",
".",
"query_band",
"=",
"query_band",
"xml",
".",
"initial_sql",
"=",
"initial_sql",
"return",
"xml"
] |
Creates a new connection that can be added into a Data Source.
defaults to `''` which will be treated as 'prompt' by Tableau.
|
[
"Creates",
"a",
"new",
"connection",
"that",
"can",
"be",
"added",
"into",
"a",
"Data",
"Source",
".",
"defaults",
"to",
"which",
"will",
"be",
"treated",
"as",
"prompt",
"by",
"Tableau",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L28-L43
|
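A hedged sketch of creating a `Connection` with `from_attributes` (the top-level import and the connection values are assumptions):

```python
from tableaudocumentapi import Connection

conn = Connection.from_attributes(
    server='db.example.com',
    dbname='sales',
    username='analyst',
    dbclass='postgres',
    port='5432',   # kept as a string; it is written straight into the XML attribute
)
print(conn.server, conn.dbclass)   # db.example.com postgres
```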
6,603
|
tableau/document-api-python
|
tableaudocumentapi/connection.py
|
Connection.dbname
|
def dbname(self, value):
"""
Set the connection's database name property.
Args:
value: New name of the database. String.
Returns:
Nothing.
"""
self._dbname = value
self._connectionXML.set('dbname', value)
|
python
|
def dbname(self, value):
"""
Set the connection's database name property.
Args:
value: New name of the database. String.
Returns:
Nothing.
"""
self._dbname = value
self._connectionXML.set('dbname', value)
|
[
"def",
"dbname",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_dbname",
"=",
"value",
"self",
".",
"_connectionXML",
".",
"set",
"(",
"'dbname'",
",",
"value",
")"
] |
Set the connection's database name property.
Args:
value: New name of the database. String.
Returns:
Nothing.
|
[
"Set",
"the",
"connection",
"s",
"database",
"name",
"property",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L51-L63
|
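The `dbname` setter (and the `server`/`username` setters in the following records) follows the same pattern: cache the value on the instance and mirror it onto the backing XML element. A self-contained sketch of that pattern:

```python
import xml.etree.ElementTree as ET

class ConnectionSketch:
    """Minimal stand-in for Connection, showing only the setter pattern."""
    def __init__(self, xml_element):
        self._connectionXML = xml_element

    @property
    def dbname(self):
        return self._dbname

    @dbname.setter
    def dbname(self, value):
        self._dbname = value                       # cache on the instance
        self._connectionXML.set('dbname', value)   # mirror into the XML attribute

conn = ConnectionSketch(ET.Element('connection'))
conn.dbname = 'sales'
print(ET.tostring(conn._connectionXML))   # b'<connection dbname="sales" />'
```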
6,604
|
tableau/document-api-python
|
tableaudocumentapi/connection.py
|
Connection.server
|
def server(self, value):
"""
Set the connection's server property.
Args:
value: New server. String.
Returns:
Nothing.
"""
self._server = value
self._connectionXML.set('server', value)
|
python
|
def server(self, value):
"""
Set the connection's server property.
Args:
value: New server. String.
Returns:
Nothing.
"""
self._server = value
self._connectionXML.set('server', value)
|
[
"def",
"server",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_server",
"=",
"value",
"self",
".",
"_connectionXML",
".",
"set",
"(",
"'server'",
",",
"value",
")"
] |
Set the connection's server property.
Args:
value: New server. String.
Returns:
Nothing.
|
[
"Set",
"the",
"connection",
"s",
"server",
"property",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L71-L83
|
6,605
|
tableau/document-api-python
|
tableaudocumentapi/connection.py
|
Connection.username
|
def username(self, value):
"""
Set the connection's username property.
Args:
value: New username value. String.
Returns:
Nothing.
"""
self._username = value
self._connectionXML.set('username', value)
|
python
|
def username(self, value):
"""
Set the connection's username property.
Args:
value: New username value. String.
Returns:
Nothing.
"""
self._username = value
self._connectionXML.set('username', value)
|
[
"def",
"username",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_username",
"=",
"value",
"self",
".",
"_connectionXML",
".",
"set",
"(",
"'username'",
",",
"value",
")"
] |
Set the connection's username property.
Args:
value: New username value. String.
Returns:
Nothing.
|
[
"Set",
"the",
"connection",
"s",
"username",
"property",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L91-L103
|
6,606
|
tableau/document-api-python
|
tableaudocumentapi/connection.py
|
Connection.dbclass
|
def dbclass(self, value):
"""Set the connection's dbclass property.
Args:
value: New dbclass value. String.
Returns:
Nothing.
"""
if not is_valid_dbclass(value):
raise AttributeError("'{}' is not a valid database type".format(value))
self._class = value
self._connectionXML.set('class', value)
|
python
|
def dbclass(self, value):
"""Set the connection's dbclass property.
Args:
value: New dbclass value. String.
Returns:
Nothing.
"""
if not is_valid_dbclass(value):
raise AttributeError("'{}' is not a valid database type".format(value))
self._class = value
self._connectionXML.set('class', value)
|
[
"def",
"dbclass",
"(",
"self",
",",
"value",
")",
":",
"if",
"not",
"is_valid_dbclass",
"(",
"value",
")",
":",
"raise",
"AttributeError",
"(",
"\"'{}' is not a valid database type\"",
".",
"format",
"(",
"value",
")",
")",
"self",
".",
"_class",
"=",
"value",
"self",
".",
"_connectionXML",
".",
"set",
"(",
"'class'",
",",
"value",
")"
] |
Set the connection's dbclass property.
Args:
value: New dbclass value. String.
Returns:
Nothing.
|
[
"Set",
"the",
"connection",
"s",
"dbclass",
"property",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L116-L130
|
6,607
|
tableau/document-api-python
|
tableaudocumentapi/connection.py
|
Connection.port
|
def port(self, value):
"""Set the connection's port property.
Args:
value: New port value. String.
Returns:
Nothing.
"""
self._port = value
# If port is None we remove the element and don't write it to XML
if value is None:
try:
del self._connectionXML.attrib['port']
except KeyError:
pass
else:
self._connectionXML.set('port', value)
|
python
|
def port(self, value):
"""Set the connection's port property.
Args:
value: New port value. String.
Returns:
Nothing.
"""
self._port = value
# If port is None we remove the element and don't write it to XML
if value is None:
try:
del self._connectionXML.attrib['port']
except KeyError:
pass
else:
self._connectionXML.set('port', value)
|
[
"def",
"port",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_port",
"=",
"value",
"# If port is None we remove the element and don't write it to XML",
"if",
"value",
"is",
"None",
":",
"try",
":",
"del",
"self",
".",
"_connectionXML",
".",
"attrib",
"[",
"'port'",
"]",
"except",
"KeyError",
":",
"pass",
"else",
":",
"self",
".",
"_connectionXML",
".",
"set",
"(",
"'port'",
",",
"value",
")"
] |
Set the connection's port property.
Args:
value: New port value. String.
Returns:
Nothing.
|
[
"Set",
"the",
"connection",
"s",
"port",
"property",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L138-L156
|
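The `port` setter (like the `query_band` and `initial_sql` setters below) adds a second pattern: passing `None` removes the attribute from the XML instead of writing the literal string `'None'`. A small standalone illustration:

```python
import xml.etree.ElementTree as ET

def set_optional_attr(element, name, value):
    """Mirror of the optional-attribute setters: None deletes, anything else is written."""
    if value is None:
        element.attrib.pop(name, None)
    else:
        element.set(name, value)

elem = ET.Element('connection')
set_optional_attr(elem, 'port', '5432')
print(ET.tostring(elem))    # b'<connection port="5432" />'
set_optional_attr(elem, 'port', None)
print(ET.tostring(elem))    # b'<connection />'
```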
6,608
|
tableau/document-api-python
|
tableaudocumentapi/connection.py
|
Connection.query_band
|
def query_band(self, value):
"""Set the connection's query_band property.
Args:
value: New query_band value. String.
Returns:
Nothing.
"""
self._query_band = value
# If query band is None we remove the element and don't write it to XML
if value is None:
try:
del self._connectionXML.attrib['query-band-spec']
except KeyError:
pass
else:
self._connectionXML.set('query-band-spec', value)
|
python
|
def query_band(self, value):
"""Set the connection's query_band property.
Args:
value: New query_band value. String.
Returns:
Nothing.
"""
self._query_band = value
# If query band is None we remove the element and don't write it to XML
if value is None:
try:
del self._connectionXML.attrib['query-band-spec']
except KeyError:
pass
else:
self._connectionXML.set('query-band-spec', value)
|
[
"def",
"query_band",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_query_band",
"=",
"value",
"# If query band is None we remove the element and don't write it to XML",
"if",
"value",
"is",
"None",
":",
"try",
":",
"del",
"self",
".",
"_connectionXML",
".",
"attrib",
"[",
"'query-band-spec'",
"]",
"except",
"KeyError",
":",
"pass",
"else",
":",
"self",
".",
"_connectionXML",
".",
"set",
"(",
"'query-band-spec'",
",",
"value",
")"
] |
Set the connection's query_band property.
Args:
value: New query_band value. String.
Returns:
Nothing.
|
[
"Set",
"the",
"connection",
"s",
"query_band",
"property",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L164-L182
|
6,609
|
tableau/document-api-python
|
tableaudocumentapi/connection.py
|
Connection.initial_sql
|
def initial_sql(self, value):
"""Set the connection's initial_sql property.
Args:
value: New initial_sql value. String.
Returns:
Nothing.
"""
self._initial_sql = value
# If initial_sql is None we remove the element and don't write it to XML
if value is None:
try:
del self._connectionXML.attrib['one-time-sql']
except KeyError:
pass
else:
self._connectionXML.set('one-time-sql', value)
|
python
|
def initial_sql(self, value):
"""Set the connection's initial_sql property.
Args:
value: New initial_sql value. String.
Returns:
Nothing.
"""
self._initial_sql = value
# If initial_sql is None we remove the element and don't write it to XML
if value is None:
try:
del self._connectionXML.attrib['one-time-sql']
except KeyError:
pass
else:
self._connectionXML.set('one-time-sql', value)
|
[
"def",
"initial_sql",
"(",
"self",
",",
"value",
")",
":",
"self",
".",
"_initial_sql",
"=",
"value",
"# If initial_sql is None we remove the element and don't write it to XML",
"if",
"value",
"is",
"None",
":",
"try",
":",
"del",
"self",
".",
"_connectionXML",
".",
"attrib",
"[",
"'one-time-sql'",
"]",
"except",
"KeyError",
":",
"pass",
"else",
":",
"self",
".",
"_connectionXML",
".",
"set",
"(",
"'one-time-sql'",
",",
"value",
")"
] |
Set the connection's initial_sql property.
Args:
value: New initial_sql value. String.
Returns:
Nothing.
|
[
"Set",
"the",
"connection",
"s",
"initial_sql",
"property",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/connection.py#L190-L208
|
6,610
|
tableau/document-api-python
|
tableaudocumentapi/datasource.py
|
base36encode
|
def base36encode(number):
"""Converts an integer into a base36 string."""
ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyz"
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(ALPHABET):
return sign + ALPHABET[number]
while number != 0:
number, i = divmod(number, len(ALPHABET))
base36 = ALPHABET[i] + base36
return sign + base36
|
python
|
def base36encode(number):
"""Converts an integer into a base36 string."""
ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyz"
base36 = ''
sign = ''
if number < 0:
sign = '-'
number = -number
if 0 <= number < len(ALPHABET):
return sign + ALPHABET[number]
while number != 0:
number, i = divmod(number, len(ALPHABET))
base36 = ALPHABET[i] + base36
return sign + base36
|
[
"def",
"base36encode",
"(",
"number",
")",
":",
"ALPHABET",
"=",
"\"0123456789abcdefghijklmnopqrstuvwxyz\"",
"base36",
"=",
"''",
"sign",
"=",
"''",
"if",
"number",
"<",
"0",
":",
"sign",
"=",
"'-'",
"number",
"=",
"-",
"number",
"if",
"0",
"<=",
"number",
"<",
"len",
"(",
"ALPHABET",
")",
":",
"return",
"sign",
"+",
"ALPHABET",
"[",
"number",
"]",
"while",
"number",
"!=",
"0",
":",
"number",
",",
"i",
"=",
"divmod",
"(",
"number",
",",
"len",
"(",
"ALPHABET",
")",
")",
"base36",
"=",
"ALPHABET",
"[",
"i",
"]",
"+",
"base36",
"return",
"sign",
"+",
"base36"
] |
Converts an integer into a base36 string.
|
[
"Converts",
"an",
"integer",
"into",
"a",
"base36",
"string",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/datasource.py#L63-L82
|
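A few worked values for the `base36encode` record above (the function is restated so the snippet runs on its own):

```python
ALPHABET = "0123456789abcdefghijklmnopqrstuvwxyz"

def base36encode(number):
    base36, sign = '', ''
    if number < 0:
        sign, number = '-', -number
    if 0 <= number < len(ALPHABET):
        return sign + ALPHABET[number]   # single-digit fast path
    while number != 0:
        number, i = divmod(number, len(ALPHABET))
        base36 = ALPHABET[i] + base36
    return sign + base36

assert base36encode(0) == '0'
assert base36encode(35) == 'z'
assert base36encode(36) == '10'
assert base36encode(-1296) == '-100'   # 36**2 == 1296
```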
6,611
|
tableau/document-api-python
|
tableaudocumentapi/datasource.py
|
ConnectionParser.get_connections
|
def get_connections(self):
"""Find and return all connections based on file format version."""
if float(self._dsversion) < 10:
connections = self._extract_legacy_connection()
else:
connections = self._extract_federated_connections()
return connections
|
python
|
def get_connections(self):
"""Find and return all connections based on file format version."""
if float(self._dsversion) < 10:
connections = self._extract_legacy_connection()
else:
connections = self._extract_federated_connections()
return connections
|
[
"def",
"get_connections",
"(",
"self",
")",
":",
"if",
"float",
"(",
"self",
".",
"_dsversion",
")",
"<",
"10",
":",
"connections",
"=",
"self",
".",
"_extract_legacy_connection",
"(",
")",
"else",
":",
"connections",
"=",
"self",
".",
"_extract_federated_connections",
"(",
")",
"return",
"connections"
] |
Find and return all connections based on file format version.
|
[
"Find",
"and",
"return",
"all",
"connections",
"based",
"on",
"file",
"format",
"version",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/datasource.py#L108-L115
|
6,612
|
tableau/document-api-python
|
tableaudocumentapi/datasource.py
|
Datasource.from_connections
|
def from_connections(cls, caption, connections):
"""Create a new Data Source give a list of Connections."""
root = ET.Element('datasource', caption=caption, version='10.0', inline='true')
outer_connection = ET.SubElement(root, 'connection')
outer_connection.set('class', 'federated')
named_conns = ET.SubElement(outer_connection, 'named-connections')
for conn in connections:
nc = ET.SubElement(named_conns,
'named-connection',
name=_make_unique_name(conn.dbclass),
caption=conn.server)
nc.append(conn._connectionXML)
return cls(root)
|
python
|
def from_connections(cls, caption, connections):
"""Create a new Data Source give a list of Connections."""
root = ET.Element('datasource', caption=caption, version='10.0', inline='true')
outer_connection = ET.SubElement(root, 'connection')
outer_connection.set('class', 'federated')
named_conns = ET.SubElement(outer_connection, 'named-connections')
for conn in connections:
nc = ET.SubElement(named_conns,
'named-connection',
name=_make_unique_name(conn.dbclass),
caption=conn.server)
nc.append(conn._connectionXML)
return cls(root)
|
[
"def",
"from_connections",
"(",
"cls",
",",
"caption",
",",
"connections",
")",
":",
"root",
"=",
"ET",
".",
"Element",
"(",
"'datasource'",
",",
"caption",
"=",
"caption",
",",
"version",
"=",
"'10.0'",
",",
"inline",
"=",
"'true'",
")",
"outer_connection",
"=",
"ET",
".",
"SubElement",
"(",
"root",
",",
"'connection'",
")",
"outer_connection",
".",
"set",
"(",
"'class'",
",",
"'federated'",
")",
"named_conns",
"=",
"ET",
".",
"SubElement",
"(",
"outer_connection",
",",
"'named-connections'",
")",
"for",
"conn",
"in",
"connections",
":",
"nc",
"=",
"ET",
".",
"SubElement",
"(",
"named_conns",
",",
"'named-connection'",
",",
"name",
"=",
"_make_unique_name",
"(",
"conn",
".",
"dbclass",
")",
",",
"caption",
"=",
"conn",
".",
"server",
")",
"nc",
".",
"append",
"(",
"conn",
".",
"_connectionXML",
")",
"return",
"cls",
"(",
"root",
")"
] |
Create a new Data Source give a list of Connections.
|
[
"Create",
"a",
"new",
"Data",
"Source",
"give",
"a",
"list",
"of",
"Connections",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/datasource.py#L149-L162
|
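A hedged sketch of combining `Connection` and `Datasource.from_connections` to build a federated data source (the imports and the `.connections` accessor are assumptions based on the surrounding records):

```python
from tableaudocumentapi import Connection, Datasource

sales = Connection.from_attributes(server='db1.example.com', dbname='sales',
                                   username='analyst', dbclass='postgres')
hr = Connection.from_attributes(server='db2.example.com', dbname='hr',
                                username='analyst', dbclass='mysql')

# Each connection is nested under a generated named-connection element.
ds = Datasource.from_connections('Sales and HR', connections=[sales, hr])
print([c.server for c in ds.connections])   # ['db1.example.com', 'db2.example.com']
```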
6,613
|
tableau/document-api-python
|
tableaudocumentapi/field.py
|
Field.name
|
def name(self):
""" Provides a nice name for the field which is derived from the alias, caption, or the id.
The name resolves as either the alias if it's defined, or the caption if alias is not defined,
and finally the id which is the underlying name if neither of the fields exist. """
alias = getattr(self, 'alias', None)
if alias:
return alias
caption = getattr(self, 'caption', None)
if caption:
return caption
return self.id
|
python
|
def name(self):
""" Provides a nice name for the field which is derived from the alias, caption, or the id.
The name resolves as either the alias if it's defined, or the caption if alias is not defined,
and finally the id which is the underlying name if neither of the fields exist. """
alias = getattr(self, 'alias', None)
if alias:
return alias
caption = getattr(self, 'caption', None)
if caption:
return caption
return self.id
|
[
"def",
"name",
"(",
"self",
")",
":",
"alias",
"=",
"getattr",
"(",
"self",
",",
"'alias'",
",",
"None",
")",
"if",
"alias",
":",
"return",
"alias",
"caption",
"=",
"getattr",
"(",
"self",
",",
"'caption'",
",",
"None",
")",
"if",
"caption",
":",
"return",
"caption",
"return",
"self",
".",
"id"
] |
Provides a nice name for the field which is derived from the alias, caption, or the id.
The name resolves as either the alias if it's defined, or the caption if alias is not defined,
and finally the id which is the underlying name if neither of the fields exist.
|
[
"Provides",
"a",
"nice",
"name",
"for",
"the",
"field",
"which",
"is",
"derived",
"from",
"the",
"alias",
"caption",
"or",
"the",
"id",
"."
] |
9097a5b351622c5dd2653fa94624bc012316d8a4
|
https://github.com/tableau/document-api-python/blob/9097a5b351622c5dd2653fa94624bc012316d8a4/tableaudocumentapi/field.py#L99-L112
|
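A self-contained illustration of the alias, then caption, then id fallback described in the `Field.name` record:

```python
class FieldSketch:
    """Minimal stand-in for Field, keeping only the name-resolution logic."""
    def __init__(self, id, caption=None, alias=None):
        self.id = id
        if caption is not None:
            self.caption = caption
        if alias is not None:
            self.alias = alias

    @property
    def name(self):
        alias = getattr(self, 'alias', None)
        if alias:
            return alias
        caption = getattr(self, 'caption', None)
        if caption:
            return caption
        return self.id

print(FieldSketch('[sales]').name)                                    # '[sales]'  (id fallback)
print(FieldSketch('[sales]', caption='Sales').name)                   # 'Sales'
print(FieldSketch('[sales]', caption='Sales', alias='Revenue').name)  # 'Revenue'
```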
6,614
|
maraujop/requests-oauth2
|
requests_oauth2/oauth2.py
|
OAuth2._check_configuration
|
def _check_configuration(self, *attrs):
"""Check that each named attr has been configured
"""
for attr in attrs:
if getattr(self, attr, None) is None:
raise ConfigurationError("{} not configured".format(attr))
|
python
|
def _check_configuration(self, *attrs):
"""Check that each named attr has been configured
"""
for attr in attrs:
if getattr(self, attr, None) is None:
raise ConfigurationError("{} not configured".format(attr))
|
[
"def",
"_check_configuration",
"(",
"self",
",",
"*",
"attrs",
")",
":",
"for",
"attr",
"in",
"attrs",
":",
"if",
"getattr",
"(",
"self",
",",
"attr",
",",
"None",
")",
"is",
"None",
":",
"raise",
"ConfigurationError",
"(",
"\"{} not configured\"",
".",
"format",
"(",
"attr",
")",
")"
] |
Check that each named attr has been configured
|
[
"Check",
"that",
"each",
"named",
"attr",
"has",
"been",
"configured"
] |
191995aa571d0fbdf5bb166fb0668d5e73fe7817
|
https://github.com/maraujop/requests-oauth2/blob/191995aa571d0fbdf5bb166fb0668d5e73fe7817/requests_oauth2/oauth2.py#L41-L46
|
6,615
|
maraujop/requests-oauth2
|
requests_oauth2/oauth2.py
|
OAuth2._make_request
|
def _make_request(self, url, **kwargs):
"""
Make a request to an OAuth2 endpoint
"""
response = requests.post(url, **kwargs)
try:
return response.json()
except ValueError:
pass
return parse_qs(response.content)
|
python
|
def _make_request(self, url, **kwargs):
"""
Make a request to an OAuth2 endpoint
"""
response = requests.post(url, **kwargs)
try:
return response.json()
except ValueError:
pass
return parse_qs(response.content)
|
[
"def",
"_make_request",
"(",
"self",
",",
"url",
",",
"*",
"*",
"kwargs",
")",
":",
"response",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"*",
"*",
"kwargs",
")",
"try",
":",
"return",
"response",
".",
"json",
"(",
")",
"except",
"ValueError",
":",
"pass",
"return",
"parse_qs",
"(",
"response",
".",
"content",
")"
] |
Make a request to an OAuth2 endpoint
|
[
"Make",
"a",
"request",
"to",
"an",
"OAuth2",
"endpoint"
] |
191995aa571d0fbdf5bb166fb0668d5e73fe7817
|
https://github.com/maraujop/requests-oauth2/blob/191995aa571d0fbdf5bb166fb0668d5e73fe7817/requests_oauth2/oauth2.py#L48-L57
|
6,616
|
maraujop/requests-oauth2
|
requests_oauth2/oauth2.py
|
OAuth2.get_token
|
def get_token(self, code, headers=None, **kwargs):
"""
Requests an access token
"""
self._check_configuration("site", "token_url", "redirect_uri",
"client_id", "client_secret")
url = "%s%s" % (self.site, quote(self.token_url))
data = {
'redirect_uri': self.redirect_uri,
'client_id': self.client_id,
'client_secret': self.client_secret,
'code': code,
}
data.update(kwargs)
return self._make_request(url, data=data, headers=headers)
|
python
|
def get_token(self, code, headers=None, **kwargs):
"""
Requests an access token
"""
self._check_configuration("site", "token_url", "redirect_uri",
"client_id", "client_secret")
url = "%s%s" % (self.site, quote(self.token_url))
data = {
'redirect_uri': self.redirect_uri,
'client_id': self.client_id,
'client_secret': self.client_secret,
'code': code,
}
data.update(kwargs)
return self._make_request(url, data=data, headers=headers)
|
[
"def",
"get_token",
"(",
"self",
",",
"code",
",",
"headers",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_check_configuration",
"(",
"\"site\"",
",",
"\"token_url\"",
",",
"\"redirect_uri\"",
",",
"\"client_id\"",
",",
"\"client_secret\"",
")",
"url",
"=",
"\"%s%s\"",
"%",
"(",
"self",
".",
"site",
",",
"quote",
"(",
"self",
".",
"token_url",
")",
")",
"data",
"=",
"{",
"'redirect_uri'",
":",
"self",
".",
"redirect_uri",
",",
"'client_id'",
":",
"self",
".",
"client_id",
",",
"'client_secret'",
":",
"self",
".",
"client_secret",
",",
"'code'",
":",
"code",
",",
"}",
"data",
".",
"update",
"(",
"kwargs",
")",
"return",
"self",
".",
"_make_request",
"(",
"url",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"headers",
")"
] |
Requests an access token
|
[
"Requests",
"an",
"access",
"token"
] |
191995aa571d0fbdf5bb166fb0668d5e73fe7817
|
https://github.com/maraujop/requests-oauth2/blob/191995aa571d0fbdf5bb166fb0668d5e73fe7817/requests_oauth2/oauth2.py#L77-L92
|
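A hedged sketch of the authorization-code exchange with this client; the constructor arguments and endpoint paths are assumptions, and the provider values are placeholders:

```python
from requests_oauth2 import OAuth2

client = OAuth2(
    client_id='my-client-id',
    client_secret='my-client-secret',
    site='https://provider.example.com',
    redirect_uri='https://app.example.com/callback',
    token_url='/oauth/token',
)

# Extra keyword arguments are merged into the POST body via data.update(kwargs).
token = client.get_token(code='code-from-redirect', grant_type='authorization_code')
print(token)   # parsed JSON dict, or parsed query string if the response is not JSON
```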
6,617
|
maraujop/requests-oauth2
|
requests_oauth2/oauth2.py
|
OAuth2.refresh_token
|
def refresh_token(self, headers=None, **kwargs):
"""
Request a refreshed token
"""
self._check_configuration("site", "token_url", "client_id",
"client_secret")
url = "%s%s" % (self.site, quote(self.token_url))
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
}
data.update(kwargs)
return self._make_request(url, data=data, headers=headers)
|
python
|
def refresh_token(self, headers=None, **kwargs):
"""
Request a refreshed token
"""
self._check_configuration("site", "token_url", "client_id",
"client_secret")
url = "%s%s" % (self.site, quote(self.token_url))
data = {
'client_id': self.client_id,
'client_secret': self.client_secret,
}
data.update(kwargs)
return self._make_request(url, data=data, headers=headers)
|
[
"def",
"refresh_token",
"(",
"self",
",",
"headers",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_check_configuration",
"(",
"\"site\"",
",",
"\"token_url\"",
",",
"\"client_id\"",
",",
"\"client_secret\"",
")",
"url",
"=",
"\"%s%s\"",
"%",
"(",
"self",
".",
"site",
",",
"quote",
"(",
"self",
".",
"token_url",
")",
")",
"data",
"=",
"{",
"'client_id'",
":",
"self",
".",
"client_id",
",",
"'client_secret'",
":",
"self",
".",
"client_secret",
",",
"}",
"data",
".",
"update",
"(",
"kwargs",
")",
"return",
"self",
".",
"_make_request",
"(",
"url",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"headers",
")"
] |
Request a refreshed token
|
[
"Request",
"a",
"refreshed",
"token"
] |
191995aa571d0fbdf5bb166fb0668d5e73fe7817
|
https://github.com/maraujop/requests-oauth2/blob/191995aa571d0fbdf5bb166fb0668d5e73fe7817/requests_oauth2/oauth2.py#L94-L107
|
6,618
|
maraujop/requests-oauth2
|
requests_oauth2/oauth2.py
|
OAuth2.revoke_token
|
def revoke_token(self, token, headers=None, **kwargs):
"""
Revoke an access token
"""
self._check_configuration("site", "revoke_uri")
url = "%s%s" % (self.site, quote(self.revoke_url))
data = {'token': token}
data.update(kwargs)
return self._make_request(url, data=data, headers=headers)
|
python
|
def revoke_token(self, token, headers=None, **kwargs):
"""
Revoke an access token
"""
self._check_configuration("site", "revoke_uri")
url = "%s%s" % (self.site, quote(self.revoke_url))
data = {'token': token}
data.update(kwargs)
return self._make_request(url, data=data, headers=headers)
|
[
"def",
"revoke_token",
"(",
"self",
",",
"token",
",",
"headers",
"=",
"None",
",",
"*",
"*",
"kwargs",
")",
":",
"self",
".",
"_check_configuration",
"(",
"\"site\"",
",",
"\"revoke_uri\"",
")",
"url",
"=",
"\"%s%s\"",
"%",
"(",
"self",
".",
"site",
",",
"quote",
"(",
"self",
".",
"revoke_url",
")",
")",
"data",
"=",
"{",
"'token'",
":",
"token",
"}",
"data",
".",
"update",
"(",
"kwargs",
")",
"return",
"self",
".",
"_make_request",
"(",
"url",
",",
"data",
"=",
"data",
",",
"headers",
"=",
"headers",
")"
] |
Revoke an access token
|
[
"Revoke",
"an",
"access",
"token"
] |
191995aa571d0fbdf5bb166fb0668d5e73fe7817
|
https://github.com/maraujop/requests-oauth2/blob/191995aa571d0fbdf5bb166fb0668d5e73fe7817/requests_oauth2/oauth2.py#L109-L118
|
6,619
|
jorgenkg/python-neural-network
|
nimblenet/neuralnet.py
|
NeuralNet.save_network_to_file
|
def save_network_to_file(self, filename = "network0.pkl" ):
import cPickle, os, re
"""
This save method pickles the parameters of the current network into a
binary file for persistant storage.
"""
if filename == "network0.pkl":
while os.path.exists( os.path.join(os.getcwd(), filename )):
filename = re.sub('\d(?!\d)', lambda x: str(int(x.group(0)) + 1), filename)
with open( filename , 'wb') as file:
store_dict = {
"n_inputs" : self.n_inputs,
"layers" : self.layers,
"n_weights" : self.n_weights,
"weights" : self.weights,
}
cPickle.dump( store_dict, file, 2 )
|
python
|
def save_network_to_file(self, filename = "network0.pkl" ):
import cPickle, os, re
"""
This save method pickles the parameters of the current network into a
binary file for persistant storage.
"""
if filename == "network0.pkl":
while os.path.exists( os.path.join(os.getcwd(), filename )):
filename = re.sub('\d(?!\d)', lambda x: str(int(x.group(0)) + 1), filename)
with open( filename , 'wb') as file:
store_dict = {
"n_inputs" : self.n_inputs,
"layers" : self.layers,
"n_weights" : self.n_weights,
"weights" : self.weights,
}
cPickle.dump( store_dict, file, 2 )
|
[
"def",
"save_network_to_file",
"(",
"self",
",",
"filename",
"=",
"\"network0.pkl\"",
")",
":",
"import",
"cPickle",
",",
"os",
",",
"re",
"if",
"filename",
"==",
"\"network0.pkl\"",
":",
"while",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"getcwd",
"(",
")",
",",
"filename",
")",
")",
":",
"filename",
"=",
"re",
".",
"sub",
"(",
"'\\d(?!\\d)'",
",",
"lambda",
"x",
":",
"str",
"(",
"int",
"(",
"x",
".",
"group",
"(",
"0",
")",
")",
"+",
"1",
")",
",",
"filename",
")",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"file",
":",
"store_dict",
"=",
"{",
"\"n_inputs\"",
":",
"self",
".",
"n_inputs",
",",
"\"layers\"",
":",
"self",
".",
"layers",
",",
"\"n_weights\"",
":",
"self",
".",
"n_weights",
",",
"\"weights\"",
":",
"self",
".",
"weights",
",",
"}",
"cPickle",
".",
"dump",
"(",
"store_dict",
",",
"file",
",",
"2",
")"
] |
This save method pickles the parameters of the current network into a
binary file for persistant storage.
|
[
"This",
"save",
"method",
"pickles",
"the",
"parameters",
"of",
"the",
"current",
"network",
"into",
"a",
"binary",
"file",
"for",
"persistant",
"storage",
"."
] |
617b9940fa157d54d7831c42c0f7ba6857239b9a
|
https://github.com/jorgenkg/python-neural-network/blob/617b9940fa157d54d7831c42c0f7ba6857239b9a/nimblenet/neuralnet.py#L194-L212
|
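The default-filename handling above only runs when `network0.pkl` already exists on disk; the regex bumps each digit that is not followed by another digit. A quick demonstration of that substitution:

```python
import re

def bump(filename):
    return re.sub(r'\d(?!\d)', lambda m: str(int(m.group(0)) + 1), filename)

print(bump('network0.pkl'))   # network1.pkl
print(bump('network9.pkl'))   # network10.pkl
```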
6,620
|
jorgenkg/python-neural-network
|
nimblenet/neuralnet.py
|
NeuralNet.load_network_from_file
|
def load_network_from_file( filename ):
import cPickle
"""
Load the complete configuration of a previously stored network.
"""
network = NeuralNet( {"n_inputs":1, "layers":[[0,None]]} )
with open( filename , 'rb') as file:
store_dict = cPickle.load(file)
network.n_inputs = store_dict["n_inputs"]
network.n_weights = store_dict["n_weights"]
network.layers = store_dict["layers"]
network.weights = store_dict["weights"]
return network
|
python
|
def load_network_from_file( filename ):
import cPickle
"""
Load the complete configuration of a previously stored network.
"""
network = NeuralNet( {"n_inputs":1, "layers":[[0,None]]} )
with open( filename , 'rb') as file:
store_dict = cPickle.load(file)
network.n_inputs = store_dict["n_inputs"]
network.n_weights = store_dict["n_weights"]
network.layers = store_dict["layers"]
network.weights = store_dict["weights"]
return network
|
[
"def",
"load_network_from_file",
"(",
"filename",
")",
":",
"import",
"cPickle",
"network",
"=",
"NeuralNet",
"(",
"{",
"\"n_inputs\"",
":",
"1",
",",
"\"layers\"",
":",
"[",
"[",
"0",
",",
"None",
"]",
"]",
"}",
")",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"file",
":",
"store_dict",
"=",
"cPickle",
".",
"load",
"(",
"file",
")",
"network",
".",
"n_inputs",
"=",
"store_dict",
"[",
"\"n_inputs\"",
"]",
"network",
".",
"n_weights",
"=",
"store_dict",
"[",
"\"n_weights\"",
"]",
"network",
".",
"layers",
"=",
"store_dict",
"[",
"\"layers\"",
"]",
"network",
".",
"weights",
"=",
"store_dict",
"[",
"\"weights\"",
"]",
"return",
"network"
] |
Load the complete configuration of a previously stored network.
|
[
"Load",
"the",
"complete",
"configuration",
"of",
"a",
"previously",
"stored",
"network",
"."
] |
617b9940fa157d54d7831c42c0f7ba6857239b9a
|
https://github.com/jorgenkg/python-neural-network/blob/617b9940fa157d54d7831c42c0f7ba6857239b9a/nimblenet/neuralnet.py#L216-L231
|
6,621
|
jorgenkg/python-neural-network
|
nimblenet/preprocessing.py
|
replace_nan
|
def replace_nan( trainingset, replace_with = None ): # if replace_with = None, replaces with mean value
"""
Replace instanced of "not a number" with either the mean of the signal feature
or a specific value assigned by `replace_nan_with`
"""
training_data = np.array( [instance.features for instance in trainingset ] ).astype( np.float64 )
def encoder( dataset ):
for instance in dataset:
instance.features = instance.features.astype( np.float64 )
if np.sum(np.isnan( instance.features )):
if replace_with == None:
instance.features[ np.isnan( instance.features ) ] = means[ np.isnan( instance.features ) ]
else:
instance.features[ np.isnan( instance.features ) ] = replace_with
return dataset
#end
if replace_nan_with == None:
means = np.mean( np.nan_to_num(training_data), axis=0 )
return encoder
|
python
|
def replace_nan( trainingset, replace_with = None ): # if replace_with = None, replaces with mean value
"""
Replace instanced of "not a number" with either the mean of the signal feature
or a specific value assigned by `replace_nan_with`
"""
training_data = np.array( [instance.features for instance in trainingset ] ).astype( np.float64 )
def encoder( dataset ):
for instance in dataset:
instance.features = instance.features.astype( np.float64 )
if np.sum(np.isnan( instance.features )):
if replace_with == None:
instance.features[ np.isnan( instance.features ) ] = means[ np.isnan( instance.features ) ]
else:
instance.features[ np.isnan( instance.features ) ] = replace_with
return dataset
#end
if replace_nan_with == None:
means = np.mean( np.nan_to_num(training_data), axis=0 )
return encoder
|
[
"def",
"replace_nan",
"(",
"trainingset",
",",
"replace_with",
"=",
"None",
")",
":",
"# if replace_with = None, replaces with mean value",
"training_data",
"=",
"np",
".",
"array",
"(",
"[",
"instance",
".",
"features",
"for",
"instance",
"in",
"trainingset",
"]",
")",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"def",
"encoder",
"(",
"dataset",
")",
":",
"for",
"instance",
"in",
"dataset",
":",
"instance",
".",
"features",
"=",
"instance",
".",
"features",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"if",
"np",
".",
"sum",
"(",
"np",
".",
"isnan",
"(",
"instance",
".",
"features",
")",
")",
":",
"if",
"replace_with",
"==",
"None",
":",
"instance",
".",
"features",
"[",
"np",
".",
"isnan",
"(",
"instance",
".",
"features",
")",
"]",
"=",
"means",
"[",
"np",
".",
"isnan",
"(",
"instance",
".",
"features",
")",
"]",
"else",
":",
"instance",
".",
"features",
"[",
"np",
".",
"isnan",
"(",
"instance",
".",
"features",
")",
"]",
"=",
"replace_with",
"return",
"dataset",
"#end",
"if",
"replace_nan_with",
"==",
"None",
":",
"means",
"=",
"np",
".",
"mean",
"(",
"np",
".",
"nan_to_num",
"(",
"training_data",
")",
",",
"axis",
"=",
"0",
")",
"return",
"encoder"
] |
Replace instanced of "not a number" with either the mean of the signal feature
or a specific value assigned by `replace_nan_with`
|
[
"Replace",
"instanced",
"of",
"not",
"a",
"number",
"with",
"either",
"the",
"mean",
"of",
"the",
"signal",
"feature",
"or",
"a",
"specific",
"value",
"assigned",
"by",
"replace_nan_with"
] |
617b9940fa157d54d7831c42c0f7ba6857239b9a
|
https://github.com/jorgenkg/python-neural-network/blob/617b9940fa157d54d7831c42c0f7ba6857239b9a/nimblenet/preprocessing.py#L47-L69
|
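A self-contained sketch of the NaN-replacement idea in the record above: fill NaNs either with a fixed value or with per-feature means. The means are computed with `nan_to_num`, mirroring the library code, and the library's Instance objects are replaced by plain arrays here:

```python
import numpy as np

def make_nan_encoder(training_data, replace_with=None):
    # NaNs count as 0.0 when computing the per-feature means, as in the library code.
    means = np.mean(np.nan_to_num(training_data), axis=0)

    def encoder(dataset):
        dataset = dataset.astype(np.float64)
        mask = np.isnan(dataset)
        if replace_with is None:
            dataset[mask] = means[np.where(mask)[1]]   # column-wise mean for each NaN
        else:
            dataset[mask] = replace_with
        return dataset

    return encoder

X = np.array([[1.0, np.nan], [3.0, 4.0]])
print(make_nan_encoder(X)(X))   # [[1. 2.] [3. 4.]]  (mean of column 1 is (0 + 4) / 2)
```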
6,622
|
jorgenkg/python-neural-network
|
nimblenet/activation_functions.py
|
elliot_function
|
def elliot_function( signal, derivative=False ):
""" A fast approximation of sigmoid """
s = 1 # steepness
abs_signal = (1 + np.abs(signal * s))
if derivative:
return 0.5 * s / abs_signal**2
else:
# Return the activation signal
return 0.5*(signal * s) / abs_signal + 0.5
|
python
|
def elliot_function( signal, derivative=False ):
""" A fast approximation of sigmoid """
s = 1 # steepness
abs_signal = (1 + np.abs(signal * s))
if derivative:
return 0.5 * s / abs_signal**2
else:
# Return the activation signal
return 0.5*(signal * s) / abs_signal + 0.5
|
[
"def",
"elliot_function",
"(",
"signal",
",",
"derivative",
"=",
"False",
")",
":",
"s",
"=",
"1",
"# steepness",
"abs_signal",
"=",
"(",
"1",
"+",
"np",
".",
"abs",
"(",
"signal",
"*",
"s",
")",
")",
"if",
"derivative",
":",
"return",
"0.5",
"*",
"s",
"/",
"abs_signal",
"**",
"2",
"else",
":",
"# Return the activation signal",
"return",
"0.5",
"*",
"(",
"signal",
"*",
"s",
")",
"/",
"abs_signal",
"+",
"0.5"
] |
A fast approximation of sigmoid
|
[
"A",
"fast",
"approximation",
"of",
"sigmoid"
] |
617b9940fa157d54d7831c42c0f7ba6857239b9a
|
https://github.com/jorgenkg/python-neural-network/blob/617b9940fa157d54d7831c42c0f7ba6857239b9a/nimblenet/activation_functions.py#L39-L48
|
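A quick numeric comparison of the Elliot approximation with the exact logistic sigmoid; the approximation shares the range and the fixed point at 0 but saturates more slowly:

```python
import numpy as np

def elliot(signal, s=1.0):
    return 0.5 * (signal * s) / (1 + np.abs(signal * s)) + 0.5

x = np.array([-4.0, -1.0, 0.0, 1.0, 4.0])
print(elliot(x))               # [0.1   0.25  0.5   0.75  0.9 ]
print(1 / (1 + np.exp(-x)))    # [0.018 0.269 0.5   0.731 0.982]  (exact sigmoid)
```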
6,623
|
jorgenkg/python-neural-network
|
nimblenet/activation_functions.py
|
symmetric_elliot_function
|
def symmetric_elliot_function( signal, derivative=False ):
""" A fast approximation of tanh """
s = 1.0 # steepness
abs_signal = (1 + np.abs(signal * s))
if derivative:
return s / abs_signal**2
else:
# Return the activation signal
return (signal * s) / abs_signal
|
python
|
def symmetric_elliot_function( signal, derivative=False ):
""" A fast approximation of tanh """
s = 1.0 # steepness
abs_signal = (1 + np.abs(signal * s))
if derivative:
return s / abs_signal**2
else:
# Return the activation signal
return (signal * s) / abs_signal
|
[
"def",
"symmetric_elliot_function",
"(",
"signal",
",",
"derivative",
"=",
"False",
")",
":",
"s",
"=",
"1.0",
"# steepness",
"abs_signal",
"=",
"(",
"1",
"+",
"np",
".",
"abs",
"(",
"signal",
"*",
"s",
")",
")",
"if",
"derivative",
":",
"return",
"s",
"/",
"abs_signal",
"**",
"2",
"else",
":",
"# Return the activation signal",
"return",
"(",
"signal",
"*",
"s",
")",
"/",
"abs_signal"
] |
A fast approximation of tanh
|
[
"A",
"fast",
"approximation",
"of",
"tanh"
] |
617b9940fa157d54d7831c42c0f7ba6857239b9a
|
https://github.com/jorgenkg/python-neural-network/blob/617b9940fa157d54d7831c42c0f7ba6857239b9a/nimblenet/activation_functions.py#L52-L61
|
6,624
|
jorgenkg/python-neural-network
|
nimblenet/activation_functions.py
|
LReLU_function
|
def LReLU_function( signal, derivative=False, leakage = 0.01 ):
"""
Leaky Rectified Linear Unit
"""
if derivative:
# Return the partial derivation of the activation function
return np.clip(signal > 0, leakage, 1.0)
else:
# Return the activation signal
output = np.copy( signal )
output[ output < 0 ] *= leakage
return output
|
python
|
def LReLU_function( signal, derivative=False, leakage = 0.01 ):
"""
Leaky Rectified Linear Unit
"""
if derivative:
# Return the partial derivation of the activation function
return np.clip(signal > 0, leakage, 1.0)
else:
# Return the activation signal
output = np.copy( signal )
output[ output < 0 ] *= leakage
return output
|
[
"def",
"LReLU_function",
"(",
"signal",
",",
"derivative",
"=",
"False",
",",
"leakage",
"=",
"0.01",
")",
":",
"if",
"derivative",
":",
"# Return the partial derivation of the activation function",
"return",
"np",
".",
"clip",
"(",
"signal",
">",
"0",
",",
"leakage",
",",
"1.0",
")",
"else",
":",
"# Return the activation signal",
"output",
"=",
"np",
".",
"copy",
"(",
"signal",
")",
"output",
"[",
"output",
"<",
"0",
"]",
"*=",
"leakage",
"return",
"output"
] |
Leaky Rectified Linear Unit
|
[
"Leaky",
"Rectified",
"Linear",
"Unit"
] |
617b9940fa157d54d7831c42c0f7ba6857239b9a
|
https://github.com/jorgenkg/python-neural-network/blob/617b9940fa157d54d7831c42c0f7ba6857239b9a/nimblenet/activation_functions.py#L74-L85
|
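A small check of the leaky-ReLU forward pass and its derivative as defined above (note the derivative at exactly 0 evaluates to the leakage value):

```python
import numpy as np

def lrelu(signal, derivative=False, leakage=0.01):
    if derivative:
        return np.clip(signal > 0, leakage, 1.0)   # boolean mask clipped to [leakage, 1]
    output = np.copy(signal)
    output[output < 0] *= leakage
    return output

x = np.array([-2.0, 0.0, 3.0])
print(lrelu(x))                    # [-0.02  0.    3.  ]
print(lrelu(x, derivative=True))   # [0.01  0.01  1.  ]
```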
6,625
|
mikusjelly/apkutils
|
apkutils/apkfile.py
|
is_zipfile
|
def is_zipfile(filename):
"""Quickly see if a file is a ZIP file by checking the magic number.
The filename argument may be a file or file-like object too.
"""
result = False
try:
if hasattr(filename, "read"):
result = _check_zipfile(fp=filename)
else:
with open(filename, "rb") as fp:
result = _check_zipfile(fp)
except OSError:
pass
return result
|
python
|
def is_zipfile(filename):
"""Quickly see if a file is a ZIP file by checking the magic number.
The filename argument may be a file or file-like object too.
"""
result = False
try:
if hasattr(filename, "read"):
result = _check_zipfile(fp=filename)
else:
with open(filename, "rb") as fp:
result = _check_zipfile(fp)
except OSError:
pass
return result
|
[
"def",
"is_zipfile",
"(",
"filename",
")",
":",
"result",
"=",
"False",
"try",
":",
"if",
"hasattr",
"(",
"filename",
",",
"\"read\"",
")",
":",
"result",
"=",
"_check_zipfile",
"(",
"fp",
"=",
"filename",
")",
"else",
":",
"with",
"open",
"(",
"filename",
",",
"\"rb\"",
")",
"as",
"fp",
":",
"result",
"=",
"_check_zipfile",
"(",
"fp",
")",
"except",
"OSError",
":",
"pass",
"return",
"result"
] |
Quickly see if a file is a ZIP file by checking the magic number.
The filename argument may be a file or file-like object too.
|
[
"Quickly",
"see",
"if",
"a",
"file",
"is",
"a",
"ZIP",
"file",
"by",
"checking",
"the",
"magic",
"number",
"."
] |
2db1ed0cdb610dfc55bfd77266e9a91e4764bba4
|
https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L182-L196
|
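A hedged usage sketch for `is_zipfile`; the import path is assumed from the record's repo/path fields, and `app.apk` is an illustrative file that would need to exist:

```python
from apkutils.apkfile import is_zipfile

print(is_zipfile('app.apk'))        # check by filesystem path
with open('app.apk', 'rb') as fh:
    print(is_zipfile(fh))           # or pass an already-open binary file object
```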
6,626
|
mikusjelly/apkutils
|
apkutils/apkfile.py
|
ZipExtFile.readline
|
def readline(self, limit=-1):
"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
"""
if not self._universal and limit < 0:
# Shortcut common case - newline found in buffer.
i = self._readbuffer.find(b'\n', self._offset) + 1
if i > 0:
line = self._readbuffer[self._offset: i]
self._offset = i
return line
if not self._universal:
return io.BufferedIOBase.readline(self, limit)
line = b''
while limit < 0 or len(line) < limit:
readahead = self.peek(2)
if readahead == b'':
return line
#
# Search for universal newlines or line chunks.
#
# The pattern returns either a line chunk or a newline, but not
# both. Combined with peek(2), we are assured that the sequence
# '\r\n' is always retrieved completely and never split into
# separate newlines - '\r', '\n' due to coincidental readaheads.
#
match = self.PATTERN.search(readahead)
newline = match.group('newline')
if newline is not None:
if self.newlines is None:
self.newlines = []
if newline not in self.newlines:
self.newlines.append(newline)
self._offset += len(newline)
return line + b'\n'
chunk = match.group('chunk')
if limit >= 0:
chunk = chunk[: limit - len(line)]
self._offset += len(chunk)
line += chunk
return line
|
python
|
def readline(self, limit=-1):
"""Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
"""
if not self._universal and limit < 0:
# Shortcut common case - newline found in buffer.
i = self._readbuffer.find(b'\n', self._offset) + 1
if i > 0:
line = self._readbuffer[self._offset: i]
self._offset = i
return line
if not self._universal:
return io.BufferedIOBase.readline(self, limit)
line = b''
while limit < 0 or len(line) < limit:
readahead = self.peek(2)
if readahead == b'':
return line
#
# Search for universal newlines or line chunks.
#
# The pattern returns either a line chunk or a newline, but not
# both. Combined with peek(2), we are assured that the sequence
# '\r\n' is always retrieved completely and never split into
# separate newlines - '\r', '\n' due to coincidental readaheads.
#
match = self.PATTERN.search(readahead)
newline = match.group('newline')
if newline is not None:
if self.newlines is None:
self.newlines = []
if newline not in self.newlines:
self.newlines.append(newline)
self._offset += len(newline)
return line + b'\n'
chunk = match.group('chunk')
if limit >= 0:
chunk = chunk[: limit - len(line)]
self._offset += len(chunk)
line += chunk
return line
|
[
"def",
"readline",
"(",
"self",
",",
"limit",
"=",
"-",
"1",
")",
":",
"if",
"not",
"self",
".",
"_universal",
"and",
"limit",
"<",
"0",
":",
"# Shortcut common case - newline found in buffer.",
"i",
"=",
"self",
".",
"_readbuffer",
".",
"find",
"(",
"b'\\n'",
",",
"self",
".",
"_offset",
")",
"+",
"1",
"if",
"i",
">",
"0",
":",
"line",
"=",
"self",
".",
"_readbuffer",
"[",
"self",
".",
"_offset",
":",
"i",
"]",
"self",
".",
"_offset",
"=",
"i",
"return",
"line",
"if",
"not",
"self",
".",
"_universal",
":",
"return",
"io",
".",
"BufferedIOBase",
".",
"readline",
"(",
"self",
",",
"limit",
")",
"line",
"=",
"b''",
"while",
"limit",
"<",
"0",
"or",
"len",
"(",
"line",
")",
"<",
"limit",
":",
"readahead",
"=",
"self",
".",
"peek",
"(",
"2",
")",
"if",
"readahead",
"==",
"b''",
":",
"return",
"line",
"#",
"# Search for universal newlines or line chunks.",
"#",
"# The pattern returns either a line chunk or a newline, but not",
"# both. Combined with peek(2), we are assured that the sequence",
"# '\\r\\n' is always retrieved completely and never split into",
"# separate newlines - '\\r', '\\n' due to coincidental readaheads.",
"#",
"match",
"=",
"self",
".",
"PATTERN",
".",
"search",
"(",
"readahead",
")",
"newline",
"=",
"match",
".",
"group",
"(",
"'newline'",
")",
"if",
"newline",
"is",
"not",
"None",
":",
"if",
"self",
".",
"newlines",
"is",
"None",
":",
"self",
".",
"newlines",
"=",
"[",
"]",
"if",
"newline",
"not",
"in",
"self",
".",
"newlines",
":",
"self",
".",
"newlines",
".",
"append",
"(",
"newline",
")",
"self",
".",
"_offset",
"+=",
"len",
"(",
"newline",
")",
"return",
"line",
"+",
"b'\\n'",
"chunk",
"=",
"match",
".",
"group",
"(",
"'chunk'",
")",
"if",
"limit",
">=",
"0",
":",
"chunk",
"=",
"chunk",
"[",
":",
"limit",
"-",
"len",
"(",
"line",
")",
"]",
"self",
".",
"_offset",
"+=",
"len",
"(",
"chunk",
")",
"line",
"+=",
"chunk",
"return",
"line"
] |
Read and return a line from the stream.
If limit is specified, at most limit bytes will be read.
|
[
"Read",
"and",
"return",
"a",
"line",
"from",
"the",
"stream",
"."
] |
2db1ed0cdb610dfc55bfd77266e9a91e4764bba4
|
https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L758-L806
|
6,627
|
mikusjelly/apkutils
|
apkutils/apkfile.py
|
ZipFile.setpassword
|
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if pwd:
self.pwd = pwd
else:
self.pwd = None
|
python
|
def setpassword(self, pwd):
"""Set default password for encrypted files."""
if pwd and not isinstance(pwd, bytes):
raise TypeError("pwd: expected bytes, got %s" % type(pwd))
if pwd:
self.pwd = pwd
else:
self.pwd = None
|
[
"def",
"setpassword",
"(",
"self",
",",
"pwd",
")",
":",
"if",
"pwd",
"and",
"not",
"isinstance",
"(",
"pwd",
",",
"bytes",
")",
":",
"raise",
"TypeError",
"(",
"\"pwd: expected bytes, got %s\"",
"%",
"type",
"(",
"pwd",
")",
")",
"if",
"pwd",
":",
"self",
".",
"pwd",
"=",
"pwd",
"else",
":",
"self",
".",
"pwd",
"=",
"None"
] |
Set default password for encrypted files.
|
[
"Set",
"default",
"password",
"for",
"encrypted",
"files",
"."
] |
2db1ed0cdb610dfc55bfd77266e9a91e4764bba4
|
https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L1204-L1211
|
6,628
|
mikusjelly/apkutils
|
apkutils/apkfile.py
|
ZipFile._sanitize_windows_name
|
def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname
|
python
|
def _sanitize_windows_name(cls, arcname, pathsep):
"""Replace bad characters and remove trailing dots from parts."""
table = cls._windows_illegal_name_trans_table
if not table:
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))
cls._windows_illegal_name_trans_table = table
arcname = arcname.translate(table)
# remove trailing dots
arcname = (x.rstrip('.') for x in arcname.split(pathsep))
# rejoin, removing empty parts.
arcname = pathsep.join(x for x in arcname if x)
return arcname
|
[
"def",
"_sanitize_windows_name",
"(",
"cls",
",",
"arcname",
",",
"pathsep",
")",
":",
"table",
"=",
"cls",
".",
"_windows_illegal_name_trans_table",
"if",
"not",
"table",
":",
"illegal",
"=",
"':<>|\"?*'",
"table",
"=",
"str",
".",
"maketrans",
"(",
"illegal",
",",
"'_'",
"*",
"len",
"(",
"illegal",
")",
")",
"cls",
".",
"_windows_illegal_name_trans_table",
"=",
"table",
"arcname",
"=",
"arcname",
".",
"translate",
"(",
"table",
")",
"# remove trailing dots",
"arcname",
"=",
"(",
"x",
".",
"rstrip",
"(",
"'.'",
")",
"for",
"x",
"in",
"arcname",
".",
"split",
"(",
"pathsep",
")",
")",
"# rejoin, removing empty parts.",
"arcname",
"=",
"pathsep",
".",
"join",
"(",
"x",
"for",
"x",
"in",
"arcname",
"if",
"x",
")",
"return",
"arcname"
] |
Replace bad characters and remove trailing dots from parts.
|
[
"Replace",
"bad",
"characters",
"and",
"remove",
"trailing",
"dots",
"from",
"parts",
"."
] |
2db1ed0cdb610dfc55bfd77266e9a91e4764bba4
|
https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L1341-L1353
|
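A standalone illustration of the sanitising rules in `_sanitize_windows_name`: illegal Windows characters become underscores, trailing dots are stripped from each path part, and empty parts are dropped:

```python
illegal = ':<>|"?*'
table = str.maketrans(illegal, '_' * len(illegal))

arcname = 'logs:latest/report?.txt./..'
arcname = arcname.translate(table)                    # logs_latest/report_.txt./..
parts = (p.rstrip('.') for p in arcname.split('/'))   # strip trailing dots per part
print('/'.join(p for p in parts if p))                # logs_latest/report_.txt
```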
6,629
|
mikusjelly/apkutils
|
apkutils/apkfile.py
|
ZipFile.close
|
def close(self):
"""Close the file, and for mode 'w', 'x' and 'a' write the ending
records."""
if self.fp is None:
return
try:
if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
self._write_end_record()
finally:
fp = self.fp
self.fp = None
self._fpclose(fp)
|
python
|
def close(self):
"""Close the file, and for mode 'w', 'x' and 'a' write the ending
records."""
if self.fp is None:
return
try:
if self.mode in ('w', 'x', 'a') and self._didModify: # write ending records
with self._lock:
if self._seekable:
self.fp.seek(self.start_dir)
self._write_end_record()
finally:
fp = self.fp
self.fp = None
self._fpclose(fp)
|
[
"def",
"close",
"(",
"self",
")",
":",
"if",
"self",
".",
"fp",
"is",
"None",
":",
"return",
"try",
":",
"if",
"self",
".",
"mode",
"in",
"(",
"'w'",
",",
"'x'",
",",
"'a'",
")",
"and",
"self",
".",
"_didModify",
":",
"# write ending records",
"with",
"self",
".",
"_lock",
":",
"if",
"self",
".",
"_seekable",
":",
"self",
".",
"fp",
".",
"seek",
"(",
"self",
".",
"start_dir",
")",
"self",
".",
"_write_end_record",
"(",
")",
"finally",
":",
"fp",
"=",
"self",
".",
"fp",
"self",
".",
"fp",
"=",
"None",
"self",
".",
"_fpclose",
"(",
"fp",
")"
] |
Close the file, and for mode 'w', 'x' and 'a' write the ending
records.
|
[
"Close",
"the",
"file",
"and",
"for",
"mode",
"w",
"x",
"and",
"a",
"write",
"the",
"ending",
"records",
"."
] |
2db1ed0cdb610dfc55bfd77266e9a91e4764bba4
|
https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/apkfile.py#L1588-L1603
|
6,630
|
mikusjelly/apkutils
|
apkutils/elf/elfparser.py
|
ELF.display_string_dump
|
def display_string_dump(self, section_spec):
""" Display a strings dump of a section. section_spec is either a
section number or a name.
"""
section = _section_from_spec(self.elf_file, section_spec)
if section is None:
print("Section '%s' does not exist in the file!" % section_spec)
return None
data = section.data()
dataptr = 0
strs = []
while dataptr < len(data):
while dataptr < len(data) and not 32 <= byte2int(data[dataptr]) <= 127:
dataptr += 1
if dataptr >= len(data):
break
endptr = dataptr
while endptr < len(data) and byte2int(data[endptr]) != 0:
endptr += 1
strs.append(binascii.b2a_hex(
data[dataptr:endptr]).decode().upper())
dataptr = endptr
return strs
|
python
|
def display_string_dump(self, section_spec):
""" Display a strings dump of a section. section_spec is either a
section number or a name.
"""
section = _section_from_spec(self.elf_file, section_spec)
if section is None:
print("Section '%s' does not exist in the file!" % section_spec)
return None
data = section.data()
dataptr = 0
strs = []
while dataptr < len(data):
while dataptr < len(data) and not 32 <= byte2int(data[dataptr]) <= 127:
dataptr += 1
if dataptr >= len(data):
break
endptr = dataptr
while endptr < len(data) and byte2int(data[endptr]) != 0:
endptr += 1
strs.append(binascii.b2a_hex(
data[dataptr:endptr]).decode().upper())
dataptr = endptr
return strs
|
[
"def",
"display_string_dump",
"(",
"self",
",",
"section_spec",
")",
":",
"section",
"=",
"_section_from_spec",
"(",
"self",
".",
"elf_file",
",",
"section_spec",
")",
"if",
"section",
"is",
"None",
":",
"print",
"(",
"\"Section '%s' does not exist in the file!\"",
"%",
"section_spec",
")",
"return",
"None",
"data",
"=",
"section",
".",
"data",
"(",
")",
"dataptr",
"=",
"0",
"strs",
"=",
"[",
"]",
"while",
"dataptr",
"<",
"len",
"(",
"data",
")",
":",
"while",
"dataptr",
"<",
"len",
"(",
"data",
")",
"and",
"not",
"32",
"<=",
"byte2int",
"(",
"data",
"[",
"dataptr",
"]",
")",
"<=",
"127",
":",
"dataptr",
"+=",
"1",
"if",
"dataptr",
">=",
"len",
"(",
"data",
")",
":",
"break",
"endptr",
"=",
"dataptr",
"while",
"endptr",
"<",
"len",
"(",
"data",
")",
"and",
"byte2int",
"(",
"data",
"[",
"endptr",
"]",
")",
"!=",
"0",
":",
"endptr",
"+=",
"1",
"strs",
".",
"append",
"(",
"binascii",
".",
"b2a_hex",
"(",
"data",
"[",
"dataptr",
":",
"endptr",
"]",
")",
".",
"decode",
"(",
")",
".",
"upper",
"(",
")",
")",
"dataptr",
"=",
"endptr",
"return",
"strs"
] |
Display a strings dump of a section. section_spec is either a
section number or a name.
|
[
"Display",
"a",
"strings",
"dump",
"of",
"a",
"section",
".",
"section_spec",
"is",
"either",
"a",
"section",
"number",
"or",
"a",
"name",
"."
] |
2db1ed0cdb610dfc55bfd77266e9a91e4764bba4
|
https://github.com/mikusjelly/apkutils/blob/2db1ed0cdb610dfc55bfd77266e9a91e4764bba4/apkutils/elf/elfparser.py#L57-L85
|
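display_string_dump above walks the section's bytes, skips non-printable bytes, collects each run up to a NUL terminator, and returns the runs hex-encoded. A self-contained sketch of that scanning loop over a plain bytes object (no ELF parsing; the function name is illustrative):

import binascii

def printable_runs_as_hex(data):
    """Collect runs that start at a printable ASCII byte and end at a NUL byte,
    hex-encoded and upper-cased, mirroring display_string_dump's inner loop."""
    runs = []
    i = 0
    while i < len(data):
        # Skip bytes outside the printable ASCII range 0x20-0x7F.
        while i < len(data) and not 32 <= data[i] <= 127:
            i += 1
        if i >= len(data):
            break
        end = i
        # Extend the run until a NUL terminator (or the end of the data).
        while end < len(data) and data[end] != 0:
            end += 1
        runs.append(binascii.b2a_hex(data[i:end]).decode().upper())
        i = end
    return runs

print(printable_runs_as_hex(b"\x00abc\x00def\x00"))  # ['616263', '646566']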
6,631
|
google/fleetspeak
|
fleetspeak/src/client/daemonservice/client/client.py
|
_EnvOpen
|
def _EnvOpen(var, mode):
"""Open a file descriptor identified by an environment variable."""
value = os.getenv(var)
if value is None:
raise ValueError("%s is not set" % var)
fd = int(value)
# If running on Windows, convert the file handle to a C file descriptor; see:
# https://groups.google.com/forum/#!topic/dev-python/GeN5bFJWfJ4
if _WINDOWS:
fd = msvcrt.open_osfhandle(fd, 0)
return os.fdopen(fd, mode)
|
python
|
def _EnvOpen(var, mode):
"""Open a file descriptor identified by an environment variable."""
value = os.getenv(var)
if value is None:
raise ValueError("%s is not set" % var)
fd = int(value)
# If running on Windows, convert the file handle to a C file descriptor; see:
# https://groups.google.com/forum/#!topic/dev-python/GeN5bFJWfJ4
if _WINDOWS:
fd = msvcrt.open_osfhandle(fd, 0)
return os.fdopen(fd, mode)
|
[
"def",
"_EnvOpen",
"(",
"var",
",",
"mode",
")",
":",
"value",
"=",
"os",
".",
"getenv",
"(",
"var",
")",
"if",
"value",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"%s is not set\"",
"%",
"var",
")",
"fd",
"=",
"int",
"(",
"value",
")",
"# If running on Windows, convert the file handle to a C file descriptor; see:",
"# https://groups.google.com/forum/#!topic/dev-python/GeN5bFJWfJ4",
"if",
"_WINDOWS",
":",
"fd",
"=",
"msvcrt",
".",
"open_osfhandle",
"(",
"fd",
",",
"0",
")",
"return",
"os",
".",
"fdopen",
"(",
"fd",
",",
"mode",
")"
] |
Open a file descriptor identified by an environment variable.
|
[
"Open",
"a",
"file",
"descriptor",
"identified",
"by",
"an",
"environment",
"variable",
"."
] |
bc95dd6941494461d2e5dff0a7f4c78a07ff724d
|
https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/fleetspeak/src/client/daemonservice/client/client.py#L58-L71
|
6,632
|
google/fleetspeak
|
fleetspeak/src/client/daemonservice/client/client.py
|
FleetspeakConnection.Send
|
def Send(self, message):
"""Send a message through Fleetspeak.
Args:
message: A message protocol buffer.
Returns:
Size of the message in bytes.
Raises:
ValueError: If message is not a common_pb2.Message.
"""
if not isinstance(message, common_pb2.Message):
raise ValueError("Send requires a fleetspeak.Message")
if message.destination.service_name == "system":
raise ValueError(
"Only predefined messages can have destination.service_name == \"system\"")
return self._SendImpl(message)
|
python
|
def Send(self, message):
"""Send a message through Fleetspeak.
Args:
message: A message protocol buffer.
Returns:
Size of the message in bytes.
Raises:
ValueError: If message is not a common_pb2.Message.
"""
if not isinstance(message, common_pb2.Message):
raise ValueError("Send requires a fleetspeak.Message")
if message.destination.service_name == "system":
raise ValueError(
"Only predefined messages can have destination.service_name == \"system\"")
return self._SendImpl(message)
|
[
"def",
"Send",
"(",
"self",
",",
"message",
")",
":",
"if",
"not",
"isinstance",
"(",
"message",
",",
"common_pb2",
".",
"Message",
")",
":",
"raise",
"ValueError",
"(",
"\"Send requires a fleetspeak.Message\"",
")",
"if",
"message",
".",
"destination",
".",
"service_name",
"==",
"\"system\"",
":",
"raise",
"ValueError",
"(",
"\"Only predefined messages can have destination.service_name == \\\"system\\\"\"",
")",
"return",
"self",
".",
"_SendImpl",
"(",
"message",
")"
] |
Send a message through Fleetspeak.
Args:
message: A message protocol buffer.
Returns:
Size of the message in bytes.
Raises:
ValueError: If message is not a common_pb2.Message.
|
[
"Send",
"a",
"message",
"through",
"Fleetspeak",
"."
] |
bc95dd6941494461d2e5dff0a7f4c78a07ff724d
|
https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/fleetspeak/src/client/daemonservice/client/client.py#L126-L143
|
6,633
|
google/fleetspeak
|
fleetspeak/src/client/daemonservice/client/client.py
|
FleetspeakConnection.Recv
|
def Recv(self):
"""Accept a message from Fleetspeak.
Returns:
A tuple (common_pb2.Message, size of the message in bytes).
Raises:
ProtocolError: If we receive unexpected data from Fleetspeak.
"""
size = struct.unpack(_STRUCT_FMT, self._ReadN(_STRUCT_LEN))[0]
if size > MAX_SIZE:
raise ProtocolError("Expected size to be at most %d, got %d" % (MAX_SIZE,
size))
with self._read_lock:
buf = self._ReadN(size)
self._ReadMagic()
res = common_pb2.Message()
res.ParseFromString(buf)
return res, len(buf)
|
python
|
def Recv(self):
"""Accept a message from Fleetspeak.
Returns:
A tuple (common_pb2.Message, size of the message in bytes).
Raises:
ProtocolError: If we receive unexpected data from Fleetspeak.
"""
size = struct.unpack(_STRUCT_FMT, self._ReadN(_STRUCT_LEN))[0]
if size > MAX_SIZE:
raise ProtocolError("Expected size to be at most %d, got %d" % (MAX_SIZE,
size))
with self._read_lock:
buf = self._ReadN(size)
self._ReadMagic()
res = common_pb2.Message()
res.ParseFromString(buf)
return res, len(buf)
|
[
"def",
"Recv",
"(",
"self",
")",
":",
"size",
"=",
"struct",
".",
"unpack",
"(",
"_STRUCT_FMT",
",",
"self",
".",
"_ReadN",
"(",
"_STRUCT_LEN",
")",
")",
"[",
"0",
"]",
"if",
"size",
">",
"MAX_SIZE",
":",
"raise",
"ProtocolError",
"(",
"\"Expected size to be at most %d, got %d\"",
"%",
"(",
"MAX_SIZE",
",",
"size",
")",
")",
"with",
"self",
".",
"_read_lock",
":",
"buf",
"=",
"self",
".",
"_ReadN",
"(",
"size",
")",
"self",
".",
"_ReadMagic",
"(",
")",
"res",
"=",
"common_pb2",
".",
"Message",
"(",
")",
"res",
".",
"ParseFromString",
"(",
"buf",
")",
"return",
"res",
",",
"len",
"(",
"buf",
")"
] |
Accept a message from Fleetspeak.
Returns:
A tuple (common_pb2.Message, size of the message in bytes).
Raises:
ProtocolError: If we receive unexpected data from Fleetspeak.
|
[
"Accept",
"a",
"message",
"from",
"Fleetspeak",
"."
] |
bc95dd6941494461d2e5dff0a7f4c78a07ff724d
|
https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/fleetspeak/src/client/daemonservice/client/client.py#L162-L181
|
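Recv above implements simple length-prefixed framing: read a fixed-size length header, reject oversized frames, read the payload, then consume a magic value before parsing the protobuf. A library-free sketch of that framing over an in-memory stream; the struct format, magic value and size limit below are placeholders, not the values used by the real Fleetspeak client:

import io
import struct

SIZE_FMT = "<i"            # illustrative 4-byte little-endian length prefix
MAGIC_FMT = "<I"
MAGIC = 0xF1EE1001         # placeholder magic number
MAX_SIZE = 2 * 1024 * 1024

def write_frame(stream, payload):
    stream.write(struct.pack(SIZE_FMT, len(payload)))
    stream.write(payload)
    stream.write(struct.pack(MAGIC_FMT, MAGIC))

def read_frame(stream):
    size = struct.unpack(SIZE_FMT, stream.read(struct.calcsize(SIZE_FMT)))[0]
    if size > MAX_SIZE:
        raise ValueError("Expected size to be at most %d, got %d" % (MAX_SIZE, size))
    payload = stream.read(size)
    magic = struct.unpack(MAGIC_FMT, stream.read(struct.calcsize(MAGIC_FMT)))[0]
    if magic != MAGIC:
        raise ValueError("bad magic trailer")
    return payload

buf = io.BytesIO()
write_frame(buf, b"hello fleetspeak")
buf.seek(0)
print(read_frame(buf))  # b'hello fleetspeak'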
6,634
|
google/fleetspeak
|
fleetspeak/src/client/daemonservice/client/client.py
|
FleetspeakConnection.Heartbeat
|
def Heartbeat(self):
"""Sends a heartbeat to the Fleetspeak client.
If this daemonservice is configured to use heartbeats, clients that don't
call this method often enough are considered faulty and are restarted by
Fleetspeak.
"""
heartbeat_msg = common_pb2.Message(
message_type="Heartbeat",
destination=common_pb2.Address(service_name="system"))
self._SendImpl(heartbeat_msg)
|
python
|
def Heartbeat(self):
"""Sends a heartbeat to the Fleetspeak client.
If this daemonservice is configured to use heartbeats, clients that don't
call this method often enough are considered faulty and are restarted by
Fleetspeak.
"""
heartbeat_msg = common_pb2.Message(
message_type="Heartbeat",
destination=common_pb2.Address(service_name="system"))
self._SendImpl(heartbeat_msg)
|
[
"def",
"Heartbeat",
"(",
"self",
")",
":",
"heartbeat_msg",
"=",
"common_pb2",
".",
"Message",
"(",
"message_type",
"=",
"\"Heartbeat\"",
",",
"destination",
"=",
"common_pb2",
".",
"Address",
"(",
"service_name",
"=",
"\"system\"",
")",
")",
"self",
".",
"_SendImpl",
"(",
"heartbeat_msg",
")"
] |
Sends a heartbeat to the Fleetspeak client.
If this daemonservice is configured to use heartbeats, clients that don't
call this method often enough are considered faulty and are restarted by
Fleetspeak.
|
[
"Sends",
"a",
"heartbeat",
"to",
"the",
"Fleetspeak",
"client",
"."
] |
bc95dd6941494461d2e5dff0a7f4c78a07ff724d
|
https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/fleetspeak/src/client/daemonservice/client/client.py#L183-L193
|
6,635
|
google/fleetspeak
|
fleetspeak/src/client/daemonservice/client/client.py
|
FleetspeakConnection._ReadN
|
def _ReadN(self, n):
"""Reads n characters from the input stream, or until EOF.
This is equivalent to the current CPython implementation of read(n), but
not guaranteed by the docs.
Args:
n: int
Returns:
string
"""
ret = ""
while True:
chunk = self._read_file.read(n - len(ret))
ret += chunk
if len(ret) == n or not chunk:
return ret
|
python
|
def _ReadN(self, n):
"""Reads n characters from the input stream, or until EOF.
This is equivalent to the current CPython implementation of read(n), but
not guaranteed by the docs.
Args:
n: int
Returns:
string
"""
ret = ""
while True:
chunk = self._read_file.read(n - len(ret))
ret += chunk
if len(ret) == n or not chunk:
return ret
|
[
"def",
"_ReadN",
"(",
"self",
",",
"n",
")",
":",
"ret",
"=",
"\"\"",
"while",
"True",
":",
"chunk",
"=",
"self",
".",
"_read_file",
".",
"read",
"(",
"n",
"-",
"len",
"(",
"ret",
")",
")",
"ret",
"+=",
"chunk",
"if",
"len",
"(",
"ret",
")",
"==",
"n",
"or",
"not",
"chunk",
":",
"return",
"ret"
] |
Reads n characters from the input stream, or until EOF.
This is equivalent to the current CPython implementation of read(n), but
not guaranteed by the docs.
Args:
n: int
Returns:
string
|
[
"Reads",
"n",
"characters",
"from",
"the",
"input",
"stream",
"or",
"until",
"EOF",
"."
] |
bc95dd6941494461d2e5dff0a7f4c78a07ff724d
|
https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/fleetspeak/src/client/daemonservice/client/client.py#L214-L232
|
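_ReadN loops because a single read() on a pipe may return fewer bytes than requested; the loop only stops once n bytes have been collected or read() returns an empty chunk (EOF). A standalone sketch of the same contract over any binary file-like object (the function name is illustrative):

import io

def read_exactly_or_eof(stream, n):
    """Read until n bytes are collected or the stream hits EOF,
    mirroring FleetspeakConnection._ReadN."""
    ret = b""
    while True:
        chunk = stream.read(n - len(ret))
        ret += chunk
        if len(ret) == n or not chunk:
            return ret

s = io.BytesIO(b"abcdef")
print(read_exactly_or_eof(s, 4))   # b'abcd'
print(read_exactly_or_eof(s, 10))  # b'ef' (EOF reached before 10 bytes)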
6,636
|
google/fleetspeak
|
setup.py
|
_CompileProtos
|
def _CompileProtos():
"""Compiles all Fleetspeak protos."""
proto_files = []
for dir_path, _, filenames in os.walk(THIS_DIRECTORY):
for filename in filenames:
if filename.endswith(".proto"):
proto_files.append(os.path.join(dir_path, filename))
if not proto_files:
return
protoc_command = [
"python", "-m", "grpc_tools.protoc",
"--python_out", THIS_DIRECTORY,
"--grpc_python_out", THIS_DIRECTORY,
"--proto_path", THIS_DIRECTORY,
]
protoc_command.extend(proto_files)
subprocess.check_output(protoc_command)
|
python
|
def _CompileProtos():
"""Compiles all Fleetspeak protos."""
proto_files = []
for dir_path, _, filenames in os.walk(THIS_DIRECTORY):
for filename in filenames:
if filename.endswith(".proto"):
proto_files.append(os.path.join(dir_path, filename))
if not proto_files:
return
protoc_command = [
"python", "-m", "grpc_tools.protoc",
"--python_out", THIS_DIRECTORY,
"--grpc_python_out", THIS_DIRECTORY,
"--proto_path", THIS_DIRECTORY,
]
protoc_command.extend(proto_files)
subprocess.check_output(protoc_command)
|
[
"def",
"_CompileProtos",
"(",
")",
":",
"proto_files",
"=",
"[",
"]",
"for",
"dir_path",
",",
"_",
",",
"filenames",
"in",
"os",
".",
"walk",
"(",
"THIS_DIRECTORY",
")",
":",
"for",
"filename",
"in",
"filenames",
":",
"if",
"filename",
".",
"endswith",
"(",
"\".proto\"",
")",
":",
"proto_files",
".",
"append",
"(",
"os",
".",
"path",
".",
"join",
"(",
"dir_path",
",",
"filename",
")",
")",
"if",
"not",
"proto_files",
":",
"return",
"protoc_command",
"=",
"[",
"\"python\"",
",",
"\"-m\"",
",",
"\"grpc_tools.protoc\"",
",",
"\"--python_out\"",
",",
"THIS_DIRECTORY",
",",
"\"--grpc_python_out\"",
",",
"THIS_DIRECTORY",
",",
"\"--proto_path\"",
",",
"THIS_DIRECTORY",
",",
"]",
"protoc_command",
".",
"extend",
"(",
"proto_files",
")",
"subprocess",
".",
"check_output",
"(",
"protoc_command",
")"
] |
Compiles all Fleetspeak protos.
|
[
"Compiles",
"all",
"Fleetspeak",
"protos",
"."
] |
bc95dd6941494461d2e5dff0a7f4c78a07ff724d
|
https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/setup.py#L42-L58
|
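_CompileProtos above just collects every .proto under the source tree and shells out to the grpc_tools.protoc module once. A minimal equivalent invocation for a single file, assuming grpcio-tools is installed; the proto path below is hypothetical:

import subprocess
import sys

subprocess.check_output([
    sys.executable, "-m", "grpc_tools.protoc",
    "--python_out", ".",        # where the *_pb2.py message modules go
    "--grpc_python_out", ".",   # where the *_pb2_grpc.py service stubs go
    "--proto_path", ".",        # import root for resolving .proto imports
    "example/service.proto",    # hypothetical proto file
])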
6,637
|
google/fleetspeak
|
fleetspeak/src/server/grpcservice/client/client.py
|
OutgoingConnection._RetryLoop
|
def _RetryLoop(self, func, timeout=None):
"""Retries an operation until success or deadline.
Args:
func: The function to run. Must take a timeout, in seconds, as a single
        parameter. If it raises grpc.RpcError and the deadline has not been reached,
it will be run again.
timeout: Retries will continue until timeout seconds have passed.
"""
timeout = timeout or self.DEFAULT_TIMEOUT
deadline = time.time() + timeout
sleep = 1
while True:
try:
return func(timeout)
except grpc.RpcError:
if time.time() + sleep > deadline:
raise
time.sleep(sleep)
sleep *= 2
timeout = deadline - time.time()
|
python
|
def _RetryLoop(self, func, timeout=None):
"""Retries an operation until success or deadline.
Args:
func: The function to run. Must take a timeout, in seconds, as a single
        parameter. If it raises grpc.RpcError and the deadline has not been reached,
it will be run again.
timeout: Retries will continue until timeout seconds have passed.
"""
timeout = timeout or self.DEFAULT_TIMEOUT
deadline = time.time() + timeout
sleep = 1
while True:
try:
return func(timeout)
except grpc.RpcError:
if time.time() + sleep > deadline:
raise
time.sleep(sleep)
sleep *= 2
timeout = deadline - time.time()
|
[
"def",
"_RetryLoop",
"(",
"self",
",",
"func",
",",
"timeout",
"=",
"None",
")",
":",
"timeout",
"=",
"timeout",
"or",
"self",
".",
"DEFAULT_TIMEOUT",
"deadline",
"=",
"time",
".",
"time",
"(",
")",
"+",
"timeout",
"sleep",
"=",
"1",
"while",
"True",
":",
"try",
":",
"return",
"func",
"(",
"timeout",
")",
"except",
"grpc",
".",
"RpcError",
":",
"if",
"time",
".",
"time",
"(",
")",
"+",
"sleep",
">",
"deadline",
":",
"raise",
"time",
".",
"sleep",
"(",
"sleep",
")",
"sleep",
"*=",
"2",
"timeout",
"=",
"deadline",
"-",
"time",
".",
"time",
"(",
")"
] |
Retries an operation until success or deadline.
Args:
func: The function to run. Must take a timeout, in seconds, as a single
  parameter. If it raises grpc.RpcError and the deadline has not been reached,
it will be run again.
timeout: Retries will continue until timeout seconds have passed.
|
[
"Retries",
"an",
"operation",
"until",
"success",
"or",
"deadline",
"."
] |
bc95dd6941494461d2e5dff0a7f4c78a07ff724d
|
https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/fleetspeak/src/server/grpcservice/client/client.py#L150-L172
|
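_RetryLoop gives every attempt the remaining time budget, sleeps between failures with an exponentially growing delay, and gives up once the next sleep would cross the deadline. A gRPC-free sketch of the same policy (the retriable exception type and timings are illustrative):

import time

def retry_with_backoff(func, timeout=30.0, retriable=(OSError,)):
    """Call func(remaining_timeout) until it succeeds or the deadline passes,
    doubling the sleep between attempts, as in OutgoingConnection._RetryLoop."""
    deadline = time.time() + timeout
    sleep = 1
    while True:
        try:
            return func(deadline - time.time())
        except retriable:
            if time.time() + sleep > deadline:
                raise          # the next wait would overshoot the deadline
            time.sleep(sleep)
            sleep *= 2

# Example: an operation that fails twice before succeeding.
attempts = {"n": 0}
def flaky(remaining):
    attempts["n"] += 1
    if attempts["n"] < 3:
        raise OSError("transient failure")
    return "ok after %d attempts (%.1fs left)" % (attempts["n"], remaining)

print(retry_with_backoff(flaky, timeout=10.0))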
6,638
|
google/fleetspeak
|
fleetspeak/src/server/grpcservice/client/client.py
|
OutgoingConnection.InsertMessage
|
def InsertMessage(self, message, timeout=None):
"""Inserts a message into the Fleetspeak server.
Sets message.source, if unset.
Args:
message: common_pb2.Message
The message to send.
timeout: How many seconds to try for.
Raises:
grpc.RpcError: if the RPC fails.
InvalidArgument: if message is not a common_pb2.Message.
"""
if not isinstance(message, common_pb2.Message):
raise InvalidArgument("Attempt to send unexpected message type: %s" %
message.__class__.__name__)
if not message.HasField("source"):
message.source.service_name = self._service_name
# Sometimes GRPC reports failure, even though the call succeeded. To prevent
# retry logic from creating duplicate messages we fix the message_id.
if not message.message_id:
message.message_id = os.urandom(32)
return self._RetryLoop(
lambda t: self._stub.InsertMessage(message, timeout=t))
|
python
|
def InsertMessage(self, message, timeout=None):
"""Inserts a message into the Fleetspeak server.
Sets message.source, if unset.
Args:
message: common_pb2.Message
The message to send.
timeout: How many seconds to try for.
Raises:
grpc.RpcError: if the RPC fails.
InvalidArgument: if message is not a common_pb2.Message.
"""
if not isinstance(message, common_pb2.Message):
raise InvalidArgument("Attempt to send unexpected message type: %s" %
message.__class__.__name__)
if not message.HasField("source"):
message.source.service_name = self._service_name
# Sometimes GRPC reports failure, even though the call succeeded. To prevent
# retry logic from creating duplicate messages we fix the message_id.
if not message.message_id:
message.message_id = os.urandom(32)
return self._RetryLoop(
lambda t: self._stub.InsertMessage(message, timeout=t))
|
[
"def",
"InsertMessage",
"(",
"self",
",",
"message",
",",
"timeout",
"=",
"None",
")",
":",
"if",
"not",
"isinstance",
"(",
"message",
",",
"common_pb2",
".",
"Message",
")",
":",
"raise",
"InvalidArgument",
"(",
"\"Attempt to send unexpected message type: %s\"",
"%",
"message",
".",
"__class__",
".",
"__name__",
")",
"if",
"not",
"message",
".",
"HasField",
"(",
"\"source\"",
")",
":",
"message",
".",
"source",
".",
"service_name",
"=",
"self",
".",
"_service_name",
"# Sometimes GRPC reports failure, even though the call succeeded. To prevent",
"# retry logic from creating duplicate messages we fix the message_id.",
"if",
"not",
"message",
".",
"message_id",
":",
"message",
".",
"message_id",
"=",
"os",
".",
"urandom",
"(",
"32",
")",
"return",
"self",
".",
"_RetryLoop",
"(",
"lambda",
"t",
":",
"self",
".",
"_stub",
".",
"InsertMessage",
"(",
"message",
",",
"timeout",
"=",
"t",
")",
")"
] |
Inserts a message into the Fleetspeak server.
Sets message.source, if unset.
Args:
message: common_pb2.Message
The message to send.
timeout: How many seconds to try for.
Raises:
grpc.RpcError: if the RPC fails.
InvalidArgument: if message is not a common_pb2.Message.
|
[
"Inserts",
"a",
"message",
"into",
"the",
"Fleetspeak",
"server",
"."
] |
bc95dd6941494461d2e5dff0a7f4c78a07ff724d
|
https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/fleetspeak/src/server/grpcservice/client/client.py#L174-L202
|
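The interesting detail above is that message_id is fixed before the retry loop runs: if gRPC reports a failure for a call that actually landed, the retried insert reuses the same id and the server deduplicates instead of storing the message twice. A toy sketch of that idempotency idea with a dict standing in for the server (no gRPC or protobufs involved; all names are illustrative):

import os

server_store = {}   # message_id -> payload; stands in for the Fleetspeak datastore
calls = {"n": 0}

def unreliable_insert(message_id, payload):
    """Pretend RPC: the write always lands, but the first two calls report failure."""
    server_store[message_id] = payload
    calls["n"] += 1
    if calls["n"] <= 2:
        raise ConnectionError("spurious failure after a successful write")

def insert_with_retries(payload, attempts=5):
    message_id = os.urandom(32)   # fixed once, up front
    for _ in range(attempts):
        try:
            unreliable_insert(message_id, payload)
            return message_id
        except ConnectionError:
            continue              # the retry reuses the same message_id
    raise RuntimeError("gave up")

insert_with_retries(b"hello")
print(len(server_store))  # 1: retries overwrote the same key instead of duplicating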
6,639
|
google/fleetspeak
|
fleetspeak/src/server/grpcservice/client/client.py
|
OutgoingConnection.ListClients
|
def ListClients(self, request, timeout=None):
"""Provides basic information about Fleetspeak clients.
Args:
request: fleetspeak.admin.ListClientsRequest
timeout: How many seconds to try for.
Returns: fleetspeak.admin.ListClientsResponse
"""
return self._RetryLoop(
lambda t: self._stub.ListClients(request, timeout=t))
|
python
|
def ListClients(self, request, timeout=None):
"""Provides basic information about Fleetspeak clients.
Args:
request: fleetspeak.admin.ListClientsRequest
timeout: How many seconds to try for.
Returns: fleetspeak.admin.ListClientsResponse
"""
return self._RetryLoop(
lambda t: self._stub.ListClients(request, timeout=t))
|
[
"def",
"ListClients",
"(",
"self",
",",
"request",
",",
"timeout",
"=",
"None",
")",
":",
"return",
"self",
".",
"_RetryLoop",
"(",
"lambda",
"t",
":",
"self",
".",
"_stub",
".",
"ListClients",
"(",
"request",
",",
"timeout",
"=",
"t",
")",
")"
] |
Provides basic information about Fleetspeak clients.
Args:
request: fleetspeak.admin.ListClientsRequest
timeout: How many seconds to try for.
Returns: fleetspeak.admin.ListClientsResponse
|
[
"Provides",
"basic",
"information",
"about",
"Fleetspeak",
"clients",
"."
] |
bc95dd6941494461d2e5dff0a7f4c78a07ff724d
|
https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/fleetspeak/src/server/grpcservice/client/client.py#L204-L215
|
6,640
|
google/fleetspeak
|
fleetspeak/src/server/grpcservice/client/client.py
|
InsecureGRPCServiceClient.Send
|
def Send(self, message):
"""Send one message.
Deprecated, users should migrate to call self.outgoing.InsertMessage
directly.
"""
if not self.outgoing:
raise NotConfigured("Send address not provided.")
self.outgoing.InsertMessage(message)
|
python
|
def Send(self, message):
"""Send one message.
Deprecated, users should migrate to call self.outgoing.InsertMessage
directly.
"""
if not self.outgoing:
raise NotConfigured("Send address not provided.")
self.outgoing.InsertMessage(message)
|
[
"def",
"Send",
"(",
"self",
",",
"message",
")",
":",
"if",
"not",
"self",
".",
"outgoing",
":",
"raise",
"NotConfigured",
"(",
"\"Send address not provided.\"",
")",
"self",
".",
"outgoing",
".",
"InsertMessage",
"(",
"message",
")"
] |
Send one message.
Deprecated, users should migrate to call self.outgoing.InsertMessage
directly.
|
[
"Send",
"one",
"message",
"."
] |
bc95dd6941494461d2e5dff0a7f4c78a07ff724d
|
https://github.com/google/fleetspeak/blob/bc95dd6941494461d2e5dff0a7f4c78a07ff724d/fleetspeak/src/server/grpcservice/client/client.py#L325-L333
|
6,641
|
reiinakano/xcessiv
|
xcessiv/automatedruns.py
|
start_naive_bayes
|
def start_naive_bayes(automated_run, session, path):
"""Starts naive bayes automated run
Args:
automated_run (xcessiv.models.AutomatedRun): Automated run object
session: Valid SQLAlchemy session
path (str, unicode): Path to project folder
"""
module = functions.import_string_code_as_module(automated_run.source)
random_state = 8 if not hasattr(module, 'random_state') else module.random_state
assert module.metric_to_optimize in automated_run.base_learner_origin.metric_generators
# get non-searchable parameters
base_estimator = automated_run.base_learner_origin.return_estimator()
base_estimator.set_params(**module.default_params)
default_params = functions.make_serializable(base_estimator.get_params())
non_searchable_params = dict((key, val) for key, val in iteritems(default_params)
if key not in module.pbounds)
# get already calculated base learners in search space
existing_base_learners = []
for base_learner in automated_run.base_learner_origin.base_learners:
if not base_learner.job_status == 'finished':
continue
in_search_space = True
for key, val in iteritems(non_searchable_params):
if base_learner.hyperparameters[key] != val:
in_search_space = False
break # If no match, move on to the next base learner
if in_search_space:
existing_base_learners.append(base_learner)
# build initialize dictionary
target = []
initialization_dict = dict((key, list()) for key in module.pbounds.keys())
for base_learner in existing_base_learners:
# check if base learner's searchable hyperparameters are all numerical
all_numerical = True
for key in module.pbounds.keys():
if not isinstance(base_learner.hyperparameters[key], numbers.Number):
all_numerical = False
break
if not all_numerical:
continue # if there is a non-numerical hyperparameter, skip this.
for key in module.pbounds.keys():
initialization_dict[key].append(base_learner.hyperparameters[key])
target.append(base_learner.individual_score[module.metric_to_optimize])
initialization_dict['target'] = target if not module.invert_metric \
else list(map(lambda x: -x, target))
print('{} existing in initialization dictionary'.
format(len(initialization_dict['target'])))
# Create function to be optimized
func_to_optimize = return_func_to_optimize(
path, session, automated_run.base_learner_origin, module.default_params,
module.metric_to_optimize, module.invert_metric, set(module.integers)
)
# Create Bayes object
bo = BayesianOptimization(func_to_optimize, module.pbounds)
bo.initialize(initialization_dict)
np.random.seed(random_state)
bo.maximize(**module.maximize_config)
|
python
|
def start_naive_bayes(automated_run, session, path):
"""Starts naive bayes automated run
Args:
automated_run (xcessiv.models.AutomatedRun): Automated run object
session: Valid SQLAlchemy session
path (str, unicode): Path to project folder
"""
module = functions.import_string_code_as_module(automated_run.source)
random_state = 8 if not hasattr(module, 'random_state') else module.random_state
assert module.metric_to_optimize in automated_run.base_learner_origin.metric_generators
# get non-searchable parameters
base_estimator = automated_run.base_learner_origin.return_estimator()
base_estimator.set_params(**module.default_params)
default_params = functions.make_serializable(base_estimator.get_params())
non_searchable_params = dict((key, val) for key, val in iteritems(default_params)
if key not in module.pbounds)
# get already calculated base learners in search space
existing_base_learners = []
for base_learner in automated_run.base_learner_origin.base_learners:
if not base_learner.job_status == 'finished':
continue
in_search_space = True
for key, val in iteritems(non_searchable_params):
if base_learner.hyperparameters[key] != val:
in_search_space = False
break # If no match, move on to the next base learner
if in_search_space:
existing_base_learners.append(base_learner)
# build initialize dictionary
target = []
initialization_dict = dict((key, list()) for key in module.pbounds.keys())
for base_learner in existing_base_learners:
# check if base learner's searchable hyperparameters are all numerical
all_numerical = True
for key in module.pbounds.keys():
if not isinstance(base_learner.hyperparameters[key], numbers.Number):
all_numerical = False
break
if not all_numerical:
continue # if there is a non-numerical hyperparameter, skip this.
for key in module.pbounds.keys():
initialization_dict[key].append(base_learner.hyperparameters[key])
target.append(base_learner.individual_score[module.metric_to_optimize])
initialization_dict['target'] = target if not module.invert_metric \
else list(map(lambda x: -x, target))
print('{} existing in initialization dictionary'.
format(len(initialization_dict['target'])))
# Create function to be optimized
func_to_optimize = return_func_to_optimize(
path, session, automated_run.base_learner_origin, module.default_params,
module.metric_to_optimize, module.invert_metric, set(module.integers)
)
# Create Bayes object
bo = BayesianOptimization(func_to_optimize, module.pbounds)
bo.initialize(initialization_dict)
np.random.seed(random_state)
bo.maximize(**module.maximize_config)
|
[
"def",
"start_naive_bayes",
"(",
"automated_run",
",",
"session",
",",
"path",
")",
":",
"module",
"=",
"functions",
".",
"import_string_code_as_module",
"(",
"automated_run",
".",
"source",
")",
"random_state",
"=",
"8",
"if",
"not",
"hasattr",
"(",
"module",
",",
"'random_state'",
")",
"else",
"module",
".",
"random_state",
"assert",
"module",
".",
"metric_to_optimize",
"in",
"automated_run",
".",
"base_learner_origin",
".",
"metric_generators",
"# get non-searchable parameters",
"base_estimator",
"=",
"automated_run",
".",
"base_learner_origin",
".",
"return_estimator",
"(",
")",
"base_estimator",
".",
"set_params",
"(",
"*",
"*",
"module",
".",
"default_params",
")",
"default_params",
"=",
"functions",
".",
"make_serializable",
"(",
"base_estimator",
".",
"get_params",
"(",
")",
")",
"non_searchable_params",
"=",
"dict",
"(",
"(",
"key",
",",
"val",
")",
"for",
"key",
",",
"val",
"in",
"iteritems",
"(",
"default_params",
")",
"if",
"key",
"not",
"in",
"module",
".",
"pbounds",
")",
"# get already calculated base learners in search space",
"existing_base_learners",
"=",
"[",
"]",
"for",
"base_learner",
"in",
"automated_run",
".",
"base_learner_origin",
".",
"base_learners",
":",
"if",
"not",
"base_learner",
".",
"job_status",
"==",
"'finished'",
":",
"continue",
"in_search_space",
"=",
"True",
"for",
"key",
",",
"val",
"in",
"iteritems",
"(",
"non_searchable_params",
")",
":",
"if",
"base_learner",
".",
"hyperparameters",
"[",
"key",
"]",
"!=",
"val",
":",
"in_search_space",
"=",
"False",
"break",
"# If no match, move on to the next base learner",
"if",
"in_search_space",
":",
"existing_base_learners",
".",
"append",
"(",
"base_learner",
")",
"# build initialize dictionary",
"target",
"=",
"[",
"]",
"initialization_dict",
"=",
"dict",
"(",
"(",
"key",
",",
"list",
"(",
")",
")",
"for",
"key",
"in",
"module",
".",
"pbounds",
".",
"keys",
"(",
")",
")",
"for",
"base_learner",
"in",
"existing_base_learners",
":",
"# check if base learner's searchable hyperparameters are all numerical",
"all_numerical",
"=",
"True",
"for",
"key",
"in",
"module",
".",
"pbounds",
".",
"keys",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"base_learner",
".",
"hyperparameters",
"[",
"key",
"]",
",",
"numbers",
".",
"Number",
")",
":",
"all_numerical",
"=",
"False",
"break",
"if",
"not",
"all_numerical",
":",
"continue",
"# if there is a non-numerical hyperparameter, skip this.",
"for",
"key",
"in",
"module",
".",
"pbounds",
".",
"keys",
"(",
")",
":",
"initialization_dict",
"[",
"key",
"]",
".",
"append",
"(",
"base_learner",
".",
"hyperparameters",
"[",
"key",
"]",
")",
"target",
".",
"append",
"(",
"base_learner",
".",
"individual_score",
"[",
"module",
".",
"metric_to_optimize",
"]",
")",
"initialization_dict",
"[",
"'target'",
"]",
"=",
"target",
"if",
"not",
"module",
".",
"invert_metric",
"else",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"-",
"x",
",",
"target",
")",
")",
"print",
"(",
"'{} existing in initialization dictionary'",
".",
"format",
"(",
"len",
"(",
"initialization_dict",
"[",
"'target'",
"]",
")",
")",
")",
"# Create function to be optimized",
"func_to_optimize",
"=",
"return_func_to_optimize",
"(",
"path",
",",
"session",
",",
"automated_run",
".",
"base_learner_origin",
",",
"module",
".",
"default_params",
",",
"module",
".",
"metric_to_optimize",
",",
"module",
".",
"invert_metric",
",",
"set",
"(",
"module",
".",
"integers",
")",
")",
"# Create Bayes object",
"bo",
"=",
"BayesianOptimization",
"(",
"func_to_optimize",
",",
"module",
".",
"pbounds",
")",
"bo",
".",
"initialize",
"(",
"initialization_dict",
")",
"np",
".",
"random",
".",
"seed",
"(",
"random_state",
")",
"bo",
".",
"maximize",
"(",
"*",
"*",
"module",
".",
"maximize_config",
")"
] |
Starts naive bayes automated run
Args:
automated_run (xcessiv.models.AutomatedRun): Automated run object
session: Valid SQLAlchemy session
path (str, unicode): Path to project folder
|
[
"Starts",
"naive",
"bayes",
"automated",
"run"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/automatedruns.py#L139-L207
|
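start_naive_bayes seeds the Bayesian optimizer with every finished base learner whose fixed hyperparameters match the search setup and whose searchable hyperparameters are all numerical, negating the targets when the metric should be minimized. A library-free sketch of building that warm-start dictionary; the bounds, prior runs and scores below are made up:

import numbers

pbounds = {"C": (0.01, 100.0), "gamma": (1e-4, 1.0)}
invert_metric = False  # True would mean "smaller is better", so targets get negated
prior_runs = [
    {"hyperparameters": {"C": 1.0, "gamma": 0.01}, "score": 0.91},
    {"hyperparameters": {"C": 10.0, "gamma": "auto"}, "score": 0.93},  # non-numerical, skipped
    {"hyperparameters": {"C": 50.0, "gamma": 0.1}, "score": 0.88},
]

initialization = {key: [] for key in pbounds}
target = []
for run in prior_runs:
    params = run["hyperparameters"]
    # Only purely numerical points can seed the surrogate model.
    if not all(isinstance(params[k], numbers.Number) for k in pbounds):
        continue
    for k in pbounds:
        initialization[k].append(params[k])
    target.append(run["score"])
initialization["target"] = target if not invert_metric else [-t for t in target]

print(initialization)
# {'C': [1.0, 50.0], 'gamma': [0.01, 0.1], 'target': [0.91, 0.88]}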
6,642
|
reiinakano/xcessiv
|
xcessiv/automatedruns.py
|
start_tpot
|
def start_tpot(automated_run, session, path):
"""Starts a TPOT automated run that exports directly to base learner setup
Args:
automated_run (xcessiv.models.AutomatedRun): Automated run object
session: Valid SQLAlchemy session
path (str, unicode): Path to project folder
"""
module = functions.import_string_code_as_module(automated_run.source)
extraction = session.query(models.Extraction).first()
X, y = extraction.return_train_dataset()
tpot_learner = module.tpot_learner
tpot_learner.fit(X, y)
temp_filename = os.path.join(path, 'tpot-temp-export-{}'.format(os.getpid()))
tpot_learner.export(temp_filename)
with open(temp_filename) as f:
base_learner_source = f.read()
base_learner_source = constants.tpot_learner_docstring + base_learner_source
try:
os.remove(temp_filename)
except OSError:
pass
blo = models.BaseLearnerOrigin(
source=base_learner_source,
name='TPOT Learner',
meta_feature_generator='predict'
)
session.add(blo)
session.commit()
|
python
|
def start_tpot(automated_run, session, path):
"""Starts a TPOT automated run that exports directly to base learner setup
Args:
automated_run (xcessiv.models.AutomatedRun): Automated run object
session: Valid SQLAlchemy session
path (str, unicode): Path to project folder
"""
module = functions.import_string_code_as_module(automated_run.source)
extraction = session.query(models.Extraction).first()
X, y = extraction.return_train_dataset()
tpot_learner = module.tpot_learner
tpot_learner.fit(X, y)
temp_filename = os.path.join(path, 'tpot-temp-export-{}'.format(os.getpid()))
tpot_learner.export(temp_filename)
with open(temp_filename) as f:
base_learner_source = f.read()
base_learner_source = constants.tpot_learner_docstring + base_learner_source
try:
os.remove(temp_filename)
except OSError:
pass
blo = models.BaseLearnerOrigin(
source=base_learner_source,
name='TPOT Learner',
meta_feature_generator='predict'
)
session.add(blo)
session.commit()
|
[
"def",
"start_tpot",
"(",
"automated_run",
",",
"session",
",",
"path",
")",
":",
"module",
"=",
"functions",
".",
"import_string_code_as_module",
"(",
"automated_run",
".",
"source",
")",
"extraction",
"=",
"session",
".",
"query",
"(",
"models",
".",
"Extraction",
")",
".",
"first",
"(",
")",
"X",
",",
"y",
"=",
"extraction",
".",
"return_train_dataset",
"(",
")",
"tpot_learner",
"=",
"module",
".",
"tpot_learner",
"tpot_learner",
".",
"fit",
"(",
"X",
",",
"y",
")",
"temp_filename",
"=",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"'tpot-temp-export-{}'",
".",
"format",
"(",
"os",
".",
"getpid",
"(",
")",
")",
")",
"tpot_learner",
".",
"export",
"(",
"temp_filename",
")",
"with",
"open",
"(",
"temp_filename",
")",
"as",
"f",
":",
"base_learner_source",
"=",
"f",
".",
"read",
"(",
")",
"base_learner_source",
"=",
"constants",
".",
"tpot_learner_docstring",
"+",
"base_learner_source",
"try",
":",
"os",
".",
"remove",
"(",
"temp_filename",
")",
"except",
"OSError",
":",
"pass",
"blo",
"=",
"models",
".",
"BaseLearnerOrigin",
"(",
"source",
"=",
"base_learner_source",
",",
"name",
"=",
"'TPOT Learner'",
",",
"meta_feature_generator",
"=",
"'predict'",
")",
"session",
".",
"add",
"(",
"blo",
")",
"session",
".",
"commit",
"(",
")"
] |
Starts a TPOT automated run that exports directly to base learner setup
Args:
automated_run (xcessiv.models.AutomatedRun): Automated run object
session: Valid SQLAlchemy session
path (str, unicode): Path to project folder
|
[
"Starts",
"a",
"TPOT",
"automated",
"run",
"that",
"exports",
"directly",
"to",
"base",
"learner",
"setup"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/automatedruns.py#L210-L248
|
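start_tpot only needs three TPOT calls: fit a learner on the training data, export the best pipeline to a Python script, and read that script back as the new base learner's source. A minimal standalone sketch of those calls, assuming the tpot package is installed; the dataset and the deliberately tiny search budget are illustrative:

from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split
from tpot import TPOTClassifier

X, y = load_digits(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state=0)

# A very small search so the example finishes quickly.
tpot_learner = TPOTClassifier(generations=1, population_size=10, random_state=0)
tpot_learner.fit(X_train, y_train)

# export() writes a standalone script for the best pipeline found, which is
# what start_tpot reads back in and stores as a BaseLearnerOrigin source.
tpot_learner.export("tpot_exported_pipeline.py")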
6,643
|
reiinakano/xcessiv
|
xcessiv/automatedruns.py
|
start_greedy_ensemble_search
|
def start_greedy_ensemble_search(automated_run, session, path):
"""Starts an automated ensemble search using greedy forward model selection.
The steps for this search are adapted from "Ensemble Selection from Libraries of Models" by
Caruana.
1. Start with the empty ensemble
    2. Add to the ensemble the model in the library that maximizes the ensemble's
performance on the error metric.
3. Repeat step 2 for a fixed number of iterations or until all models have been used.
Args:
automated_run (xcessiv.models.AutomatedRun): Automated run object
session: Valid SQLAlchemy session
path (str, unicode): Path to project folder
"""
module = functions.import_string_code_as_module(automated_run.source)
assert module.metric_to_optimize in automated_run.base_learner_origin.metric_generators
best_ensemble = [] # List containing IDs of best performing ensemble for the last round
secondary_learner = automated_run.base_learner_origin.return_estimator()
secondary_learner.set_params(**module.secondary_learner_hyperparameters)
for i in range(module.max_num_base_learners):
best_score = -float('inf') # Best metric for this round (not in total!)
current_ensemble = best_ensemble[:] # Shallow copy of best ensemble
for base_learner in session.query(models.BaseLearner).filter_by(job_status='finished').all():
if base_learner in current_ensemble: # Don't append when learner is already in
continue
current_ensemble.append(base_learner)
# Check if our "best ensemble" already exists
existing_ensemble = session.query(models.StackedEnsemble).\
filter_by(base_learner_origin_id=automated_run.base_learner_origin.id,
secondary_learner_hyperparameters=secondary_learner.get_params(),
base_learner_ids=sorted([bl.id for bl in current_ensemble])).first()
if existing_ensemble and existing_ensemble.job_status == 'finished':
score = existing_ensemble.individual_score[module.metric_to_optimize]
elif existing_ensemble and existing_ensemble.job_status != 'finished':
eval_stacked_ensemble(existing_ensemble, session, path)
score = existing_ensemble.individual_score[module.metric_to_optimize]
else:
stacked_ensemble = models.StackedEnsemble(
secondary_learner_hyperparameters=secondary_learner.get_params(),
base_learners=current_ensemble,
base_learner_origin=automated_run.base_learner_origin,
job_status='started'
)
session.add(stacked_ensemble)
session.commit()
eval_stacked_ensemble(stacked_ensemble, session, path)
score = stacked_ensemble.individual_score[module.metric_to_optimize]
score = -score if module.invert_metric else score
if best_score < score:
best_score = score
best_ensemble = current_ensemble[:]
current_ensemble.pop()
|
python
|
def start_greedy_ensemble_search(automated_run, session, path):
"""Starts an automated ensemble search using greedy forward model selection.
The steps for this search are adapted from "Ensemble Selection from Libraries of Models" by
Caruana.
1. Start with the empty ensemble
    2. Add to the ensemble the model in the library that maximizes the ensemble's
performance on the error metric.
3. Repeat step 2 for a fixed number of iterations or until all models have been used.
Args:
automated_run (xcessiv.models.AutomatedRun): Automated run object
session: Valid SQLAlchemy session
path (str, unicode): Path to project folder
"""
module = functions.import_string_code_as_module(automated_run.source)
assert module.metric_to_optimize in automated_run.base_learner_origin.metric_generators
best_ensemble = [] # List containing IDs of best performing ensemble for the last round
secondary_learner = automated_run.base_learner_origin.return_estimator()
secondary_learner.set_params(**module.secondary_learner_hyperparameters)
for i in range(module.max_num_base_learners):
best_score = -float('inf') # Best metric for this round (not in total!)
current_ensemble = best_ensemble[:] # Shallow copy of best ensemble
for base_learner in session.query(models.BaseLearner).filter_by(job_status='finished').all():
if base_learner in current_ensemble: # Don't append when learner is already in
continue
current_ensemble.append(base_learner)
# Check if our "best ensemble" already exists
existing_ensemble = session.query(models.StackedEnsemble).\
filter_by(base_learner_origin_id=automated_run.base_learner_origin.id,
secondary_learner_hyperparameters=secondary_learner.get_params(),
base_learner_ids=sorted([bl.id for bl in current_ensemble])).first()
if existing_ensemble and existing_ensemble.job_status == 'finished':
score = existing_ensemble.individual_score[module.metric_to_optimize]
elif existing_ensemble and existing_ensemble.job_status != 'finished':
eval_stacked_ensemble(existing_ensemble, session, path)
score = existing_ensemble.individual_score[module.metric_to_optimize]
else:
stacked_ensemble = models.StackedEnsemble(
secondary_learner_hyperparameters=secondary_learner.get_params(),
base_learners=current_ensemble,
base_learner_origin=automated_run.base_learner_origin,
job_status='started'
)
session.add(stacked_ensemble)
session.commit()
eval_stacked_ensemble(stacked_ensemble, session, path)
score = stacked_ensemble.individual_score[module.metric_to_optimize]
score = -score if module.invert_metric else score
if best_score < score:
best_score = score
best_ensemble = current_ensemble[:]
current_ensemble.pop()
|
[
"def",
"start_greedy_ensemble_search",
"(",
"automated_run",
",",
"session",
",",
"path",
")",
":",
"module",
"=",
"functions",
".",
"import_string_code_as_module",
"(",
"automated_run",
".",
"source",
")",
"assert",
"module",
".",
"metric_to_optimize",
"in",
"automated_run",
".",
"base_learner_origin",
".",
"metric_generators",
"best_ensemble",
"=",
"[",
"]",
"# List containing IDs of best performing ensemble for the last round",
"secondary_learner",
"=",
"automated_run",
".",
"base_learner_origin",
".",
"return_estimator",
"(",
")",
"secondary_learner",
".",
"set_params",
"(",
"*",
"*",
"module",
".",
"secondary_learner_hyperparameters",
")",
"for",
"i",
"in",
"range",
"(",
"module",
".",
"max_num_base_learners",
")",
":",
"best_score",
"=",
"-",
"float",
"(",
"'inf'",
")",
"# Best metric for this round (not in total!)",
"current_ensemble",
"=",
"best_ensemble",
"[",
":",
"]",
"# Shallow copy of best ensemble",
"for",
"base_learner",
"in",
"session",
".",
"query",
"(",
"models",
".",
"BaseLearner",
")",
".",
"filter_by",
"(",
"job_status",
"=",
"'finished'",
")",
".",
"all",
"(",
")",
":",
"if",
"base_learner",
"in",
"current_ensemble",
":",
"# Don't append when learner is already in",
"continue",
"current_ensemble",
".",
"append",
"(",
"base_learner",
")",
"# Check if our \"best ensemble\" already exists",
"existing_ensemble",
"=",
"session",
".",
"query",
"(",
"models",
".",
"StackedEnsemble",
")",
".",
"filter_by",
"(",
"base_learner_origin_id",
"=",
"automated_run",
".",
"base_learner_origin",
".",
"id",
",",
"secondary_learner_hyperparameters",
"=",
"secondary_learner",
".",
"get_params",
"(",
")",
",",
"base_learner_ids",
"=",
"sorted",
"(",
"[",
"bl",
".",
"id",
"for",
"bl",
"in",
"current_ensemble",
"]",
")",
")",
".",
"first",
"(",
")",
"if",
"existing_ensemble",
"and",
"existing_ensemble",
".",
"job_status",
"==",
"'finished'",
":",
"score",
"=",
"existing_ensemble",
".",
"individual_score",
"[",
"module",
".",
"metric_to_optimize",
"]",
"elif",
"existing_ensemble",
"and",
"existing_ensemble",
".",
"job_status",
"!=",
"'finished'",
":",
"eval_stacked_ensemble",
"(",
"existing_ensemble",
",",
"session",
",",
"path",
")",
"score",
"=",
"existing_ensemble",
".",
"individual_score",
"[",
"module",
".",
"metric_to_optimize",
"]",
"else",
":",
"stacked_ensemble",
"=",
"models",
".",
"StackedEnsemble",
"(",
"secondary_learner_hyperparameters",
"=",
"secondary_learner",
".",
"get_params",
"(",
")",
",",
"base_learners",
"=",
"current_ensemble",
",",
"base_learner_origin",
"=",
"automated_run",
".",
"base_learner_origin",
",",
"job_status",
"=",
"'started'",
")",
"session",
".",
"add",
"(",
"stacked_ensemble",
")",
"session",
".",
"commit",
"(",
")",
"eval_stacked_ensemble",
"(",
"stacked_ensemble",
",",
"session",
",",
"path",
")",
"score",
"=",
"stacked_ensemble",
".",
"individual_score",
"[",
"module",
".",
"metric_to_optimize",
"]",
"score",
"=",
"-",
"score",
"if",
"module",
".",
"invert_metric",
"else",
"score",
"if",
"best_score",
"<",
"score",
":",
"best_score",
"=",
"score",
"best_ensemble",
"=",
"current_ensemble",
"[",
":",
"]",
"current_ensemble",
".",
"pop",
"(",
")"
] |
Starts an automated ensemble search using greedy forward model selection.
The steps for this search are adapted from "Ensemble Selection from Libraries of Models" by
Caruana.
1. Start with the empty ensemble
2. Add to the ensemble the model in the library that maximizes the ensemble's
performance on the error metric.
3. Repeat step 2 for a fixed number of iterations or until all models have been used.
Args:
automated_run (xcessiv.models.AutomatedRun): Automated run object
session: Valid SQLAlchemy session
path (str, unicode): Path to project folder
|
[
"Starts",
"an",
"automated",
"ensemble",
"search",
"using",
"greedy",
"forward",
"model",
"selection",
"."
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/automatedruns.py#L331-L398
|
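Stripped of the database bookkeeping, the loop above is plain greedy forward selection: in each round, try appending every unused model and keep the candidate that scores best, for a fixed number of rounds. A self-contained sketch of that algorithm with a toy scoring function (all values are illustrative):

def greedy_forward_selection(candidates, score_ensemble, max_size):
    """Greedily grow an ensemble, mirroring start_greedy_ensemble_search."""
    best_ensemble = []
    for _ in range(max_size):
        best_score = -float("inf")   # best score for this round only
        best_addition = None
        for candidate in candidates:
            if candidate in best_ensemble:
                continue
            score = score_ensemble(best_ensemble + [candidate])
            if score > best_score:
                best_score, best_addition = score, candidate
        if best_addition is None:
            break                    # every candidate is already in the ensemble
        best_ensemble.append(best_addition)
    return best_ensemble

# Toy example: each "model" is a number; the score rewards distinct members.
models = [3, 1, 4, 1, 5, 9]
score = lambda ens: len(set(ens)) - 0.1 * len(ens)
print(greedy_forward_selection(models, score, max_size=4))  # [3, 1, 4, 5]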
6,644
|
reiinakano/xcessiv
|
xcessiv/rqtasks.py
|
extraction_data_statistics
|
def extraction_data_statistics(path):
""" Generates data statistics for the given data extraction setup stored
in Xcessiv notebook.
This is in rqtasks.py but not as a job yet. Temporarily call this directly
while I'm figuring out Javascript lel.
Args:
path (str, unicode): Path to xcessiv notebook
"""
with functions.DBContextManager(path) as session:
extraction = session.query(models.Extraction).first()
X, y = extraction.return_main_dataset()
functions.verify_dataset(X, y)
if extraction.test_dataset['method'] == 'split_from_main':
X, X_test, y, y_test = train_test_split(
X,
y,
test_size=extraction.test_dataset['split_ratio'],
random_state=extraction.test_dataset['split_seed'],
stratify=y
)
elif extraction.test_dataset['method'] == 'source':
if 'source' not in extraction.test_dataset or not extraction.test_dataset['source']:
raise exceptions.UserError('Source is empty')
extraction_code = extraction.test_dataset["source"]
extraction_function = functions.\
import_object_from_string_code(extraction_code, "extract_test_dataset")
X_test, y_test = extraction_function()
else:
X_test, y_test = None, None
# test base learner cross-validation
extraction_code = extraction.meta_feature_generation['source']
return_splits_iterable = functions.import_object_from_string_code(
extraction_code,
'return_splits_iterable'
)
number_of_splits = 0
test_indices = []
try:
for train_idx, test_idx in return_splits_iterable(X, y):
number_of_splits += 1
test_indices.append(test_idx)
except Exception as e:
raise exceptions.UserError('User code exception', exception_message=str(e))
# preparation before testing stacked ensemble cross-validation
test_indices = np.concatenate(test_indices)
X, y = X[test_indices], y[test_indices]
# test stacked ensemble cross-validation
extraction_code = extraction.stacked_ensemble_cv['source']
return_splits_iterable = functions.import_object_from_string_code(
extraction_code,
'return_splits_iterable'
)
number_of_splits_stacked_cv = 0
try:
for train_idx, test_idx in return_splits_iterable(X, y):
number_of_splits_stacked_cv += 1
except Exception as e:
raise exceptions.UserError('User code exception', exception_message=str(e))
data_stats = dict()
data_stats['train_data_stats'] = functions.verify_dataset(X, y)
if X_test is not None:
data_stats['test_data_stats'] = functions.verify_dataset(X_test, y_test)
else:
data_stats['test_data_stats'] = None
data_stats['holdout_data_stats'] = {'number_of_splits': number_of_splits}
data_stats['stacked_ensemble_cv_stats'] = {'number_of_splits': number_of_splits_stacked_cv}
extraction.data_statistics = data_stats
session.add(extraction)
session.commit()
|
python
|
def extraction_data_statistics(path):
""" Generates data statistics for the given data extraction setup stored
in Xcessiv notebook.
This is in rqtasks.py but not as a job yet. Temporarily call this directly
while I'm figuring out Javascript lel.
Args:
path (str, unicode): Path to xcessiv notebook
"""
with functions.DBContextManager(path) as session:
extraction = session.query(models.Extraction).first()
X, y = extraction.return_main_dataset()
functions.verify_dataset(X, y)
if extraction.test_dataset['method'] == 'split_from_main':
X, X_test, y, y_test = train_test_split(
X,
y,
test_size=extraction.test_dataset['split_ratio'],
random_state=extraction.test_dataset['split_seed'],
stratify=y
)
elif extraction.test_dataset['method'] == 'source':
if 'source' not in extraction.test_dataset or not extraction.test_dataset['source']:
raise exceptions.UserError('Source is empty')
extraction_code = extraction.test_dataset["source"]
extraction_function = functions.\
import_object_from_string_code(extraction_code, "extract_test_dataset")
X_test, y_test = extraction_function()
else:
X_test, y_test = None, None
# test base learner cross-validation
extraction_code = extraction.meta_feature_generation['source']
return_splits_iterable = functions.import_object_from_string_code(
extraction_code,
'return_splits_iterable'
)
number_of_splits = 0
test_indices = []
try:
for train_idx, test_idx in return_splits_iterable(X, y):
number_of_splits += 1
test_indices.append(test_idx)
except Exception as e:
raise exceptions.UserError('User code exception', exception_message=str(e))
# preparation before testing stacked ensemble cross-validation
test_indices = np.concatenate(test_indices)
X, y = X[test_indices], y[test_indices]
# test stacked ensemble cross-validation
extraction_code = extraction.stacked_ensemble_cv['source']
return_splits_iterable = functions.import_object_from_string_code(
extraction_code,
'return_splits_iterable'
)
number_of_splits_stacked_cv = 0
try:
for train_idx, test_idx in return_splits_iterable(X, y):
number_of_splits_stacked_cv += 1
except Exception as e:
raise exceptions.UserError('User code exception', exception_message=str(e))
data_stats = dict()
data_stats['train_data_stats'] = functions.verify_dataset(X, y)
if X_test is not None:
data_stats['test_data_stats'] = functions.verify_dataset(X_test, y_test)
else:
data_stats['test_data_stats'] = None
data_stats['holdout_data_stats'] = {'number_of_splits': number_of_splits}
data_stats['stacked_ensemble_cv_stats'] = {'number_of_splits': number_of_splits_stacked_cv}
extraction.data_statistics = data_stats
session.add(extraction)
session.commit()
|
[
"def",
"extraction_data_statistics",
"(",
"path",
")",
":",
"with",
"functions",
".",
"DBContextManager",
"(",
"path",
")",
"as",
"session",
":",
"extraction",
"=",
"session",
".",
"query",
"(",
"models",
".",
"Extraction",
")",
".",
"first",
"(",
")",
"X",
",",
"y",
"=",
"extraction",
".",
"return_main_dataset",
"(",
")",
"functions",
".",
"verify_dataset",
"(",
"X",
",",
"y",
")",
"if",
"extraction",
".",
"test_dataset",
"[",
"'method'",
"]",
"==",
"'split_from_main'",
":",
"X",
",",
"X_test",
",",
"y",
",",
"y_test",
"=",
"train_test_split",
"(",
"X",
",",
"y",
",",
"test_size",
"=",
"extraction",
".",
"test_dataset",
"[",
"'split_ratio'",
"]",
",",
"random_state",
"=",
"extraction",
".",
"test_dataset",
"[",
"'split_seed'",
"]",
",",
"stratify",
"=",
"y",
")",
"elif",
"extraction",
".",
"test_dataset",
"[",
"'method'",
"]",
"==",
"'source'",
":",
"if",
"'source'",
"not",
"in",
"extraction",
".",
"test_dataset",
"or",
"not",
"extraction",
".",
"test_dataset",
"[",
"'source'",
"]",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Source is empty'",
")",
"extraction_code",
"=",
"extraction",
".",
"test_dataset",
"[",
"\"source\"",
"]",
"extraction_function",
"=",
"functions",
".",
"import_object_from_string_code",
"(",
"extraction_code",
",",
"\"extract_test_dataset\"",
")",
"X_test",
",",
"y_test",
"=",
"extraction_function",
"(",
")",
"else",
":",
"X_test",
",",
"y_test",
"=",
"None",
",",
"None",
"# test base learner cross-validation",
"extraction_code",
"=",
"extraction",
".",
"meta_feature_generation",
"[",
"'source'",
"]",
"return_splits_iterable",
"=",
"functions",
".",
"import_object_from_string_code",
"(",
"extraction_code",
",",
"'return_splits_iterable'",
")",
"number_of_splits",
"=",
"0",
"test_indices",
"=",
"[",
"]",
"try",
":",
"for",
"train_idx",
",",
"test_idx",
"in",
"return_splits_iterable",
"(",
"X",
",",
"y",
")",
":",
"number_of_splits",
"+=",
"1",
"test_indices",
".",
"append",
"(",
"test_idx",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'User code exception'",
",",
"exception_message",
"=",
"str",
"(",
"e",
")",
")",
"# preparation before testing stacked ensemble cross-validation",
"test_indices",
"=",
"np",
".",
"concatenate",
"(",
"test_indices",
")",
"X",
",",
"y",
"=",
"X",
"[",
"test_indices",
"]",
",",
"y",
"[",
"test_indices",
"]",
"# test stacked ensemble cross-validation",
"extraction_code",
"=",
"extraction",
".",
"stacked_ensemble_cv",
"[",
"'source'",
"]",
"return_splits_iterable",
"=",
"functions",
".",
"import_object_from_string_code",
"(",
"extraction_code",
",",
"'return_splits_iterable'",
")",
"number_of_splits_stacked_cv",
"=",
"0",
"try",
":",
"for",
"train_idx",
",",
"test_idx",
"in",
"return_splits_iterable",
"(",
"X",
",",
"y",
")",
":",
"number_of_splits_stacked_cv",
"+=",
"1",
"except",
"Exception",
"as",
"e",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'User code exception'",
",",
"exception_message",
"=",
"str",
"(",
"e",
")",
")",
"data_stats",
"=",
"dict",
"(",
")",
"data_stats",
"[",
"'train_data_stats'",
"]",
"=",
"functions",
".",
"verify_dataset",
"(",
"X",
",",
"y",
")",
"if",
"X_test",
"is",
"not",
"None",
":",
"data_stats",
"[",
"'test_data_stats'",
"]",
"=",
"functions",
".",
"verify_dataset",
"(",
"X_test",
",",
"y_test",
")",
"else",
":",
"data_stats",
"[",
"'test_data_stats'",
"]",
"=",
"None",
"data_stats",
"[",
"'holdout_data_stats'",
"]",
"=",
"{",
"'number_of_splits'",
":",
"number_of_splits",
"}",
"data_stats",
"[",
"'stacked_ensemble_cv_stats'",
"]",
"=",
"{",
"'number_of_splits'",
":",
"number_of_splits_stacked_cv",
"}",
"extraction",
".",
"data_statistics",
"=",
"data_stats",
"session",
".",
"add",
"(",
"extraction",
")",
"session",
".",
"commit",
"(",
")"
] |
Generates data statistics for the given data extraction setup stored
in Xcessiv notebook.
This is in rqtasks.py but not as a job yet. Temporarily call this directly
while I'm figuring out Javascript lel.
Args:
path (str, unicode): Path to xcessiv notebook
|
[
"Generates",
"data",
"statistics",
"for",
"the",
"given",
"data",
"extraction",
"setup",
"stored",
"in",
"Xcessiv",
"notebook",
"."
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/rqtasks.py#L17-L95
|
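Most of extraction_data_statistics is validation: it runs the user-supplied splitter functions, counts the folds they yield, and wraps any exception from user code. A minimal sketch of that check, with scikit-learn's StratifiedKFold standing in for the splitter defined in the notebook (dataset and fold count are illustrative):

import numpy as np
from sklearn.model_selection import StratifiedKFold

X = np.arange(40).reshape(20, 2)
y = np.array([0, 1] * 10)

def return_splits_iterable(X, y):
    """Stand-in for the user-defined splitter source stored in the notebook."""
    return StratifiedKFold(n_splits=5, shuffle=True, random_state=8).split(X, y)

number_of_splits = 0
test_indices = []
try:
    for train_idx, test_idx in return_splits_iterable(X, y):
        number_of_splits += 1
        test_indices.append(test_idx)
except Exception as e:
    raise RuntimeError("User code exception: %s" % e)

# For a proper K-fold setup every sample lands in exactly one test fold.
test_indices = np.concatenate(test_indices)
print(number_of_splits, len(np.unique(test_indices)) == len(y))  # 5 True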
6,645
|
reiinakano/xcessiv
|
xcessiv/rqtasks.py
|
generate_meta_features
|
def generate_meta_features(path, base_learner_id):
"""Generates meta-features for specified base learner
After generation of meta-features, the file is saved into the meta-features folder
Args:
path (str): Path to Xcessiv notebook
base_learner_id (str): Base learner ID
"""
with functions.DBContextManager(path) as session:
base_learner = session.query(models.BaseLearner).filter_by(id=base_learner_id).first()
if not base_learner:
raise exceptions.UserError('Base learner {} '
'does not exist'.format(base_learner_id))
base_learner.job_id = get_current_job().id
base_learner.job_status = 'started'
session.add(base_learner)
session.commit()
try:
est = base_learner.return_estimator()
extraction = session.query(models.Extraction).first()
X, y = extraction.return_train_dataset()
return_splits_iterable = functions.import_object_from_string_code(
extraction.meta_feature_generation['source'],
'return_splits_iterable'
)
meta_features_list = []
trues_list = []
for train_index, test_index in return_splits_iterable(X, y):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
est = est.fit(X_train, y_train)
meta_features_list.append(
getattr(est, base_learner.base_learner_origin.
meta_feature_generator)(X_test)
)
trues_list.append(y_test)
meta_features = np.concatenate(meta_features_list, axis=0)
y_true = np.concatenate(trues_list)
for key in base_learner.base_learner_origin.metric_generators:
metric_generator = functions.import_object_from_string_code(
base_learner.base_learner_origin.metric_generators[key],
'metric_generator'
)
base_learner.individual_score[key] = metric_generator(y_true, meta_features)
meta_features_path = base_learner.meta_features_path(path)
if not os.path.exists(os.path.dirname(meta_features_path)):
os.makedirs(os.path.dirname(meta_features_path))
np.save(meta_features_path, meta_features, allow_pickle=False)
base_learner.job_status = 'finished'
base_learner.meta_features_exists = True
session.add(base_learner)
session.commit()
except:
session.rollback()
base_learner.job_status = 'errored'
base_learner.description['error_type'] = repr(sys.exc_info()[0])
base_learner.description['error_value'] = repr(sys.exc_info()[1])
base_learner.description['error_traceback'] = \
traceback.format_exception(*sys.exc_info())
session.add(base_learner)
session.commit()
raise
|
python
|
def generate_meta_features(path, base_learner_id):
"""Generates meta-features for specified base learner
After generation of meta-features, the file is saved into the meta-features folder
Args:
path (str): Path to Xcessiv notebook
base_learner_id (str): Base learner ID
"""
with functions.DBContextManager(path) as session:
base_learner = session.query(models.BaseLearner).filter_by(id=base_learner_id).first()
if not base_learner:
raise exceptions.UserError('Base learner {} '
'does not exist'.format(base_learner_id))
base_learner.job_id = get_current_job().id
base_learner.job_status = 'started'
session.add(base_learner)
session.commit()
try:
est = base_learner.return_estimator()
extraction = session.query(models.Extraction).first()
X, y = extraction.return_train_dataset()
return_splits_iterable = functions.import_object_from_string_code(
extraction.meta_feature_generation['source'],
'return_splits_iterable'
)
meta_features_list = []
trues_list = []
for train_index, test_index in return_splits_iterable(X, y):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
est = est.fit(X_train, y_train)
meta_features_list.append(
getattr(est, base_learner.base_learner_origin.
meta_feature_generator)(X_test)
)
trues_list.append(y_test)
meta_features = np.concatenate(meta_features_list, axis=0)
y_true = np.concatenate(trues_list)
for key in base_learner.base_learner_origin.metric_generators:
metric_generator = functions.import_object_from_string_code(
base_learner.base_learner_origin.metric_generators[key],
'metric_generator'
)
base_learner.individual_score[key] = metric_generator(y_true, meta_features)
meta_features_path = base_learner.meta_features_path(path)
if not os.path.exists(os.path.dirname(meta_features_path)):
os.makedirs(os.path.dirname(meta_features_path))
np.save(meta_features_path, meta_features, allow_pickle=False)
base_learner.job_status = 'finished'
base_learner.meta_features_exists = True
session.add(base_learner)
session.commit()
except:
session.rollback()
base_learner.job_status = 'errored'
base_learner.description['error_type'] = repr(sys.exc_info()[0])
base_learner.description['error_value'] = repr(sys.exc_info()[1])
base_learner.description['error_traceback'] = \
traceback.format_exception(*sys.exc_info())
session.add(base_learner)
session.commit()
raise
|
[
"def",
"generate_meta_features",
"(",
"path",
",",
"base_learner_id",
")",
":",
"with",
"functions",
".",
"DBContextManager",
"(",
"path",
")",
"as",
"session",
":",
"base_learner",
"=",
"session",
".",
"query",
"(",
"models",
".",
"BaseLearner",
")",
".",
"filter_by",
"(",
"id",
"=",
"base_learner_id",
")",
".",
"first",
"(",
")",
"if",
"not",
"base_learner",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Base learner {} '",
"'does not exist'",
".",
"format",
"(",
"base_learner_id",
")",
")",
"base_learner",
".",
"job_id",
"=",
"get_current_job",
"(",
")",
".",
"id",
"base_learner",
".",
"job_status",
"=",
"'started'",
"session",
".",
"add",
"(",
"base_learner",
")",
"session",
".",
"commit",
"(",
")",
"try",
":",
"est",
"=",
"base_learner",
".",
"return_estimator",
"(",
")",
"extraction",
"=",
"session",
".",
"query",
"(",
"models",
".",
"Extraction",
")",
".",
"first",
"(",
")",
"X",
",",
"y",
"=",
"extraction",
".",
"return_train_dataset",
"(",
")",
"return_splits_iterable",
"=",
"functions",
".",
"import_object_from_string_code",
"(",
"extraction",
".",
"meta_feature_generation",
"[",
"'source'",
"]",
",",
"'return_splits_iterable'",
")",
"meta_features_list",
"=",
"[",
"]",
"trues_list",
"=",
"[",
"]",
"for",
"train_index",
",",
"test_index",
"in",
"return_splits_iterable",
"(",
"X",
",",
"y",
")",
":",
"X_train",
",",
"X_test",
"=",
"X",
"[",
"train_index",
"]",
",",
"X",
"[",
"test_index",
"]",
"y_train",
",",
"y_test",
"=",
"y",
"[",
"train_index",
"]",
",",
"y",
"[",
"test_index",
"]",
"est",
"=",
"est",
".",
"fit",
"(",
"X_train",
",",
"y_train",
")",
"meta_features_list",
".",
"append",
"(",
"getattr",
"(",
"est",
",",
"base_learner",
".",
"base_learner_origin",
".",
"meta_feature_generator",
")",
"(",
"X_test",
")",
")",
"trues_list",
".",
"append",
"(",
"y_test",
")",
"meta_features",
"=",
"np",
".",
"concatenate",
"(",
"meta_features_list",
",",
"axis",
"=",
"0",
")",
"y_true",
"=",
"np",
".",
"concatenate",
"(",
"trues_list",
")",
"for",
"key",
"in",
"base_learner",
".",
"base_learner_origin",
".",
"metric_generators",
":",
"metric_generator",
"=",
"functions",
".",
"import_object_from_string_code",
"(",
"base_learner",
".",
"base_learner_origin",
".",
"metric_generators",
"[",
"key",
"]",
",",
"'metric_generator'",
")",
"base_learner",
".",
"individual_score",
"[",
"key",
"]",
"=",
"metric_generator",
"(",
"y_true",
",",
"meta_features",
")",
"meta_features_path",
"=",
"base_learner",
".",
"meta_features_path",
"(",
"path",
")",
"if",
"not",
"os",
".",
"path",
".",
"exists",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"meta_features_path",
")",
")",
":",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"meta_features_path",
")",
")",
"np",
".",
"save",
"(",
"meta_features_path",
",",
"meta_features",
",",
"allow_pickle",
"=",
"False",
")",
"base_learner",
".",
"job_status",
"=",
"'finished'",
"base_learner",
".",
"meta_features_exists",
"=",
"True",
"session",
".",
"add",
"(",
"base_learner",
")",
"session",
".",
"commit",
"(",
")",
"except",
":",
"session",
".",
"rollback",
"(",
")",
"base_learner",
".",
"job_status",
"=",
"'errored'",
"base_learner",
".",
"description",
"[",
"'error_type'",
"]",
"=",
"repr",
"(",
"sys",
".",
"exc_info",
"(",
")",
"[",
"0",
"]",
")",
"base_learner",
".",
"description",
"[",
"'error_value'",
"]",
"=",
"repr",
"(",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
")",
"base_learner",
".",
"description",
"[",
"'error_traceback'",
"]",
"=",
"traceback",
".",
"format_exception",
"(",
"*",
"sys",
".",
"exc_info",
"(",
")",
")",
"session",
".",
"add",
"(",
"base_learner",
")",
"session",
".",
"commit",
"(",
")",
"raise"
] |
Generates meta-features for specified base learner
After generation of meta-features, the file is saved into the meta-features folder
Args:
path (str): Path to Xcessiv notebook
base_learner_id (str): Base learner ID
|
[
"Generates",
"meta",
"-",
"features",
"for",
"specified",
"base",
"learner"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/rqtasks.py#L99-L171
|
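The generate_meta_features task above builds its meta-features out-of-fold: for every cross-validation split it fits the base learner on the training fold and records the meta-feature generator's output on the held-out fold, so each training sample gets a prediction from a model that never saw it. A minimal standalone sketch of that idea, assuming scikit-learn and using predict_proba as the meta-feature generator (the estimator and dataset here are illustrative, not Xcessiv's own API):

import numpy as np
from sklearn.datasets import load_iris
from sklearn.linear_model import LogisticRegression
from sklearn.model_selection import StratifiedKFold

X, y = load_iris(return_X_y=True)
est = LogisticRegression(max_iter=200)

meta_features_list, trues_list = [], []
for train_index, test_index in StratifiedKFold(n_splits=5).split(X, y):
    # fit on the training fold, generate meta-features on the held-out fold
    est.fit(X[train_index], y[train_index])
    meta_features_list.append(est.predict_proba(X[test_index]))
    trues_list.append(y[test_index])

meta_features = np.concatenate(meta_features_list, axis=0)  # out-of-fold class probabilities
y_true = np.concatenate(trues_list)                         # labels aligned with meta_features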
6,646
|
reiinakano/xcessiv
|
xcessiv/rqtasks.py
|
start_automated_run
|
def start_automated_run(path, automated_run_id):
"""Starts automated run. This will automatically create
base learners until the run finishes or errors out.
Args:
path (str): Path to Xcessiv notebook
automated_run_id (str): Automated Run ID
"""
with functions.DBContextManager(path) as session:
automated_run = session.query(models.AutomatedRun).filter_by(id=automated_run_id).first()
if not automated_run:
raise exceptions.UserError('Automated run {} '
'does not exist'.format(automated_run_id))
automated_run.job_id = get_current_job().id
automated_run.job_status = 'started'
session.add(automated_run)
session.commit()
try:
if automated_run.category == 'bayes':
automatedruns.start_naive_bayes(automated_run, session, path)
elif automated_run.category == 'tpot':
automatedruns.start_tpot(automated_run, session, path)
elif automated_run.category == 'greedy_ensemble_search':
automatedruns.start_greedy_ensemble_search(automated_run, session, path)
else:
raise Exception('Something went wrong. Invalid category for automated run')
automated_run.job_status = 'finished'
session.add(automated_run)
session.commit()
except:
session.rollback()
automated_run.job_status = 'errored'
automated_run.description['error_type'] = repr(sys.exc_info()[0])
automated_run.description['error_value'] = repr(sys.exc_info()[1])
automated_run.description['error_traceback'] = \
traceback.format_exception(*sys.exc_info())
session.add(automated_run)
session.commit()
raise
|
python
|
def start_automated_run(path, automated_run_id):
"""Starts automated run. This will automatically create
base learners until the run finishes or errors out.
Args:
path (str): Path to Xcessiv notebook
automated_run_id (str): Automated Run ID
"""
with functions.DBContextManager(path) as session:
automated_run = session.query(models.AutomatedRun).filter_by(id=automated_run_id).first()
if not automated_run:
raise exceptions.UserError('Automated run {} '
'does not exist'.format(automated_run_id))
automated_run.job_id = get_current_job().id
automated_run.job_status = 'started'
session.add(automated_run)
session.commit()
try:
if automated_run.category == 'bayes':
automatedruns.start_naive_bayes(automated_run, session, path)
elif automated_run.category == 'tpot':
automatedruns.start_tpot(automated_run, session, path)
elif automated_run.category == 'greedy_ensemble_search':
automatedruns.start_greedy_ensemble_search(automated_run, session, path)
else:
raise Exception('Something went wrong. Invalid category for automated run')
automated_run.job_status = 'finished'
session.add(automated_run)
session.commit()
except:
session.rollback()
automated_run.job_status = 'errored'
automated_run.description['error_type'] = repr(sys.exc_info()[0])
automated_run.description['error_value'] = repr(sys.exc_info()[1])
automated_run.description['error_traceback'] = \
traceback.format_exception(*sys.exc_info())
session.add(automated_run)
session.commit()
raise
|
[
"def",
"start_automated_run",
"(",
"path",
",",
"automated_run_id",
")",
":",
"with",
"functions",
".",
"DBContextManager",
"(",
"path",
")",
"as",
"session",
":",
"automated_run",
"=",
"session",
".",
"query",
"(",
"models",
".",
"AutomatedRun",
")",
".",
"filter_by",
"(",
"id",
"=",
"automated_run_id",
")",
".",
"first",
"(",
")",
"if",
"not",
"automated_run",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Automated run {} '",
"'does not exist'",
".",
"format",
"(",
"automated_run_id",
")",
")",
"automated_run",
".",
"job_id",
"=",
"get_current_job",
"(",
")",
".",
"id",
"automated_run",
".",
"job_status",
"=",
"'started'",
"session",
".",
"add",
"(",
"automated_run",
")",
"session",
".",
"commit",
"(",
")",
"try",
":",
"if",
"automated_run",
".",
"category",
"==",
"'bayes'",
":",
"automatedruns",
".",
"start_naive_bayes",
"(",
"automated_run",
",",
"session",
",",
"path",
")",
"elif",
"automated_run",
".",
"category",
"==",
"'tpot'",
":",
"automatedruns",
".",
"start_tpot",
"(",
"automated_run",
",",
"session",
",",
"path",
")",
"elif",
"automated_run",
".",
"category",
"==",
"'greedy_ensemble_search'",
":",
"automatedruns",
".",
"start_greedy_ensemble_search",
"(",
"automated_run",
",",
"session",
",",
"path",
")",
"else",
":",
"raise",
"Exception",
"(",
"'Something went wrong. Invalid category for automated run'",
")",
"automated_run",
".",
"job_status",
"=",
"'finished'",
"session",
".",
"add",
"(",
"automated_run",
")",
"session",
".",
"commit",
"(",
")",
"except",
":",
"session",
".",
"rollback",
"(",
")",
"automated_run",
".",
"job_status",
"=",
"'errored'",
"automated_run",
".",
"description",
"[",
"'error_type'",
"]",
"=",
"repr",
"(",
"sys",
".",
"exc_info",
"(",
")",
"[",
"0",
"]",
")",
"automated_run",
".",
"description",
"[",
"'error_value'",
"]",
"=",
"repr",
"(",
"sys",
".",
"exc_info",
"(",
")",
"[",
"1",
"]",
")",
"automated_run",
".",
"description",
"[",
"'error_traceback'",
"]",
"=",
"traceback",
".",
"format_exception",
"(",
"*",
"sys",
".",
"exc_info",
"(",
")",
")",
"session",
".",
"add",
"(",
"automated_run",
")",
"session",
".",
"commit",
"(",
")",
"raise"
] |
Starts automated run. This will automatically create
base learners until the run finishes or errors out.
Args:
path (str): Path to Xcessiv notebook
automated_run_id (str): Automated Run ID
|
[
"Starts",
"automated",
"run",
".",
"This",
"will",
"automatically",
"create",
"base",
"learners",
"until",
"the",
"run",
"finishes",
"or",
"errors",
"out",
"."
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/rqtasks.py#L175-L221
|
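start_automated_run is written to execute inside a worker process: it calls get_current_job(), which in this project appears to come from RQ. A hedged sketch of how such a task might be enqueued from application code, assuming a local Redis server and that the task is importable as shown; the notebook path and run id are placeholders:

from redis import Redis
from rq import Queue

from xcessiv.rqtasks import start_automated_run  # assumed import path

queue = Queue(connection=Redis())  # default RQ queue on a local Redis instance
# Placeholder arguments: a real Xcessiv notebook path and an existing automated run id.
job = queue.enqueue(start_automated_run, '/path/to/xcessiv_notebook', 1)
print(job.id)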
6,647
|
reiinakano/xcessiv
|
xcessiv/functions.py
|
hash_file
|
def hash_file(path, block_size=65536):
"""Returns SHA256 checksum of a file
Args:
path (string): Absolute file path of file to hash
block_size (int, optional): Number of bytes to read per block
"""
sha256 = hashlib.sha256()
with open(path, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
sha256.update(block)
return sha256.hexdigest()
|
python
|
def hash_file(path, block_size=65536):
"""Returns SHA256 checksum of a file
Args:
path (string): Absolute file path of file to hash
block_size (int, optional): Number of bytes to read per block
"""
sha256 = hashlib.sha256()
with open(path, 'rb') as f:
for block in iter(lambda: f.read(block_size), b''):
sha256.update(block)
return sha256.hexdigest()
|
[
"def",
"hash_file",
"(",
"path",
",",
"block_size",
"=",
"65536",
")",
":",
"sha256",
"=",
"hashlib",
".",
"sha256",
"(",
")",
"with",
"open",
"(",
"path",
",",
"'rb'",
")",
"as",
"f",
":",
"for",
"block",
"in",
"iter",
"(",
"lambda",
":",
"f",
".",
"read",
"(",
"block_size",
")",
",",
"b''",
")",
":",
"sha256",
".",
"update",
"(",
"block",
")",
"return",
"sha256",
".",
"hexdigest",
"(",
")"
] |
Returns SHA256 checksum of a file
Args:
path (string): Absolute file path of file to hash
block_size (int, optional): Number of bytes to read per block
|
[
"Returns",
"SHA256",
"checksum",
"of",
"a",
"file"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/functions.py#L16-L28
|
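A short usage sketch for hash_file, assuming it is importable from xcessiv.functions; the file path is a placeholder. Reading in 64 KiB blocks keeps memory use constant even for very large files:

from xcessiv.functions import hash_file

checksum = hash_file('/path/to/some/archive.zip')  # placeholder path
print(checksum)  # 64-character hexadecimal SHA256 digest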
6,648
|
reiinakano/xcessiv
|
xcessiv/functions.py
|
import_object_from_path
|
def import_object_from_path(path, object):
"""Used to import an object from an absolute path.
This function takes an absolute path and imports it as a Python module.
It then returns the object with name `object` from the imported module.
Args:
path (string): Absolute file path of .py file to import
object (string): Name of object to extract from imported module
"""
with open(path) as f:
return import_object_from_string_code(f.read(), object)
|
python
|
def import_object_from_path(path, object):
"""Used to import an object from an absolute path.
This function takes an absolute path and imports it as a Python module.
It then returns the object with name `object` from the imported module.
Args:
path (string): Absolute file path of .py file to import
object (string): Name of object to extract from imported module
"""
with open(path) as f:
return import_object_from_string_code(f.read(), object)
|
[
"def",
"import_object_from_path",
"(",
"path",
",",
"object",
")",
":",
"with",
"open",
"(",
"path",
")",
"as",
"f",
":",
"return",
"import_object_from_string_code",
"(",
"f",
".",
"read",
"(",
")",
",",
"object",
")"
] |
Used to import an object from an absolute path.
This function takes an absolute path and imports it as a Python module.
It then returns the object with name `object` from the imported module.
Args:
path (string): Absolute file path of .py file to import
object (string): Name of object to extract from imported module
|
[
"Used",
"to",
"import",
"an",
"object",
"from",
"an",
"absolute",
"path",
"."
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/functions.py#L36-L48
|
6,649
|
reiinakano/xcessiv
|
xcessiv/functions.py
|
import_object_from_string_code
|
def import_object_from_string_code(code, object):
"""Used to import an object from arbitrary passed code.
Passed in code is treated as a module and is imported and added
to `sys.modules` with its SHA256 hash as key.
Args:
code (string): Python code to import as module
object (string): Name of object to extract from imported module
"""
sha256 = hashlib.sha256(code.encode('UTF-8')).hexdigest()
module = imp.new_module(sha256)
try:
exec_(code, module.__dict__)
except Exception as e:
raise exceptions.UserError('User code exception', exception_message=str(e))
sys.modules[sha256] = module
try:
return getattr(module, object)
except AttributeError:
raise exceptions.UserError("{} not found in code".format(object))
|
python
|
def import_object_from_string_code(code, object):
"""Used to import an object from arbitrary passed code.
Passed in code is treated as a module and is imported and added
to `sys.modules` with its SHA256 hash as key.
Args:
code (string): Python code to import as module
object (string): Name of object to extract from imported module
"""
sha256 = hashlib.sha256(code.encode('UTF-8')).hexdigest()
module = imp.new_module(sha256)
try:
exec_(code, module.__dict__)
except Exception as e:
raise exceptions.UserError('User code exception', exception_message=str(e))
sys.modules[sha256] = module
try:
return getattr(module, object)
except AttributeError:
raise exceptions.UserError("{} not found in code".format(object))
|
[
"def",
"import_object_from_string_code",
"(",
"code",
",",
"object",
")",
":",
"sha256",
"=",
"hashlib",
".",
"sha256",
"(",
"code",
".",
"encode",
"(",
"'UTF-8'",
")",
")",
".",
"hexdigest",
"(",
")",
"module",
"=",
"imp",
".",
"new_module",
"(",
"sha256",
")",
"try",
":",
"exec_",
"(",
"code",
",",
"module",
".",
"__dict__",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'User code exception'",
",",
"exception_message",
"=",
"str",
"(",
"e",
")",
")",
"sys",
".",
"modules",
"[",
"sha256",
"]",
"=",
"module",
"try",
":",
"return",
"getattr",
"(",
"module",
",",
"object",
")",
"except",
"AttributeError",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"\"{} not found in code\"",
".",
"format",
"(",
"object",
")",
")"
] |
Used to import an object from arbitrary passed code.
Passed in code is treated as a module and is imported and added
to `sys.modules` with its SHA256 hash as key.
Args:
code (string): Python code to import as module
object (string): Name of object to extract from imported module
|
[
"Used",
"to",
"import",
"an",
"object",
"from",
"arbitrary",
"passed",
"code",
"."
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/functions.py#L51-L72
|
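A sketch of how import_object_from_string_code can pull a named object out of user-supplied source, assuming the import below. The source string follows the same convention the metric generators above rely on, where the extracted callable is named metric_generator:

from xcessiv.functions import import_object_from_string_code

code = '''
import numpy as np
from sklearn.metrics import accuracy_score

def metric_generator(y_true, probas):
    # collapse class probabilities to hard labels before scoring
    return accuracy_score(y_true, np.argmax(probas, axis=1))
'''

metric_generator = import_object_from_string_code(code, 'metric_generator')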
6,650
|
reiinakano/xcessiv
|
xcessiv/functions.py
|
import_string_code_as_module
|
def import_string_code_as_module(code):
"""Used to run arbitrary passed code as a module
Args:
code (string): Python code to import as module
Returns:
module: Python module
"""
sha256 = hashlib.sha256(code.encode('UTF-8')).hexdigest()
module = imp.new_module(sha256)
try:
exec_(code, module.__dict__)
except Exception as e:
raise exceptions.UserError('User code exception', exception_message=str(e))
sys.modules[sha256] = module
return module
|
python
|
def import_string_code_as_module(code):
"""Used to run arbitrary passed code as a module
Args:
code (string): Python code to import as module
Returns:
module: Python module
"""
sha256 = hashlib.sha256(code.encode('UTF-8')).hexdigest()
module = imp.new_module(sha256)
try:
exec_(code, module.__dict__)
except Exception as e:
raise exceptions.UserError('User code exception', exception_message=str(e))
sys.modules[sha256] = module
return module
|
[
"def",
"import_string_code_as_module",
"(",
"code",
")",
":",
"sha256",
"=",
"hashlib",
".",
"sha256",
"(",
"code",
".",
"encode",
"(",
"'UTF-8'",
")",
")",
".",
"hexdigest",
"(",
")",
"module",
"=",
"imp",
".",
"new_module",
"(",
"sha256",
")",
"try",
":",
"exec_",
"(",
"code",
",",
"module",
".",
"__dict__",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'User code exception'",
",",
"exception_message",
"=",
"str",
"(",
"e",
")",
")",
"sys",
".",
"modules",
"[",
"sha256",
"]",
"=",
"module",
"return",
"module"
] |
Used to run arbitrary passed code as a module
Args:
code (string): Python code to import as module
Returns:
module: Python module
|
[
"Used",
"to",
"run",
"arbitrary",
"passed",
"code",
"as",
"a",
"module"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/functions.py#L75-L91
|
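import_string_code_as_module differs from the previous helper only in returning the whole imported module rather than a single attribute. A brief sketch, with the same import assumption:

from xcessiv.functions import import_string_code_as_module

module = import_string_code_as_module('ANSWER = 42\n\ndef double(x):\n    return 2 * x\n')
print(module.ANSWER)      # 42
print(module.double(21))  # 42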
6,651
|
reiinakano/xcessiv
|
xcessiv/functions.py
|
verify_dataset
|
def verify_dataset(X, y):
"""Verifies if a dataset is valid for use i.e. scikit-learn format
Used to verify a dataset by returning the shape of the provided features and
labels. This also provides a quick and dirty check on the capability of the
host machine to process the data.
Args:
X (array-like): Features array
y (array-like): Label array
Returns:
X_shape (2-tuple of int): Shape of X returned
y_shape (1-tuple of int): Shape of y returned
Raises:
exceptions.UserError: `X_shape` must be of length 2 and `y_shape` must be of
length 1. `X` must have the same number of elements as `y`
i.e. X_shape[0] == y_shape[0]. If any of these conditions are not met,
a UserError is raised.
"""
X_shape, y_shape = np.array(X).shape, np.array(y).shape
if len(X_shape) != 2:
raise exceptions.UserError("X must be 2-dimensional array")
if len(y_shape) != 1:
raise exceptions.UserError("y must be 1-dimensional array")
if X_shape[0] != y_shape[0]:
raise exceptions.UserError("X must have same number of elements as y")
return dict(
features_shape=X_shape,
labels_shape=y_shape
)
|
python
|
def verify_dataset(X, y):
"""Verifies if a dataset is valid for use i.e. scikit-learn format
Used to verify a dataset by returning the shape of the provided features and
labels. This also provides a quick and dirty check on the capability of the
host machine to process the data.
Args:
X (array-like): Features array
y (array-like): Label array
Returns:
X_shape (2-tuple of int): Shape of X returned
y_shape (1-tuple of int): Shape of y returned
Raises:
exceptions.UserError: `X_shape` must be of length 2 and `y_shape` must be of
length 1. `X` must have the same number of elements as `y`
i.e. X_shape[0] == y_shape[0]. If any of these conditions are not met,
a UserError is raised.
"""
X_shape, y_shape = np.array(X).shape, np.array(y).shape
if len(X_shape) != 2:
raise exceptions.UserError("X must be 2-dimensional array")
if len(y_shape) != 1:
raise exceptions.UserError("y must be 1-dimensional array")
if X_shape[0] != y_shape[0]:
raise exceptions.UserError("X must have same number of elements as y")
return dict(
features_shape=X_shape,
labels_shape=y_shape
)
|
[
"def",
"verify_dataset",
"(",
"X",
",",
"y",
")",
":",
"X_shape",
",",
"y_shape",
"=",
"np",
".",
"array",
"(",
"X",
")",
".",
"shape",
",",
"np",
".",
"array",
"(",
"y",
")",
".",
"shape",
"if",
"len",
"(",
"X_shape",
")",
"!=",
"2",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"\"X must be 2-dimensional array\"",
")",
"if",
"len",
"(",
"y_shape",
")",
"!=",
"1",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"\"y must be 1-dimensional array\"",
")",
"if",
"X_shape",
"[",
"0",
"]",
"!=",
"y_shape",
"[",
"0",
"]",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"\"X must have same number of elements as y\"",
")",
"return",
"dict",
"(",
"features_shape",
"=",
"X_shape",
",",
"labels_shape",
"=",
"y_shape",
")"
] |
Verifies if a dataset is valid for use i.e. scikit-learn format
Used to verify a dataset by returning the shape of the provided features and
labels. This also provides a quick and dirty check on the capability of the
host machine to process the data.
Args:
X (array-like): Features array
y (array-like): Label array
Returns:
X_shape (2-tuple of int): Shape of X returned
y_shape (1-tuple of int): Shape of y returned
Raises:
exceptions.UserError: `X_shape` must be of length 2 and `y_shape` must be of
length 1. `X` must have the same number of elements as `y`
i.e. X_shape[0] == y_shape[0]. If any of these conditions are not met,
a UserError is raised.
|
[
"Verifies",
"if",
"a",
"dataset",
"is",
"valid",
"for",
"use",
"i",
".",
"e",
".",
"scikit",
"-",
"learn",
"format"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/functions.py#L94-L127
|
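A quick sketch of verify_dataset on a toy array pair, assuming the import below; a 1-dimensional X or mismatched lengths would raise exceptions.UserError instead of returning:

import numpy as np
from xcessiv.functions import verify_dataset

X = np.random.rand(10, 3)             # 10 samples, 3 features
y = np.random.randint(0, 2, size=10)  # 10 binary labels

print(verify_dataset(X, y))  # {'features_shape': (10, 3), 'labels_shape': (10,)}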
6,652
|
reiinakano/xcessiv
|
xcessiv/functions.py
|
make_serializable
|
def make_serializable(json):
"""This function ensures that the dictionary is JSON serializable. If not,
keys with non-serializable values are removed from the return value.
Args:
json (dict): Dictionary to convert to serializable
Returns:
new_dict (dict): New dictionary with non JSON serializable values removed
"""
new_dict = dict()
for key, value in iteritems(json):
if is_valid_json(value):
new_dict[key] = value
return new_dict
|
python
|
def make_serializable(json):
"""This function ensures that the dictionary is JSON serializable. If not,
keys with non-serializable values are removed from the return value.
Args:
json (dict): Dictionary to convert to serializable
Returns:
new_dict (dict): New dictionary with non JSON serializable values removed
"""
new_dict = dict()
for key, value in iteritems(json):
if is_valid_json(value):
new_dict[key] = value
return new_dict
|
[
"def",
"make_serializable",
"(",
"json",
")",
":",
"new_dict",
"=",
"dict",
"(",
")",
"for",
"key",
",",
"value",
"in",
"iteritems",
"(",
"json",
")",
":",
"if",
"is_valid_json",
"(",
"value",
")",
":",
"new_dict",
"[",
"key",
"]",
"=",
"value",
"return",
"new_dict"
] |
This function ensures that the dictionary is JSON serializable. If not,
keys with non-serializable values are removed from the return value.
Args:
json (dict): Dictionary to convert to serializable
Returns:
new_dict (dict): New dictionary with non JSON serializable values removed
|
[
"This",
"function",
"ensures",
"that",
"the",
"dictionary",
"is",
"JSON",
"serializable",
".",
"If",
"not",
"keys",
"with",
"non",
"-",
"serializable",
"values",
"are",
"removed",
"from",
"the",
"return",
"value",
"."
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/functions.py#L143-L158
|
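make_serializable leans on the companion helper is_valid_json (defined elsewhere in functions.py and not shown here) to drop values that cannot be dumped to JSON. A hedged sketch of the intended effect on a typical get_params() dictionary:

import numpy as np
from xcessiv.functions import make_serializable

params = {'n_estimators': 10, 'n_jobs': 1, 'random_state': np.random.RandomState(8)}
# The RandomState object is not JSON serializable, so it should be filtered out,
# leaving only {'n_estimators': 10, 'n_jobs': 1}.
print(make_serializable(params))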
6,653
|
reiinakano/xcessiv
|
xcessiv/functions.py
|
get_sample_dataset
|
def get_sample_dataset(dataset_properties):
"""Returns sample dataset
Args:
dataset_properties (dict): Dictionary corresponding to the properties of the dataset
used to verify the estimator and metric generators.
Returns:
X (array-like): Features array
y (array-like): Labels array
splits (iterator): This is an iterator that returns train test splits for
cross-validation purposes on ``X`` and ``y``.
"""
kwargs = dataset_properties.copy()
data_type = kwargs.pop('type')
if data_type == 'multiclass':
try:
X, y = datasets.make_classification(random_state=8, **kwargs)
splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)
except Exception as e:
raise exceptions.UserError(repr(e))
elif data_type == 'iris':
X, y = datasets.load_iris(return_X_y=True)
splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)
elif data_type == 'mnist':
X, y = datasets.load_digits(return_X_y=True)
splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)
elif data_type == 'breast_cancer':
X, y = datasets.load_breast_cancer(return_X_y=True)
splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)
elif data_type == 'boston':
X, y = datasets.load_boston(return_X_y=True)
splits = model_selection.KFold(n_splits=2, random_state=8).split(X)
elif data_type == 'diabetes':
X, y = datasets.load_diabetes(return_X_y=True)
splits = model_selection.KFold(n_splits=2, random_state=8).split(X)
else:
raise exceptions.UserError('Unknown dataset type {}'.format(dataset_properties['type']))
return X, y, splits
|
python
|
def get_sample_dataset(dataset_properties):
"""Returns sample dataset
Args:
dataset_properties (dict): Dictionary corresponding to the properties of the dataset
used to verify the estimator and metric generators.
Returns:
X (array-like): Features array
y (array-like): Labels array
splits (iterator): This is an iterator that returns train test splits for
cross-validation purposes on ``X`` and ``y``.
"""
kwargs = dataset_properties.copy()
data_type = kwargs.pop('type')
if data_type == 'multiclass':
try:
X, y = datasets.make_classification(random_state=8, **kwargs)
splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)
except Exception as e:
raise exceptions.UserError(repr(e))
elif data_type == 'iris':
X, y = datasets.load_iris(return_X_y=True)
splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)
elif data_type == 'mnist':
X, y = datasets.load_digits(return_X_y=True)
splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)
elif data_type == 'breast_cancer':
X, y = datasets.load_breast_cancer(return_X_y=True)
splits = model_selection.StratifiedKFold(n_splits=2, random_state=8).split(X, y)
elif data_type == 'boston':
X, y = datasets.load_boston(return_X_y=True)
splits = model_selection.KFold(n_splits=2, random_state=8).split(X)
elif data_type == 'diabetes':
X, y = datasets.load_diabetes(return_X_y=True)
splits = model_selection.KFold(n_splits=2, random_state=8).split(X)
else:
raise exceptions.UserError('Unknown dataset type {}'.format(dataset_properties['type']))
return X, y, splits
|
[
"def",
"get_sample_dataset",
"(",
"dataset_properties",
")",
":",
"kwargs",
"=",
"dataset_properties",
".",
"copy",
"(",
")",
"data_type",
"=",
"kwargs",
".",
"pop",
"(",
"'type'",
")",
"if",
"data_type",
"==",
"'multiclass'",
":",
"try",
":",
"X",
",",
"y",
"=",
"datasets",
".",
"make_classification",
"(",
"random_state",
"=",
"8",
",",
"*",
"*",
"kwargs",
")",
"splits",
"=",
"model_selection",
".",
"StratifiedKFold",
"(",
"n_splits",
"=",
"2",
",",
"random_state",
"=",
"8",
")",
".",
"split",
"(",
"X",
",",
"y",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"repr",
"(",
"e",
")",
")",
"elif",
"data_type",
"==",
"'iris'",
":",
"X",
",",
"y",
"=",
"datasets",
".",
"load_iris",
"(",
"return_X_y",
"=",
"True",
")",
"splits",
"=",
"model_selection",
".",
"StratifiedKFold",
"(",
"n_splits",
"=",
"2",
",",
"random_state",
"=",
"8",
")",
".",
"split",
"(",
"X",
",",
"y",
")",
"elif",
"data_type",
"==",
"'mnist'",
":",
"X",
",",
"y",
"=",
"datasets",
".",
"load_digits",
"(",
"return_X_y",
"=",
"True",
")",
"splits",
"=",
"model_selection",
".",
"StratifiedKFold",
"(",
"n_splits",
"=",
"2",
",",
"random_state",
"=",
"8",
")",
".",
"split",
"(",
"X",
",",
"y",
")",
"elif",
"data_type",
"==",
"'breast_cancer'",
":",
"X",
",",
"y",
"=",
"datasets",
".",
"load_breast_cancer",
"(",
"return_X_y",
"=",
"True",
")",
"splits",
"=",
"model_selection",
".",
"StratifiedKFold",
"(",
"n_splits",
"=",
"2",
",",
"random_state",
"=",
"8",
")",
".",
"split",
"(",
"X",
",",
"y",
")",
"elif",
"data_type",
"==",
"'boston'",
":",
"X",
",",
"y",
"=",
"datasets",
".",
"load_boston",
"(",
"return_X_y",
"=",
"True",
")",
"splits",
"=",
"model_selection",
".",
"KFold",
"(",
"n_splits",
"=",
"2",
",",
"random_state",
"=",
"8",
")",
".",
"split",
"(",
"X",
")",
"elif",
"data_type",
"==",
"'diabetes'",
":",
"X",
",",
"y",
"=",
"datasets",
".",
"load_diabetes",
"(",
"return_X_y",
"=",
"True",
")",
"splits",
"=",
"model_selection",
".",
"KFold",
"(",
"n_splits",
"=",
"2",
",",
"random_state",
"=",
"8",
")",
".",
"split",
"(",
"X",
")",
"else",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Unknown dataset type {}'",
".",
"format",
"(",
"dataset_properties",
"[",
"'type'",
"]",
")",
")",
"return",
"X",
",",
"y",
",",
"splits"
] |
Returns sample dataset
Args:
dataset_properties (dict): Dictionary corresponding to the properties of the dataset
used to verify the estimator and metric generators.
Returns:
X (array-like): Features array
y (array-like): Labels array
splits (iterator): This is an iterator that returns train test splits for
cross-validation purposes on ``X`` and ``y``.
|
[
"Returns",
"sample",
"dataset"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/functions.py#L161-L201
|
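A sketch of requesting one of the built-in sample datasets, assuming the import below; for the 'multiclass' type any extra keys are forwarded to sklearn.datasets.make_classification:

from xcessiv.functions import get_sample_dataset

dataset_properties = {'type': 'multiclass', 'n_samples': 200, 'n_classes': 3, 'n_informative': 4}
X, y, splits = get_sample_dataset(dataset_properties)

for train_index, test_index in splits:  # two stratified folds
    print(X[train_index].shape, X[test_index].shape)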
6,654
|
reiinakano/xcessiv
|
xcessiv/functions.py
|
verify_estimator_class
|
def verify_estimator_class(est, meta_feature_generator, metric_generators, dataset_properties):
"""Verify if estimator object is valid for use i.e. scikit-learn format
Verifies if an estimator is fit for use by testing for existence of methods
such as `get_params` and `set_params`. Must also be able to properly fit on
and predict a small sample dataset.
Args:
est: Estimator object with `fit`, `predict`/`predict_proba`,
`get_params`, and `set_params` methods.
meta_feature_generator (str, unicode): Name of the method used by the estimator
to generate meta-features on a set of data.
metric_generators (dict): Dictionary of key value pairs where the key
signifies the name of the metric calculated and the value is a list
of strings that, when concatenated, form Python code containing the
function used to calculate the metric from true values and the
meta-features generated.
dataset_properties (dict): Dictionary corresponding to the properties of the dataset
used to verify the estimator and metric generators.
Returns:
performance_dict (mapping): Mapping from performance metric
name to performance metric value e.g. "Accuracy": 0.963
hyperparameters (mapping): Mapping from the estimator's hyperparameters to
their default values e.g. "n_estimators": 10
"""
X, y, splits = get_sample_dataset(dataset_properties)
if not hasattr(est, "get_params"):
raise exceptions.UserError('Estimator does not have get_params method')
if not hasattr(est, "set_params"):
raise exceptions.UserError('Estimator does not have set_params method')
if not hasattr(est, meta_feature_generator):
raise exceptions.UserError('Estimator does not have meta-feature generator'
' {}'.format(meta_feature_generator))
performance_dict = dict()
true_labels = []
preds = []
try:
for train_index, test_index in splits:
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
est.fit(X_train, y_train)
true_labels.append(y_test)
preds.append(getattr(est, meta_feature_generator)(X_test))
true_labels = np.concatenate(true_labels)
preds = np.concatenate(preds, axis=0)
except Exception as e:
raise exceptions.UserError(repr(e))
if preds.shape[0] != true_labels.shape[0]:
raise exceptions.UserError('Estimator\'s meta-feature generator '
'does not produce valid shape')
for key in metric_generators:
metric_generator = import_object_from_string_code(
metric_generators[key],
'metric_generator'
)
try:
performance_dict[key] = metric_generator(true_labels, preds)
except Exception as e:
raise exceptions.UserError(repr(e))
return performance_dict, make_serializable(est.get_params())
|
python
|
def verify_estimator_class(est, meta_feature_generator, metric_generators, dataset_properties):
"""Verify if estimator object is valid for use i.e. scikit-learn format
Verifies if an estimator is fit for use by testing for existence of methods
such as `get_params` and `set_params`. Must also be able to properly fit on
and predict a small sample dataset.
Args:
est: Estimator object with `fit`, `predict`/`predict_proba`,
`get_params`, and `set_params` methods.
meta_feature_generator (str, unicode): Name of the method used by the estimator
to generate meta-features on a set of data.
metric_generators (dict): Dictionary of key value pairs where the key
signifies the name of the metric calculated and the value is a list
of strings that, when concatenated, form Python code containing the
function used to calculate the metric from true values and the
meta-features generated.
dataset_properties (dict): Dictionary corresponding to the properties of the dataset
used to verify the estimator and metric generators.
Returns:
performance_dict (mapping): Mapping from performance metric
name to performance metric value e.g. "Accuracy": 0.963
hyperparameters (mapping): Mapping from the estimator's hyperparameters to
their default values e.g. "n_estimators": 10
"""
X, y, splits = get_sample_dataset(dataset_properties)
if not hasattr(est, "get_params"):
raise exceptions.UserError('Estimator does not have get_params method')
if not hasattr(est, "set_params"):
raise exceptions.UserError('Estimator does not have set_params method')
if not hasattr(est, meta_feature_generator):
raise exceptions.UserError('Estimator does not have meta-feature generator'
' {}'.format(meta_feature_generator))
performance_dict = dict()
true_labels = []
preds = []
try:
for train_index, test_index in splits:
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
est.fit(X_train, y_train)
true_labels.append(y_test)
preds.append(getattr(est, meta_feature_generator)(X_test))
true_labels = np.concatenate(true_labels)
preds = np.concatenate(preds, axis=0)
except Exception as e:
raise exceptions.UserError(repr(e))
if preds.shape[0] != true_labels.shape[0]:
raise exceptions.UserError('Estimator\'s meta-feature generator '
'does not produce valid shape')
for key in metric_generators:
metric_generator = import_object_from_string_code(
metric_generators[key],
'metric_generator'
)
try:
performance_dict[key] = metric_generator(true_labels, preds)
except Exception as e:
raise exceptions.UserError(repr(e))
return performance_dict, make_serializable(est.get_params())
|
[
"def",
"verify_estimator_class",
"(",
"est",
",",
"meta_feature_generator",
",",
"metric_generators",
",",
"dataset_properties",
")",
":",
"X",
",",
"y",
",",
"splits",
"=",
"get_sample_dataset",
"(",
"dataset_properties",
")",
"if",
"not",
"hasattr",
"(",
"est",
",",
"\"get_params\"",
")",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Estimator does not have get_params method'",
")",
"if",
"not",
"hasattr",
"(",
"est",
",",
"\"set_params\"",
")",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Estimator does not have set_params method'",
")",
"if",
"not",
"hasattr",
"(",
"est",
",",
"meta_feature_generator",
")",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Estimator does not have meta-feature generator'",
"' {}'",
".",
"format",
"(",
"meta_feature_generator",
")",
")",
"performance_dict",
"=",
"dict",
"(",
")",
"true_labels",
"=",
"[",
"]",
"preds",
"=",
"[",
"]",
"try",
":",
"for",
"train_index",
",",
"test_index",
"in",
"splits",
":",
"X_train",
",",
"X_test",
"=",
"X",
"[",
"train_index",
"]",
",",
"X",
"[",
"test_index",
"]",
"y_train",
",",
"y_test",
"=",
"y",
"[",
"train_index",
"]",
",",
"y",
"[",
"test_index",
"]",
"est",
".",
"fit",
"(",
"X_train",
",",
"y_train",
")",
"true_labels",
".",
"append",
"(",
"y_test",
")",
"preds",
".",
"append",
"(",
"getattr",
"(",
"est",
",",
"meta_feature_generator",
")",
"(",
"X_test",
")",
")",
"true_labels",
"=",
"np",
".",
"concatenate",
"(",
"true_labels",
")",
"preds",
"=",
"np",
".",
"concatenate",
"(",
"preds",
",",
"axis",
"=",
"0",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"repr",
"(",
"e",
")",
")",
"if",
"preds",
".",
"shape",
"[",
"0",
"]",
"!=",
"true_labels",
".",
"shape",
"[",
"0",
"]",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Estimator\\'s meta-feature generator '",
"'does not produce valid shape'",
")",
"for",
"key",
"in",
"metric_generators",
":",
"metric_generator",
"=",
"import_object_from_string_code",
"(",
"metric_generators",
"[",
"key",
"]",
",",
"'metric_generator'",
")",
"try",
":",
"performance_dict",
"[",
"key",
"]",
"=",
"metric_generator",
"(",
"true_labels",
",",
"preds",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"repr",
"(",
"e",
")",
")",
"return",
"performance_dict",
",",
"make_serializable",
"(",
"est",
".",
"get_params",
"(",
")",
")"
] |
Verify if estimator object is valid for use i.e. scikit-learn format
Verifies if an estimator is fit for use by testing for existence of methods
such as `get_params` and `set_params`. Must also be able to properly fit on
and predict a small sample dataset.
Args:
est: Estimator object with `fit`, `predict`/`predict_proba`,
`get_params`, and `set_params` methods.
meta_feature_generator (str, unicode): Name of the method used by the estimator
to generate meta-features on a set of data.
metric_generators (dict): Dictionary of key value pairs where the key
signifies the name of the metric calculated and the value is a list
of strings that, when concatenated, form Python code containing the
function used to calculate the metric from true values and the
meta-features generated.
dataset_properties (dict): Dictionary corresponding to the properties of the dataset
used to verify the estimator and metric generators.
Returns:
performance_dict (mapping): Mapping from performance metric
name to performance metric value e.g. "Accuracy": 0.963
hyperparameters (mapping): Mapping from the estimator's hyperparameters to
their default values e.g. "n_estimators": 10
|
[
"Verify",
"if",
"estimator",
"object",
"is",
"valid",
"for",
"use",
"i",
".",
"e",
".",
"scikit",
"-",
"learn",
"format"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/functions.py#L204-L275
|
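A sketch of how verify_estimator_class might be called on a scikit-learn estimator, assuming the imports below; the metric generator source follows the string-code convention used throughout these helpers:

from sklearn.ensemble import RandomForestClassifier
from xcessiv.functions import verify_estimator_class

metric_generators = {
    'Accuracy': (
        'import numpy as np\n'
        'from sklearn.metrics import accuracy_score\n'
        'def metric_generator(y_true, probas):\n'
        '    return accuracy_score(y_true, np.argmax(probas, axis=1))\n'
    )
}

performance_dict, hyperparameters = verify_estimator_class(
    RandomForestClassifier(random_state=8),
    'predict_proba',
    metric_generators,
    {'type': 'iris'}
)
print(performance_dict, hyperparameters['n_estimators'])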
6,655
|
reiinakano/xcessiv
|
xcessiv/functions.py
|
get_path_from_query_string
|
def get_path_from_query_string(req):
"""Gets path from query string
Args:
req (flask.request): Request object from Flask
Returns:
path (str): Value of "path" parameter from query string
Raises:
exceptions.UserError: If "path" is not found in query string
"""
if req.args.get('path') is None:
raise exceptions.UserError('Path not found in query string')
return req.args.get('path')
|
python
|
def get_path_from_query_string(req):
"""Gets path from query string
Args:
req (flask.request): Request object from Flask
Returns:
path (str): Value of "path" parameter from query string
Raises:
exceptions.UserError: If "path" is not found in query string
"""
if req.args.get('path') is None:
raise exceptions.UserError('Path not found in query string')
return req.args.get('path')
|
[
"def",
"get_path_from_query_string",
"(",
"req",
")",
":",
"if",
"req",
".",
"args",
".",
"get",
"(",
"'path'",
")",
"is",
"None",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Path not found in query string'",
")",
"return",
"req",
".",
"args",
".",
"get",
"(",
"'path'",
")"
] |
Gets path from query string
Args:
req (flask.request): Request object from Flask
Returns:
path (str): Value of "path" parameter from query string
Raises:
exceptions.UserError: If "path" is not found in query string
|
[
"Gets",
"path",
"from",
"query",
"string"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/functions.py#L278-L292
|
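A minimal Flask sketch around get_path_from_query_string, assuming the helper is importable from xcessiv.functions; the route and app are illustrative only:

from flask import Flask, request
from xcessiv.functions import get_path_from_query_string

app = Flask(__name__)

@app.route('/ensemble/')
def show_path():
    # e.g. GET /ensemble/?path=/my/notebook returns "/my/notebook";
    # a missing "path" parameter raises exceptions.UserError instead.
    return get_path_from_query_string(request)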
6,656
|
reiinakano/xcessiv
|
xcessiv/models.py
|
Extraction.return_main_dataset
|
def return_main_dataset(self):
"""Returns main data set from self
Returns:
X (numpy.ndarray): Features
y (numpy.ndarray): Labels
"""
if not self.main_dataset['source']:
raise exceptions.UserError('Source is empty')
extraction_code = self.main_dataset["source"]
extraction_function = functions.import_object_from_string_code(extraction_code,
"extract_main_dataset")
try:
X, y = extraction_function()
except Exception as e:
raise exceptions.UserError('User code exception', exception_message=str(e))
X, y = np.array(X), np.array(y)
return X, y
|
python
|
def return_main_dataset(self):
"""Returns main data set from self
Returns:
X (numpy.ndarray): Features
y (numpy.ndarray): Labels
"""
if not self.main_dataset['source']:
raise exceptions.UserError('Source is empty')
extraction_code = self.main_dataset["source"]
extraction_function = functions.import_object_from_string_code(extraction_code,
"extract_main_dataset")
try:
X, y = extraction_function()
except Exception as e:
raise exceptions.UserError('User code exception', exception_message=str(e))
X, y = np.array(X), np.array(y)
return X, y
|
[
"def",
"return_main_dataset",
"(",
"self",
")",
":",
"if",
"not",
"self",
".",
"main_dataset",
"[",
"'source'",
"]",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Source is empty'",
")",
"extraction_code",
"=",
"self",
".",
"main_dataset",
"[",
"\"source\"",
"]",
"extraction_function",
"=",
"functions",
".",
"import_object_from_string_code",
"(",
"extraction_code",
",",
"\"extract_main_dataset\"",
")",
"try",
":",
"X",
",",
"y",
"=",
"extraction_function",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'User code exception'",
",",
"exception_message",
"=",
"str",
"(",
"e",
")",
")",
"X",
",",
"y",
"=",
"np",
".",
"array",
"(",
"X",
")",
",",
"np",
".",
"array",
"(",
"y",
")",
"return",
"X",
",",
"y"
] |
Returns main data set from self
Returns:
X (numpy.ndarray): Features
y (numpy.ndarray): Labels
|
[
"Returns",
"main",
"data",
"set",
"from",
"self"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L70-L92
|
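return_main_dataset executes user-supplied extraction code that must expose a function named extract_main_dataset. A hedged example of what the stored main_dataset['source'] string might contain:

from sklearn.datasets import load_digits

def extract_main_dataset():
    # Must return a features array and a labels array; the caller converts both with np.array.
    X, y = load_digits(return_X_y=True)
    return X, y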
6,657
|
reiinakano/xcessiv
|
xcessiv/models.py
|
Extraction.return_train_dataset
|
def return_train_dataset(self):
"""Returns train data set
Returns:
X (numpy.ndarray): Features
y (numpy.ndarray): Labels
"""
X, y = self.return_main_dataset()
if self.test_dataset['method'] == 'split_from_main':
X, X_test, y, y_test = train_test_split(
X,
y,
test_size=self.test_dataset['split_ratio'],
random_state=self.test_dataset['split_seed'],
stratify=y
)
return X, y
|
python
|
def return_train_dataset(self):
"""Returns train data set
Returns:
X (numpy.ndarray): Features
y (numpy.ndarray): Labels
"""
X, y = self.return_main_dataset()
if self.test_dataset['method'] == 'split_from_main':
X, X_test, y, y_test = train_test_split(
X,
y,
test_size=self.test_dataset['split_ratio'],
random_state=self.test_dataset['split_seed'],
stratify=y
)
return X, y
|
[
"def",
"return_train_dataset",
"(",
"self",
")",
":",
"X",
",",
"y",
"=",
"self",
".",
"return_main_dataset",
"(",
")",
"if",
"self",
".",
"test_dataset",
"[",
"'method'",
"]",
"==",
"'split_from_main'",
":",
"X",
",",
"X_test",
",",
"y",
",",
"y_test",
"=",
"train_test_split",
"(",
"X",
",",
"y",
",",
"test_size",
"=",
"self",
".",
"test_dataset",
"[",
"'split_ratio'",
"]",
",",
"random_state",
"=",
"self",
".",
"test_dataset",
"[",
"'split_seed'",
"]",
",",
"stratify",
"=",
"y",
")",
"return",
"X",
",",
"y"
] |
Returns train data set
Returns:
X (numpy.ndarray): Features
y (numpy.ndarray): Labels
|
[
"Returns",
"train",
"data",
"set"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L94-L113
|
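When the test dataset method is 'split_from_main', the training set returned above is simply what remains after a stratified, seeded split. A standalone sketch of the same split, with the ratio and seed as illustrative values:

from sklearn.datasets import load_digits
from sklearn.model_selection import train_test_split

X, y = load_digits(return_X_y=True)
X_train, X_test, y_train, y_test = train_test_split(
    X, y, test_size=0.1, random_state=8, stratify=y
)
print(X_train.shape, X_test.shape)  # X_train, y_train correspond to what return_train_dataset gives back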
6,658
|
reiinakano/xcessiv
|
xcessiv/models.py
|
BaseLearnerOrigin.return_estimator
|
def return_estimator(self):
"""Returns estimator from base learner origin
Returns:
est (estimator): Estimator object
"""
extraction_code = self.source
estimator = functions.import_object_from_string_code(extraction_code, "base_learner")
return estimator
|
python
|
def return_estimator(self):
"""Returns estimator from base learner origin
Returns:
est (estimator): Estimator object
"""
extraction_code = self.source
estimator = functions.import_object_from_string_code(extraction_code, "base_learner")
return estimator
|
[
"def",
"return_estimator",
"(",
"self",
")",
":",
"extraction_code",
"=",
"self",
".",
"source",
"estimator",
"=",
"functions",
".",
"import_object_from_string_code",
"(",
"extraction_code",
",",
"\"base_learner\"",
")",
"return",
"estimator"
] |
Returns estimator from base learner origin
Returns:
est (estimator): Estimator object
|
[
"Returns",
"estimator",
"from",
"base",
"learner",
"origin"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L192-L201
|
6,659
|
reiinakano/xcessiv
|
xcessiv/models.py
|
BaseLearnerOrigin.export_as_file
|
def export_as_file(self, filepath, hyperparameters):
"""Generates a Python file with the importable base learner set to ``hyperparameters``
This function generates a Python file in the specified file path that contains
the base learner as an importable variable stored in ``base_learner``. The base
learner will be set to the appropriate hyperparameters through ``set_params``.
Args:
filepath (str, unicode): File path to save file in
hyperparameters (dict): Dictionary to use for ``set_params``
"""
if not filepath.endswith('.py'):
filepath += '.py'
file_contents = ''
file_contents += self.source
file_contents += '\n\nbase_learner.set_params(**{})\n'.format(hyperparameters)
file_contents += '\nmeta_feature_generator = "{}"\n'.format(self.meta_feature_generator)
with open(filepath, 'wb') as f:
f.write(file_contents.encode('utf8'))
|
python
|
def export_as_file(self, filepath, hyperparameters):
"""Generates a Python file with the importable base learner set to ``hyperparameters``
This function generates a Python file in the specified file path that contains
the base learner as an importable variable stored in ``base_learner``. The base
learner will be set to the appropriate hyperparameters through ``set_params``.
Args:
filepath (str, unicode): File path to save file in
hyperparameters (dict): Dictionary to use for ``set_params``
"""
if not filepath.endswith('.py'):
filepath += '.py'
file_contents = ''
file_contents += self.source
file_contents += '\n\nbase_learner.set_params(**{})\n'.format(hyperparameters)
file_contents += '\nmeta_feature_generator = "{}"\n'.format(self.meta_feature_generator)
with open(filepath, 'wb') as f:
f.write(file_contents.encode('utf8'))
|
[
"def",
"export_as_file",
"(",
"self",
",",
"filepath",
",",
"hyperparameters",
")",
":",
"if",
"not",
"filepath",
".",
"endswith",
"(",
"'.py'",
")",
":",
"filepath",
"+=",
"'.py'",
"file_contents",
"=",
"''",
"file_contents",
"+=",
"self",
".",
"source",
"file_contents",
"+=",
"'\\n\\nbase_learner.set_params(**{})\\n'",
".",
"format",
"(",
"hyperparameters",
")",
"file_contents",
"+=",
"'\\nmeta_feature_generator = \"{}\"\\n'",
".",
"format",
"(",
"self",
".",
"meta_feature_generator",
")",
"with",
"open",
"(",
"filepath",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"file_contents",
".",
"encode",
"(",
"'utf8'",
")",
")"
] |
Generates a Python file with the importable base learner set to ``hyperparameters``
This function generates a Python file in the specified file path that contains
the base learner as an importable variable stored in ``base_learner``. The base
learner will be set to the appropriate hyperparameters through ``set_params``.
Args:
filepath (str, unicode): File path to save file in
hyperparameters (dict): Dictionary to use for ``set_params``
|
[
"Generates",
"a",
"Python",
"file",
"with",
"the",
"importable",
"base",
"learner",
"set",
"to",
"hyperparameters"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L212-L232
|
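export_as_file appends a set_params call and the meta-feature generator name to the stored source. For a base learner origin whose source defines base_learner as a random forest, the generated .py file would look roughly like this (a sketch, not verbatim output; the hyperparameters are illustrative):

from sklearn.ensemble import RandomForestClassifier

base_learner = RandomForestClassifier(random_state=8)

base_learner.set_params(**{'n_estimators': 100, 'max_depth': 3})

meta_feature_generator = "predict_proba"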
6,660
|
reiinakano/xcessiv
|
xcessiv/models.py
|
BaseLearner.return_estimator
|
def return_estimator(self):
"""Returns base learner using its origin and the given hyperparameters
Returns:
est (estimator): Estimator object
"""
estimator = self.base_learner_origin.return_estimator()
estimator = estimator.set_params(**self.hyperparameters)
return estimator
|
python
|
def return_estimator(self):
"""Returns base learner using its origin and the given hyperparameters
Returns:
est (estimator): Estimator object
"""
estimator = self.base_learner_origin.return_estimator()
estimator = estimator.set_params(**self.hyperparameters)
return estimator
|
[
"def",
"return_estimator",
"(",
"self",
")",
":",
"estimator",
"=",
"self",
".",
"base_learner_origin",
".",
"return_estimator",
"(",
")",
"estimator",
"=",
"estimator",
".",
"set_params",
"(",
"*",
"*",
"self",
".",
"hyperparameters",
")",
"return",
"estimator"
] |
Returns base learner using its origin and the given hyperparameters
Returns:
est (estimator): Estimator object
|
[
"Returns",
"base",
"learner",
"using",
"its",
"origin",
"and",
"the",
"given",
"hyperparameters"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L307-L315
|
6,661
|
reiinakano/xcessiv
|
xcessiv/models.py
|
BaseLearner.meta_features_path
|
def meta_features_path(self, path):
"""Returns path for meta-features
Args:
path (str): Absolute/local path of xcessiv folder
"""
return os.path.join(
path,
app.config['XCESSIV_META_FEATURES_FOLDER'],
str(self.id)
) + '.npy'
|
python
|
def meta_features_path(self, path):
"""Returns path for meta-features
Args:
path (str): Absolute/local path of xcessiv folder
"""
return os.path.join(
path,
app.config['XCESSIV_META_FEATURES_FOLDER'],
str(self.id)
) + '.npy'
|
[
"def",
"meta_features_path",
"(",
"self",
",",
"path",
")",
":",
"return",
"os",
".",
"path",
".",
"join",
"(",
"path",
",",
"app",
".",
"config",
"[",
"'XCESSIV_META_FEATURES_FOLDER'",
"]",
",",
"str",
"(",
"self",
".",
"id",
")",
")",
"+",
"'.npy'"
] |
Returns path for meta-features
Args:
path (str): Absolute/local path of xcessiv folder
|
[
"Returns",
"path",
"for",
"meta",
"-",
"features"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L317-L327
|
6,662
|
reiinakano/xcessiv
|
xcessiv/models.py
|
BaseLearner.delete_meta_features
|
def delete_meta_features(self, path):
"""Deletes meta-features of base learner if it exists
Args:
path (str): Absolute/local path of xcessiv folder
"""
if os.path.exists(self.meta_features_path(path)):
os.remove(self.meta_features_path(path))
|
python
|
def delete_meta_features(self, path):
"""Deletes meta-features of base learner if it exists
Args:
path (str): Absolute/local path of xcessiv folder
"""
if os.path.exists(self.meta_features_path(path)):
os.remove(self.meta_features_path(path))
|
[
"def",
"delete_meta_features",
"(",
"self",
",",
"path",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"self",
".",
"meta_features_path",
"(",
"path",
")",
")",
":",
"os",
".",
"remove",
"(",
"self",
".",
"meta_features_path",
"(",
"path",
")",
")"
] |
Deletes meta-features of base learner if it exists
Args:
path (str): Absolute/local path of xcessiv folder
|
[
"Deletes",
"meta",
"-",
"features",
"of",
"base",
"learner",
"if",
"it",
"exists"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L342-L349
|
6,663
|
reiinakano/xcessiv
|
xcessiv/models.py
|
StackedEnsemble.return_secondary_learner
|
def return_secondary_learner(self):
"""Returns secondary learner using its origin and the given hyperparameters
Returns:
est (estimator): Estimator object
"""
estimator = self.base_learner_origin.return_estimator()
estimator = estimator.set_params(**self.secondary_learner_hyperparameters)
return estimator
|
python
|
def return_secondary_learner(self):
"""Returns secondary learner using its origin and the given hyperparameters
Returns:
est (estimator): Estimator object
"""
estimator = self.base_learner_origin.return_estimator()
estimator = estimator.set_params(**self.secondary_learner_hyperparameters)
return estimator
|
[
"def",
"return_secondary_learner",
"(",
"self",
")",
":",
"estimator",
"=",
"self",
".",
"base_learner_origin",
".",
"return_estimator",
"(",
")",
"estimator",
"=",
"estimator",
".",
"set_params",
"(",
"*",
"*",
"self",
".",
"secondary_learner_hyperparameters",
")",
"return",
"estimator"
] |
Returns secondary learner using its origin and the given hyperparameters
Returns:
est (estimator): Estimator object
|
[
"Returns",
"secondary",
"learner",
"using",
"its",
"origin",
"and",
"the",
"given",
"hyperparameters"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L402-L410
|
6,664
|
reiinakano/xcessiv
|
xcessiv/models.py
|
StackedEnsemble.export_as_code
|
def export_as_code(self, cv_source):
"""Returns a string value that contains the Python code for the ensemble
Args:
cv_source (str, unicode): String containing actual code for base learner
cross-validation used to generate secondary meta-features.
Returns:
base_learner_code (str, unicode): String that can be used as Python code
"""
rand_value = ''.join(random.choice(string.ascii_uppercase + string.digits)
for _ in range(25))
base_learner_code = ''
base_learner_code += 'base_learner_list_{} = []\n'.format(rand_value)
base_learner_code += 'meta_feature_generators_list_{} = []\n\n'.format(rand_value)
for idx, base_learner in enumerate(self.base_learners):
base_learner_code += '################################################\n'
base_learner_code += '###### Code for building base learner {} ########\n'.format(idx+1)
base_learner_code += '################################################\n'
base_learner_code += base_learner.base_learner_origin.source
base_learner_code += '\n\n'
base_learner_code += 'base_learner' \
'.set_params(**{})\n'.format(base_learner.hyperparameters)
base_learner_code += 'base_learner_list_{}.append(base_learner)\n'.format(rand_value)
base_learner_code += 'meta_feature_generators_list_{}.append("{}")\n'.format(
rand_value,
base_learner.base_learner_origin.meta_feature_generator
)
base_learner_code += '\n\n'
base_learner_code += '################################################\n'
base_learner_code += '##### Code for building secondary learner ######\n'
base_learner_code += '################################################\n'
base_learner_code += self.base_learner_origin.source
base_learner_code += '\n\n'
base_learner_code += 'base_learner' \
'.set_params(**{})\n'.format(self.secondary_learner_hyperparameters)
base_learner_code += 'secondary_learner_{} = base_learner\n'.format(rand_value)
base_learner_code += '\n\n'
base_learner_code += '################################################\n'
base_learner_code += '############## Code for CV method ##############\n'
base_learner_code += '################################################\n'
base_learner_code += cv_source
base_learner_code += '\n\n'
base_learner_code += '################################################\n'
base_learner_code += '######## Code for Xcessiv stacker class ########\n'
base_learner_code += '################################################\n'
stacker_file_loc = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'stacker.py')
with open(stacker_file_loc) as f2:
base_learner_code += f2.read()
base_learner_code += '\n\n' \
' def {}(self, X):\n' \
' return self._process_using_' \
'meta_feature_generator(X, "{}")\n\n'\
.format(self.base_learner_origin.meta_feature_generator,
self.base_learner_origin.meta_feature_generator)
base_learner_code += '\n\n'
base_learner_code += 'base_learner = XcessivStackedEnsemble' \
'(base_learners=base_learner_list_{},' \
' meta_feature_generators=meta_feature_generators_list_{},' \
' secondary_learner=secondary_learner_{},' \
' cv_function=return_splits_iterable)\n'.format(
rand_value,
rand_value,
rand_value
)
return base_learner_code
|
python
|
def export_as_code(self, cv_source):
"""Returns a string value that contains the Python code for the ensemble
Args:
cv_source (str, unicode): String containing actual code for base learner
cross-validation used to generate secondary meta-features.
Returns:
base_learner_code (str, unicode): String that can be used as Python code
"""
rand_value = ''.join(random.choice(string.ascii_uppercase + string.digits)
for _ in range(25))
base_learner_code = ''
base_learner_code += 'base_learner_list_{} = []\n'.format(rand_value)
base_learner_code += 'meta_feature_generators_list_{} = []\n\n'.format(rand_value)
for idx, base_learner in enumerate(self.base_learners):
base_learner_code += '################################################\n'
base_learner_code += '###### Code for building base learner {} ########\n'.format(idx+1)
base_learner_code += '################################################\n'
base_learner_code += base_learner.base_learner_origin.source
base_learner_code += '\n\n'
base_learner_code += 'base_learner' \
'.set_params(**{})\n'.format(base_learner.hyperparameters)
base_learner_code += 'base_learner_list_{}.append(base_learner)\n'.format(rand_value)
base_learner_code += 'meta_feature_generators_list_{}.append("{}")\n'.format(
rand_value,
base_learner.base_learner_origin.meta_feature_generator
)
base_learner_code += '\n\n'
base_learner_code += '################################################\n'
base_learner_code += '##### Code for building secondary learner ######\n'
base_learner_code += '################################################\n'
base_learner_code += self.base_learner_origin.source
base_learner_code += '\n\n'
base_learner_code += 'base_learner' \
'.set_params(**{})\n'.format(self.secondary_learner_hyperparameters)
base_learner_code += 'secondary_learner_{} = base_learner\n'.format(rand_value)
base_learner_code += '\n\n'
base_learner_code += '################################################\n'
base_learner_code += '############## Code for CV method ##############\n'
base_learner_code += '################################################\n'
base_learner_code += cv_source
base_learner_code += '\n\n'
base_learner_code += '################################################\n'
base_learner_code += '######## Code for Xcessiv stacker class ########\n'
base_learner_code += '################################################\n'
stacker_file_loc = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'stacker.py')
with open(stacker_file_loc) as f2:
base_learner_code += f2.read()
base_learner_code += '\n\n' \
' def {}(self, X):\n' \
' return self._process_using_' \
'meta_feature_generator(X, "{}")\n\n'\
.format(self.base_learner_origin.meta_feature_generator,
self.base_learner_origin.meta_feature_generator)
base_learner_code += '\n\n'
base_learner_code += 'base_learner = XcessivStackedEnsemble' \
'(base_learners=base_learner_list_{},' \
' meta_feature_generators=meta_feature_generators_list_{},' \
' secondary_learner=secondary_learner_{},' \
' cv_function=return_splits_iterable)\n'.format(
rand_value,
rand_value,
rand_value
)
return base_learner_code
|
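The exporter namespaces every generated variable with a 25-character random suffix so repeated exports cannot collide; a short sketch of just that technique (the generated variable names are illustrative):

import random
import string

# Same suffix construction as export_as_code: uppercase letters and digits only.
rand_value = ''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(25))

generated = ''
generated += 'base_learner_list_{} = []\n'.format(rand_value)
generated += 'meta_feature_generators_list_{} = []\n'.format(rand_value)
print(generated)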
[
"def",
"export_as_code",
"(",
"self",
",",
"cv_source",
")",
":",
"rand_value",
"=",
"''",
".",
"join",
"(",
"random",
".",
"choice",
"(",
"string",
".",
"ascii_uppercase",
"+",
"string",
".",
"digits",
")",
"for",
"_",
"in",
"range",
"(",
"25",
")",
")",
"base_learner_code",
"=",
"''",
"base_learner_code",
"+=",
"'base_learner_list_{} = []\\n'",
".",
"format",
"(",
"rand_value",
")",
"base_learner_code",
"+=",
"'meta_feature_generators_list_{} = []\\n\\n'",
".",
"format",
"(",
"rand_value",
")",
"for",
"idx",
",",
"base_learner",
"in",
"enumerate",
"(",
"self",
".",
"base_learners",
")",
":",
"base_learner_code",
"+=",
"'################################################\\n'",
"base_learner_code",
"+=",
"'###### Code for building base learner {} ########\\n'",
".",
"format",
"(",
"idx",
"+",
"1",
")",
"base_learner_code",
"+=",
"'################################################\\n'",
"base_learner_code",
"+=",
"base_learner",
".",
"base_learner_origin",
".",
"source",
"base_learner_code",
"+=",
"'\\n\\n'",
"base_learner_code",
"+=",
"'base_learner'",
"'.set_params(**{})\\n'",
".",
"format",
"(",
"base_learner",
".",
"hyperparameters",
")",
"base_learner_code",
"+=",
"'base_learner_list_{}.append(base_learner)\\n'",
".",
"format",
"(",
"rand_value",
")",
"base_learner_code",
"+=",
"'meta_feature_generators_list_{}.append(\"{}\")\\n'",
".",
"format",
"(",
"rand_value",
",",
"base_learner",
".",
"base_learner_origin",
".",
"meta_feature_generator",
")",
"base_learner_code",
"+=",
"'\\n\\n'",
"base_learner_code",
"+=",
"'################################################\\n'",
"base_learner_code",
"+=",
"'##### Code for building secondary learner ######\\n'",
"base_learner_code",
"+=",
"'################################################\\n'",
"base_learner_code",
"+=",
"self",
".",
"base_learner_origin",
".",
"source",
"base_learner_code",
"+=",
"'\\n\\n'",
"base_learner_code",
"+=",
"'base_learner'",
"'.set_params(**{})\\n'",
".",
"format",
"(",
"self",
".",
"secondary_learner_hyperparameters",
")",
"base_learner_code",
"+=",
"'secondary_learner_{} = base_learner\\n'",
".",
"format",
"(",
"rand_value",
")",
"base_learner_code",
"+=",
"'\\n\\n'",
"base_learner_code",
"+=",
"'################################################\\n'",
"base_learner_code",
"+=",
"'############## Code for CV method ##############\\n'",
"base_learner_code",
"+=",
"'################################################\\n'",
"base_learner_code",
"+=",
"cv_source",
"base_learner_code",
"+=",
"'\\n\\n'",
"base_learner_code",
"+=",
"'################################################\\n'",
"base_learner_code",
"+=",
"'######## Code for Xcessiv stacker class ########\\n'",
"base_learner_code",
"+=",
"'################################################\\n'",
"stacker_file_loc",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
",",
"'stacker.py'",
")",
"with",
"open",
"(",
"stacker_file_loc",
")",
"as",
"f2",
":",
"base_learner_code",
"+=",
"f2",
".",
"read",
"(",
")",
"base_learner_code",
"+=",
"'\\n\\n'",
"' def {}(self, X):\\n'",
"' return self._process_using_'",
"'meta_feature_generator(X, \"{}\")\\n\\n'",
".",
"format",
"(",
"self",
".",
"base_learner_origin",
".",
"meta_feature_generator",
",",
"self",
".",
"base_learner_origin",
".",
"meta_feature_generator",
")",
"base_learner_code",
"+=",
"'\\n\\n'",
"base_learner_code",
"+=",
"'base_learner = XcessivStackedEnsemble'",
"'(base_learners=base_learner_list_{},'",
"' meta_feature_generators=meta_feature_generators_list_{},'",
"' secondary_learner=secondary_learner_{},'",
"' cv_function=return_splits_iterable)\\n'",
".",
"format",
"(",
"rand_value",
",",
"rand_value",
",",
"rand_value",
")",
"return",
"base_learner_code"
] |
Returns a string value that contains the Python code for the ensemble
Args:
cv_source (str, unicode): String containing actual code for base learner
cross-validation used to generate secondary meta-features.
Returns:
base_learner_code (str, unicode): String that can be used as Python code
|
[
"Returns",
"a",
"string",
"value",
"that",
"contains",
"the",
"Python",
"code",
"for",
"the",
"ensemble"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L412-L486
|
6,665
|
reiinakano/xcessiv
|
xcessiv/models.py
|
StackedEnsemble.export_as_file
|
def export_as_file(self, file_path, cv_source):
"""Export the ensemble as a single Python file and saves it to `file_path`.
This is EXPERIMENTAL as putting different modules together would probably wreak havoc
especially on modules that make heavy use of global variables.
Args:
file_path (str, unicode): Absolute/local path of place to save file in
cv_source (str, unicode): String containing actual code for base learner
cross-validation used to generate secondary meta-features.
"""
if os.path.exists(file_path):
raise exceptions.UserError('{} already exists'.format(file_path))
with open(file_path, 'wb') as f:
f.write(self.export_as_code(cv_source).encode('utf8'))
|
python
|
def export_as_file(self, file_path, cv_source):
"""Export the ensemble as a single Python file and saves it to `file_path`.
This is EXPERIMENTAL as putting different modules together would probably wreak havoc
especially on modules that make heavy use of global variables.
Args:
file_path (str, unicode): Absolute/local path of place to save file in
cv_source (str, unicode): String containing actual code for base learner
cross-validation used to generate secondary meta-features.
"""
if os.path.exists(file_path):
raise exceptions.UserError('{} already exists'.format(file_path))
with open(file_path, 'wb') as f:
f.write(self.export_as_code(cv_source).encode('utf8'))
|
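Under the hood the export is just the generated source written out as UTF-8 bytes behind an existence check; a self-contained sketch of that pattern (the file name and error type are illustrative):

import os

def write_generated_code(file_path, code_text):
    # Mirrors export_as_file's guard: never overwrite an existing file.
    if os.path.exists(file_path):
        raise ValueError('{} already exists'.format(file_path))
    with open(file_path, 'wb') as f:
        f.write(code_text.encode('utf8'))

write_generated_code('exported_ensemble_example.py', 'print("generated module")\n')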
[
"def",
"export_as_file",
"(",
"self",
",",
"file_path",
",",
"cv_source",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"file_path",
")",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'{} already exists'",
".",
"format",
"(",
"file_path",
")",
")",
"with",
"open",
"(",
"file_path",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"self",
".",
"export_as_code",
"(",
"cv_source",
")",
".",
"encode",
"(",
"'utf8'",
")",
")"
] |
Export the ensemble as a single Python file and saves it to `file_path`.
This is EXPERIMENTAL as putting different modules together would probably wreak havoc
especially on modules that make heavy use of global variables.
Args:
file_path (str, unicode): Absolute/local path of place to save file in
cv_source (str, unicode): String containing actual code for base learner
cross-validation used to generate secondary meta-features.
|
[
"Export",
"the",
"ensemble",
"as",
"a",
"single",
"Python",
"file",
"and",
"saves",
"it",
"to",
"file_path",
"."
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L488-L504
|
6,666
|
reiinakano/xcessiv
|
xcessiv/models.py
|
StackedEnsemble.export_as_package
|
def export_as_package(self, package_path, cv_source):
"""Exports the ensemble as a Python package and saves it to `package_path`.
Args:
package_path (str, unicode): Absolute/local path of place to save package in
cv_source (str, unicode): String containing actual code for base learner
cross-validation used to generate secondary meta-features.
Raises:
exceptions.UserError: If os.path.join(path, name) already exists.
"""
if os.path.exists(package_path):
raise exceptions.UserError('{} already exists'.format(package_path))
package_name = os.path.basename(os.path.normpath(package_path))
os.makedirs(package_path)
# Write __init__.py
with open(os.path.join(package_path, '__init__.py'), 'wb') as f:
f.write('from {}.builder import xcessiv_ensemble'.format(package_name).encode('utf8'))
# Create package baselearners with each base learner having its own module
os.makedirs(os.path.join(package_path, 'baselearners'))
open(os.path.join(package_path, 'baselearners', '__init__.py'), 'a').close()
for idx, base_learner in enumerate(self.base_learners):
base_learner.export_as_file(os.path.join(package_path,
'baselearners',
'baselearner' + str(idx)))
# Create metalearner.py containing secondary learner
self.base_learner_origin.export_as_file(
os.path.join(package_path, 'metalearner'),
self.secondary_learner_hyperparameters
)
# Create cv.py containing CV method for getting meta-features
with open(os.path.join(package_path, 'cv.py'), 'wb') as f:
f.write(cv_source.encode('utf8'))
# Create stacker.py containing class for Xcessiv ensemble
ensemble_source = ''
stacker_file_loc = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'stacker.py')
with open(stacker_file_loc) as f:
ensemble_source += f.read()
ensemble_source += '\n\n' \
' def {}(self, X):\n' \
' return self._process_using_' \
'meta_feature_generator(X, "{}")\n\n'\
.format(self.base_learner_origin.meta_feature_generator,
self.base_learner_origin.meta_feature_generator)
with open(os.path.join(package_path, 'stacker.py'), 'wb') as f:
f.write(ensemble_source.encode('utf8'))
# Create builder.py containing file where `xcessiv_ensemble` is instantiated for import
builder_source = ''
for idx, base_learner in enumerate(self.base_learners):
builder_source += 'from {}.baselearners import baselearner{}\n'.format(package_name, idx)
builder_source += 'from {}.cv import return_splits_iterable\n'.format(package_name)
builder_source += 'from {} import metalearner\n'.format(package_name)
builder_source += 'from {}.stacker import XcessivStackedEnsemble\n'.format(package_name)
builder_source += '\nbase_learners = [\n'
for idx, base_learner in enumerate(self.base_learners):
builder_source += ' baselearner{}.base_learner,\n'.format(idx)
builder_source += ']\n'
builder_source += '\nmeta_feature_generators = [\n'
for idx, base_learner in enumerate(self.base_learners):
builder_source += ' baselearner{}.meta_feature_generator,\n'.format(idx)
builder_source += ']\n'
builder_source += '\nxcessiv_ensemble = XcessivStackedEnsemble(base_learners=base_learners,' \
' meta_feature_generators=meta_feature_generators,' \
' secondary_learner=metalearner.base_learner,' \
' cv_function=return_splits_iterable)\n'
with open(os.path.join(package_path, 'builder.py'), 'wb') as f:
f.write(builder_source.encode('utf8'))
|
python
|
def export_as_package(self, package_path, cv_source):
"""Exports the ensemble as a Python package and saves it to `package_path`.
Args:
package_path (str, unicode): Absolute/local path of place to save package in
cv_source (str, unicode): String containing actual code for base learner
cross-validation used to generate secondary meta-features.
Raises:
exceptions.UserError: If os.path.join(path, name) already exists.
"""
if os.path.exists(package_path):
raise exceptions.UserError('{} already exists'.format(package_path))
package_name = os.path.basename(os.path.normpath(package_path))
os.makedirs(package_path)
# Write __init__.py
with open(os.path.join(package_path, '__init__.py'), 'wb') as f:
f.write('from {}.builder import xcessiv_ensemble'.format(package_name).encode('utf8'))
# Create package baselearners with each base learner having its own module
os.makedirs(os.path.join(package_path, 'baselearners'))
open(os.path.join(package_path, 'baselearners', '__init__.py'), 'a').close()
for idx, base_learner in enumerate(self.base_learners):
base_learner.export_as_file(os.path.join(package_path,
'baselearners',
'baselearner' + str(idx)))
# Create metalearner.py containing secondary learner
self.base_learner_origin.export_as_file(
os.path.join(package_path, 'metalearner'),
self.secondary_learner_hyperparameters
)
# Create cv.py containing CV method for getting meta-features
with open(os.path.join(package_path, 'cv.py'), 'wb') as f:
f.write(cv_source.encode('utf8'))
# Create stacker.py containing class for Xcessiv ensemble
ensemble_source = ''
stacker_file_loc = os.path.join(os.path.abspath(os.path.dirname(__file__)), 'stacker.py')
with open(stacker_file_loc) as f:
ensemble_source += f.read()
ensemble_source += '\n\n' \
' def {}(self, X):\n' \
' return self._process_using_' \
'meta_feature_generator(X, "{}")\n\n'\
.format(self.base_learner_origin.meta_feature_generator,
self.base_learner_origin.meta_feature_generator)
with open(os.path.join(package_path, 'stacker.py'), 'wb') as f:
f.write(ensemble_source.encode('utf8'))
# Create builder.py containing file where `xcessiv_ensemble` is instantiated for import
builder_source = ''
for idx, base_learner in enumerate(self.base_learners):
builder_source += 'from {}.baselearners import baselearner{}\n'.format(package_name, idx)
builder_source += 'from {}.cv import return_splits_iterable\n'.format(package_name)
builder_source += 'from {} import metalearner\n'.format(package_name)
builder_source += 'from {}.stacker import XcessivStackedEnsemble\n'.format(package_name)
builder_source += '\nbase_learners = [\n'
for idx, base_learner in enumerate(self.base_learners):
builder_source += ' baselearner{}.base_learner,\n'.format(idx)
builder_source += ']\n'
builder_source += '\nmeta_feature_generators = [\n'
for idx, base_learner in enumerate(self.base_learners):
builder_source += ' baselearner{}.meta_feature_generator,\n'.format(idx)
builder_source += ']\n'
builder_source += '\nxcessiv_ensemble = XcessivStackedEnsemble(base_learners=base_learners,' \
' meta_feature_generators=meta_feature_generators,' \
' secondary_learner=metalearner.base_learner,' \
' cv_function=return_splits_iterable)\n'
with open(os.path.join(package_path, 'builder.py'), 'wb') as f:
f.write(builder_source.encode('utf8'))
|
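A self-contained sketch of the package-building pattern used above: create the directory, then write an __init__.py that re-exports a name from a builder module. Paths and contents are illustrative, and unlike the method above this sketch skips instead of raising when the directory already exists:

import os

package_path = 'example_pkg'  # illustrative location
package_name = os.path.basename(os.path.normpath(package_path))

if not os.path.exists(package_path):
    os.makedirs(package_path)
    with open(os.path.join(package_path, '__init__.py'), 'wb') as f:
        f.write('from {}.builder import xcessiv_ensemble'.format(package_name).encode('utf8'))
    with open(os.path.join(package_path, 'builder.py'), 'wb') as f:
        f.write(b'xcessiv_ensemble = None  # placeholder for the assembled ensemble\n')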
[
"def",
"export_as_package",
"(",
"self",
",",
"package_path",
",",
"cv_source",
")",
":",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"package_path",
")",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'{} already exists'",
".",
"format",
"(",
"package_path",
")",
")",
"package_name",
"=",
"os",
".",
"path",
".",
"basename",
"(",
"os",
".",
"path",
".",
"normpath",
"(",
"package_path",
")",
")",
"os",
".",
"makedirs",
"(",
"package_path",
")",
"# Write __init__.py",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"package_path",
",",
"'__init__.py'",
")",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"'from {}.builder import xcessiv_ensemble'",
".",
"format",
"(",
"package_name",
")",
".",
"encode",
"(",
"'utf8'",
")",
")",
"# Create package baselearners with each base learner having its own module",
"os",
".",
"makedirs",
"(",
"os",
".",
"path",
".",
"join",
"(",
"package_path",
",",
"'baselearners'",
")",
")",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"package_path",
",",
"'baselearners'",
",",
"'__init__.py'",
")",
",",
"'a'",
")",
".",
"close",
"(",
")",
"for",
"idx",
",",
"base_learner",
"in",
"enumerate",
"(",
"self",
".",
"base_learners",
")",
":",
"base_learner",
".",
"export_as_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"package_path",
",",
"'baselearners'",
",",
"'baselearner'",
"+",
"str",
"(",
"idx",
")",
")",
")",
"# Create metalearner.py containing secondary learner",
"self",
".",
"base_learner_origin",
".",
"export_as_file",
"(",
"os",
".",
"path",
".",
"join",
"(",
"package_path",
",",
"'metalearner'",
")",
",",
"self",
".",
"secondary_learner_hyperparameters",
")",
"# Create cv.py containing CV method for getting meta-features",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"package_path",
",",
"'cv.py'",
")",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"cv_source",
".",
"encode",
"(",
"'utf8'",
")",
")",
"# Create stacker.py containing class for Xcessiv ensemble",
"ensemble_source",
"=",
"''",
"stacker_file_loc",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"abspath",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
")",
",",
"'stacker.py'",
")",
"with",
"open",
"(",
"stacker_file_loc",
")",
"as",
"f",
":",
"ensemble_source",
"+=",
"f",
".",
"read",
"(",
")",
"ensemble_source",
"+=",
"'\\n\\n'",
"' def {}(self, X):\\n'",
"' return self._process_using_'",
"'meta_feature_generator(X, \"{}\")\\n\\n'",
".",
"format",
"(",
"self",
".",
"base_learner_origin",
".",
"meta_feature_generator",
",",
"self",
".",
"base_learner_origin",
".",
"meta_feature_generator",
")",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"package_path",
",",
"'stacker.py'",
")",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"ensemble_source",
".",
"encode",
"(",
"'utf8'",
")",
")",
"# Create builder.py containing file where `xcessiv_ensemble` is instantiated for import",
"builder_source",
"=",
"''",
"for",
"idx",
",",
"base_learner",
"in",
"enumerate",
"(",
"self",
".",
"base_learners",
")",
":",
"builder_source",
"+=",
"'from {}.baselearners import baselearner{}\\n'",
".",
"format",
"(",
"package_name",
",",
"idx",
")",
"builder_source",
"+=",
"'from {}.cv import return_splits_iterable\\n'",
".",
"format",
"(",
"package_name",
")",
"builder_source",
"+=",
"'from {} import metalearner\\n'",
".",
"format",
"(",
"package_name",
")",
"builder_source",
"+=",
"'from {}.stacker import XcessivStackedEnsemble\\n'",
".",
"format",
"(",
"package_name",
")",
"builder_source",
"+=",
"'\\nbase_learners = [\\n'",
"for",
"idx",
",",
"base_learner",
"in",
"enumerate",
"(",
"self",
".",
"base_learners",
")",
":",
"builder_source",
"+=",
"' baselearner{}.base_learner,\\n'",
".",
"format",
"(",
"idx",
")",
"builder_source",
"+=",
"']\\n'",
"builder_source",
"+=",
"'\\nmeta_feature_generators = [\\n'",
"for",
"idx",
",",
"base_learner",
"in",
"enumerate",
"(",
"self",
".",
"base_learners",
")",
":",
"builder_source",
"+=",
"' baselearner{}.meta_feature_generator,\\n'",
".",
"format",
"(",
"idx",
")",
"builder_source",
"+=",
"']\\n'",
"builder_source",
"+=",
"'\\nxcessiv_ensemble = XcessivStackedEnsemble(base_learners=base_learners,'",
"' meta_feature_generators=meta_feature_generators,'",
"' secondary_learner=metalearner.base_learner,'",
"' cv_function=return_splits_iterable)\\n'",
"with",
"open",
"(",
"os",
".",
"path",
".",
"join",
"(",
"package_path",
",",
"'builder.py'",
")",
",",
"'wb'",
")",
"as",
"f",
":",
"f",
".",
"write",
"(",
"builder_source",
".",
"encode",
"(",
"'utf8'",
")",
")"
] |
Exports the ensemble as a Python package and saves it to `package_path`.
Args:
package_path (str, unicode): Absolute/local path of place to save package in
cv_source (str, unicode): String containing actual code for base learner
cross-validation used to generate secondary meta-features.
Raises:
exceptions.UserError: If os.path.join(path, name) already exists.
|
[
"Exports",
"the",
"ensemble",
"as",
"a",
"Python",
"package",
"and",
"saves",
"it",
"to",
"package_path",
"."
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/models.py#L506-L591
|
6,667
|
reiinakano/xcessiv
|
xcessiv/views.py
|
verify_full_extraction
|
def verify_full_extraction():
"""This is an experimental endpoint to simultaneously verify data
statistics and extraction for training, test, and holdout datasets.
With this, the other three verification methods will no longer be
necessary.
"""
path = functions.get_path_from_query_string(request)
if request.method == 'POST':
rqtasks.extraction_data_statistics(path)
with functions.DBContextManager(path) as session:
extraction = session.query(models.Extraction).first()
return jsonify(extraction.data_statistics)
|
python
|
def verify_full_extraction():
"""This is an experimental endpoint to simultaneously verify data
statistics and extraction for training, test, and holdout datasets.
With this, the other three verification methods will no longer be
necessary.
"""
path = functions.get_path_from_query_string(request)
if request.method == 'POST':
rqtasks.extraction_data_statistics(path)
with functions.DBContextManager(path) as session:
extraction = session.query(models.Extraction).first()
return jsonify(extraction.data_statistics)
|
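A hedged client-side sketch of exercising this endpoint; the base URL, route, and query-parameter name are assumptions rather than values taken from the source:

import requests  # assumed to be available in the client environment

# Hypothetical call: POST re-runs extraction statistics, the response body is the statistics dict.
resp = requests.post(
    'http://localhost:1994/ensemble/extraction/verification',  # assumed host, port, and route
    params={'path': '/path/to/notebook.xcnb'},                 # assumed query-parameter name
)
print(resp.json())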
[
"def",
"verify_full_extraction",
"(",
")",
":",
"path",
"=",
"functions",
".",
"get_path_from_query_string",
"(",
"request",
")",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"rqtasks",
".",
"extraction_data_statistics",
"(",
"path",
")",
"with",
"functions",
".",
"DBContextManager",
"(",
"path",
")",
"as",
"session",
":",
"extraction",
"=",
"session",
".",
"query",
"(",
"models",
".",
"Extraction",
")",
".",
"first",
"(",
")",
"return",
"jsonify",
"(",
"extraction",
".",
"data_statistics",
")"
] |
This is an experimental endpoint to simultaneously verify data
statistics and extraction for training, test, and holdout datasets.
With this, the other three verification methods will no longer be
necessary.
|
[
"This",
"is",
"an",
"experimental",
"endpoint",
"to",
"simultaneously",
"verify",
"data",
"statistics",
"and",
"extraction",
"for",
"training",
"test",
"and",
"holdout",
"datasets",
".",
"With",
"this",
"the",
"other",
"three",
"verification",
"methods",
"will",
"no",
"longer",
"be",
"necessary",
"."
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/views.py#L156-L169
|
6,668
|
reiinakano/xcessiv
|
xcessiv/views.py
|
create_base_learner
|
def create_base_learner(id):
"""This creates a single base learner from a base learner origin and queues it up"""
path = functions.get_path_from_query_string(request)
with functions.DBContextManager(path) as session:
base_learner_origin = session.query(models.BaseLearnerOrigin).filter_by(id=id).first()
if base_learner_origin is None:
raise exceptions.UserError('Base learner origin {} not found'.format(id), 404)
if not base_learner_origin.final:
raise exceptions.UserError('Base learner origin {} is not final'.format(id))
req_body = request.get_json()
# Retrieve full hyperparameters
est = base_learner_origin.return_estimator()
hyperparameters = functions.import_object_from_string_code(req_body['source'],
'params')
est.set_params(**hyperparameters)
hyperparameters = functions.make_serializable(est.get_params())
base_learners = session.query(models.BaseLearner).\
filter_by(base_learner_origin_id=id,
hyperparameters=hyperparameters).all()
if base_learners:
raise exceptions.UserError('Base learner exists with given hyperparameters')
base_learner = models.BaseLearner(hyperparameters,
'queued',
base_learner_origin)
if 'single_searches' not in base_learner_origin.description:
base_learner_origin.description['single_searches'] = []
base_learner_origin.description['single_searches'] += ([req_body['source']])
session.add(base_learner)
session.add(base_learner_origin)
session.commit()
with Connection(get_redis_connection()):
rqtasks.generate_meta_features.delay(path, base_learner.id)
return jsonify(base_learner.serialize)
|
python
|
def create_base_learner(id):
"""This creates a single base learner from a base learner origin and queues it up"""
path = functions.get_path_from_query_string(request)
with functions.DBContextManager(path) as session:
base_learner_origin = session.query(models.BaseLearnerOrigin).filter_by(id=id).first()
if base_learner_origin is None:
raise exceptions.UserError('Base learner origin {} not found'.format(id), 404)
if not base_learner_origin.final:
raise exceptions.UserError('Base learner origin {} is not final'.format(id))
req_body = request.get_json()
# Retrieve full hyperparameters
est = base_learner_origin.return_estimator()
hyperparameters = functions.import_object_from_string_code(req_body['source'],
'params')
est.set_params(**hyperparameters)
hyperparameters = functions.make_serializable(est.get_params())
base_learners = session.query(models.BaseLearner).\
filter_by(base_learner_origin_id=id,
hyperparameters=hyperparameters).all()
if base_learners:
raise exceptions.UserError('Base learner exists with given hyperparameters')
base_learner = models.BaseLearner(hyperparameters,
'queued',
base_learner_origin)
if 'single_searches' not in base_learner_origin.description:
base_learner_origin.description['single_searches'] = []
base_learner_origin.description['single_searches'] += ([req_body['source']])
session.add(base_learner)
session.add(base_learner_origin)
session.commit()
with Connection(get_redis_connection()):
rqtasks.generate_meta_features.delay(path, base_learner.id)
return jsonify(base_learner.serialize)
|
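The endpoint normalizes user-supplied hyperparameters by applying them to a fresh estimator and reading back the full parameter dict; a standalone sketch of that step, with exec standing in for the project's import helper and an illustrative estimator:

from sklearn.ensemble import RandomForestClassifier

source = "params = {'n_estimators': 50, 'max_depth': 3}"  # illustrative request-body source

namespace = {}
exec(source, namespace)          # stand-in for functions.import_object_from_string_code(source, 'params')
params = namespace['params']

est = RandomForestClassifier()
est.set_params(**params)
hyperparameters = est.get_params()   # full, normalized hyperparameter dict used for deduplication
print(hyperparameters['n_estimators'], hyperparameters['max_depth'])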
[
"def",
"create_base_learner",
"(",
"id",
")",
":",
"path",
"=",
"functions",
".",
"get_path_from_query_string",
"(",
"request",
")",
"with",
"functions",
".",
"DBContextManager",
"(",
"path",
")",
"as",
"session",
":",
"base_learner_origin",
"=",
"session",
".",
"query",
"(",
"models",
".",
"BaseLearnerOrigin",
")",
".",
"filter_by",
"(",
"id",
"=",
"id",
")",
".",
"first",
"(",
")",
"if",
"base_learner_origin",
"is",
"None",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Base learner origin {} not found'",
".",
"format",
"(",
"id",
")",
",",
"404",
")",
"if",
"not",
"base_learner_origin",
".",
"final",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Base learner origin {} is not final'",
".",
"format",
"(",
"id",
")",
")",
"req_body",
"=",
"request",
".",
"get_json",
"(",
")",
"# Retrieve full hyperparameters",
"est",
"=",
"base_learner_origin",
".",
"return_estimator",
"(",
")",
"hyperparameters",
"=",
"functions",
".",
"import_object_from_string_code",
"(",
"req_body",
"[",
"'source'",
"]",
",",
"'params'",
")",
"est",
".",
"set_params",
"(",
"*",
"*",
"hyperparameters",
")",
"hyperparameters",
"=",
"functions",
".",
"make_serializable",
"(",
"est",
".",
"get_params",
"(",
")",
")",
"base_learners",
"=",
"session",
".",
"query",
"(",
"models",
".",
"BaseLearner",
")",
".",
"filter_by",
"(",
"base_learner_origin_id",
"=",
"id",
",",
"hyperparameters",
"=",
"hyperparameters",
")",
".",
"all",
"(",
")",
"if",
"base_learners",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Base learner exists with given hyperparameters'",
")",
"base_learner",
"=",
"models",
".",
"BaseLearner",
"(",
"hyperparameters",
",",
"'queued'",
",",
"base_learner_origin",
")",
"if",
"'single_searches'",
"not",
"in",
"base_learner_origin",
".",
"description",
":",
"base_learner_origin",
".",
"description",
"[",
"'single_searches'",
"]",
"=",
"[",
"]",
"base_learner_origin",
".",
"description",
"[",
"'single_searches'",
"]",
"+=",
"(",
"[",
"req_body",
"[",
"'source'",
"]",
"]",
")",
"session",
".",
"add",
"(",
"base_learner",
")",
"session",
".",
"add",
"(",
"base_learner_origin",
")",
"session",
".",
"commit",
"(",
")",
"with",
"Connection",
"(",
"get_redis_connection",
"(",
")",
")",
":",
"rqtasks",
".",
"generate_meta_features",
".",
"delay",
"(",
"path",
",",
"base_learner",
".",
"id",
")",
"return",
"jsonify",
"(",
"base_learner",
".",
"serialize",
")"
] |
This creates a single base learner from a base learner origin and queues it up
|
[
"This",
"creates",
"a",
"single",
"base",
"learner",
"from",
"a",
"base",
"learner",
"origin",
"and",
"queues",
"it",
"up"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/views.py#L306-L348
|
6,669
|
reiinakano/xcessiv
|
xcessiv/views.py
|
search_base_learner
|
def search_base_learner(id):
"""Creates a set of base learners from base learner origin using grid search
and queues them up
"""
path = functions.get_path_from_query_string(request)
req_body = request.get_json()
if req_body['method'] == 'grid':
param_grid = functions.import_object_from_string_code(
req_body['source'],
'param_grid'
)
iterator = ParameterGrid(param_grid)
elif req_body['method'] == 'random':
param_distributions = functions.import_object_from_string_code(
req_body['source'],
'param_distributions'
)
iterator = ParameterSampler(param_distributions, n_iter=req_body['n_iter'])
else:
raise exceptions.UserError('{} not a valid search method'.format(req_body['method']))
with functions.DBContextManager(path) as session:
base_learner_origin = session.query(models.BaseLearnerOrigin).filter_by(id=id).first()
if base_learner_origin is None:
raise exceptions.UserError('Base learner origin {} not found'.format(id), 404)
if not base_learner_origin.final:
raise exceptions.UserError('Base learner origin {} is not final'.format(id))
learners = []
for params in iterator:
est = base_learner_origin.return_estimator()
try:
est.set_params(**params)
except Exception as e:
print(repr(e))
continue
hyperparameters = functions.make_serializable(est.get_params())
base_learners = session.query(models.BaseLearner).\
filter_by(base_learner_origin_id=id,
hyperparameters=hyperparameters).all()
if base_learners: # already exists
continue
base_learner = models.BaseLearner(hyperparameters,
'queued',
base_learner_origin)
session.add(base_learner)
session.commit()
with Connection(get_redis_connection()):
rqtasks.generate_meta_features.delay(path, base_learner.id)
learners.append(base_learner)
if not learners:
raise exceptions.UserError('Created 0 new base learners')
if req_body['method'] == 'grid':
if 'grid_searches' not in base_learner_origin.description:
base_learner_origin.description['grid_searches'] = []
base_learner_origin.description['grid_searches'] += ([req_body['source']])
elif req_body['method'] == 'random':
if 'random_searches' not in base_learner_origin.description:
base_learner_origin.description['random_searches'] = []
base_learner_origin.description['random_searches'] += ([req_body['source']])
session.add(base_learner_origin)
session.commit()
return jsonify(list(map(lambda x: x.serialize, learners)))
|
python
|
def search_base_learner(id):
"""Creates a set of base learners from base learner origin using grid search
and queues them up
"""
path = functions.get_path_from_query_string(request)
req_body = request.get_json()
if req_body['method'] == 'grid':
param_grid = functions.import_object_from_string_code(
req_body['source'],
'param_grid'
)
iterator = ParameterGrid(param_grid)
elif req_body['method'] == 'random':
param_distributions = functions.import_object_from_string_code(
req_body['source'],
'param_distributions'
)
iterator = ParameterSampler(param_distributions, n_iter=req_body['n_iter'])
else:
raise exceptions.UserError('{} not a valid search method'.format(req_body['method']))
with functions.DBContextManager(path) as session:
base_learner_origin = session.query(models.BaseLearnerOrigin).filter_by(id=id).first()
if base_learner_origin is None:
raise exceptions.UserError('Base learner origin {} not found'.format(id), 404)
if not base_learner_origin.final:
raise exceptions.UserError('Base learner origin {} is not final'.format(id))
learners = []
for params in iterator:
est = base_learner_origin.return_estimator()
try:
est.set_params(**params)
except Exception as e:
print(repr(e))
continue
hyperparameters = functions.make_serializable(est.get_params())
base_learners = session.query(models.BaseLearner).\
filter_by(base_learner_origin_id=id,
hyperparameters=hyperparameters).all()
if base_learners: # already exists
continue
base_learner = models.BaseLearner(hyperparameters,
'queued',
base_learner_origin)
session.add(base_learner)
session.commit()
with Connection(get_redis_connection()):
rqtasks.generate_meta_features.delay(path, base_learner.id)
learners.append(base_learner)
if not learners:
raise exceptions.UserError('Created 0 new base learners')
if req_body['method'] == 'grid':
if 'grid_searches' not in base_learner_origin.description:
base_learner_origin.description['grid_searches'] = []
base_learner_origin.description['grid_searches'] += ([req_body['source']])
elif req_body['method'] == 'random':
if 'random_searches' not in base_learner_origin.description:
base_learner_origin.description['random_searches'] = []
base_learner_origin.description['random_searches'] += ([req_body['source']])
session.add(base_learner_origin)
session.commit()
return jsonify(list(map(lambda x: x.serialize, learners)))
|
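The two search modes lean directly on scikit-learn's ParameterGrid and ParameterSampler; a short illustration of what each iterator yields (parameter values are illustrative):

from sklearn.model_selection import ParameterGrid, ParameterSampler

param_grid = {'n_estimators': [10, 100], 'max_depth': [3, None]}
for params in ParameterGrid(param_grid):            # exhaustive: 4 combinations
    print(params)

param_distributions = {'n_estimators': [10, 50, 100, 200]}
for params in ParameterSampler(param_distributions, n_iter=2, random_state=0):  # sampled: 2 combinations
    print(params)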
[
"def",
"search_base_learner",
"(",
"id",
")",
":",
"path",
"=",
"functions",
".",
"get_path_from_query_string",
"(",
"request",
")",
"req_body",
"=",
"request",
".",
"get_json",
"(",
")",
"if",
"req_body",
"[",
"'method'",
"]",
"==",
"'grid'",
":",
"param_grid",
"=",
"functions",
".",
"import_object_from_string_code",
"(",
"req_body",
"[",
"'source'",
"]",
",",
"'param_grid'",
")",
"iterator",
"=",
"ParameterGrid",
"(",
"param_grid",
")",
"elif",
"req_body",
"[",
"'method'",
"]",
"==",
"'random'",
":",
"param_distributions",
"=",
"functions",
".",
"import_object_from_string_code",
"(",
"req_body",
"[",
"'source'",
"]",
",",
"'param_distributions'",
")",
"iterator",
"=",
"ParameterSampler",
"(",
"param_distributions",
",",
"n_iter",
"=",
"req_body",
"[",
"'n_iter'",
"]",
")",
"else",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'{} not a valid search method'",
".",
"format",
"(",
"req_body",
"[",
"'method'",
"]",
")",
")",
"with",
"functions",
".",
"DBContextManager",
"(",
"path",
")",
"as",
"session",
":",
"base_learner_origin",
"=",
"session",
".",
"query",
"(",
"models",
".",
"BaseLearnerOrigin",
")",
".",
"filter_by",
"(",
"id",
"=",
"id",
")",
".",
"first",
"(",
")",
"if",
"base_learner_origin",
"is",
"None",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Base learner origin {} not found'",
".",
"format",
"(",
"id",
")",
",",
"404",
")",
"if",
"not",
"base_learner_origin",
".",
"final",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Base learner origin {} is not final'",
".",
"format",
"(",
"id",
")",
")",
"learners",
"=",
"[",
"]",
"for",
"params",
"in",
"iterator",
":",
"est",
"=",
"base_learner_origin",
".",
"return_estimator",
"(",
")",
"try",
":",
"est",
".",
"set_params",
"(",
"*",
"*",
"params",
")",
"except",
"Exception",
"as",
"e",
":",
"print",
"(",
"repr",
"(",
"e",
")",
")",
"continue",
"hyperparameters",
"=",
"functions",
".",
"make_serializable",
"(",
"est",
".",
"get_params",
"(",
")",
")",
"base_learners",
"=",
"session",
".",
"query",
"(",
"models",
".",
"BaseLearner",
")",
".",
"filter_by",
"(",
"base_learner_origin_id",
"=",
"id",
",",
"hyperparameters",
"=",
"hyperparameters",
")",
".",
"all",
"(",
")",
"if",
"base_learners",
":",
"# already exists",
"continue",
"base_learner",
"=",
"models",
".",
"BaseLearner",
"(",
"hyperparameters",
",",
"'queued'",
",",
"base_learner_origin",
")",
"session",
".",
"add",
"(",
"base_learner",
")",
"session",
".",
"commit",
"(",
")",
"with",
"Connection",
"(",
"get_redis_connection",
"(",
")",
")",
":",
"rqtasks",
".",
"generate_meta_features",
".",
"delay",
"(",
"path",
",",
"base_learner",
".",
"id",
")",
"learners",
".",
"append",
"(",
"base_learner",
")",
"if",
"not",
"learners",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Created 0 new base learners'",
")",
"if",
"req_body",
"[",
"'method'",
"]",
"==",
"'grid'",
":",
"if",
"'grid_searches'",
"not",
"in",
"base_learner_origin",
".",
"description",
":",
"base_learner_origin",
".",
"description",
"[",
"'grid_searches'",
"]",
"=",
"[",
"]",
"base_learner_origin",
".",
"description",
"[",
"'grid_searches'",
"]",
"+=",
"(",
"[",
"req_body",
"[",
"'source'",
"]",
"]",
")",
"elif",
"req_body",
"[",
"'method'",
"]",
"==",
"'random'",
":",
"if",
"'random_searches'",
"not",
"in",
"base_learner_origin",
".",
"description",
":",
"base_learner_origin",
".",
"description",
"[",
"'random_searches'",
"]",
"=",
"[",
"]",
"base_learner_origin",
".",
"description",
"[",
"'random_searches'",
"]",
"+=",
"(",
"[",
"req_body",
"[",
"'source'",
"]",
"]",
")",
"session",
".",
"add",
"(",
"base_learner_origin",
")",
"session",
".",
"commit",
"(",
")",
"return",
"jsonify",
"(",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"serialize",
",",
"learners",
")",
")",
")"
] |
Creates a set of base learners from base learner origin using grid search
and queues them up
|
[
"Creates",
"a",
"set",
"of",
"base",
"learners",
"from",
"base",
"learner",
"origin",
"using",
"grid",
"search",
"and",
"queues",
"them",
"up"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/views.py#L352-L424
|
6,670
|
reiinakano/xcessiv
|
xcessiv/views.py
|
get_automated_runs
|
def get_automated_runs():
"""Return all automated runs"""
path = functions.get_path_from_query_string(request)
if request.method == 'GET':
with functions.DBContextManager(path) as session:
automated_runs = session.query(models.AutomatedRun).all()
return jsonify(list(map(lambda x: x.serialize, automated_runs)))
if request.method == 'POST':
req_body = request.get_json()
with functions.DBContextManager(path) as session:
base_learner_origin = None
if req_body['category'] == 'bayes' or req_body['category'] == 'greedy_ensemble_search':
base_learner_origin = session.query(models.BaseLearnerOrigin).\
filter_by(id=req_body['base_learner_origin_id']).first()
if base_learner_origin is None:
raise exceptions.UserError('Base learner origin {} not found'.format(
req_body['base_learner_origin_id']
), 404)
if not base_learner_origin.final:
raise exceptions.UserError('Base learner origin {} is not final'.format(
req_body['base_learner_origin_id']
))
elif req_body['category'] == 'tpot':
pass
else:
raise exceptions.UserError('Automated run category'
' {} not recognized'.format(req_body['category']))
# Check for any syntax errors
module = functions.import_string_code_as_module(req_body['source'])
del module
automated_run = models.AutomatedRun(req_body['source'],
'queued',
req_body['category'],
base_learner_origin)
session.add(automated_run)
session.commit()
with Connection(get_redis_connection()):
rqtasks.start_automated_run.delay(path, automated_run.id)
return jsonify(automated_run.serialize)
|
python
|
def get_automated_runs():
"""Return all automated runs"""
path = functions.get_path_from_query_string(request)
if request.method == 'GET':
with functions.DBContextManager(path) as session:
automated_runs = session.query(models.AutomatedRun).all()
return jsonify(list(map(lambda x: x.serialize, automated_runs)))
if request.method == 'POST':
req_body = request.get_json()
with functions.DBContextManager(path) as session:
base_learner_origin = None
if req_body['category'] == 'bayes' or req_body['category'] == 'greedy_ensemble_search':
base_learner_origin = session.query(models.BaseLearnerOrigin).\
filter_by(id=req_body['base_learner_origin_id']).first()
if base_learner_origin is None:
raise exceptions.UserError('Base learner origin {} not found'.format(
req_body['base_learner_origin_id']
), 404)
if not base_learner_origin.final:
raise exceptions.UserError('Base learner origin {} is not final'.format(
req_body['base_learner_origin_id']
))
elif req_body['category'] == 'tpot':
pass
else:
raise exceptions.UserError('Automated run category'
' {} not recognized'.format(req_body['category']))
# Check for any syntax errors
module = functions.import_string_code_as_module(req_body['source'])
del module
automated_run = models.AutomatedRun(req_body['source'],
'queued',
req_body['category'],
base_learner_origin)
session.add(automated_run)
session.commit()
with Connection(get_redis_connection()):
rqtasks.start_automated_run.delay(path, automated_run.id)
return jsonify(automated_run.serialize)
|
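Before queueing, the submitted source is imported once purely to surface syntax errors; a standalone sketch of that check using only the standard library in place of the project helper:

import types

def check_source_compiles(source):
    # Stand-in for functions.import_string_code_as_module: raises SyntaxError on malformed input.
    module = types.ModuleType('submitted_run')
    exec(compile(source, '<automated_run>', 'exec'), module.__dict__)
    del module

check_source_compiles("x = 1\n")             # accepted
try:
    check_source_compiles("def broken(:\n")  # malformed on purpose
except SyntaxError as exc:
    print('rejected:', exc)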
[
"def",
"get_automated_runs",
"(",
")",
":",
"path",
"=",
"functions",
".",
"get_path_from_query_string",
"(",
"request",
")",
"if",
"request",
".",
"method",
"==",
"'GET'",
":",
"with",
"functions",
".",
"DBContextManager",
"(",
"path",
")",
"as",
"session",
":",
"automated_runs",
"=",
"session",
".",
"query",
"(",
"models",
".",
"AutomatedRun",
")",
".",
"all",
"(",
")",
"return",
"jsonify",
"(",
"list",
"(",
"map",
"(",
"lambda",
"x",
":",
"x",
".",
"serialize",
",",
"automated_runs",
")",
")",
")",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"req_body",
"=",
"request",
".",
"get_json",
"(",
")",
"with",
"functions",
".",
"DBContextManager",
"(",
"path",
")",
"as",
"session",
":",
"base_learner_origin",
"=",
"None",
"if",
"req_body",
"[",
"'category'",
"]",
"==",
"'bayes'",
"or",
"req_body",
"[",
"'category'",
"]",
"==",
"'greedy_ensemble_search'",
":",
"base_learner_origin",
"=",
"session",
".",
"query",
"(",
"models",
".",
"BaseLearnerOrigin",
")",
".",
"filter_by",
"(",
"id",
"=",
"req_body",
"[",
"'base_learner_origin_id'",
"]",
")",
".",
"first",
"(",
")",
"if",
"base_learner_origin",
"is",
"None",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Base learner origin {} not found'",
".",
"format",
"(",
"req_body",
"[",
"'base_learner_origin_id'",
"]",
")",
",",
"404",
")",
"if",
"not",
"base_learner_origin",
".",
"final",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Base learner origin {} is not final'",
".",
"format",
"(",
"req_body",
"[",
"'base_learner_origin_id'",
"]",
")",
")",
"elif",
"req_body",
"[",
"'category'",
"]",
"==",
"'tpot'",
":",
"pass",
"else",
":",
"raise",
"exceptions",
".",
"UserError",
"(",
"'Automated run category'",
"' {} not recognized'",
".",
"format",
"(",
"req_body",
"[",
"'category'",
"]",
")",
")",
"# Check for any syntax errors",
"module",
"=",
"functions",
".",
"import_string_code_as_module",
"(",
"req_body",
"[",
"'source'",
"]",
")",
"del",
"module",
"automated_run",
"=",
"models",
".",
"AutomatedRun",
"(",
"req_body",
"[",
"'source'",
"]",
",",
"'queued'",
",",
"req_body",
"[",
"'category'",
"]",
",",
"base_learner_origin",
")",
"session",
".",
"add",
"(",
"automated_run",
")",
"session",
".",
"commit",
"(",
")",
"with",
"Connection",
"(",
"get_redis_connection",
"(",
")",
")",
":",
"rqtasks",
".",
"start_automated_run",
".",
"delay",
"(",
"path",
",",
"automated_run",
".",
"id",
")",
"return",
"jsonify",
"(",
"automated_run",
".",
"serialize",
")"
] |
Return all automated runs
|
[
"Return",
"all",
"automated",
"runs"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/views.py#L428-L476
|
6,671
|
reiinakano/xcessiv
|
xcessiv/stacker.py
|
XcessivStackedEnsemble._process_using_meta_feature_generator
|
def _process_using_meta_feature_generator(self, X, meta_feature_generator):
"""Process using secondary learner meta-feature generator
Since secondary learner meta-feature generator can be anything e.g. predict, predict_proba,
this internal method gives the ability to use any string. Just make sure secondary learner
has the method.
Args:
X (array-like): Features array
meta_feature_generator (str, unicode): Method for use by secondary learner
"""
all_learner_meta_features = []
for idx, base_learner in enumerate(self.base_learners):
single_learner_meta_features = getattr(base_learner,
self.meta_feature_generators[idx])(X)
if len(single_learner_meta_features.shape) == 1:
single_learner_meta_features = single_learner_meta_features.reshape(-1, 1)
all_learner_meta_features.append(single_learner_meta_features)
all_learner_meta_features = np.concatenate(all_learner_meta_features, axis=1)
out = getattr(self.secondary_learner, meta_feature_generator)(all_learner_meta_features)
return out
|
python
|
def _process_using_meta_feature_generator(self, X, meta_feature_generator):
"""Process using secondary learner meta-feature generator
Since secondary learner meta-feature generator can be anything e.g. predict, predict_proba,
this internal method gives the ability to use any string. Just make sure secondary learner
has the method.
Args:
X (array-like): Features array
meta_feature_generator (str, unicode): Method for use by secondary learner
"""
all_learner_meta_features = []
for idx, base_learner in enumerate(self.base_learners):
single_learner_meta_features = getattr(base_learner,
self.meta_feature_generators[idx])(X)
if len(single_learner_meta_features.shape) == 1:
single_learner_meta_features = single_learner_meta_features.reshape(-1, 1)
all_learner_meta_features.append(single_learner_meta_features)
all_learner_meta_features = np.concatenate(all_learner_meta_features, axis=1)
out = getattr(self.secondary_learner, meta_feature_generator)(all_learner_meta_features)
return out
|
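The stacking step simply column-stacks each base learner's meta-features, promoting 1-D outputs to single columns first; a small numpy sketch of the same reshape-and-concatenate logic with illustrative arrays:

import numpy as np

pred_a = np.array([0.1, 0.9, 0.4])                         # 1-D, e.g. output of predict
pred_b = np.array([[0.2, 0.8], [0.7, 0.3], [0.5, 0.5]])    # 2-D, e.g. output of predict_proba

columns = []
for p in (pred_a, pred_b):
    if len(p.shape) == 1:
        p = p.reshape(-1, 1)                               # promote to a single column
    columns.append(p)

meta_features = np.concatenate(columns, axis=1)
print(meta_features.shape)                                 # (3, 3)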
[
"def",
"_process_using_meta_feature_generator",
"(",
"self",
",",
"X",
",",
"meta_feature_generator",
")",
":",
"all_learner_meta_features",
"=",
"[",
"]",
"for",
"idx",
",",
"base_learner",
"in",
"enumerate",
"(",
"self",
".",
"base_learners",
")",
":",
"single_learner_meta_features",
"=",
"getattr",
"(",
"base_learner",
",",
"self",
".",
"meta_feature_generators",
"[",
"idx",
"]",
")",
"(",
"X",
")",
"if",
"len",
"(",
"single_learner_meta_features",
".",
"shape",
")",
"==",
"1",
":",
"single_learner_meta_features",
"=",
"single_learner_meta_features",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
"all_learner_meta_features",
".",
"append",
"(",
"single_learner_meta_features",
")",
"all_learner_meta_features",
"=",
"np",
".",
"concatenate",
"(",
"all_learner_meta_features",
",",
"axis",
"=",
"1",
")",
"out",
"=",
"getattr",
"(",
"self",
".",
"secondary_learner",
",",
"meta_feature_generator",
")",
"(",
"all_learner_meta_features",
")",
"return",
"out"
] |
Process using secondary learner meta-feature generator
Since secondary learner meta-feature generator can be anything e.g. predict, predict_proba,
this internal method gives the ability to use any string. Just make sure secondary learner
has the method.
Args:
X (array-like): Features array
meta_feature_generator (str, unicode): Method for use by secondary learner
|
[
"Process",
"using",
"secondary",
"learner",
"meta",
"-",
"feature",
"generator"
] |
a48dff7d370c84eb5c243bde87164c1f5fd096d5
|
https://github.com/reiinakano/xcessiv/blob/a48dff7d370c84eb5c243bde87164c1f5fd096d5/xcessiv/stacker.py#L77-L103
|
6,672
|
madedotcom/photon-pump
|
photonpump/messages.py
|
NewEvent
|
def NewEvent(
type: str, id: UUID = None, data: JsonDict = None, metadata: JsonDict = None
) -> NewEventData:
"""Build the data structure for a new event.
Args:
type: An event type.
id: The uuid identifier for the event.
data: A dict containing data for the event. These data
must be json serializable.
metadata: A dict containing metadata about the event.
These must be json serializable.
"""
return NewEventData(id or uuid4(), type, data, metadata)
|
python
|
def NewEvent(
type: str, id: UUID = None, data: JsonDict = None, metadata: JsonDict = None
) -> NewEventData:
"""Build the data structure for a new event.
Args:
type: An event type.
id: The uuid identifier for the event.
data: A dict containing data for the event. These data
must be json serializable.
metadata: A dict containing metadata about the event.
These must be json serializable.
"""
return NewEventData(id or uuid4(), type, data, metadata)
|
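A hedged usage sketch, assuming photonpump is installed and NewEvent is importable from photonpump.messages as defined in this module; the event type and payload are illustrative:

from uuid import uuid4
from photonpump.messages import NewEvent

event = NewEvent(
    'order_created',                            # illustrative event type
    id=uuid4(),                                 # optional; if omitted, NewEvent generates one
    data={'order_id': 123, 'total': 9.99},      # must be JSON serializable
    metadata={'source': 'example'},             # must be JSON serializable
)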
[
"def",
"NewEvent",
"(",
"type",
":",
"str",
",",
"id",
":",
"UUID",
"=",
"None",
",",
"data",
":",
"JsonDict",
"=",
"None",
",",
"metadata",
":",
"JsonDict",
"=",
"None",
")",
"->",
"NewEventData",
":",
"return",
"NewEventData",
"(",
"id",
"or",
"uuid4",
"(",
")",
",",
"type",
",",
"data",
",",
"metadata",
")"
] |
Build the data structure for a new event.
Args:
type: An event type.
id: The uuid identifier for the event.
data: A dict containing data for the event. These data
must be json serializable.
metadata: A dict containing metadata about the event.
These must be json serializable.
|
[
"Build",
"the",
"data",
"structure",
"for",
"a",
"new",
"event",
"."
] |
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
|
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/messages.py#L439-L453
|
6,673
|
madedotcom/photon-pump
|
photonpump/messages.py
|
Credential.from_bytes
|
def from_bytes(cls, data):
"""
I am so sorry.
"""
len_username = int.from_bytes(data[0:2], byteorder="big")
offset_username = 2 + len_username
username = data[2:offset_username].decode("UTF-8")
offset_password = 2 + offset_username
len_password = int.from_bytes(
data[offset_username:offset_password], byteorder="big"
)
pass_begin = offset_password
pass_end = offset_password + len_password
password = data[pass_begin:pass_end].decode("UTF-8")
return cls(username, password)
|
python
|
def from_bytes(cls, data):
"""
I am so sorry.
"""
len_username = int.from_bytes(data[0:2], byteorder="big")
offset_username = 2 + len_username
username = data[2:offset_username].decode("UTF-8")
offset_password = 2 + offset_username
len_password = int.from_bytes(
data[offset_username:offset_password], byteorder="big"
)
pass_begin = offset_password
pass_end = offset_password + len_password
password = data[pass_begin:pass_end].decode("UTF-8")
return cls(username, password)
|
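The parser assumes a simple framing: a 2-byte big-endian username length, the username bytes, a 2-byte password length, then the password bytes. A self-contained round-trip sketch of that layout (the encoder is illustrative, not the library's own serializer):

def pack_credential(username, password):
    # Illustrative encoder matching the layout that from_bytes parses.
    u = username.encode('UTF-8')
    p = password.encode('UTF-8')
    return len(u).to_bytes(2, byteorder='big') + u + len(p).to_bytes(2, byteorder='big') + p

data = pack_credential('admin', 'changeit')
len_username = int.from_bytes(data[0:2], byteorder='big')
offset_username = 2 + len_username
offset_password = 2 + offset_username
len_password = int.from_bytes(data[offset_username:offset_password], byteorder='big')
print(data[2:offset_username].decode('UTF-8'),
      data[offset_password:offset_password + len_password].decode('UTF-8'))   # admin changeit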
[
"def",
"from_bytes",
"(",
"cls",
",",
"data",
")",
":",
"len_username",
"=",
"int",
".",
"from_bytes",
"(",
"data",
"[",
"0",
":",
"2",
"]",
",",
"byteorder",
"=",
"\"big\"",
")",
"offset_username",
"=",
"2",
"+",
"len_username",
"username",
"=",
"data",
"[",
"2",
":",
"offset_username",
"]",
".",
"decode",
"(",
"\"UTF-8\"",
")",
"offset_password",
"=",
"2",
"+",
"offset_username",
"len_password",
"=",
"int",
".",
"from_bytes",
"(",
"data",
"[",
"offset_username",
":",
"offset_password",
"]",
",",
"byteorder",
"=",
"\"big\"",
")",
"pass_begin",
"=",
"offset_password",
"pass_end",
"=",
"offset_password",
"+",
"len_password",
"password",
"=",
"data",
"[",
"pass_begin",
":",
"pass_end",
"]",
".",
"decode",
"(",
"\"UTF-8\"",
")",
"return",
"cls",
"(",
"username",
",",
"password",
")"
] |
I am so sorry.
|
[
"I",
"am",
"so",
"sorry",
"."
] |
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
|
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/messages.py#L155-L170
|
6,674
|
madedotcom/photon-pump
|
photonpump/connection.py
|
connect
|
def connect(
host="localhost",
port=1113,
discovery_host=None,
discovery_port=2113,
username=None,
password=None,
loop=None,
name=None,
selector=select_random,
) -> Client:
""" Create a new client.
Examples:
Since the Client is an async context manager, we can use it in a
with block for automatic connect/disconnect semantics.
>>> async with connect(host='127.0.0.1', port=1113) as c:
>>> await c.ping()
Or we can call connect at a more convenient moment
>>> c = connect()
>>> await c.connect()
>>> await c.ping()
>>> await c.close()
For cluster discovery cases, we can provide a discovery host and
port. The host may be an IP or DNS entry. If you provide a DNS
entry, discovery will choose randomly from the registered IP
addresses for the hostname.
>>> async with connect(discovery_host="eventstore.test") as c:
>>> await c.ping()
The discovery host returns gossip data about the cluster. We use the
    gossip to select a node at random from the available cluster members.
If you're using
:meth:`persistent subscriptions <photonpump.connection.Client.create_subscription>`
you will always want to connect to the master node of the cluster.
The selector parameter is a function that chooses an available node from
the gossip result. To select the master node, use the
:func:`photonpump.discovery.prefer_master` function. This function will return
the master node if there is a live master, and a random replica otherwise.
All requests to the server can be made with the require_master flag which
will raise an error if the current node is not a master.
>>> async with connect(
>>> discovery_host="eventstore.test",
>>> selector=discovery.prefer_master,
>>> ) as c:
>>> await c.ping(require_master=True)
Conversely, you might want to avoid connecting to the master node for reasons
of scalability. For this you can use the
:func:`photonpump.discovery.prefer_replica` function.
>>> async with connect(
>>> discovery_host="eventstore.test",
>>> selector=discovery.prefer_replica,
>>> ) as c:
>>> await c.ping()
For some operations, you may need to authenticate your requests by
providing a username and password to the client.
>>> async with connect(username='admin', password='changeit') as c:
>>> await c.ping()
Ordinarily you will create a single Client per application, but for
advanced scenarios you might want multiple connections. In this
situation, you can name each connection in order to get better logging.
>>> async with connect(name="event-reader"):
>>> await c.ping()
>>> async with connect(name="event-writer"):
>>> await c.ping()
Args:
host: The IP or DNS entry to connect with, defaults to 'localhost'.
port: The port to connect with, defaults to 1113.
discovery_host: The IP or DNS entry to use for cluster discovery.
discovery_port: The port to use for cluster discovery, defaults to 2113.
username: The username to use when communicating with eventstore.
password: The password to use when communicating with eventstore.
        loop: An Asyncio event loop.
        selector: An optional function that selects one element from a list of
            :class:`photonpump.discovery.DiscoveredNode` elements.
"""
discovery = get_discoverer(host, port, discovery_host, discovery_port, selector)
dispatcher = MessageDispatcher(name=name, loop=loop)
connector = Connector(discovery, dispatcher, name=name)
credential = msg.Credential(username, password) if username and password else None
return Client(connector, dispatcher, credential=credential)
|
python
|
def connect(
host="localhost",
port=1113,
discovery_host=None,
discovery_port=2113,
username=None,
password=None,
loop=None,
name=None,
selector=select_random,
) -> Client:
""" Create a new client.
Examples:
Since the Client is an async context manager, we can use it in a
with block for automatic connect/disconnect semantics.
>>> async with connect(host='127.0.0.1', port=1113) as c:
>>> await c.ping()
Or we can call connect at a more convenient moment
>>> c = connect()
>>> await c.connect()
>>> await c.ping()
>>> await c.close()
For cluster discovery cases, we can provide a discovery host and
port. The host may be an IP or DNS entry. If you provide a DNS
entry, discovery will choose randomly from the registered IP
addresses for the hostname.
>>> async with connect(discovery_host="eventstore.test") as c:
>>> await c.ping()
The discovery host returns gossip data about the cluster. We use the
gossip to select a node at random from the available cluster members.
If you're using
:meth:`persistent subscriptions <photonpump.connection.Client.create_subscription>`
you will always want to connect to the master node of the cluster.
The selector parameter is a function that chooses an available node from
the gossip result. To select the master node, use the
:func:`photonpump.discovery.prefer_master` function. This function will return
the master node if there is a live master, and a random replica otherwise.
All requests to the server can be made with the require_master flag which
will raise an error if the current node is not a master.
>>> async with connect(
>>> discovery_host="eventstore.test",
>>> selector=discovery.prefer_master,
>>> ) as c:
>>> await c.ping(require_master=True)
Conversely, you might want to avoid connecting to the master node for reasons
of scalability. For this you can use the
:func:`photonpump.discovery.prefer_replica` function.
>>> async with connect(
>>> discovery_host="eventstore.test",
>>> selector=discovery.prefer_replica,
>>> ) as c:
>>> await c.ping()
For some operations, you may need to authenticate your requests by
providing a username and password to the client.
>>> async with connect(username='admin', password='changeit') as c:
>>> await c.ping()
Ordinarily you will create a single Client per application, but for
advanced scenarios you might want multiple connections. In this
situation, you can name each connection in order to get better logging.
>>> async with connect(name="event-reader"):
>>> await c.ping()
>>> async with connect(name="event-writer"):
>>> await c.ping()
Args:
host: The IP or DNS entry to connect with, defaults to 'localhost'.
port: The port to connect with, defaults to 1113.
discovery_host: The IP or DNS entry to use for cluster discovery.
discovery_port: The port to use for cluster discovery, defaults to 2113.
username: The username to use when communicating with eventstore.
password: The password to use when communicating with eventstore.
loop: An asyncio event loop.
selector: An optional function that selects one element from a list of
:class:`photonpump.discovery.DiscoveredNode` elements.
"""
discovery = get_discoverer(host, port, discovery_host, discovery_port, selector)
dispatcher = MessageDispatcher(name=name, loop=loop)
connector = Connector(discovery, dispatcher, name=name)
credential = msg.Credential(username, password) if username and password else None
return Client(connector, dispatcher, credential=credential)
|
[
"def",
"connect",
"(",
"host",
"=",
"\"localhost\"",
",",
"port",
"=",
"1113",
",",
"discovery_host",
"=",
"None",
",",
"discovery_port",
"=",
"2113",
",",
"username",
"=",
"None",
",",
"password",
"=",
"None",
",",
"loop",
"=",
"None",
",",
"name",
"=",
"None",
",",
"selector",
"=",
"select_random",
",",
")",
"->",
"Client",
":",
"discovery",
"=",
"get_discoverer",
"(",
"host",
",",
"port",
",",
"discovery_host",
",",
"discovery_port",
",",
"selector",
")",
"dispatcher",
"=",
"MessageDispatcher",
"(",
"name",
"=",
"name",
",",
"loop",
"=",
"loop",
")",
"connector",
"=",
"Connector",
"(",
"discovery",
",",
"dispatcher",
",",
"name",
"=",
"name",
")",
"credential",
"=",
"msg",
".",
"Credential",
"(",
"username",
",",
"password",
")",
"if",
"username",
"and",
"password",
"else",
"None",
"return",
"Client",
"(",
"connector",
",",
"dispatcher",
",",
"credential",
"=",
"credential",
")"
] |
Create a new client.
Examples:
Since the Client is an async context manager, we can use it in a
with block for automatic connect/disconnect semantics.
>>> async with connect(host='127.0.0.1', port=1113) as c:
>>> await c.ping()
Or we can call connect at a more convenient moment
>>> c = connect()
>>> await c.connect()
>>> await c.ping()
>>> await c.close()
For cluster discovery cases, we can provide a discovery host and
port. The host may be an IP or DNS entry. If you provide a DNS
entry, discovery will choose randomly from the registered IP
addresses for the hostname.
>>> async with connect(discovery_host="eventstore.test") as c:
>>> await c.ping()
The discovery host returns gossip data about the cluster. We use the
gossip to select a node at random from the available cluster members.
If you're using
:meth:`persistent subscriptions <photonpump.connection.Client.create_subscription>`
you will always want to connect to the master node of the cluster.
The selector parameter is a function that chooses an available node from
the gossip result. To select the master node, use the
:func:`photonpump.discovery.prefer_master` function. This function will return
the master node if there is a live master, and a random replica otherwise.
All requests to the server can be made with the require_master flag which
will raise an error if the current node is not a master.
>>> async with connect(
>>> discovery_host="eventstore.test",
>>> selector=discovery.prefer_master,
>>> ) as c:
>>> await c.ping(require_master=True)
Conversely, you might want to avoid connecting to the master node for reasons
of scalability. For this you can use the
:func:`photonpump.discovery.prefer_replica` function.
>>> async with connect(
>>> discovery_host="eventstore.test",
>>> selector=discovery.prefer_replica,
>>> ) as c:
>>> await c.ping()
For some operations, you may need to authenticate your requests by
providing a username and password to the client.
>>> async with connect(username='admin', password='changeit') as c:
>>> await c.ping()
Ordinarily you will create a single Client per application, but for
advanced scenarios you might want multiple connections. In this
situation, you can name each connection in order to get better logging.
>>> async with connect(name="event-reader"):
>>> await c.ping()
>>> async with connect(name="event-writer"):
>>> await c.ping()
Args:
host: The IP or DNS entry to connect with, defaults to 'localhost'.
port: The port to connect with, defaults to 1113.
discovery_host: The IP or DNS entry to use for cluster discovery.
discovery_port: The port to use for cluster discovery, defaults to 2113.
username: The username to use when communicating with eventstore.
password: The password to use when communicating with eventstore.
loop: An asyncio event loop.
selector: An optional function that selects one element from a list of
:class:`photonpump.discovery.DiscoveredNode` elements.
|
[
"Create",
"a",
"new",
"client",
"."
] |
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
|
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/connection.py#L1190-L1290
|
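The connect() examples above assume they already run inside a coroutine. A minimal sketch of driving one with asyncio, assuming connect is importable from the top-level photonpump package (the examples omit the import):
import asyncio

from photonpump import connect  # assumed import path

async def main():
    # The async context manager handles connect/disconnect for us.
    async with connect(host="127.0.0.1", port=1113) as c:
        await c.ping()

asyncio.run(main())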
6,675
|
madedotcom/photon-pump
|
photonpump/connection.py
|
MessageReader.start
|
async def start(self):
"""Loop forever reading messages and invoking
the operation that caused them"""
while True:
try:
data = await self.reader.read(8192)
if self._trace_enabled:
self._logger.trace(
"Received %d bytes from remote server:\n%s",
len(data),
msg.dump(data),
)
await self.process(data)
except asyncio.CancelledError:
return
except:
logging.exception("Unhandled error in Message Reader")
raise
|
python
|
async def start(self):
"""Loop forever reading messages and invoking
the operation that caused them"""
while True:
try:
data = await self.reader.read(8192)
if self._trace_enabled:
self._logger.trace(
"Received %d bytes from remote server:\n%s",
len(data),
msg.dump(data),
)
await self.process(data)
except asyncio.CancelledError:
return
except:
logging.exception("Unhandled error in Message Reader")
raise
|
[
"async",
"def",
"start",
"(",
"self",
")",
":",
"while",
"True",
":",
"try",
":",
"data",
"=",
"await",
"self",
".",
"reader",
".",
"read",
"(",
"8192",
")",
"if",
"self",
".",
"_trace_enabled",
":",
"self",
".",
"_logger",
".",
"trace",
"(",
"\"Received %d bytes from remote server:\\n%s\"",
",",
"len",
"(",
"data",
")",
",",
"msg",
".",
"dump",
"(",
"data",
")",
",",
")",
"await",
"self",
".",
"process",
"(",
"data",
")",
"except",
"asyncio",
".",
"CancelledError",
":",
"return",
"except",
":",
"logging",
".",
"exception",
"(",
"\"Unhandled error in Message Reader\"",
")",
"raise"
] |
Loop forever reading messages and invoking
the operation that caused them
|
[
"Loop",
"forever",
"reading",
"messages",
"and",
"invoking",
"the",
"operation",
"that",
"caused",
"them"
] |
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
|
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/connection.py#L397-L416
|
6,676
|
madedotcom/photon-pump
|
photonpump/connection.py
|
Client.ping
|
async def ping(self, conversation_id: uuid.UUID = None) -> float:
"""
Send a message to the remote server to check liveness.
Returns:
The round-trip time to receive a Pong message in fractional seconds
Examples:
>>> async with connect() as conn:
>>> print("Sending a PING to the server")
>>> time_secs = await conn.ping()
>>> print("Received a PONG after {} secs".format(time_secs))
"""
cmd = convo.Ping(conversation_id=conversation_id or uuid.uuid4())
result = await self.dispatcher.start_conversation(cmd)
return await result
|
python
|
async def ping(self, conversation_id: uuid.UUID = None) -> float:
"""
Send a message to the remote server to check liveness.
Returns:
The round-trip time to receive a Pong message in fractional seconds
Examples:
>>> async with connect() as conn:
>>> print("Sending a PING to the server")
>>> time_secs = await conn.ping()
>>> print("Received a PONG after {} secs".format(time_secs))
"""
cmd = convo.Ping(conversation_id=conversation_id or uuid.uuid4())
result = await self.dispatcher.start_conversation(cmd)
return await result
|
[
"async",
"def",
"ping",
"(",
"self",
",",
"conversation_id",
":",
"uuid",
".",
"UUID",
"=",
"None",
")",
"->",
"float",
":",
"cmd",
"=",
"convo",
".",
"Ping",
"(",
"conversation_id",
"=",
"conversation_id",
"or",
"uuid",
".",
"uuid4",
"(",
")",
")",
"result",
"=",
"await",
"self",
".",
"dispatcher",
".",
"start_conversation",
"(",
"cmd",
")",
"return",
"await",
"result"
] |
Send a message to the remote server to check liveness.
Returns:
The round-trip time to receive a Pong message in fractional seconds
Examples:
>>> async with connect() as conn:
>>> print("Sending a PING to the server")
>>> time_secs = await conn.ping()
>>> print("Received a PONG after {} secs".format(time_secs))
|
[
"Send",
"a",
"message",
"to",
"the",
"remote",
"server",
"to",
"check",
"liveness",
"."
] |
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
|
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/connection.py#L581-L599
|
6,677
|
madedotcom/photon-pump
|
photonpump/connection.py
|
Client.publish_event
|
async def publish_event(
self,
stream: str,
type: str,
body: Optional[Any] = None,
id: Optional[uuid.UUID] = None,
metadata: Optional[Any] = None,
expected_version: int = -2,
require_master: bool = False,
) -> None:
"""
Publish a single event to the EventStore.
This method publishes a single event to the remote server and waits
for acknowledgement.
Args:
stream: The stream to publish the event to.
type: the event's type.
body: a serializable body for the event.
id: a unique id for the event. PhotonPump will automatically generate an
id if none is provided.
metadata: Optional serializable metadata block for the event.
expected_version: Used for concurrency control.
If a positive integer is provided, EventStore will check that the stream
is at that version before accepting a write.
There are four magic values:
-4: StreamMustExist. Checks that the stream already exists.
-2: Any. Disables concurrency checks
-1: NoStream. Checks that the stream does not yet exist.
0: EmptyStream. Checks that the stream has been explicitly created but
does not yet contain any events.
require_master: If true, slave nodes will reject this message.
Examples:
>>> async with connect() as conn:
>>> await conn.publish_event(
>>> "inventory_item-1",
>>> "item_created",
>>> body={ "item-id": 1, "created-date": "2018-08-19" },
>>> expected_version=ExpectedVersion.StreamMustNotExist
>>> )
>>>
>>> await conn.publish_event(
>>> "inventory_item-1",
>>> "item_deleted",
>>> expected_version=1,
>>> metadata={'deleted-by': 'bob' }
>>> )
"""
event = msg.NewEvent(type, id or uuid.uuid4(), body, metadata)
conversation = convo.WriteEvents(
stream,
[event],
expected_version=expected_version,
require_master=require_master,
)
result = await self.dispatcher.start_conversation(conversation)
return await result
|
python
|
async def publish_event(
self,
stream: str,
type: str,
body: Optional[Any] = None,
id: Optional[uuid.UUID] = None,
metadata: Optional[Any] = None,
expected_version: int = -2,
require_master: bool = False,
) -> None:
"""
Publish a single event to the EventStore.
This method publishes a single event to the remote server and waits
for acknowledgement.
Args:
stream: The stream to publish the event to.
type: the event's type.
body: a serializable body for the event.
id: a unique id for the event. PhotonPump will automatically generate an
id if none is provided.
metadata: Optional serializable metadata block for the event.
expected_version: Used for concurrency control.
If a positive integer is provided, EventStore will check that the stream
is at that version before accepting a write.
There are four magic values:
-4: StreamMustExist. Checks that the stream already exists.
-2: Any. Disables concurrency checks
-1: NoStream. Checks that the stream does not yet exist.
0: EmptyStream. Checks that the stream has been explicitly created but
does not yet contain any events.
require_master: If true, slave nodes will reject this message.
Examples:
>>> async with connect() as conn:
>>> await conn.publish_event(
>>> "inventory_item-1",
>>> "item_created",
>>> body={ "item-id": 1, "created-date": "2018-08-19" },
>>> expected_version=ExpectedVersion.StreamMustNotExist
>>> )
>>>
>>> await conn.publish_event(
>>> "inventory_item-1",
>>> "item_deleted",
>>> expected_version=1,
>>> metadata={'deleted-by': 'bob' }
>>> )
"""
event = msg.NewEvent(type, id or uuid.uuid4(), body, metadata)
conversation = convo.WriteEvents(
stream,
[event],
expected_version=expected_version,
require_master=require_master,
)
result = await self.dispatcher.start_conversation(conversation)
return await result
|
[
"async",
"def",
"publish_event",
"(",
"self",
",",
"stream",
":",
"str",
",",
"type",
":",
"str",
",",
"body",
":",
"Optional",
"[",
"Any",
"]",
"=",
"None",
",",
"id",
":",
"Optional",
"[",
"uuid",
".",
"UUID",
"]",
"=",
"None",
",",
"metadata",
":",
"Optional",
"[",
"Any",
"]",
"=",
"None",
",",
"expected_version",
":",
"int",
"=",
"-",
"2",
",",
"require_master",
":",
"bool",
"=",
"False",
",",
")",
"->",
"None",
":",
"event",
"=",
"msg",
".",
"NewEvent",
"(",
"type",
",",
"id",
"or",
"uuid",
".",
"uuid4",
"(",
")",
",",
"body",
",",
"metadata",
")",
"conversation",
"=",
"convo",
".",
"WriteEvents",
"(",
"stream",
",",
"[",
"event",
"]",
",",
"expected_version",
"=",
"expected_version",
",",
"require_master",
"=",
"require_master",
",",
")",
"result",
"=",
"await",
"self",
".",
"dispatcher",
".",
"start_conversation",
"(",
"conversation",
")",
"return",
"await",
"result"
] |
Publish a single event to the EventStore.
This method publishes a single event to the remote server and waits
for acknowledgement.
Args:
stream: The stream to publish the event to.
type: the event's type.
body: a serializable body for the event.
id: a unique id for the event. PhotonPump will automatically generate an
id if none is provided.
metadata: Optional serializable metadata block for the event.
expected_version: Used for concurrency control.
If a positive integer is provided, EventStore will check that the stream
is at that version before accepting a write.
There are four magic values:
-4: StreamMustExist. Checks that the stream already exists.
-2: Any. Disables concurrency checks
-1: NoStream. Checks that the stream does not yet exist.
0: EmptyStream. Checks that the stream has been explicitly created but
does not yet contain any events.
require_master: If true, slave nodes will reject this message.
Examples:
>>> async with connect() as conn:
>>> await conn.publish_event(
>>> "inventory_item-1",
>>> "item_created",
>>> body={ "item-id": 1, "created-date": "2018-08-19" },
>>> expected_version=ExpectedVersion.StreamMustNotExist
>>> )
>>>
>>> await conn.publish_event(
>>> "inventory_item-1",
>>> "item_deleted",
>>> expected_version=1,
>>> metadata={'deleted-by': 'bob' }
>>> )
|
[
"Publish",
"a",
"single",
"event",
"to",
"the",
"EventStore",
"."
] |
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
|
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/connection.py#L601-L663
|
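A hedged sketch of the optimistic-concurrency flow described above, using only the documented magic values (-1 asserts that the stream does not exist yet; after the first write the stream is at version 0). The import path is an assumption:
import asyncio

from photonpump import connect  # assumed import path

async def create_item():
    async with connect() as conn:
        # expected_version=-1 (NoStream): rejected if the stream already exists.
        await conn.publish_event(
            "inventory_item-2",
            "item_created",
            body={"item-id": 2},
            expected_version=-1,
        )
        # The stream now holds one event at version 0, so assert that version
        # to detect a concurrent writer on the next publish.
        await conn.publish_event(
            "inventory_item-2",
            "item_deleted",
            expected_version=0,
        )

asyncio.run(create_item())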
6,678
|
madedotcom/photon-pump
|
photonpump/connection.py
|
Client.get_event
|
async def get_event(
self,
stream: str,
event_number: int,
resolve_links=True,
require_master=False,
correlation_id: uuid.UUID = None,
) -> msg.Event:
"""
Get a single event by stream and event number.
Args:
stream: The name of the stream containing the event.
event_number: The sequence number of the event to read.
resolve_links (optional): True if eventstore should
automatically resolve Link Events, otherwise False.
require_master (optional): True if this command must be
sent direct to the master node, otherwise False.
correlation_id (optional): A unique identifier for this
command.
Returns:
The resolved event if found, else None.
Examples:
>>> async with connect() as conn:
>>> await conn.publish("inventory_item-1", "item_created")
>>> event = await conn.get_event("inventory_item-1", 1)
>>> print(event)
"""
correlation_id = correlation_id or uuid.uuid4()
cmd = convo.ReadEvent(
stream,
event_number,
resolve_links,
require_master,
conversation_id=correlation_id,
)
result = await self.dispatcher.start_conversation(cmd)
return await result
|
python
|
async def get_event(
self,
stream: str,
event_number: int,
resolve_links=True,
require_master=False,
correlation_id: uuid.UUID = None,
) -> msg.Event:
"""
Get a single event by stream and event number.
Args:
stream: The name of the stream containing the event.
event_number: The sequence number of the event to read.
resolve_links (optional): True if eventstore should
automatically resolve Link Events, otherwise False.
require_master (optional): True if this command must be
sent direct to the master node, otherwise False.
correlation_id (optional): A unique identifier for this
command.
Returns:
The resolved event if found, else None.
Examples:
>>> async with connect() as conn:
>>> await conn.publish("inventory_item-1", "item_created")
>>> event = await conn.get_event("inventory_item-1", 1)
>>> print(event)
"""
correlation_id = correlation_id or uuid.uuid4()
cmd = convo.ReadEvent(
stream,
event_number,
resolve_links,
require_master,
conversation_id=correlation_id,
)
result = await self.dispatcher.start_conversation(cmd)
return await result
|
[
"async",
"def",
"get_event",
"(",
"self",
",",
"stream",
":",
"str",
",",
"event_number",
":",
"int",
",",
"resolve_links",
"=",
"True",
",",
"require_master",
"=",
"False",
",",
"correlation_id",
":",
"uuid",
".",
"UUID",
"=",
"None",
",",
")",
"->",
"msg",
".",
"Event",
":",
"correlation_id",
"=",
"correlation_id",
"or",
"uuid",
".",
"uuid4",
"(",
")",
"cmd",
"=",
"convo",
".",
"ReadEvent",
"(",
"stream",
",",
"event_number",
",",
"resolve_links",
",",
"require_master",
",",
"conversation_id",
"=",
"correlation_id",
",",
")",
"result",
"=",
"await",
"self",
".",
"dispatcher",
".",
"start_conversation",
"(",
"cmd",
")",
"return",
"await",
"result"
] |
Get a single event by stream and event number.
Args:
stream: The name of the stream containing the event.
event_number: The sequence number of the event to read.
resolve_links (optional): True if eventstore should
automatically resolve Link Events, otherwise False.
require_master (optional): True if this command must be
sent direct to the master node, otherwise False.
correlation_id (optional): A unique identifier for this
command.
Returns:
The resolved event if found, else None.
Examples:
>>> async with connect() as conn:
>>> await conn.publish("inventory_item-1", "item_created")
>>> event = await conn.get_event("inventory_item-1", 1)
>>> print(event)
|
[
"Get",
"a",
"single",
"event",
"by",
"stream",
"and",
"event",
"number",
"."
] |
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
|
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/connection.py#L682-L724
|
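Because get_event returns None when no event is found, callers should guard for it; a small sketch under the same import assumption as above:
import asyncio

from photonpump import connect  # assumed import path

async def read_first_event():
    async with connect() as conn:
        event = await conn.get_event("inventory_item-1", 0)
        if event is None:
            print("no such event")
        else:
            print(event)

asyncio.run(read_first_event())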
6,679
|
madedotcom/photon-pump
|
photonpump/connection.py
|
Client.get
|
async def get(
self,
stream: str,
direction: msg.StreamDirection = msg.StreamDirection.Forward,
from_event: int = 0,
max_count: int = 100,
resolve_links: bool = True,
require_master: bool = False,
correlation_id: uuid.UUID = None,
):
"""
Read a range of events from a stream.
Args:
stream: The name of the stream to read
direction (optional): Controls whether to read events forward or backward.
defaults to Forward.
from_event (optional): The first event to read.
defaults to the beginning of the stream when direction is forward
and the end of the stream if direction is backward.
max_count (optional): The maximum number of events to return.
resolve_links (optional): True if eventstore should
automatically resolve Link Events, otherwise False.
require_master (optional): True if this command must be
sent direct to the master node, otherwise False.
correlation_id (optional): A unique identifier for this command.
Examples:
Read 5 events from a stream
>>> async for event in conn.get("my-stream", max_count=5):
>>> print(event)
Read events 21 to 30
>>> async for event in conn.get("my-stream", max_count=10, from_event=21):
>>> print(event)
Read 10 most recent events in reverse order
>>> async for event in conn.get(
"my-stream",
max_count=10,
direction=StreamDirection.Backward
):
>>> print(event)
"""
correlation_id = correlation_id
cmd = convo.ReadStreamEvents(
stream,
from_event,
max_count,
resolve_links,
require_master,
direction=direction,
)
result = await self.dispatcher.start_conversation(cmd)
return await result
|
python
|
async def get(
self,
stream: str,
direction: msg.StreamDirection = msg.StreamDirection.Forward,
from_event: int = 0,
max_count: int = 100,
resolve_links: bool = True,
require_master: bool = False,
correlation_id: uuid.UUID = None,
):
"""
Read a range of events from a stream.
Args:
stream: The name of the stream to read
direction (optional): Controls whether to read events forward or backward.
defaults to Forward.
from_event (optional): The first event to read.
defaults to the beginning of the stream when direction is forward
and the end of the stream if direction is backward.
max_count (optional): The maximum number of events to return.
resolve_links (optional): True if eventstore should
automatically resolve Link Events, otherwise False.
require_master (optional): True if this command must be
sent direct to the master node, otherwise False.
correlation_id (optional): A unique identifier for this command.
Examples:
Read 5 events from a stream
>>> async for event in conn.get("my-stream", max_count=5):
>>> print(event)
Read events 21 to 30
>>> async for event in conn.get("my-stream", max_count=10, from_event=21):
>>> print(event)
Read 10 most recent events in reverse order
>>> async for event in conn.get(
"my-stream",
max_count=10,
direction=StreamDirection.Backward
):
>>> print(event)
"""
correlation_id = correlation_id
cmd = convo.ReadStreamEvents(
stream,
from_event,
max_count,
resolve_links,
require_master,
direction=direction,
)
result = await self.dispatcher.start_conversation(cmd)
return await result
|
[
"async",
"def",
"get",
"(",
"self",
",",
"stream",
":",
"str",
",",
"direction",
":",
"msg",
".",
"StreamDirection",
"=",
"msg",
".",
"StreamDirection",
".",
"Forward",
",",
"from_event",
":",
"int",
"=",
"0",
",",
"max_count",
":",
"int",
"=",
"100",
",",
"resolve_links",
":",
"bool",
"=",
"True",
",",
"require_master",
":",
"bool",
"=",
"False",
",",
"correlation_id",
":",
"uuid",
".",
"UUID",
"=",
"None",
",",
")",
":",
"correlation_id",
"=",
"correlation_id",
"cmd",
"=",
"convo",
".",
"ReadStreamEvents",
"(",
"stream",
",",
"from_event",
",",
"max_count",
",",
"resolve_links",
",",
"require_master",
",",
"direction",
"=",
"direction",
",",
")",
"result",
"=",
"await",
"self",
".",
"dispatcher",
".",
"start_conversation",
"(",
"cmd",
")",
"return",
"await",
"result"
] |
Read a range of events from a stream.
Args:
stream: The name of the stream to read
direction (optional): Controls whether to read events forward or backward.
defaults to Forward.
from_event (optional): The first event to read.
defaults to the beginning of the stream when direction is forward
and the end of the stream if direction is backward.
max_count (optional): The maximum number of events to return.
resolve_links (optional): True if eventstore should
automatically resolve Link Events, otherwise False.
require_master (optional): True if this command must be
sent direct to the master node, otherwise False.
correlation_id (optional): A unique identifier for this command.
Examples:
Read 5 events from a stream
>>> async for event in conn.get("my-stream", max_count=5):
>>> print(event)
Read events 21 to 30
>>> async for event in conn.get("my-stream", max_count=10, from_event=21):
>>> print(event)
Read 10 most recent events in reverse order
>>> async for event in conn.get(
"my-stream",
max_count=10,
direction=StreamDirection.Backward
):
>>> print(event)
|
[
"Read",
"a",
"range",
"of",
"events",
"from",
"a",
"stream",
"."
] |
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
|
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/connection.py#L726-L786
|
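The practical difference from iter (defined later in this file) is that get returns one bounded slice of at most max_count events, whereas iter keeps paging through the stream in batch_size chunks until it is exhausted. A hedged sketch of the paging style, import path assumed:
import asyncio

from photonpump import connect  # assumed import path

async def read_whole_stream():
    async with connect() as conn:
        # Pages through "my-stream" five events at a time until the end.
        async for event in conn.iter("my-stream", batch_size=5):
            print(event)

asyncio.run(read_whole_stream())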
6,680
|
madedotcom/photon-pump
|
photonpump/connection.py
|
Client.get_all
|
async def get_all(
self,
direction: msg.StreamDirection = msg.StreamDirection.Forward,
from_position: Optional[Union[msg.Position, msg._PositionSentinel]] = None,
max_count: int = 100,
resolve_links: bool = True,
require_master: bool = False,
correlation_id: uuid.UUID = None,
):
"""
Read a range of events from the whole database.
Args:
direction (optional): Controls whether to read events forward or backward.
defaults to Forward.
from_position (optional): The position to read from.
defaults to the beginning of the stream when direction is forward
and the end of the stream if direction is backward.
max_count (optional): The maximum number of events to return.
resolve_links (optional): True if eventstore should
automatically resolve Link Events, otherwise False.
require_master (optional): True if this command must be
sent direct to the master node, otherwise False.
correlation_id (optional): A unique identifier for this command.
Examples:
Read 5 events
>>> async for event in conn.get_all(max_count=5):
>>> print(event)
Read 10 most recent events in reverse order
>>> async for event in conn.get_all(
max_count=10,
direction=StreamDirection.Backward
):
>>> print(event)
"""
correlation_id = correlation_id
cmd = convo.ReadAllEvents(
msg.Position.for_direction(direction, from_position),
max_count,
resolve_links,
require_master,
direction=direction,
credentials=self.credential,
)
result = await self.dispatcher.start_conversation(cmd)
return await result
|
python
|
async def get_all(
self,
direction: msg.StreamDirection = msg.StreamDirection.Forward,
from_position: Optional[Union[msg.Position, msg._PositionSentinel]] = None,
max_count: int = 100,
resolve_links: bool = True,
require_master: bool = False,
correlation_id: uuid.UUID = None,
):
"""
Read a range of events from the whole database.
Args:
direction (optional): Controls whether to read events forward or backward.
defaults to Forward.
from_position (optional): The position to read from.
defaults to the beginning of the stream when direction is forward
and the end of the stream if direction is backward.
max_count (optional): The maximum number of events to return.
resolve_links (optional): True if eventstore should
automatically resolve Link Events, otherwise False.
require_master (optional): True if this command must be
sent direct to the master node, otherwise False.
correlation_id (optional): A unique identifier for this command.
Examples:
Read 5 events
>>> async for event in conn.get_all(max_count=5):
>>> print(event)
Read 10 most recent events in reverse order
>>> async for event in conn.get_all(
max_count=10,
direction=StreamDirection.Backward
):
>>> print(event)
"""
correlation_id = correlation_id
cmd = convo.ReadAllEvents(
msg.Position.for_direction(direction, from_position),
max_count,
resolve_links,
require_master,
direction=direction,
credentials=self.credential,
)
result = await self.dispatcher.start_conversation(cmd)
return await result
|
[
"async",
"def",
"get_all",
"(",
"self",
",",
"direction",
":",
"msg",
".",
"StreamDirection",
"=",
"msg",
".",
"StreamDirection",
".",
"Forward",
",",
"from_position",
":",
"Optional",
"[",
"Union",
"[",
"msg",
".",
"Position",
",",
"msg",
".",
"_PositionSentinel",
"]",
"]",
"=",
"None",
",",
"max_count",
":",
"int",
"=",
"100",
",",
"resolve_links",
":",
"bool",
"=",
"True",
",",
"require_master",
":",
"bool",
"=",
"False",
",",
"correlation_id",
":",
"uuid",
".",
"UUID",
"=",
"None",
",",
")",
":",
"correlation_id",
"=",
"correlation_id",
"cmd",
"=",
"convo",
".",
"ReadAllEvents",
"(",
"msg",
".",
"Position",
".",
"for_direction",
"(",
"direction",
",",
"from_position",
")",
",",
"max_count",
",",
"resolve_links",
",",
"require_master",
",",
"direction",
"=",
"direction",
",",
"credentials",
"=",
"self",
".",
"credential",
",",
")",
"result",
"=",
"await",
"self",
".",
"dispatcher",
".",
"start_conversation",
"(",
"cmd",
")",
"return",
"await",
"result"
] |
Read a range of events from the whole database.
Args:
direction (optional): Controls whether to read events forward or backward.
defaults to Forward.
from_position (optional): The position to read from.
defaults to the beginning of the stream when direction is forward
and the end of the stream if direction is backward.
max_count (optional): The maximum number of events to return.
resolve_links (optional): True if eventstore should
automatically resolve Link Events, otherwise False.
require_master (optional): True if this command must be
sent direct to the master node, otherwise False.
correlation_id (optional): A unique identifier for this command.
Examples:
Read 5 events
>>> async for event in conn.get_all(max_count=5):
>>> print(event)
Read 10 most recent events in reverse order
>>> async for event in conn.get_all(
max_count=10,
direction=StreamDirection.Backward
):
>>> print(event)
|
[
"Read",
"a",
"range",
"of",
"events",
"from",
"the",
"whole",
"database",
"."
] |
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
|
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/connection.py#L788-L840
|
6,681
|
madedotcom/photon-pump
|
photonpump/connection.py
|
Client.iter
|
async def iter(
self,
stream: str,
direction: msg.StreamDirection = msg.StreamDirection.Forward,
from_event: int = None,
batch_size: int = 100,
resolve_links: bool = True,
require_master: bool = False,
correlation_id: uuid.UUID = None,
):
"""
Read through a stream of events until the end and then stop.
Args:
stream: The name of the stream to read.
direction: Controls whether to read forward or backward through the
stream. Defaults to StreamDirection.Forward
from_event: The sequence number of the first event to read from the
stream. Reads from the appropriate end of the stream if unset.
batch_size: The maximum number of events to read at a time.
resolve_links (optional): True if eventstore should
automatically resolve Link Events, otherwise False.
require_master (optional): True if this command must be
sent direct to the master node, otherwise False.
correlation_id (optional): A unique identifier for this
command.
Examples:
Print every event from the stream "my-stream".
>>> async with connect() as conn:
>>> async for event in conn.iter("my-stream"):
>>> print(event)
Print every event from the stream "my-stream" in reverse order
>>> async with connect() as conn:
>>> async for event in conn.iter("my-stream", direction=StreamDirection.Backward):
>>> print(event)
Skip the first 10 events of the stream
>>> async with connect() as conn:
>>> async for event in conn.iter("my-stream", from_event=11):
>>> print(event)
"""
correlation_id = correlation_id or uuid.uuid4()
cmd = convo.IterStreamEvents(
stream,
from_event,
batch_size,
resolve_links,
direction=direction,
credentials=self.credential,
)
result = await self.dispatcher.start_conversation(cmd)
iterator = await result
async for event in iterator:
yield event
|
python
|
async def iter(
self,
stream: str,
direction: msg.StreamDirection = msg.StreamDirection.Forward,
from_event: int = None,
batch_size: int = 100,
resolve_links: bool = True,
require_master: bool = False,
correlation_id: uuid.UUID = None,
):
"""
Read through a stream of events until the end and then stop.
Args:
stream: The name of the stream to read.
direction: Controls whether to read forward or backward through the
stream. Defaults to StreamDirection.Forward
from_event: The sequence number of the first event to read from the
stream. Reads from the appropriate end of the stream if unset.
batch_size: The maximum number of events to read at a time.
resolve_links (optional): True if eventstore should
automatically resolve Link Events, otherwise False.
require_master (optional): True if this command must be
sent direct to the master node, otherwise False.
correlation_id (optional): A unique identifier for this
command.
Examples:
Print every event from the stream "my-stream".
>>> async with connect() as conn:
>>> async for event in conn.iter("my-stream"):
>>> print(event)
Print every event from the stream "my-stream" in reverse order
>>> async with connect() as conn:
>>> async for event in conn.iter("my-stream", direction=StreamDirection.Backward):
>>> print(event)
Skip the first 10 events of the stream
>>> async with connect() as conn:
>>> async for event in conn.iter("my-stream", from_event=11):
>>> print(event)
"""
correlation_id = correlation_id or uuid.uuid4()
cmd = convo.IterStreamEvents(
stream,
from_event,
batch_size,
resolve_links,
direction=direction,
credentials=self.credential,
)
result = await self.dispatcher.start_conversation(cmd)
iterator = await result
async for event in iterator:
yield event
|
[
"async",
"def",
"iter",
"(",
"self",
",",
"stream",
":",
"str",
",",
"direction",
":",
"msg",
".",
"StreamDirection",
"=",
"msg",
".",
"StreamDirection",
".",
"Forward",
",",
"from_event",
":",
"int",
"=",
"None",
",",
"batch_size",
":",
"int",
"=",
"100",
",",
"resolve_links",
":",
"bool",
"=",
"True",
",",
"require_master",
":",
"bool",
"=",
"False",
",",
"correlation_id",
":",
"uuid",
".",
"UUID",
"=",
"None",
",",
")",
":",
"correlation_id",
"=",
"correlation_id",
"or",
"uuid",
".",
"uuid4",
"(",
")",
"cmd",
"=",
"convo",
".",
"IterStreamEvents",
"(",
"stream",
",",
"from_event",
",",
"batch_size",
",",
"resolve_links",
",",
"direction",
"=",
"direction",
",",
"credentials",
"=",
"self",
".",
"credential",
",",
")",
"result",
"=",
"await",
"self",
".",
"dispatcher",
".",
"start_conversation",
"(",
"cmd",
")",
"iterator",
"=",
"await",
"result",
"async",
"for",
"event",
"in",
"iterator",
":",
"yield",
"event"
] |
Read through a stream of events until the end and then stop.
Args:
stream: The name of the stream to read.
direction: Controls whether to read forward or backward through the
stream. Defaults to StreamDirection.Forward
from_event: The sequence number of the first event to read from the
stream. Reads from the appropriate end of the stream if unset.
batch_size: The maximum number of events to read at a time.
resolve_links (optional): True if eventstore should
automatically resolve Link Events, otherwise False.
require_master (optional): True if this command must be
sent direct to the master node, otherwise False.
correlation_id (optional): A unique identifier for this
command.
Examples:
Print every event from the stream "my-stream".
>>> async with connect() as conn:
>>> async for event in conn.iter("my-stream"):
>>> print(event)
Print every event from the stream "my-stream" in reverse order
>>> async with connect() as conn:
>>> async for event in conn.iter("my-stream", direction=StreamDirection.Backward):
>>> print(event)
Skip the first 10 events of the stream
>>> async with connect() as conn:
>>> async for event in conn.iter("my-stream", from_event=11):
>>> print(event)
|
[
"Read",
"through",
"a",
"stream",
"of",
"events",
"until",
"the",
"end",
"and",
"then",
"stop",
"."
] |
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
|
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/connection.py#L842-L902
|
6,682
|
madedotcom/photon-pump
|
photonpump/connection.py
|
Client.iter_all
|
async def iter_all(
self,
direction: msg.StreamDirection = msg.StreamDirection.Forward,
from_position: Optional[Union[msg.Position, msg._PositionSentinel]] = None,
batch_size: int = 100,
resolve_links: bool = True,
require_master: bool = False,
correlation_id: Optional[uuid.UUID] = None,
):
"""
Read through all the events in the database.
Args:
direction (optional): Controls whether to read forward or backward
through the events. Defaults to StreamDirection.Forward
from_position (optional): The position to start reading from.
Defaults to photonpump.Beginning when direction is Forward,
photonpump.End when direction is Backward.
batch_size (optional): The maximum number of events to read at a time.
resolve_links (optional): True if eventstore should
automatically resolve Link Events, otherwise False.
require_master (optional): True if this command must be
sent direct to the master node, otherwise False.
correlation_id (optional): A unique identifier for this
command.
Examples:
Print every event from the database.
>>> async with connect() as conn:
>>> async for event in conn.iter_all():
>>> print(event)
Print every event from the database in reverse order
>>> async with connect() as conn:
>>> async for event in conn.iter_all(direction=StreamDirection.Backward):
>>> print(event)
Start reading from a known commit position
>>> async with connect() as conn:
>>> async for event in conn.iter_all(from_position=Position(12345)):
>>> print(event)
"""
correlation_id = correlation_id
cmd = convo.IterAllEvents(
msg.Position.for_direction(direction, from_position),
batch_size,
resolve_links,
require_master,
direction,
self.credential,
correlation_id,
)
result = await self.dispatcher.start_conversation(cmd)
iterator = await result
async for event in iterator:
yield event
|
python
|
async def iter_all(
self,
direction: msg.StreamDirection = msg.StreamDirection.Forward,
from_position: Optional[Union[msg.Position, msg._PositionSentinel]] = None,
batch_size: int = 100,
resolve_links: bool = True,
require_master: bool = False,
correlation_id: Optional[uuid.UUID] = None,
):
"""
Read through all the events in the database.
Args:
direction (optional): Controls whether to read forward or backward
through the events. Defaults to StreamDirection.Forward
from_position (optional): The position to start reading from.
Defaults to photonpump.Beginning when direction is Forward,
photonpump.End when direction is Backward.
batch_size (optional): The maximum number of events to read at a time.
resolve_links (optional): True if eventstore should
automatically resolve Link Events, otherwise False.
require_master (optional): True if this command must be
sent direct to the master node, otherwise False.
correlation_id (optional): A unique identifier for this
command.
Examples:
Print every event from the database.
>>> async with connect() as conn:
>>> async for event in conn.iter_all():
>>> print(event)
Print every event from the database in reverse order
>>> async with connect() as conn:
>>> async for event in conn.iter_all(direction=StreamDirection.Backward):
>>> print(event)
Start reading from a known commit position
>>> async with connect() as conn:
>>> async for event in conn.iter_all(from_position=Position(12345)):
>>> print(event)
"""
correlation_id = correlation_id
cmd = convo.IterAllEvents(
msg.Position.for_direction(direction, from_position),
batch_size,
resolve_links,
require_master,
direction,
self.credential,
correlation_id,
)
result = await self.dispatcher.start_conversation(cmd)
iterator = await result
async for event in iterator:
yield event
|
[
"async",
"def",
"iter_all",
"(",
"self",
",",
"direction",
":",
"msg",
".",
"StreamDirection",
"=",
"msg",
".",
"StreamDirection",
".",
"Forward",
",",
"from_position",
":",
"Optional",
"[",
"Union",
"[",
"msg",
".",
"Position",
",",
"msg",
".",
"_PositionSentinel",
"]",
"]",
"=",
"None",
",",
"batch_size",
":",
"int",
"=",
"100",
",",
"resolve_links",
":",
"bool",
"=",
"True",
",",
"require_master",
":",
"bool",
"=",
"False",
",",
"correlation_id",
":",
"Optional",
"[",
"uuid",
".",
"UUID",
"]",
"=",
"None",
",",
")",
":",
"correlation_id",
"=",
"correlation_id",
"cmd",
"=",
"convo",
".",
"IterAllEvents",
"(",
"msg",
".",
"Position",
".",
"for_direction",
"(",
"direction",
",",
"from_position",
")",
",",
"batch_size",
",",
"resolve_links",
",",
"require_master",
",",
"direction",
",",
"self",
".",
"credential",
",",
"correlation_id",
",",
")",
"result",
"=",
"await",
"self",
".",
"dispatcher",
".",
"start_conversation",
"(",
"cmd",
")",
"iterator",
"=",
"await",
"result",
"async",
"for",
"event",
"in",
"iterator",
":",
"yield",
"event"
] |
Read through all the events in the database.
Args:
direction (optional): Controls whether to read forward or backward
through the events. Defaults to StreamDirection.Forward
from_position (optional): The position to start reading from.
Defaults to photonpump.Beginning when direction is Forward,
photonpump.End when direction is Backward.
batch_size (optional): The maximum number of events to read at a time.
resolve_links (optional): True if eventstore should
automatically resolve Link Events, otherwise False.
require_master (optional): True if this command must be
sent direct to the master node, otherwise False.
correlation_id (optional): A unique identifier for this
command.
Examples:
Print every event from the database.
>>> async with connect() as conn:
>>> async for event in conn.iter_all():
>>> print(event)
Print every event from the database in reverse order
>>> async with connect() as conn:
>>> async for event in conn.iter_all(direction=StreamDirection.Backward):
>>> print(event)
Start reading from a known commit position
>>> async with connect() as conn:
>>> async for event in conn.iter_all(from_position=Position(12345)):
>>> print(event)
|
[
"Read",
"through",
"all",
"the",
"events",
"in",
"the",
"database",
"."
] |
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
|
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/connection.py#L904-L965
|
6,683
|
madedotcom/photon-pump
|
photonpump/connection.py
|
Client.subscribe_to
|
async def subscribe_to(
self, stream, start_from=-1, resolve_link_tos=True, batch_size: int = 100
):
"""
Subscribe to receive notifications when a new event is published
to a stream.
Args:
stream: The name of the stream.
start_from (optional): The first event to read.
This parameter defaults to the magic value -1 which is treated
as meaning "from the end of the stream". If this value is used,
no historical events will be returned.
For any other value, photonpump will read all events from
start_from until the end of the stream in pages of max_size
before subscribing to receive new events as they arrive.
resolve_links (optional): True if eventstore should
automatically resolve Link Events, otherwise False.
required_master (optional): True if this command must be
sent direct to the master node, otherwise False.
correlation_id (optional): A unique identifier for this
command.
batch_size (optional): The number of events to pull down from
eventstore in one go.
Returns:
A VolatileSubscription.
Examples:
>>> async with connect() as conn:
>>> # Subscribe only to NEW events on the price-changes stream
>>> subs = await conn.subscribe_to("price-changes")
>>> async for event in subs.events:
>>> print(event)
>>> async with connect() as conn:
>>> # Read all historical events and then receive updates as they
>>> # arrive.
>>> subs = await conn.subscribe_to("price-changes", start_from=0)
>>> async for event in subs.events:
>>> print(event)
"""
if start_from == -1:
cmd: convo.Conversation = convo.SubscribeToStream(
stream, resolve_link_tos, credentials=self.credential
)
else:
cmd = convo.CatchupSubscription(
stream, start_from, batch_size, credential=self.credential
)
future = await self.dispatcher.start_conversation(cmd)
return await future
|
python
|
async def subscribe_to(
self, stream, start_from=-1, resolve_link_tos=True, batch_size: int = 100
):
"""
Subscribe to receive notifications when a new event is published
to a stream.
Args:
stream: The name of the stream.
start_from (optional): The first event to read.
This parameter defaults to the magic value -1 which is treated
as meaning "from the end of the stream". If this value is used,
no historical events will be returned.
For any other value, photonpump will read all events from
start_from until the end of the stream in pages of max_size
before subscribing to receive new events as they arrive.
resolve_links (optional): True if eventstore should
automatically resolve Link Events, otherwise False.
required_master (optional): True if this command must be
sent direct to the master node, otherwise False.
correlation_id (optional): A unique identifier for this
command.
batch_size (optional): The number of events to pull down from
eventstore in one go.
Returns:
A VolatileSubscription.
Examples:
>>> async with connect() as conn:
>>> # Subscribe only to NEW events on the price-changes stream
>>> subs = await conn.subscribe_to("price-changes")
>>> async for event in subs.events:
>>> print(event)
>>> async with connect() as conn:
>>> # Read all historical events and then receive updates as they
>>> # arrive.
>>> subs = await conn.subscribe_to("price-changes", start_from=0)
>>> async for event in subs.events:
>>> print(event)
"""
if start_from == -1:
cmd: convo.Conversation = convo.SubscribeToStream(
stream, resolve_link_tos, credentials=self.credential
)
else:
cmd = convo.CatchupSubscription(
stream, start_from, batch_size, credential=self.credential
)
future = await self.dispatcher.start_conversation(cmd)
return await future
|
[
"async",
"def",
"subscribe_to",
"(",
"self",
",",
"stream",
",",
"start_from",
"=",
"-",
"1",
",",
"resolve_link_tos",
"=",
"True",
",",
"batch_size",
":",
"int",
"=",
"100",
")",
":",
"if",
"start_from",
"==",
"-",
"1",
":",
"cmd",
":",
"convo",
".",
"Conversation",
"=",
"convo",
".",
"SubscribeToStream",
"(",
"stream",
",",
"resolve_link_tos",
",",
"credentials",
"=",
"self",
".",
"credential",
")",
"else",
":",
"cmd",
"=",
"convo",
".",
"CatchupSubscription",
"(",
"stream",
",",
"start_from",
",",
"batch_size",
",",
"credential",
"=",
"self",
".",
"credential",
")",
"future",
"=",
"await",
"self",
".",
"dispatcher",
".",
"start_conversation",
"(",
"cmd",
")",
"return",
"await",
"future"
] |
Subscribe to receive notifications when a new event is published
to a stream.
Args:
stream: The name of the stream.
start_from (optional): The first event to read.
This parameter defaults to the magic value -1 which is treated
as meaning "from the end of the stream". If this value is used,
no historical events will be returned.
For any other value, photonpump will read all events from
start_from until the end of the stream in pages of max_size
before subscribing to receive new events as they arrive.
resolve_links (optional): True if eventstore should
automatically resolve Link Events, otherwise False.
required_master (optional): True if this command must be
sent direct to the master node, otherwise False.
correlation_id (optional): A unique identifier for this
command.
batch_size (optional): The number of events to pull down from
eventstore in one go.
Returns:
A VolatileSubscription.
Examples:
>>> async with connect() as conn:
>>> # Subscribe only to NEW events on the price-changes stream
>>> subs = await conn.subscribe_to("price-changes")
>>> async for event in subs.events:
>>> print(event)
>>> async with connect() as conn:
>>> # Read all historical events and then receive updates as they
>>> # arrive.
>>> subs = await conn.subscribe_to("price-changes", start_from=0)
>>> async for event in subs.events:
>>> print(event)
|
[
"Subscribe",
"to",
"receive",
"notifications",
"when",
"a",
"new",
"event",
"is",
"published",
"to",
"a",
"stream",
"."
] |
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
|
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/connection.py#L1029-L1086
|
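A hedged sketch of a long-running consumer built on subscribe_to: with start_from=0 the subscription first replays history and then keeps yielding live events from subs.events. Import path assumed:
import asyncio

from photonpump import connect  # assumed import path

async def follow_prices():
    async with connect() as conn:
        subs = await conn.subscribe_to("price-changes", start_from=0)
        async for event in subs.events:
            print(event)

asyncio.run(follow_prices())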
6,684
|
madedotcom/photon-pump
|
photonpump/discovery.py
|
prefer_master
|
def prefer_master(nodes: List[DiscoveredNode]) -> Optional[DiscoveredNode]:
"""
Select the master if available, otherwise fall back to a replica.
"""
return max(nodes, key=attrgetter("state"))
|
python
|
def prefer_master(nodes: List[DiscoveredNode]) -> Optional[DiscoveredNode]:
"""
Select the master if available, otherwise fall back to a replica.
"""
return max(nodes, key=attrgetter("state"))
|
[
"def",
"prefer_master",
"(",
"nodes",
":",
"List",
"[",
"DiscoveredNode",
"]",
")",
"->",
"Optional",
"[",
"DiscoveredNode",
"]",
":",
"return",
"max",
"(",
"nodes",
",",
"key",
"=",
"attrgetter",
"(",
"\"state\"",
")",
")"
] |
Select the master if available, otherwise fall back to a replica.
|
[
"Select",
"the",
"master",
"if",
"available",
"otherwise",
"fall",
"back",
"to",
"a",
"replica",
"."
] |
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
|
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/discovery.py#L60-L64
|
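prefer_master relies on NodeState ordering the master above the replicas, so max() over the state attribute picks it. A self-contained illustration with hypothetical stand-in types (the real DiscoveredNode and NodeState live in photonpump.discovery and may differ):
from collections import namedtuple
from enum import IntEnum
from operator import attrgetter

# Hypothetical stand-ins, for illustration only.
class NodeState(IntEnum):
    Clone = 1
    Slave = 2
    Master = 3

Node = namedtuple("Node", ["address", "state"])

nodes = [
    Node("10.0.0.1", NodeState.Slave),
    Node("10.0.0.2", NodeState.Master),
    Node("10.0.0.3", NodeState.Clone),
]

# Same idea as prefer_master: the highest-ordered state wins.
print(max(nodes, key=attrgetter("state")).address)  # 10.0.0.2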
6,685
|
madedotcom/photon-pump
|
photonpump/discovery.py
|
prefer_replica
|
def prefer_replica(nodes: List[DiscoveredNode]) -> Optional[DiscoveredNode]:
"""
Select a random replica if any are available or fall back to the master.
"""
masters = [node for node in nodes if node.state == NodeState.Master]
replicas = [node for node in nodes if node.state != NodeState.Master]
if replicas:
return random.choice(replicas)
else:
# if you have more than one master then you're on your own, bud.
return masters[0]
|
python
|
def prefer_replica(nodes: List[DiscoveredNode]) -> Optional[DiscoveredNode]:
"""
Select a random replica if any are available or fall back to the master.
"""
masters = [node for node in nodes if node.state == NodeState.Master]
replicas = [node for node in nodes if node.state != NodeState.Master]
if replicas:
return random.choice(replicas)
else:
# if you have more than one master then you're on your own, bud.
return masters[0]
|
[
"def",
"prefer_replica",
"(",
"nodes",
":",
"List",
"[",
"DiscoveredNode",
"]",
")",
"->",
"Optional",
"[",
"DiscoveredNode",
"]",
":",
"masters",
"=",
"[",
"node",
"for",
"node",
"in",
"nodes",
"if",
"node",
".",
"state",
"==",
"NodeState",
".",
"Master",
"]",
"replicas",
"=",
"[",
"node",
"for",
"node",
"in",
"nodes",
"if",
"node",
".",
"state",
"!=",
"NodeState",
".",
"Master",
"]",
"if",
"replicas",
":",
"return",
"random",
".",
"choice",
"(",
"replicas",
")",
"else",
":",
"# if you have more than one master then you're on your own, bud.",
"return",
"masters",
"[",
"0",
"]"
] |
Select a random replica if any are available or fall back to the master.
|
[
"Select",
"a",
"random",
"replica",
"if",
"any",
"are",
"available",
"or",
"fall",
"back",
"to",
"the",
"master",
"."
] |
ff0736c9cacd43c1f783c9668eefb53d03a3a93e
|
https://github.com/madedotcom/photon-pump/blob/ff0736c9cacd43c1f783c9668eefb53d03a3a93e/photonpump/discovery.py#L67-L79
|
6,686
|
nteract/vdom
|
vdom/core.py
|
create_event_handler
|
def create_event_handler(event_type, handler):
"""Register a comm and return a serializable object with target name"""
target_name = '{hash}_{event_type}'.format(hash=hash(handler), event_type=event_type)
def handle_comm_opened(comm, msg):
@comm.on_msg
def _handle_msg(msg):
data = msg['content']['data']
event = json.loads(data)
return_value = handler(event)
if return_value:
comm.send(return_value)
comm.send('Comm target "{target_name}" registered by vdom'.format(target_name=target_name))
# Register a new comm for this event handler
if get_ipython():
get_ipython().kernel.comm_manager.register_target(target_name, handle_comm_opened)
# Return a serialized object
return target_name
|
python
|
def create_event_handler(event_type, handler):
"""Register a comm and return a serializable object with target name"""
target_name = '{hash}_{event_type}'.format(hash=hash(handler), event_type=event_type)
def handle_comm_opened(comm, msg):
@comm.on_msg
def _handle_msg(msg):
data = msg['content']['data']
event = json.loads(data)
return_value = handler(event)
if return_value:
comm.send(return_value)
comm.send('Comm target "{target_name}" registered by vdom'.format(target_name=target_name))
# Register a new comm for this event handler
if get_ipython():
get_ipython().kernel.comm_manager.register_target(target_name, handle_comm_opened)
# Return a serialized object
return target_name
|
[
"def",
"create_event_handler",
"(",
"event_type",
",",
"handler",
")",
":",
"target_name",
"=",
"'{hash}_{event_type}'",
".",
"format",
"(",
"hash",
"=",
"hash",
"(",
"handler",
")",
",",
"event_type",
"=",
"event_type",
")",
"def",
"handle_comm_opened",
"(",
"comm",
",",
"msg",
")",
":",
"@",
"comm",
".",
"on_msg",
"def",
"_handle_msg",
"(",
"msg",
")",
":",
"data",
"=",
"msg",
"[",
"'content'",
"]",
"[",
"'data'",
"]",
"event",
"=",
"json",
".",
"loads",
"(",
"data",
")",
"return_value",
"=",
"handler",
"(",
"event",
")",
"if",
"return_value",
":",
"comm",
".",
"send",
"(",
"return_value",
")",
"comm",
".",
"send",
"(",
"'Comm target \"{target_name}\" registered by vdom'",
".",
"format",
"(",
"target_name",
"=",
"target_name",
")",
")",
"# Register a new comm for this event handler",
"if",
"get_ipython",
"(",
")",
":",
"get_ipython",
"(",
")",
".",
"kernel",
".",
"comm_manager",
".",
"register_target",
"(",
"target_name",
",",
"handle_comm_opened",
")",
"# Return a serialized object",
"return",
"target_name"
] |
Register a comm and return a serializable object with target name
|
[
"Register",
"a",
"comm",
"and",
"return",
"a",
"serializable",
"object",
"with",
"target",
"name"
] |
d1ef48dc20d50379b8137a104125c92f64b916e4
|
https://github.com/nteract/vdom/blob/d1ef48dc20d50379b8137a104125c92f64b916e4/vdom/core.py#L49-L70
|
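A hedged usage sketch tying this to create_component below: a callable passed as an attribute becomes an event handler on the element, and in a Jupyter kernel the comm target registered for it is named from the handler hash and event type:
from vdom.core import create_component  # import path as given in this file

button = create_component('button')

def on_click(event):
    # Runs in the kernel when the front end sends the comm message.
    print('clicked', event)
    return {'status': 'ok'}

# The callable attribute is split out into the element's event handlers.
el = button('Click me', onClick=on_click)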
6,687
|
nteract/vdom
|
vdom/core.py
|
to_json
|
def to_json(el, schema=None):
"""Convert an element to VDOM JSON
If you wish to validate the JSON, pass in a schema via the schema keyword
argument. If a schema is provided, this raises a ValidationError if JSON
does not match the schema.
"""
if type(el) is str:
json_el = el
elif type(el) is list:
json_el = list(map(to_json, el))
elif type(el) is dict:
assert 'tagName' in el
json_el = el.copy()
if 'attributes' not in el:
json_el['attributes'] = {}
if 'children' not in el:
json_el['children'] = []
elif isinstance(el, VDOM):
json_el = el.to_dict()
else:
json_el = el
if schema:
try:
validate(instance=json_el, schema=schema, cls=Draft4Validator)
except ValidationError as e:
raise ValidationError(_validate_err_template.format(schema, e))
return json_el
|
python
|
def to_json(el, schema=None):
"""Convert an element to VDOM JSON
If you wish to validate the JSON, pass in a schema via the schema keyword
argument. If a schema is provided, this raises a ValidationError if JSON
does not match the schema.
"""
if type(el) is str:
json_el = el
elif type(el) is list:
json_el = list(map(to_json, el))
elif type(el) is dict:
assert 'tagName' in el
json_el = el.copy()
if 'attributes' not in el:
json_el['attributes'] = {}
if 'children' not in el:
json_el['children'] = []
elif isinstance(el, VDOM):
json_el = el.to_dict()
else:
json_el = el
if schema:
try:
validate(instance=json_el, schema=schema, cls=Draft4Validator)
except ValidationError as e:
raise ValidationError(_validate_err_template.format(schema, e))
return json_el
|
[
"def",
"to_json",
"(",
"el",
",",
"schema",
"=",
"None",
")",
":",
"if",
"type",
"(",
"el",
")",
"is",
"str",
":",
"json_el",
"=",
"el",
"elif",
"type",
"(",
"el",
")",
"is",
"list",
":",
"json_el",
"=",
"list",
"(",
"map",
"(",
"to_json",
",",
"el",
")",
")",
"elif",
"type",
"(",
"el",
")",
"is",
"dict",
":",
"assert",
"'tagName'",
"in",
"el",
"json_el",
"=",
"el",
".",
"copy",
"(",
")",
"if",
"'attributes'",
"not",
"in",
"el",
":",
"json_el",
"[",
"'attributes'",
"]",
"=",
"{",
"}",
"if",
"'children'",
"not",
"in",
"el",
":",
"json_el",
"[",
"'children'",
"]",
"=",
"[",
"]",
"elif",
"isinstance",
"(",
"el",
",",
"VDOM",
")",
":",
"json_el",
"=",
"el",
".",
"to_dict",
"(",
")",
"else",
":",
"json_el",
"=",
"el",
"if",
"schema",
":",
"try",
":",
"validate",
"(",
"instance",
"=",
"json_el",
",",
"schema",
"=",
"schema",
",",
"cls",
"=",
"Draft4Validator",
")",
"except",
"ValidationError",
"as",
"e",
":",
"raise",
"ValidationError",
"(",
"_validate_err_template",
".",
"format",
"(",
"schema",
",",
"e",
")",
")",
"return",
"json_el"
] |
Convert an element to VDOM JSON
If you wish to validate the JSON, pass in a schema via the schema keyword
argument. If a schema is provided, this raises a ValidationError if JSON
does not match the schema.
|
[
"Convert",
"an",
"element",
"to",
"VDOM",
"JSON"
] |
d1ef48dc20d50379b8137a104125c92f64b916e4
|
https://github.com/nteract/vdom/blob/d1ef48dc20d50379b8137a104125c92f64b916e4/vdom/core.py#L73-L102
|
6,688
|
nteract/vdom
|
vdom/core.py
|
create_component
|
def create_component(tag_name, allow_children=True):
"""
Create a component for an HTML Tag
Examples:
>>> marquee = create_component('marquee')
>>> marquee('woohoo')
<marquee>woohoo</marquee>
"""
def _component(*children, **kwargs):
if 'children' in kwargs:
children = kwargs.pop('children')
else:
# Flatten children under specific circumstances
# This supports the use case of div([a, b, c])
# And allows users to skip the * operator
if len(children) == 1 and isinstance(children[0], list):
# We want children to be tuples and not lists, so
# they can be immutable
children = tuple(children[0])
style = None
event_handlers = None
attributes = dict(**kwargs)
if 'style' in kwargs:
style = kwargs.pop('style')
if 'attributes' in kwargs:
attributes = kwargs['attributes']
for key, value in attributes.items():
if callable(value):
attributes = attributes.copy()
if event_handlers == None:
event_handlers = {key: attributes.pop(key)}
else:
event_handlers[key] = attributes.pop(key)
if not allow_children and children:
# We don't allow children, but some were passed in
raise ValueError('<{tag_name} /> cannot have children'.format(tag_name=tag_name))
v = VDOM(tag_name, attributes, style, children, None, event_handlers)
return v
return _component
|
python
|
def create_component(tag_name, allow_children=True):
"""
Create a component for an HTML Tag
Examples:
>>> marquee = create_component('marquee')
>>> marquee('woohoo')
<marquee>woohoo</marquee>
"""
def _component(*children, **kwargs):
if 'children' in kwargs:
children = kwargs.pop('children')
else:
# Flatten children under specific circumstances
# This supports the use case of div([a, b, c])
# And allows users to skip the * operator
if len(children) == 1 and isinstance(children[0], list):
# We want children to be tuples and not lists, so
# they can be immutable
children = tuple(children[0])
style = None
event_handlers = None
attributes = dict(**kwargs)
if 'style' in kwargs:
style = kwargs.pop('style')
if 'attributes' in kwargs:
attributes = kwargs['attributes']
for key, value in attributes.items():
if callable(value):
attributes = attributes.copy()
if event_handlers == None:
event_handlers = {key: attributes.pop(key)}
else:
event_handlers[key] = attributes.pop(key)
if not allow_children and children:
# We don't allow children, but some were passed in
raise ValueError('<{tag_name} /> cannot have children'.format(tag_name=tag_name))
v = VDOM(tag_name, attributes, style, children, None, event_handlers)
return v
return _component
|
[
"def",
"create_component",
"(",
"tag_name",
",",
"allow_children",
"=",
"True",
")",
":",
"def",
"_component",
"(",
"*",
"children",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"'children'",
"in",
"kwargs",
":",
"children",
"=",
"kwargs",
".",
"pop",
"(",
"'children'",
")",
"else",
":",
"# Flatten children under specific circumstances",
"# This supports the use case of div([a, b, c])",
"# And allows users to skip the * operator",
"if",
"len",
"(",
"children",
")",
"==",
"1",
"and",
"isinstance",
"(",
"children",
"[",
"0",
"]",
",",
"list",
")",
":",
"# We want children to be tuples and not lists, so",
"# they can be immutable",
"children",
"=",
"tuple",
"(",
"children",
"[",
"0",
"]",
")",
"style",
"=",
"None",
"event_handlers",
"=",
"None",
"attributes",
"=",
"dict",
"(",
"*",
"*",
"kwargs",
")",
"if",
"'style'",
"in",
"kwargs",
":",
"style",
"=",
"kwargs",
".",
"pop",
"(",
"'style'",
")",
"if",
"'attributes'",
"in",
"kwargs",
":",
"attributes",
"=",
"kwargs",
"[",
"'attributes'",
"]",
"for",
"key",
",",
"value",
"in",
"attributes",
".",
"items",
"(",
")",
":",
"if",
"callable",
"(",
"value",
")",
":",
"attributes",
"=",
"attributes",
".",
"copy",
"(",
")",
"if",
"event_handlers",
"==",
"None",
":",
"event_handlers",
"=",
"{",
"key",
":",
"attributes",
".",
"pop",
"(",
"key",
")",
"}",
"else",
":",
"event_handlers",
"[",
"key",
"]",
"=",
"attributes",
".",
"pop",
"(",
"key",
")",
"if",
"not",
"allow_children",
"and",
"children",
":",
"# We don't allow children, but some were passed in",
"raise",
"ValueError",
"(",
"'<{tag_name} /> cannot have children'",
".",
"format",
"(",
"tag_name",
"=",
"tag_name",
")",
")",
"v",
"=",
"VDOM",
"(",
"tag_name",
",",
"attributes",
",",
"style",
",",
"children",
",",
"None",
",",
"event_handlers",
")",
"return",
"v",
"return",
"_component"
] |
Create a component for an HTML Tag
Examples:
>>> marquee = create_component('marquee')
>>> marquee('woohoo')
<marquee>woohoo</marquee>
|
[
"Create",
"a",
"component",
"for",
"an",
"HTML",
"Tag"
] |
d1ef48dc20d50379b8137a104125c92f64b916e4
|
https://github.com/nteract/vdom/blob/d1ef48dc20d50379b8137a104125c92f64b916e4/vdom/core.py#L301-L343
|
6,689
|
nteract/vdom
|
vdom/core.py
|
VDOM.validate
|
def validate(self, schema):
"""
Validate VDOM against given JSON Schema
Raises ValidationError if schema does not match
"""
try:
validate(instance=self.to_dict(), schema=schema, cls=Draft4Validator)
except ValidationError as e:
raise ValidationError(_validate_err_template.format(VDOM_SCHEMA, e))
|
python
|
def validate(self, schema):
"""
Validate VDOM against given JSON Schema
Raises ValidationError if schema does not match
"""
try:
validate(instance=self.to_dict(), schema=schema, cls=Draft4Validator)
except ValidationError as e:
raise ValidationError(_validate_err_template.format(VDOM_SCHEMA, e))
|
[
"def",
"validate",
"(",
"self",
",",
"schema",
")",
":",
"try",
":",
"validate",
"(",
"instance",
"=",
"self",
".",
"to_dict",
"(",
")",
",",
"schema",
"=",
"schema",
",",
"cls",
"=",
"Draft4Validator",
")",
"except",
"ValidationError",
"as",
"e",
":",
"raise",
"ValidationError",
"(",
"_validate_err_template",
".",
"format",
"(",
"VDOM_SCHEMA",
",",
"e",
")",
")"
] |
Validate VDOM against given JSON Schema
Raises ValidationError if schema does not match
|
[
"Validate",
"VDOM",
"against",
"given",
"JSON",
"Schema"
] |
d1ef48dc20d50379b8137a104125c92f64b916e4
|
https://github.com/nteract/vdom/blob/d1ef48dc20d50379b8137a104125c92f64b916e4/vdom/core.py#L174-L183
|
6,690
|
nteract/vdom
|
vdom/core.py
|
VDOM.to_dict
|
def to_dict(self):
"""Converts VDOM object to a dictionary that passes our schema
"""
attributes = dict(self.attributes.items())
if self.style:
attributes.update({"style": dict(self.style.items())})
vdom_dict = {'tagName': self.tag_name, 'attributes': attributes}
if self.event_handlers:
event_handlers = dict(self.event_handlers.items())
for key, value in event_handlers.items():
value = create_event_handler(key, value)
event_handlers[key] = value
vdom_dict['eventHandlers'] = event_handlers
if self.key:
vdom_dict['key'] = self.key
vdom_dict['children'] = [c.to_dict() if isinstance(c, VDOM) else c for c in self.children]
return vdom_dict
|
python
|
def to_dict(self):
"""Converts VDOM object to a dictionary that passes our schema
"""
attributes = dict(self.attributes.items())
if self.style:
attributes.update({"style": dict(self.style.items())})
vdom_dict = {'tagName': self.tag_name, 'attributes': attributes}
if self.event_handlers:
event_handlers = dict(self.event_handlers.items())
for key, value in event_handlers.items():
value = create_event_handler(key, value)
event_handlers[key] = value
vdom_dict['eventHandlers'] = event_handlers
if self.key:
vdom_dict['key'] = self.key
vdom_dict['children'] = [c.to_dict() if isinstance(c, VDOM) else c for c in self.children]
return vdom_dict
|
[
"def",
"to_dict",
"(",
"self",
")",
":",
"attributes",
"=",
"dict",
"(",
"self",
".",
"attributes",
".",
"items",
"(",
")",
")",
"if",
"self",
".",
"style",
":",
"attributes",
".",
"update",
"(",
"{",
"\"style\"",
":",
"dict",
"(",
"self",
".",
"style",
".",
"items",
"(",
")",
")",
"}",
")",
"vdom_dict",
"=",
"{",
"'tagName'",
":",
"self",
".",
"tag_name",
",",
"'attributes'",
":",
"attributes",
"}",
"if",
"self",
".",
"event_handlers",
":",
"event_handlers",
"=",
"dict",
"(",
"self",
".",
"event_handlers",
".",
"items",
"(",
")",
")",
"for",
"key",
",",
"value",
"in",
"event_handlers",
".",
"items",
"(",
")",
":",
"value",
"=",
"create_event_handler",
"(",
"key",
",",
"value",
")",
"event_handlers",
"[",
"key",
"]",
"=",
"value",
"vdom_dict",
"[",
"'eventHandlers'",
"]",
"=",
"event_handlers",
"if",
"self",
".",
"key",
":",
"vdom_dict",
"[",
"'key'",
"]",
"=",
"self",
".",
"key",
"vdom_dict",
"[",
"'children'",
"]",
"=",
"[",
"c",
".",
"to_dict",
"(",
")",
"if",
"isinstance",
"(",
"c",
",",
"VDOM",
")",
"else",
"c",
"for",
"c",
"in",
"self",
".",
"children",
"]",
"return",
"vdom_dict"
] |
Converts VDOM object to a dictionary that passes our schema
|
[
"Converts",
"VDOM",
"object",
"to",
"a",
"dictionary",
"that",
"passes",
"our",
"schema"
] |
d1ef48dc20d50379b8137a104125c92f64b916e4
|
https://github.com/nteract/vdom/blob/d1ef48dc20d50379b8137a104125c92f64b916e4/vdom/core.py#L185-L201
|
6,691
|
konstantint/PassportEye
|
passporteye/mrz/text.py
|
MRZ._guess_type
|
def _guess_type(mrz_lines):
"""Guesses the type of the MRZ from given lines. Returns 'TD1', 'TD2', 'TD3', 'MRVA', 'MRVB' or None.
The algorithm is basically just counting lines, looking at their length and checking whether the first character is a 'V'
>>> MRZ._guess_type([]) is None
True
>>> MRZ._guess_type([1]) is None
True
>>> MRZ._guess_type([1,2]) is None # No len() for numbers
True
>>> MRZ._guess_type(['a','b']) # This way passes
'TD2'
>>> MRZ._guess_type(['*'*40, '*'*40])
'TD3'
>>> MRZ._guess_type([1,2,3])
'TD1'
>>> MRZ._guess_type(['V'*40, '*'*40])
'MRVA'
>>> MRZ._guess_type(['V'*36, '*'*36])
'MRVB'
"""
try:
if len(mrz_lines) == 3:
return 'TD1'
elif len(mrz_lines) == 2 and len(mrz_lines[0]) < 40 and len(mrz_lines[1]) < 40:
return 'MRVB' if mrz_lines[0][0].upper() == 'V' else 'TD2'
elif len(mrz_lines) == 2:
return 'MRVA' if mrz_lines[0][0].upper() == 'V' else 'TD3'
else:
return None
except Exception: #pylint: disable=broad-except
return None
|
python
|
def _guess_type(mrz_lines):
"""Guesses the type of the MRZ from given lines. Returns 'TD1', 'TD2', 'TD3', 'MRVA', 'MRVB' or None.
The algorithm is basically just counting lines, looking at their length and checking whether the first character is a 'V'
>>> MRZ._guess_type([]) is None
True
>>> MRZ._guess_type([1]) is None
True
>>> MRZ._guess_type([1,2]) is None # No len() for numbers
True
>>> MRZ._guess_type(['a','b']) # This way passes
'TD2'
>>> MRZ._guess_type(['*'*40, '*'*40])
'TD3'
>>> MRZ._guess_type([1,2,3])
'TD1'
>>> MRZ._guess_type(['V'*40, '*'*40])
'MRVA'
>>> MRZ._guess_type(['V'*36, '*'*36])
'MRVB'
"""
try:
if len(mrz_lines) == 3:
return 'TD1'
elif len(mrz_lines) == 2 and len(mrz_lines[0]) < 40 and len(mrz_lines[1]) < 40:
return 'MRVB' if mrz_lines[0][0].upper() == 'V' else 'TD2'
elif len(mrz_lines) == 2:
return 'MRVA' if mrz_lines[0][0].upper() == 'V' else 'TD3'
else:
return None
except Exception: #pylint: disable=broad-except
return None
|
[
"def",
"_guess_type",
"(",
"mrz_lines",
")",
":",
"try",
":",
"if",
"len",
"(",
"mrz_lines",
")",
"==",
"3",
":",
"return",
"'TD1'",
"elif",
"len",
"(",
"mrz_lines",
")",
"==",
"2",
"and",
"len",
"(",
"mrz_lines",
"[",
"0",
"]",
")",
"<",
"40",
"and",
"len",
"(",
"mrz_lines",
"[",
"1",
"]",
")",
"<",
"40",
":",
"return",
"'MRVB'",
"if",
"mrz_lines",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"==",
"'V'",
"else",
"'TD2'",
"elif",
"len",
"(",
"mrz_lines",
")",
"==",
"2",
":",
"return",
"'MRVA'",
"if",
"mrz_lines",
"[",
"0",
"]",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"==",
"'V'",
"else",
"'TD3'",
"else",
":",
"return",
"None",
"except",
"Exception",
":",
"#pylint: disable=broad-except",
"return",
"None"
] |
Guesses the type of the MRZ from given lines. Returns 'TD1', 'TD2', 'TD3', 'MRVA', 'MRVB' or None.
The algorithm is basically just counting lines, looking at their length and checking whether the first character is a 'V'
>>> MRZ._guess_type([]) is None
True
>>> MRZ._guess_type([1]) is None
True
>>> MRZ._guess_type([1,2]) is None # No len() for numbers
True
>>> MRZ._guess_type(['a','b']) # This way passes
'TD2'
>>> MRZ._guess_type(['*'*40, '*'*40])
'TD3'
>>> MRZ._guess_type([1,2,3])
'TD1'
>>> MRZ._guess_type(['V'*40, '*'*40])
'MRVA'
>>> MRZ._guess_type(['V'*36, '*'*36])
'MRVB'
|
[
"Guesses",
"the",
"type",
"of",
"the",
"MRZ",
"from",
"given",
"lines",
".",
"Returns",
"TD1",
"TD2",
"TD3",
"MRVA",
"MRVB",
"or",
"None",
".",
"The",
"algorithm",
"is",
"basically",
"just",
"counting",
"lines",
"looking",
"at",
"their",
"length",
"and",
"checking",
"whether",
"the",
"first",
"character",
"is",
"a",
"V"
] |
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
|
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/mrz/text.py#L129-L160
|
6,692
|
konstantint/PassportEye
|
passporteye/util/pipeline.py
|
Pipeline.remove_component
|
def remove_component(self, name):
"""Removes an existing component with a given name, invalidating all the values computed by
the previous component."""
if name not in self.components:
raise Exception("No component named %s" % name)
del self.components[name]
del self.depends[name]
for p in self.provides[name]:
del self.whoprovides[p]
self.invalidate(p)
del self.provides[name]
|
python
|
def remove_component(self, name):
"""Removes an existing component with a given name, invalidating all the values computed by
the previous component."""
if name not in self.components:
raise Exception("No component named %s" % name)
del self.components[name]
del self.depends[name]
for p in self.provides[name]:
del self.whoprovides[p]
self.invalidate(p)
del self.provides[name]
|
[
"def",
"remove_component",
"(",
"self",
",",
"name",
")",
":",
"if",
"name",
"not",
"in",
"self",
".",
"components",
":",
"raise",
"Exception",
"(",
"\"No component named %s\"",
"%",
"name",
")",
"del",
"self",
".",
"components",
"[",
"name",
"]",
"del",
"self",
".",
"depends",
"[",
"name",
"]",
"for",
"p",
"in",
"self",
".",
"provides",
"[",
"name",
"]",
":",
"del",
"self",
".",
"whoprovides",
"[",
"p",
"]",
"self",
".",
"invalidate",
"(",
"p",
")",
"del",
"self",
".",
"provides",
"[",
"name",
"]"
] |
Removes an existing component with a given name, invalidating all the values computed by
the previous component.
|
[
"Removes",
"an",
"existing",
"component",
"with",
"a",
"given",
"name",
"invalidating",
"all",
"the",
"values",
"computed",
"by",
"the",
"previous",
"component",
"."
] |
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
|
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/pipeline.py#L68-L78
|
6,693
|
konstantint/PassportEye
|
passporteye/util/pipeline.py
|
Pipeline.replace_component
|
def replace_component(self, name, callable, provides=None, depends=None):
"""Changes an existing component with a given name, invalidating all the values computed by
the previous component and its successors."""
self.remove_component(name)
self.add_component(name, callable, provides, depends)
|
python
|
def replace_component(self, name, callable, provides=None, depends=None):
"""Changes an existing component with a given name, invalidating all the values computed by
the previous component and its successors."""
self.remove_component(name)
self.add_component(name, callable, provides, depends)
|
[
"def",
"replace_component",
"(",
"self",
",",
"name",
",",
"callable",
",",
"provides",
"=",
"None",
",",
"depends",
"=",
"None",
")",
":",
"self",
".",
"remove_component",
"(",
"name",
")",
"self",
".",
"add_component",
"(",
"name",
",",
"callable",
",",
"provides",
",",
"depends",
")"
] |
Changes an existing component with a given name, invalidating all the values computed by
the previous component and its successors.
|
[
"Changes",
"an",
"existing",
"component",
"with",
"a",
"given",
"name",
"invalidating",
"all",
"the",
"values",
"computed",
"by",
"the",
"previous",
"component",
"and",
"its",
"successors",
"."
] |
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
|
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/pipeline.py#L80-L84
|
6,694
|
konstantint/PassportEye
|
passporteye/util/pipeline.py
|
Pipeline.invalidate
|
def invalidate(self, key):
"""Remove the given data item along with all items that depend on it in the graph."""
if key not in self.data:
return
del self.data[key]
# Find all components that used it and invalidate their results
for cname in self.components:
if key in self.depends[cname]:
for downstream_key in self.provides[cname]:
self.invalidate(downstream_key)
|
python
|
def invalidate(self, key):
"""Remove the given data item along with all items that depend on it in the graph."""
if key not in self.data:
return
del self.data[key]
# Find all components that used it and invalidate their results
for cname in self.components:
if key in self.depends[cname]:
for downstream_key in self.provides[cname]:
self.invalidate(downstream_key)
|
[
"def",
"invalidate",
"(",
"self",
",",
"key",
")",
":",
"if",
"key",
"not",
"in",
"self",
".",
"data",
":",
"return",
"del",
"self",
".",
"data",
"[",
"key",
"]",
"# Find all components that used it and invalidate their results",
"for",
"cname",
"in",
"self",
".",
"components",
":",
"if",
"key",
"in",
"self",
".",
"depends",
"[",
"cname",
"]",
":",
"for",
"downstream_key",
"in",
"self",
".",
"provides",
"[",
"cname",
"]",
":",
"self",
".",
"invalidate",
"(",
"downstream_key",
")"
] |
Remove the given data item along with all items that depend on it in the graph.
|
[
"Remove",
"the",
"given",
"data",
"item",
"along",
"with",
"all",
"items",
"that",
"depend",
"on",
"it",
"in",
"the",
"graph",
"."
] |
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
|
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/pipeline.py#L86-L96
|
6,695
|
konstantint/PassportEye
|
passporteye/util/ocr.py
|
ocr
|
def ocr(img, mrz_mode=True, extra_cmdline_params=''):
"""Runs Tesseract on a given image. Writes an intermediate tempfile and then runs the tesseract command on the image.
This is a simplified modification of image_to_string from PyTesseract, which is adapted to SKImage rather than PIL.
In principle we could have reimplemented it just as well - there are some apparent bugs in PyTesseract, but it works so far :)
:param mrz_mode: when this is True (default) the tesseract is configured to recognize MRZs rather than arbitrary texts.
When False, no specific configuration parameters are passed (and you are free to provide your own via `extra_cmdline_params`)
:param extra_cmdline_params: extra parameters passed to tesseract. When mrz_mode=True, these are appended to whatever is the
"best known" configuration at the moment.
"--oem 0" is the parameter you might want to pass. This selects the Tesseract's "legacy" OCR engine, which often seems
to work better than the new LSTM-based one.
"""
input_file_name = '%s.bmp' % _tempnam()
output_file_name_base = '%s' % _tempnam()
output_file_name = "%s.txt" % output_file_name_base
try:
# Prevent annoying warning about lossy conversion to uint8
if str(img.dtype).startswith('float') and np.nanmin(img) >= 0 and np.nanmax(img) <= 1:
img = img.astype(np.float64) * (np.power(2.0, 8) - 1) + 0.499999999
img = img.astype(np.uint8)
imwrite(input_file_name, img)
if mrz_mode:
# NB: Tesseract 4.0 does not seem to support tessedit_char_whitelist
config = ("--psm 6 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789><"
" -c load_system_dawg=F -c load_freq_dawg=F {}").format(extra_cmdline_params)
else:
config = "{}".format(extra_cmdline_params)
pytesseract.run_tesseract(input_file_name,
output_file_name_base,
'txt',
lang=None,
config=config)
if sys.version_info.major == 3:
f = open(output_file_name, encoding='utf-8')
else:
f = open(output_file_name)
try:
return f.read().strip()
finally:
f.close()
finally:
pytesseract.cleanup(input_file_name)
pytesseract.cleanup(output_file_name)
|
python
|
def ocr(img, mrz_mode=True, extra_cmdline_params=''):
"""Runs Tesseract on a given image. Writes an intermediate tempfile and then runs the tesseract command on the image.
This is a simplified modification of image_to_string from PyTesseract, which is adapted to SKImage rather than PIL.
In principle we could have reimplemented it just as well - there are some apparent bugs in PyTesseract, but it works so far :)
:param mrz_mode: when this is True (default) the tesseract is configured to recognize MRZs rather than arbitrary texts.
When False, no specific configuration parameters are passed (and you are free to provide your own via `extra_cmdline_params`)
:param extra_cmdline_params: extra parameters passed to tesseract. When mrz_mode=True, these are appended to whatever is the
"best known" configuration at the moment.
"--oem 0" is the parameter you might want to pass. This selects the Tesseract's "legacy" OCR engine, which often seems
to work better than the new LSTM-based one.
"""
input_file_name = '%s.bmp' % _tempnam()
output_file_name_base = '%s' % _tempnam()
output_file_name = "%s.txt" % output_file_name_base
try:
# Prevent annoying warning about lossy conversion to uint8
if str(img.dtype).startswith('float') and np.nanmin(img) >= 0 and np.nanmax(img) <= 1:
img = img.astype(np.float64) * (np.power(2.0, 8) - 1) + 0.499999999
img = img.astype(np.uint8)
imwrite(input_file_name, img)
if mrz_mode:
# NB: Tesseract 4.0 does not seem to support tessedit_char_whitelist
config = ("--psm 6 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789><"
" -c load_system_dawg=F -c load_freq_dawg=F {}").format(extra_cmdline_params)
else:
config = "{}".format(extra_cmdline_params)
pytesseract.run_tesseract(input_file_name,
output_file_name_base,
'txt',
lang=None,
config=config)
if sys.version_info.major == 3:
f = open(output_file_name, encoding='utf-8')
else:
f = open(output_file_name)
try:
return f.read().strip()
finally:
f.close()
finally:
pytesseract.cleanup(input_file_name)
pytesseract.cleanup(output_file_name)
|
[
"def",
"ocr",
"(",
"img",
",",
"mrz_mode",
"=",
"True",
",",
"extra_cmdline_params",
"=",
"''",
")",
":",
"input_file_name",
"=",
"'%s.bmp'",
"%",
"_tempnam",
"(",
")",
"output_file_name_base",
"=",
"'%s'",
"%",
"_tempnam",
"(",
")",
"output_file_name",
"=",
"\"%s.txt\"",
"%",
"output_file_name_base",
"try",
":",
"# Prevent annoying warning about lossy conversion to uint8",
"if",
"str",
"(",
"img",
".",
"dtype",
")",
".",
"startswith",
"(",
"'float'",
")",
"and",
"np",
".",
"nanmin",
"(",
"img",
")",
">=",
"0",
"and",
"np",
".",
"nanmax",
"(",
"img",
")",
"<=",
"1",
":",
"img",
"=",
"img",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"*",
"(",
"np",
".",
"power",
"(",
"2.0",
",",
"8",
")",
"-",
"1",
")",
"+",
"0.499999999",
"img",
"=",
"img",
".",
"astype",
"(",
"np",
".",
"uint8",
")",
"imwrite",
"(",
"input_file_name",
",",
"img",
")",
"if",
"mrz_mode",
":",
"# NB: Tesseract 4.0 does not seem to support tessedit_char_whitelist",
"config",
"=",
"(",
"\"--psm 6 -c tessedit_char_whitelist=ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789><\"",
"\" -c load_system_dawg=F -c load_freq_dawg=F {}\"",
")",
".",
"format",
"(",
"extra_cmdline_params",
")",
"else",
":",
"config",
"=",
"\"{}\"",
".",
"format",
"(",
"extra_cmdline_params",
")",
"pytesseract",
".",
"run_tesseract",
"(",
"input_file_name",
",",
"output_file_name_base",
",",
"'txt'",
",",
"lang",
"=",
"None",
",",
"config",
"=",
"config",
")",
"if",
"sys",
".",
"version_info",
".",
"major",
"==",
"3",
":",
"f",
"=",
"open",
"(",
"output_file_name",
",",
"encoding",
"=",
"'utf-8'",
")",
"else",
":",
"f",
"=",
"open",
"(",
"output_file_name",
")",
"try",
":",
"return",
"f",
".",
"read",
"(",
")",
".",
"strip",
"(",
")",
"finally",
":",
"f",
".",
"close",
"(",
")",
"finally",
":",
"pytesseract",
".",
"cleanup",
"(",
"input_file_name",
")",
"pytesseract",
".",
"cleanup",
"(",
"output_file_name",
")"
] |
Runs Tesseract on a given image. Writes an intermediate tempfile and then runs the tesseract command on the image.
This is a simplified modification of image_to_string from PyTesseract, which is adapted to SKImage rather than PIL.
In principle we could have reimplemented it just as well - there are some apparent bugs in PyTesseract, but it works so far :)
:param mrz_mode: when this is True (default) the tesseract is configured to recognize MRZs rather than arbitrary texts.
When False, no specific configuration parameters are passed (and you are free to provide your own via `extra_cmdline_params`)
:param extra_cmdline_params: extra parameters passed to tesseract. When mrz_mode=True, these are appended to whatever is the
"best known" configuration at the moment.
"--oem 0" is the parameter you might want to pass. This selects the Tesseract's "legacy" OCR engine, which often seems
to work better than the new LSTM-based one.
|
[
"Runs",
"Tesseract",
"on",
"a",
"given",
"image",
".",
"Writes",
"an",
"intermediate",
"tempfile",
"and",
"then",
"runs",
"the",
"tesseract",
"command",
"on",
"the",
"image",
"."
] |
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
|
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/ocr.py#L16-L64
|
6,696
|
konstantint/PassportEye
|
passporteye/util/geometry.py
|
RotatedBox.approx_equal
|
def approx_equal(self, center, width, height, angle, tol=1e-6):
"Method mainly useful for testing"
return abs(self.cx - center[0]) < tol and abs(self.cy - center[1]) < tol and abs(self.width - width) < tol and \
abs(self.height - height) < tol and abs(self.angle - angle) < tol
|
python
|
def approx_equal(self, center, width, height, angle, tol=1e-6):
"Method mainly useful for testing"
return abs(self.cx - center[0]) < tol and abs(self.cy - center[1]) < tol and abs(self.width - width) < tol and \
abs(self.height - height) < tol and abs(self.angle - angle) < tol
|
[
"def",
"approx_equal",
"(",
"self",
",",
"center",
",",
"width",
",",
"height",
",",
"angle",
",",
"tol",
"=",
"1e-6",
")",
":",
"return",
"abs",
"(",
"self",
".",
"cx",
"-",
"center",
"[",
"0",
"]",
")",
"<",
"tol",
"and",
"abs",
"(",
"self",
".",
"cy",
"-",
"center",
"[",
"1",
"]",
")",
"<",
"tol",
"and",
"abs",
"(",
"self",
".",
"width",
"-",
"width",
")",
"<",
"tol",
"and",
"abs",
"(",
"self",
".",
"height",
"-",
"height",
")",
"<",
"tol",
"and",
"abs",
"(",
"self",
".",
"angle",
"-",
"angle",
")",
"<",
"tol"
] |
Method mainly useful for testing
|
[
"Method",
"mainly",
"useful",
"for",
"testing"
] |
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
|
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/geometry.py#L49-L52
|
6,697
|
konstantint/PassportEye
|
passporteye/util/geometry.py
|
RotatedBox.rotated
|
def rotated(self, rotation_center, angle):
"""Returns a RotatedBox that is obtained by rotating this box around a given center by a given angle.
>>> assert RotatedBox([2, 2], 2, 1, 0.1).rotated([1, 1], np.pi/2).approx_equal([0, 2], 2, 1, np.pi/2+0.1)
"""
rot = np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]])
t = np.asfarray(rotation_center)
new_c = np.dot(rot.T, (self.center - t)) + t
return RotatedBox(new_c, self.width, self.height, (self.angle+angle) % (np.pi*2))
|
python
|
def rotated(self, rotation_center, angle):
"""Returns a RotatedBox that is obtained by rotating this box around a given center by a given angle.
>>> assert RotatedBox([2, 2], 2, 1, 0.1).rotated([1, 1], np.pi/2).approx_equal([0, 2], 2, 1, np.pi/2+0.1)
"""
rot = np.array([[np.cos(angle), np.sin(angle)], [-np.sin(angle), np.cos(angle)]])
t = np.asfarray(rotation_center)
new_c = np.dot(rot.T, (self.center - t)) + t
return RotatedBox(new_c, self.width, self.height, (self.angle+angle) % (np.pi*2))
|
[
"def",
"rotated",
"(",
"self",
",",
"rotation_center",
",",
"angle",
")",
":",
"rot",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"np",
".",
"cos",
"(",
"angle",
")",
",",
"np",
".",
"sin",
"(",
"angle",
")",
"]",
",",
"[",
"-",
"np",
".",
"sin",
"(",
"angle",
")",
",",
"np",
".",
"cos",
"(",
"angle",
")",
"]",
"]",
")",
"t",
"=",
"np",
".",
"asfarray",
"(",
"rotation_center",
")",
"new_c",
"=",
"np",
".",
"dot",
"(",
"rot",
".",
"T",
",",
"(",
"self",
".",
"center",
"-",
"t",
")",
")",
"+",
"t",
"return",
"RotatedBox",
"(",
"new_c",
",",
"self",
".",
"width",
",",
"self",
".",
"height",
",",
"(",
"self",
".",
"angle",
"+",
"angle",
")",
"%",
"(",
"np",
".",
"pi",
"*",
"2",
")",
")"
] |
Returns a RotatedBox that is obtained by rotating this box around a given center by a given angle.
>>> assert RotatedBox([2, 2], 2, 1, 0.1).rotated([1, 1], np.pi/2).approx_equal([0, 2], 2, 1, np.pi/2+0.1)
|
[
"Returns",
"a",
"RotatedBox",
"that",
"is",
"obtained",
"by",
"rotating",
"this",
"box",
"around",
"a",
"given",
"center",
"by",
"a",
"given",
"angle",
"."
] |
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
|
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/geometry.py#L54-L62
|
6,698
|
konstantint/PassportEye
|
passporteye/util/geometry.py
|
RotatedBox.as_poly
|
def as_poly(self, margin_width=0, margin_height=0):
"""Converts this box to a polygon, i.e. 4x2 array, representing the four corners starting from lower left to upper left counterclockwise.
:param margin_width: The additional "margin" that will be added to the box along its width dimension (from both sides) before conversion.
:param margin_height: The additional "margin" that will be added to the box along its height dimension (from both sides) before conversion.
>>> RotatedBox([0, 0], 4, 2, 0).as_poly()
array([[-2., -1.],
[ 2., -1.],
[ 2., 1.],
[-2., 1.]])
>>> RotatedBox([0, 0], 4, 2, np.pi/4).as_poly()
array([[-0.707..., -2.121...],
[ 2.121..., 0.707...],
[ 0.707..., 2.121...],
[-2.121..., -0.707...]])
>>> RotatedBox([0, 0], 4, 2, np.pi/2).as_poly()
array([[ 1., -2.],
[ 1., 2.],
[-1., 2.],
[-1., -2.]])
>>> RotatedBox([0, 0], 0, 0, np.pi/2).as_poly(2, 1)
array([[ 1., -2.],
[ 1., 2.],
[-1., 2.],
[-1., -2.]])
"""
v_hor = (self.width/2 + margin_width)*np.array([np.cos(self.angle), np.sin(self.angle)])
v_vert = (self.height/2 + margin_height)*np.array([-np.sin(self.angle), np.cos(self.angle)])
c = np.array([self.cx, self.cy])
return np.vstack([c - v_hor - v_vert, c + v_hor - v_vert, c + v_hor + v_vert, c - v_hor + v_vert])
|
python
|
def as_poly(self, margin_width=0, margin_height=0):
"""Converts this box to a polygon, i.e. 4x2 array, representing the four corners starting from lower left to upper left counterclockwise.
:param margin_width: The additional "margin" that will be added to the box along its width dimension (from both sides) before conversion.
:param margin_height: The additional "margin" that will be added to the box along its height dimension (from both sides) before conversion.
>>> RotatedBox([0, 0], 4, 2, 0).as_poly()
array([[-2., -1.],
[ 2., -1.],
[ 2., 1.],
[-2., 1.]])
>>> RotatedBox([0, 0], 4, 2, np.pi/4).as_poly()
array([[-0.707..., -2.121...],
[ 2.121..., 0.707...],
[ 0.707..., 2.121...],
[-2.121..., -0.707...]])
>>> RotatedBox([0, 0], 4, 2, np.pi/2).as_poly()
array([[ 1., -2.],
[ 1., 2.],
[-1., 2.],
[-1., -2.]])
>>> RotatedBox([0, 0], 0, 0, np.pi/2).as_poly(2, 1)
array([[ 1., -2.],
[ 1., 2.],
[-1., 2.],
[-1., -2.]])
"""
v_hor = (self.width/2 + margin_width)*np.array([np.cos(self.angle), np.sin(self.angle)])
v_vert = (self.height/2 + margin_height)*np.array([-np.sin(self.angle), np.cos(self.angle)])
c = np.array([self.cx, self.cy])
return np.vstack([c - v_hor - v_vert, c + v_hor - v_vert, c + v_hor + v_vert, c - v_hor + v_vert])
|
[
"def",
"as_poly",
"(",
"self",
",",
"margin_width",
"=",
"0",
",",
"margin_height",
"=",
"0",
")",
":",
"v_hor",
"=",
"(",
"self",
".",
"width",
"/",
"2",
"+",
"margin_width",
")",
"*",
"np",
".",
"array",
"(",
"[",
"np",
".",
"cos",
"(",
"self",
".",
"angle",
")",
",",
"np",
".",
"sin",
"(",
"self",
".",
"angle",
")",
"]",
")",
"v_vert",
"=",
"(",
"self",
".",
"height",
"/",
"2",
"+",
"margin_height",
")",
"*",
"np",
".",
"array",
"(",
"[",
"-",
"np",
".",
"sin",
"(",
"self",
".",
"angle",
")",
",",
"np",
".",
"cos",
"(",
"self",
".",
"angle",
")",
"]",
")",
"c",
"=",
"np",
".",
"array",
"(",
"[",
"self",
".",
"cx",
",",
"self",
".",
"cy",
"]",
")",
"return",
"np",
".",
"vstack",
"(",
"[",
"c",
"-",
"v_hor",
"-",
"v_vert",
",",
"c",
"+",
"v_hor",
"-",
"v_vert",
",",
"c",
"+",
"v_hor",
"+",
"v_vert",
",",
"c",
"-",
"v_hor",
"+",
"v_vert",
"]",
")"
] |
Converts this box to a polygon, i.e. 4x2 array, representing the four corners starting from lower left to upper left counterclockwise.
:param margin_width: The additional "margin" that will be added to the box along its width dimension (from both sides) before conversion.
:param margin_height: The additional "margin" that will be added to the box along its height dimension (from both sides) before conversion.
>>> RotatedBox([0, 0], 4, 2, 0).as_poly()
array([[-2., -1.],
[ 2., -1.],
[ 2., 1.],
[-2., 1.]])
>>> RotatedBox([0, 0], 4, 2, np.pi/4).as_poly()
array([[-0.707..., -2.121...],
[ 2.121..., 0.707...],
[ 0.707..., 2.121...],
[-2.121..., -0.707...]])
>>> RotatedBox([0, 0], 4, 2, np.pi/2).as_poly()
array([[ 1., -2.],
[ 1., 2.],
[-1., 2.],
[-1., -2.]])
>>> RotatedBox([0, 0], 0, 0, np.pi/2).as_poly(2, 1)
array([[ 1., -2.],
[ 1., 2.],
[-1., 2.],
[-1., -2.]])
|
[
"Converts",
"this",
"box",
"to",
"a",
"polygon",
"i",
".",
"e",
".",
"4x2",
"array",
"representing",
"the",
"four",
"corners",
"starting",
"from",
"lower",
"left",
"to",
"upper",
"left",
"counterclockwise",
"."
] |
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
|
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/geometry.py#L64-L94
|
6,699
|
konstantint/PassportEye
|
passporteye/util/geometry.py
|
RotatedBox.extract_from_image
|
def extract_from_image(self, img, scale=1.0, margin_width=5, margin_height=5):
"""Extracts the contents of this box from a given image.
For that the image is "unrotated" by the appropriate angle, and the corresponding part is extracted from it.
Returns an image with dimensions height*scale x width*scale.
Note that the box coordinates are interpreted as "image coordinates" (i.e. x is row and y is column),
and box angle is considered to be relative to the vertical (i.e. np.pi/2 is "normal orientation")
:param img: a numpy ndarray suitable for image processing via skimage.
:param scale: the RotatedBox is scaled by this value before performing the extraction.
This is necessary when, for example, the location of a particular feature is determined using a smaller image,
yet then the corresponding area needs to be extracted from the original, larger image.
The scale parameter in this case should be width_of_larger_image/width_of_smaller_image.
:param margin_width: The margin that should be added to the width dimension of the box from each size.
This value is given wrt actual box dimensions (i.e. not scaled).
:param margin_height: The margin that should be added to the height dimension of the box from each side.
:return: a numpy ndarray, corresponding to the extracted region (aligned straight).
TODO: This could be made more efficient if we avoid rotating the full image and cut out the ROI from it beforehand.
"""
rotate_by = (np.pi/2 - self.angle)*180/np.pi
img_rotated = transform.rotate(img, angle=rotate_by, center=[self.center[1]*scale, self.center[0]*scale], resize=True)
# The resizeable transform will shift the resulting image somewhat wrt original coordinates.
# When we cut out the box we will compensate for this shift.
shift_c, shift_r = self._compensate_rotation_shift(img, scale)
r1 = max(int((self.center[0] - self.height/2 - margin_height)*scale - shift_r), 0)
r2 = int((self.center[0] + self.height/2 + margin_height)*scale - shift_r)
c1 = max(int((self.center[1] - self.width/2 - margin_width)*scale - shift_c), 0)
c2 = int((self.center[1] + self.width/2 + margin_width)*scale - shift_c)
return img_rotated[r1:r2, c1:c2]
|
python
|
def extract_from_image(self, img, scale=1.0, margin_width=5, margin_height=5):
"""Extracts the contents of this box from a given image.
For that the image is "unrotated" by the appropriate angle, and the corresponding part is extracted from it.
Returns an image with dimensions height*scale x width*scale.
Note that the box coordinates are interpreted as "image coordinates" (i.e. x is row and y is column),
and box angle is considered to be relative to the vertical (i.e. np.pi/2 is "normal orientation")
:param img: a numpy ndarray suitable for image processing via skimage.
:param scale: the RotatedBox is scaled by this value before performing the extraction.
This is necessary when, for example, the location of a particular feature is determined using a smaller image,
yet then the corresponding area needs to be extracted from the original, larger image.
The scale parameter in this case should be width_of_larger_image/width_of_smaller_image.
:param margin_width: The margin that should be added to the width dimension of the box from each size.
This value is given wrt actual box dimensions (i.e. not scaled).
:param margin_height: The margin that should be added to the height dimension of the box from each side.
:return: a numpy ndarray, corresponding to the extracted region (aligned straight).
TODO: This could be made more efficient if we avoid rotating the full image and cut out the ROI from it beforehand.
"""
rotate_by = (np.pi/2 - self.angle)*180/np.pi
img_rotated = transform.rotate(img, angle=rotate_by, center=[self.center[1]*scale, self.center[0]*scale], resize=True)
# The resizeable transform will shift the resulting image somewhat wrt original coordinates.
# When we cut out the box we will compensate for this shift.
shift_c, shift_r = self._compensate_rotation_shift(img, scale)
r1 = max(int((self.center[0] - self.height/2 - margin_height)*scale - shift_r), 0)
r2 = int((self.center[0] + self.height/2 + margin_height)*scale - shift_r)
c1 = max(int((self.center[1] - self.width/2 - margin_width)*scale - shift_c), 0)
c2 = int((self.center[1] + self.width/2 + margin_width)*scale - shift_c)
return img_rotated[r1:r2, c1:c2]
|
[
"def",
"extract_from_image",
"(",
"self",
",",
"img",
",",
"scale",
"=",
"1.0",
",",
"margin_width",
"=",
"5",
",",
"margin_height",
"=",
"5",
")",
":",
"rotate_by",
"=",
"(",
"np",
".",
"pi",
"/",
"2",
"-",
"self",
".",
"angle",
")",
"*",
"180",
"/",
"np",
".",
"pi",
"img_rotated",
"=",
"transform",
".",
"rotate",
"(",
"img",
",",
"angle",
"=",
"rotate_by",
",",
"center",
"=",
"[",
"self",
".",
"center",
"[",
"1",
"]",
"*",
"scale",
",",
"self",
".",
"center",
"[",
"0",
"]",
"*",
"scale",
"]",
",",
"resize",
"=",
"True",
")",
"# The resizeable transform will shift the resulting image somewhat wrt original coordinates.",
"# When we cut out the box we will compensate for this shift.",
"shift_c",
",",
"shift_r",
"=",
"self",
".",
"_compensate_rotation_shift",
"(",
"img",
",",
"scale",
")",
"r1",
"=",
"max",
"(",
"int",
"(",
"(",
"self",
".",
"center",
"[",
"0",
"]",
"-",
"self",
".",
"height",
"/",
"2",
"-",
"margin_height",
")",
"*",
"scale",
"-",
"shift_r",
")",
",",
"0",
")",
"r2",
"=",
"int",
"(",
"(",
"self",
".",
"center",
"[",
"0",
"]",
"+",
"self",
".",
"height",
"/",
"2",
"+",
"margin_height",
")",
"*",
"scale",
"-",
"shift_r",
")",
"c1",
"=",
"max",
"(",
"int",
"(",
"(",
"self",
".",
"center",
"[",
"1",
"]",
"-",
"self",
".",
"width",
"/",
"2",
"-",
"margin_width",
")",
"*",
"scale",
"-",
"shift_c",
")",
",",
"0",
")",
"c2",
"=",
"int",
"(",
"(",
"self",
".",
"center",
"[",
"1",
"]",
"+",
"self",
".",
"width",
"/",
"2",
"+",
"margin_width",
")",
"*",
"scale",
"-",
"shift_c",
")",
"return",
"img_rotated",
"[",
"r1",
":",
"r2",
",",
"c1",
":",
"c2",
"]"
] |
Extracts the contents of this box from a given image.
For that the image is "unrotated" by the appropriate angle, and the corresponding part is extracted from it.
Returns an image with dimensions height*scale x width*scale.
Note that the box coordinates are interpreted as "image coordinates" (i.e. x is row and y is column),
and box angle is considered to be relative to the vertical (i.e. np.pi/2 is "normal orientation")
:param img: a numpy ndarray suitable for image processing via skimage.
:param scale: the RotatedBox is scaled by this value before performing the extraction.
This is necessary when, for example, the location of a particular feature is determined using a smaller image,
yet then the corresponding area needs to be extracted from the original, larger image.
The scale parameter in this case should be width_of_larger_image/width_of_smaller_image.
:param margin_width: The margin that should be added to the width dimension of the box from each size.
This value is given wrt actual box dimensions (i.e. not scaled).
:param margin_height: The margin that should be added to the height dimension of the box from each side.
:return: a numpy ndarray, corresponding to the extracted region (aligned straight).
TODO: This could be made more efficient if we avoid rotating the full image and cut out the ROI from it beforehand.
|
[
"Extracts",
"the",
"contents",
"of",
"this",
"box",
"from",
"a",
"given",
"image",
".",
"For",
"that",
"the",
"image",
"is",
"unrotated",
"by",
"the",
"appropriate",
"angle",
"and",
"the",
"corresponding",
"part",
"is",
"extracted",
"from",
"it",
"."
] |
b32afba0f5dc4eb600c4edc4f49e5d49959c5415
|
https://github.com/konstantint/PassportEye/blob/b32afba0f5dc4eb600c4edc4f49e5d49959c5415/passporteye/util/geometry.py#L119-L149
|