| id int32 0-252k | repo stringlengths 7-55 | path stringlengths 4-127 | func_name stringlengths 1-88 | original_string stringlengths 75-19.8k | language stringclasses 1 value | code stringlengths 75-19.8k | code_tokens list | docstring stringlengths 3-17.3k | docstring_tokens list | sha stringlengths 40 | url stringlengths 87-242 |
|---|---|---|---|---|---|---|---|---|---|---|---|
7,500
|
tylertreat/BigQuery-Python
|
bigquery/client.py
|
BigQueryClient._get_all_tables_for_dataset
|
def _get_all_tables_for_dataset(self, dataset_id, project_id=None):
"""Retrieve a list of all tables for the dataset.
Parameters
----------
dataset_id : str
The dataset to retrieve table names for
project_id: str
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
dict
A ``dict`` with a ``tables`` key listing all tables in the dataset
"""
project_id = self._get_project_id(project_id)
result = self.bigquery.tables().list(
projectId=project_id,
datasetId=dataset_id).execute(num_retries=self.num_retries)
page_token = result.get('nextPageToken')
while page_token:
res = self.bigquery.tables().list(
projectId=project_id,
datasetId=dataset_id,
pageToken=page_token
).execute(num_retries=self.num_retries)
page_token = res.get('nextPageToken')
result['tables'] += res.get('tables', [])
return result
|
python
|
def _get_all_tables_for_dataset(self, dataset_id, project_id=None):
"""Retrieve a list of all tables for the dataset.
Parameters
----------
dataset_id : str
The dataset to retrieve table names for
project_id: str
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
dict
A ``dict`` with a ``tables`` key listing all tables in the dataset
"""
project_id = self._get_project_id(project_id)
result = self.bigquery.tables().list(
projectId=project_id,
datasetId=dataset_id).execute(num_retries=self.num_retries)
page_token = result.get('nextPageToken')
while page_token:
res = self.bigquery.tables().list(
projectId=project_id,
datasetId=dataset_id,
pageToken=page_token
).execute(num_retries=self.num_retries)
page_token = res.get('nextPageToken')
result['tables'] += res.get('tables', [])
return result
|
[
"def",
"_get_all_tables_for_dataset",
"(",
"self",
",",
"dataset_id",
",",
"project_id",
"=",
"None",
")",
":",
"project_id",
"=",
"self",
".",
"_get_project_id",
"(",
"project_id",
")",
"result",
"=",
"self",
".",
"bigquery",
".",
"tables",
"(",
")",
".",
"list",
"(",
"projectId",
"=",
"project_id",
",",
"datasetId",
"=",
"dataset_id",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"page_token",
"=",
"result",
".",
"get",
"(",
"'nextPageToken'",
")",
"while",
"page_token",
":",
"res",
"=",
"self",
".",
"bigquery",
".",
"tables",
"(",
")",
".",
"list",
"(",
"projectId",
"=",
"project_id",
",",
"datasetId",
"=",
"dataset_id",
",",
"pageToken",
"=",
"page_token",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"page_token",
"=",
"res",
".",
"get",
"(",
"'nextPageToken'",
")",
"result",
"[",
"'tables'",
"]",
"+=",
"res",
".",
"get",
"(",
"'tables'",
",",
"[",
"]",
")",
"return",
"result"
] |
Retrieve a list of all tables for the dataset.
Parameters
----------
dataset_id : str
The dataset to retrieve table names for
project_id: str
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
dict
A ``dict`` with a ``tables`` key listing all tables in the dataset
|
[
"Retrieve",
"a",
"list",
"of",
"all",
"tables",
"for",
"the",
"dataset",
"."
] |
88d99de42d954d49fc281460068f0e95003da098
|
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/client.py#L1472-L1502
|
7,501
|
tylertreat/BigQuery-Python
|
bigquery/client.py
|
BigQueryClient._parse_table_list_response
|
def _parse_table_list_response(self, list_response):
"""Parse the response received from calling list on tables.
Parameters
----------
list_response
The response found by calling list on a BigQuery table object.
Returns
-------
dict
Dates referenced by table names
"""
tables = defaultdict(dict)
for table in list_response.get('tables', []):
table_ref = table.get('tableReference')
if not table_ref:
continue
table_id = table_ref.get('tableId', '')
year_month, app_id = self._parse_table_name(table_id)
if not year_month:
continue
table_date = datetime.strptime(year_month, '%Y-%m')
unix_seconds = calendar.timegm(table_date.timetuple())
tables[app_id].update({table_id: unix_seconds})
# Turn off defaulting
tables.default_factory = None
return tables
|
python
|
def _parse_table_list_response(self, list_response):
"""Parse the response received from calling list on tables.
Parameters
----------
list_response
The response found by calling list on a BigQuery table object.
Returns
-------
dict
Dates referenced by table names
"""
tables = defaultdict(dict)
for table in list_response.get('tables', []):
table_ref = table.get('tableReference')
if not table_ref:
continue
table_id = table_ref.get('tableId', '')
year_month, app_id = self._parse_table_name(table_id)
if not year_month:
continue
table_date = datetime.strptime(year_month, '%Y-%m')
unix_seconds = calendar.timegm(table_date.timetuple())
tables[app_id].update({table_id: unix_seconds})
# Turn off defaulting
tables.default_factory = None
return tables
|
[
"def",
"_parse_table_list_response",
"(",
"self",
",",
"list_response",
")",
":",
"tables",
"=",
"defaultdict",
"(",
"dict",
")",
"for",
"table",
"in",
"list_response",
".",
"get",
"(",
"'tables'",
",",
"[",
"]",
")",
":",
"table_ref",
"=",
"table",
".",
"get",
"(",
"'tableReference'",
")",
"if",
"not",
"table_ref",
":",
"continue",
"table_id",
"=",
"table_ref",
".",
"get",
"(",
"'tableId'",
",",
"''",
")",
"year_month",
",",
"app_id",
"=",
"self",
".",
"_parse_table_name",
"(",
"table_id",
")",
"if",
"not",
"year_month",
":",
"continue",
"table_date",
"=",
"datetime",
".",
"strptime",
"(",
"year_month",
",",
"'%Y-%m'",
")",
"unix_seconds",
"=",
"calendar",
".",
"timegm",
"(",
"table_date",
".",
"timetuple",
"(",
")",
")",
"tables",
"[",
"app_id",
"]",
".",
"update",
"(",
"{",
"table_id",
":",
"unix_seconds",
"}",
")",
"# Turn off defualting",
"tables",
".",
"default_factory",
"=",
"None",
"return",
"tables"
] |
Parse the response received from calling list on tables.
Parameters
----------
list_response
The response found by calling list on a BigQuery table object.
Returns
-------
dict
Dates referenced by table names
|
[
"Parse",
"the",
"response",
"received",
"from",
"calling",
"list",
"on",
"tables",
"."
] |
88d99de42d954d49fc281460068f0e95003da098
|
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/client.py#L1504-L1540
|
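The mapping this builds groups table ids by app id, with each table keyed to the unix timestamp of the month it covers. A hypothetical result (names and values made up for illustration) has this shape:

# Hypothetical shape of the dict returned above
# (app id -> {table id: month start in unix seconds}):
tables = {
    'myapp': {
        'myapp_2017_04': 1491004800,  # 2017-04-01 00:00:00 UTC
        'myapp_2017_05': 1493596800,  # 2017-05-01 00:00:00 UTC
    },
}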
7,502
|
tylertreat/BigQuery-Python
|
bigquery/client.py
|
BigQueryClient._parse_table_name
|
def _parse_table_name(self, table_id):
"""Parse a table name in the form of appid_YYYY_MM or
YYYY_MM_appid and return a tuple consisting of YYYY-MM and the app id.
Returns (None, None) in the event of a name like <desc>_YYYYMMDD_<int>
Parameters
----------
table_id : str
The table id as listed by BigQuery
Returns
-------
tuple
(year/month, app id), or (None, None) if the table id cannot be
parsed.
"""
# Prefix date
attributes = table_id.split('_')
year_month = "-".join(attributes[:2])
app_id = "-".join(attributes[2:])
# Check if date parsed correctly
if year_month.count("-") == 1 and all(
[num.isdigit() for num in year_month.split('-')]):
return year_month, app_id
# Postfix date
attributes = table_id.split('_')
year_month = "-".join(attributes[-2:])
app_id = "-".join(attributes[:-2])
# Check if date parsed correctly
if year_month.count("-") == 1 and all(
[num.isdigit() for num in year_month.split('-')]) and len(year_month) == 7:
return year_month, app_id
return None, None
|
python
|
def _parse_table_name(self, table_id):
"""Parse a table name in the form of appid_YYYY_MM or
YYYY_MM_appid and return a tuple consisting of YYYY-MM and the app id.
Returns (None, None) in the event of a name like <desc>_YYYYMMDD_<int>
Parameters
----------
table_id : str
The table id as listed by BigQuery
Returns
-------
tuple
(year/month, app id), or (None, None) if the table id cannot be
parsed.
"""
# Prefix date
attributes = table_id.split('_')
year_month = "-".join(attributes[:2])
app_id = "-".join(attributes[2:])
# Check if date parsed correctly
if year_month.count("-") == 1 and all(
[num.isdigit() for num in year_month.split('-')]):
return year_month, app_id
# Postfix date
attributes = table_id.split('_')
year_month = "-".join(attributes[-2:])
app_id = "-".join(attributes[:-2])
# Check if date parsed correctly
if year_month.count("-") == 1 and all(
[num.isdigit() for num in year_month.split('-')]) and len(year_month) == 7:
return year_month, app_id
return None, None
|
[
"def",
"_parse_table_name",
"(",
"self",
",",
"table_id",
")",
":",
"# Prefix date",
"attributes",
"=",
"table_id",
".",
"split",
"(",
"'_'",
")",
"year_month",
"=",
"\"-\"",
".",
"join",
"(",
"attributes",
"[",
":",
"2",
"]",
")",
"app_id",
"=",
"\"-\"",
".",
"join",
"(",
"attributes",
"[",
"2",
":",
"]",
")",
"# Check if date parsed correctly",
"if",
"year_month",
".",
"count",
"(",
"\"-\"",
")",
"==",
"1",
"and",
"all",
"(",
"[",
"num",
".",
"isdigit",
"(",
")",
"for",
"num",
"in",
"year_month",
".",
"split",
"(",
"'-'",
")",
"]",
")",
":",
"return",
"year_month",
",",
"app_id",
"# Postfix date",
"attributes",
"=",
"table_id",
".",
"split",
"(",
"'_'",
")",
"year_month",
"=",
"\"-\"",
".",
"join",
"(",
"attributes",
"[",
"-",
"2",
":",
"]",
")",
"app_id",
"=",
"\"-\"",
".",
"join",
"(",
"attributes",
"[",
":",
"-",
"2",
"]",
")",
"# Check if date parsed correctly",
"if",
"year_month",
".",
"count",
"(",
"\"-\"",
")",
"==",
"1",
"and",
"all",
"(",
"[",
"num",
".",
"isdigit",
"(",
")",
"for",
"num",
"in",
"year_month",
".",
"split",
"(",
"'-'",
")",
"]",
")",
"and",
"len",
"(",
"year_month",
")",
"==",
"7",
":",
"return",
"year_month",
",",
"app_id",
"return",
"None",
",",
"None"
] |
Parse a table name in the form of appid_YYYY_MM or
YYYY_MM_appid and return a tuple consisting of YYYY-MM and the app id.
Returns (None, None) in the event of a name like <desc>_YYYYMMDD_<int>
Parameters
----------
table_id : str
The table id as listed by BigQuery
Returns
-------
tuple
(year/month, app id), or (None, None) if the table id cannot be
parsed.
|
[
"Parse",
"a",
"table",
"name",
"in",
"the",
"form",
"of",
"appid_YYYY_MM",
"or",
"YYYY_MM_appid",
"and",
"return",
"a",
"tuple",
"consisting",
"of",
"YYYY",
"-",
"MM",
"and",
"the",
"app",
"id",
"."
] |
88d99de42d954d49fc281460068f0e95003da098
|
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/client.py#L1542-L1581
|
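The parser above tries a prefix date first and falls back to a postfix date. A standalone sketch of the same logic (an illustrative re-implementation, not the library itself), run on hypothetical table ids, shows which shapes are accepted:

def parse_table_name(table_id):
    """Illustrative re-implementation of the prefix/postfix parsing above."""
    attributes = table_id.split('_')
    # Prefix date: YYYY_MM_appid
    year_month, app_id = "-".join(attributes[:2]), "-".join(attributes[2:])
    if year_month.count("-") == 1 and all(n.isdigit() for n in year_month.split("-")):
        return year_month, app_id
    # Postfix date: appid_YYYY_MM (length check rejects YYYYMMDD-style suffixes)
    year_month, app_id = "-".join(attributes[-2:]), "-".join(attributes[:-2])
    if (year_month.count("-") == 1
            and all(n.isdigit() for n in year_month.split("-"))
            and len(year_month) == 7):
        return year_month, app_id
    return None, None

print(parse_table_name("2017_05_myapp"))      # ('2017-05', 'myapp')
print(parse_table_name("myapp_2017_05"))      # ('2017-05', 'myapp')
print(parse_table_name("events_20170501_3"))  # (None, None)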
7,503
|
tylertreat/BigQuery-Python
|
bigquery/client.py
|
BigQueryClient._filter_tables_by_time
|
def _filter_tables_by_time(self, tables, start_time, end_time):
"""Filter a table dictionary and return table names based on the range
of start and end times in unix seconds.
Parameters
----------
tables : dict
Dates referenced by table names
start_time : int
The unix time after which records will be fetched
end_time : int
The unix time up to which records will be fetched
Returns
-------
list
Table names that are inside the time range
"""
return [table_name for (table_name, unix_seconds) in tables.items()
if self._in_range(start_time, end_time, unix_seconds)]
|
python
|
def _filter_tables_by_time(self, tables, start_time, end_time):
"""Filter a table dictionary and return table names based on the range
of start and end times in unix seconds.
Parameters
----------
tables : dict
Dates referenced by table names
start_time : int
The unix time after which records will be fetched
end_time : int
The unix time up to which records will be fetched
Returns
-------
list
Table names that are inside the time range
"""
return [table_name for (table_name, unix_seconds) in tables.items()
if self._in_range(start_time, end_time, unix_seconds)]
|
[
"def",
"_filter_tables_by_time",
"(",
"self",
",",
"tables",
",",
"start_time",
",",
"end_time",
")",
":",
"return",
"[",
"table_name",
"for",
"(",
"table_name",
",",
"unix_seconds",
")",
"in",
"tables",
".",
"items",
"(",
")",
"if",
"self",
".",
"_in_range",
"(",
"start_time",
",",
"end_time",
",",
"unix_seconds",
")",
"]"
] |
Filter a table dictionary and return table names based on the range
of start and end times in unix seconds.
Parameters
----------
tables : dict
Dates referenced by table names
start_time : int
The unix time after which records will be fetched
end_time : int
The unix time up to which records will be fetched
Returns
-------
list
Table names that are inside the time range
|
[
"Filter",
"a",
"table",
"dictionary",
"and",
"return",
"table",
"names",
"based",
"on",
"the",
"range",
"of",
"start",
"and",
"end",
"times",
"in",
"unix",
"seconds",
"."
] |
88d99de42d954d49fc281460068f0e95003da098
|
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/client.py#L1583-L1603
|
7,504
|
tylertreat/BigQuery-Python
|
bigquery/client.py
|
BigQueryClient._in_range
|
def _in_range(self, start_time, end_time, time):
"""Indicate if the given time falls inside of the given range.
Parameters
----------
start_time : int
The unix time for the start of the range
end_time : int
The unix time for the end of the range
time : int
The unix time to check
Returns
-------
bool
True if the time falls within the range, False otherwise.
"""
ONE_MONTH = 2764800 # 32 days
return start_time <= time <= end_time or \
time <= start_time <= time + ONE_MONTH or \
time <= end_time <= time + ONE_MONTH
|
python
|
def _in_range(self, start_time, end_time, time):
"""Indicate if the given time falls inside of the given range.
Parameters
----------
start_time : int
The unix time for the start of the range
end_time : int
The unix time for the end of the range
time : int
The unix time to check
Returns
-------
bool
True if the time falls within the range, False otherwise.
"""
ONE_MONTH = 2764800 # 32 days
return start_time <= time <= end_time or \
time <= start_time <= time + ONE_MONTH or \
time <= end_time <= time + ONE_MONTH
|
[
"def",
"_in_range",
"(",
"self",
",",
"start_time",
",",
"end_time",
",",
"time",
")",
":",
"ONE_MONTH",
"=",
"2764800",
"# 32 days",
"return",
"start_time",
"<=",
"time",
"<=",
"end_time",
"or",
"time",
"<=",
"start_time",
"<=",
"time",
"+",
"ONE_MONTH",
"or",
"time",
"<=",
"end_time",
"<=",
"time",
"+",
"ONE_MONTH"
] |
Indicate if the given time falls inside of the given range.
Parameters
----------
start_time : int
The unix time for the start of the range
end_time : int
The unix time for the end of the range
time : int
The unix time to check
Returns
-------
bool
True if the time falls within the range, False otherwise.
|
[
"Indicate",
"if",
"the",
"given",
"time",
"falls",
"inside",
"of",
"the",
"given",
"range",
"."
] |
88d99de42d954d49fc281460068f0e95003da098
|
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/client.py#L1605-L1627
|
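Because of the 32-day window, a table stamped at the start of a month still matches a range that begins later in that month. A small self-contained sketch of the check (the timestamps are made up for illustration):

import calendar
from datetime import datetime

ONE_MONTH = 2764800  # 32 days in seconds, as in _in_range above

def in_range(start_time, end_time, time):
    # Same three-way check as _in_range: the point lies inside the range, or the
    # range start/end falls inside the table's 32-day window.
    return (start_time <= time <= end_time
            or time <= start_time <= time + ONE_MONTH
            or time <= end_time <= time + ONE_MONTH)

table_time = calendar.timegm(datetime(2017, 5, 1).timetuple())   # table for 2017-05
start = calendar.timegm(datetime(2017, 5, 15).timetuple())
end = calendar.timegm(datetime(2017, 6, 15).timetuple())
print(in_range(start, end, table_time))  # True: the range starts inside the table's month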
7,505
|
tylertreat/BigQuery-Python
|
bigquery/client.py
|
BigQueryClient._transform_row
|
def _transform_row(self, row, schema):
"""Apply the given schema to the given BigQuery data row.
Parameters
----------
row
A single BigQuery row to transform
schema : list
The BigQuery table schema to apply to the row, specifically
the list of field dicts.
Returns
-------
dict
Mapping schema to row
"""
log = {}
# Match each schema column with its associated row value
for index, col_dict in enumerate(schema):
col_name = col_dict['name']
row_value = row['f'][index]['v']
if row_value is None:
log[col_name] = None
continue
# Recurse on nested records
if col_dict['type'] == 'RECORD':
row_value = self._recurse_on_row(col_dict, row_value)
# Otherwise just cast the value
elif col_dict['type'] == 'INTEGER':
row_value = int(row_value)
elif col_dict['type'] == 'FLOAT':
row_value = float(row_value)
elif col_dict['type'] == 'BOOLEAN':
row_value = row_value in ('True', 'true', 'TRUE')
elif col_dict['type'] == 'TIMESTAMP':
row_value = float(row_value)
log[col_name] = row_value
return log
|
python
|
def _transform_row(self, row, schema):
"""Apply the given schema to the given BigQuery data row.
Parameters
----------
row
A single BigQuery row to transform
schema : list
The BigQuery table schema to apply to the row, specifically
the list of field dicts.
Returns
-------
dict
Mapping schema to row
"""
log = {}
# Match each schema column with its associated row value
for index, col_dict in enumerate(schema):
col_name = col_dict['name']
row_value = row['f'][index]['v']
if row_value is None:
log[col_name] = None
continue
# Recurse on nested records
if col_dict['type'] == 'RECORD':
row_value = self._recurse_on_row(col_dict, row_value)
# Otherwise just cast the value
elif col_dict['type'] == 'INTEGER':
row_value = int(row_value)
elif col_dict['type'] == 'FLOAT':
row_value = float(row_value)
elif col_dict['type'] == 'BOOLEAN':
row_value = row_value in ('True', 'true', 'TRUE')
elif col_dict['type'] == 'TIMESTAMP':
row_value = float(row_value)
log[col_name] = row_value
return log
|
[
"def",
"_transform_row",
"(",
"self",
",",
"row",
",",
"schema",
")",
":",
"log",
"=",
"{",
"}",
"# Match each schema column with its associated row value",
"for",
"index",
",",
"col_dict",
"in",
"enumerate",
"(",
"schema",
")",
":",
"col_name",
"=",
"col_dict",
"[",
"'name'",
"]",
"row_value",
"=",
"row",
"[",
"'f'",
"]",
"[",
"index",
"]",
"[",
"'v'",
"]",
"if",
"row_value",
"is",
"None",
":",
"log",
"[",
"col_name",
"]",
"=",
"None",
"continue",
"# Recurse on nested records",
"if",
"col_dict",
"[",
"'type'",
"]",
"==",
"'RECORD'",
":",
"row_value",
"=",
"self",
".",
"_recurse_on_row",
"(",
"col_dict",
",",
"row_value",
")",
"# Otherwise just cast the value",
"elif",
"col_dict",
"[",
"'type'",
"]",
"==",
"'INTEGER'",
":",
"row_value",
"=",
"int",
"(",
"row_value",
")",
"elif",
"col_dict",
"[",
"'type'",
"]",
"==",
"'FLOAT'",
":",
"row_value",
"=",
"float",
"(",
"row_value",
")",
"elif",
"col_dict",
"[",
"'type'",
"]",
"==",
"'BOOLEAN'",
":",
"row_value",
"=",
"row_value",
"in",
"(",
"'True'",
",",
"'true'",
",",
"'TRUE'",
")",
"elif",
"col_dict",
"[",
"'type'",
"]",
"==",
"'TIMESTAMP'",
":",
"row_value",
"=",
"float",
"(",
"row_value",
")",
"log",
"[",
"col_name",
"]",
"=",
"row_value",
"return",
"log"
] |
Apply the given schema to the given BigQuery data row.
Parameters
----------
row
A single BigQuery row to transform
schema : list
The BigQuery table schema to apply to the row, specifically
the list of field dicts.
Returns
-------
dict
Mapping schema to row
|
[
"Apply",
"the",
"given",
"schema",
"to",
"the",
"given",
"BigQuery",
"data",
"row",
"."
] |
88d99de42d954d49fc281460068f0e95003da098
|
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/client.py#L1664-L1711
|
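A sketch of the row shape _transform_row expects (the BigQuery v2 REST API returns each row as {'f': [{'v': ...}, ...]}, positionally aligned with the schema) and the casting applied to a flat row; field names and values here are hypothetical, and RECORD recursion is omitted:

schema = [
    {'name': 'user_id', 'type': 'INTEGER'},
    {'name': 'score', 'type': 'FLOAT'},
    {'name': 'active', 'type': 'BOOLEAN'},
]
row = {'f': [{'v': '42'}, {'v': '3.5'}, {'v': 'true'}]}

# Flat-row equivalent of _transform_row:
log = {}
for index, col in enumerate(schema):
    value = row['f'][index]['v']
    if value is None:
        log[col['name']] = None
        continue
    if col['type'] == 'INTEGER':
        value = int(value)
    elif col['type'] in ('FLOAT', 'TIMESTAMP'):
        value = float(value)
    elif col['type'] == 'BOOLEAN':
        value = value in ('True', 'true', 'TRUE')
    log[col['name']] = value

print(log)  # {'user_id': 42, 'score': 3.5, 'active': True}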
7,506
|
tylertreat/BigQuery-Python
|
bigquery/client.py
|
BigQueryClient._recurse_on_row
|
def _recurse_on_row(self, col_dict, nested_value):
"""Apply the schema specified by the given dict to the nested value by
recursing on it.
Parameters
----------
col_dict : dict
The schema to apply to the nested value.
nested_value : A value nested in a BigQuery row.
Returns
-------
Union[dict, list]
``dict`` or ``list`` of ``dict`` objects from applied schema.
"""
row_value = None
# Multiple nested records
if col_dict['mode'] == 'REPEATED' and isinstance(nested_value, list):
row_value = [self._transform_row(record['v'], col_dict['fields'])
for record in nested_value]
# A single nested record
else:
row_value = self._transform_row(nested_value, col_dict['fields'])
return row_value
|
python
|
def _recurse_on_row(self, col_dict, nested_value):
"""Apply the schema specified by the given dict to the nested value by
recursing on it.
Parameters
----------
col_dict : dict
The schema to apply to the nested value.
nested_value : A value nested in a BigQuery row.
Returns
-------
Union[dict, list]
``dict`` or ``list`` of ``dict`` objects from applied schema.
"""
row_value = None
# Multiple nested records
if col_dict['mode'] == 'REPEATED' and isinstance(nested_value, list):
row_value = [self._transform_row(record['v'], col_dict['fields'])
for record in nested_value]
# A single nested record
else:
row_value = self._transform_row(nested_value, col_dict['fields'])
return row_value
|
[
"def",
"_recurse_on_row",
"(",
"self",
",",
"col_dict",
",",
"nested_value",
")",
":",
"row_value",
"=",
"None",
"# Multiple nested records",
"if",
"col_dict",
"[",
"'mode'",
"]",
"==",
"'REPEATED'",
"and",
"isinstance",
"(",
"nested_value",
",",
"list",
")",
":",
"row_value",
"=",
"[",
"self",
".",
"_transform_row",
"(",
"record",
"[",
"'v'",
"]",
",",
"col_dict",
"[",
"'fields'",
"]",
")",
"for",
"record",
"in",
"nested_value",
"]",
"# A single nested record",
"else",
":",
"row_value",
"=",
"self",
".",
"_transform_row",
"(",
"nested_value",
",",
"col_dict",
"[",
"'fields'",
"]",
")",
"return",
"row_value"
] |
Apply the schema specified by the given dict to the nested value by
recursing on it.
Parameters
----------
col_dict : dict
The schema to apply to the nested value.
nested_value : A value nested in a BigQuery row.
Returns
-------
Union[dict, list]
``dict`` or ``list`` of ``dict`` objects from applied schema.
|
[
"Apply",
"the",
"schema",
"specified",
"by",
"the",
"given",
"dict",
"to",
"the",
"nested",
"value",
"by",
"recursing",
"on",
"it",
"."
] |
88d99de42d954d49fc281460068f0e95003da098
|
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/client.py#L1713-L1740
|
7,507
|
tylertreat/BigQuery-Python
|
bigquery/client.py
|
BigQueryClient._generate_hex_for_uris
|
def _generate_hex_for_uris(self, uris):
"""Given uris, generate and return hex version of it
Parameters
----------
uris : list
Containing all uris
Returns
-------
str
Hexed uris
"""
return sha256((":".join(uris) + str(time())).encode()).hexdigest()
|
python
|
def _generate_hex_for_uris(self, uris):
"""Given uris, generate and return hex version of it
Parameters
----------
uris : list
Containing all uris
Returns
-------
str
Hexed uris
"""
return sha256((":".join(uris) + str(time())).encode()).hexdigest()
|
[
"def",
"_generate_hex_for_uris",
"(",
"self",
",",
"uris",
")",
":",
"return",
"sha256",
"(",
"(",
"\":\"",
".",
"join",
"(",
"uris",
")",
"+",
"str",
"(",
"time",
"(",
")",
")",
")",
".",
"encode",
"(",
")",
")",
".",
"hexdigest",
"(",
")"
] |
Given uris, generate and return hex version of it
Parameters
----------
uris : list
Containing all uris
Returns
-------
str
Hexed uris
|
[
"Given",
"uris",
"generate",
"and",
"return",
"hex",
"version",
"of",
"it"
] |
88d99de42d954d49fc281460068f0e95003da098
|
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/client.py#L1742-L1755
|
7,508
|
tylertreat/BigQuery-Python
|
bigquery/client.py
|
BigQueryClient.create_dataset
|
def create_dataset(self, dataset_id, friendly_name=None, description=None,
access=None, location=None, project_id=None):
"""Create a new BigQuery dataset.
Parameters
----------
dataset_id : str
Unique ``str`` identifying the dataset with the project (the
referenceID of the dataset, not the integer id of the dataset)
friendly_name: str, optional
A human readable name
description: str, optional
Longer string providing a description
access : list, optional
Indicating access permissions (see
https://developers.google.com/bigquery/docs/reference/v2/datasets#resource)
location : str, optional
Indicating where dataset should be stored: EU or US (see
https://developers.google.com/bigquery/docs/reference/v2/datasets#resource)
project_id: str
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
Union[bool, dict]
``bool`` indicating if dataset was created or not, or response
from BigQuery if swallow_results is set for False
"""
project_id = self._get_project_id(project_id)
try:
datasets = self.bigquery.datasets()
dataset_data = self.dataset_resource(dataset_id,
project_id=project_id,
friendly_name=friendly_name,
description=description,
access=access,
location=location
)
response = datasets.insert(projectId=project_id,
body=dataset_data).execute(
num_retries=self.num_retries)
if self.swallow_results:
return True
else:
return response
except HttpError as e:
logger.error(
'Cannot create dataset {0}, {1}'.format(dataset_id, e))
if self.swallow_results:
return False
else:
return {}
|
python
|
def create_dataset(self, dataset_id, friendly_name=None, description=None,
access=None, location=None, project_id=None):
"""Create a new BigQuery dataset.
Parameters
----------
dataset_id : str
Unique ``str`` identifying the dataset with the project (the
referenceID of the dataset, not the integer id of the dataset)
friendly_name: str, optional
A human readable name
description: str, optional
Longer string providing a description
access : list, optional
Indicating access permissions (see
https://developers.google.com/bigquery/docs/reference/v2/datasets#resource)
location : str, optional
Indicating where dataset should be stored: EU or US (see
https://developers.google.com/bigquery/docs/reference/v2/datasets#resource)
project_id: str
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
Union[bool, dict]
``bool`` indicating if dataset was created or not, or response
from BigQuery if swallow_results is set for False
"""
project_id = self._get_project_id(project_id)
try:
datasets = self.bigquery.datasets()
dataset_data = self.dataset_resource(dataset_id,
project_id=project_id,
friendly_name=friendly_name,
description=description,
access=access,
location=location
)
response = datasets.insert(projectId=project_id,
body=dataset_data).execute(
num_retries=self.num_retries)
if self.swallow_results:
return True
else:
return response
except HttpError as e:
logger.error(
'Cannot create dataset {0}, {1}'.format(dataset_id, e))
if self.swallow_results:
return False
else:
return {}
|
[
"def",
"create_dataset",
"(",
"self",
",",
"dataset_id",
",",
"friendly_name",
"=",
"None",
",",
"description",
"=",
"None",
",",
"access",
"=",
"None",
",",
"location",
"=",
"None",
",",
"project_id",
"=",
"None",
")",
":",
"project_id",
"=",
"self",
".",
"_get_project_id",
"(",
"project_id",
")",
"try",
":",
"datasets",
"=",
"self",
".",
"bigquery",
".",
"datasets",
"(",
")",
"dataset_data",
"=",
"self",
".",
"dataset_resource",
"(",
"dataset_id",
",",
"project_id",
"=",
"project_id",
",",
"friendly_name",
"=",
"friendly_name",
",",
"description",
"=",
"description",
",",
"access",
"=",
"access",
",",
"location",
"=",
"location",
")",
"response",
"=",
"datasets",
".",
"insert",
"(",
"projectId",
"=",
"project_id",
",",
"body",
"=",
"dataset_data",
")",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"if",
"self",
".",
"swallow_results",
":",
"return",
"True",
"else",
":",
"return",
"response",
"except",
"HttpError",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'Cannot create dataset {0}, {1}'",
".",
"format",
"(",
"dataset_id",
",",
"e",
")",
")",
"if",
"self",
".",
"swallow_results",
":",
"return",
"False",
"else",
":",
"return",
"{",
"}"
] |
Create a new BigQuery dataset.
Parameters
----------
dataset_id : str
Unique ``str`` identifying the dataset with the project (the
referenceID of the dataset, not the integer id of the dataset)
friendly_name: str, optional
A human readable name
description: str, optional
Longer string providing a description
access : list, optional
Indicating access permissions (see
https://developers.google.com/bigquery/docs/reference/v2/datasets#resource)
location : str, optional
Indicating where dataset should be stored: EU or US (see
https://developers.google.com/bigquery/docs/reference/v2/datasets#resource)
project_id: str
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
Union[bool, dict]
``bool`` indicating if dataset was created or not, or response
from BigQuery if swallow_results is set for False
|
[
"Create",
"a",
"new",
"BigQuery",
"dataset",
"."
] |
88d99de42d954d49fc281460068f0e95003da098
|
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/client.py#L1782-L1835
|
7,509
|
tylertreat/BigQuery-Python
|
bigquery/client.py
|
BigQueryClient.delete_dataset
|
def delete_dataset(self, dataset_id, delete_contents=False, project_id=None):
"""Delete a BigQuery dataset.
Parameters
----------
dataset_id : str
Unique ``str`` identifying the dataset with the project (the
referenceId of the dataset)
delete_contents : bool, optional
If True, forces the deletion of the dataset even when the dataset
contains data (Default = False)
project_id: str, optional
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
Union[bool, dict]
``bool`` indicating if the delete was successful or not, or response
from BigQuery if swallow_results is set for False
Raises
-------
HttpError
404 when dataset with dataset_id does not exist
"""
project_id = self._get_project_id(project_id)
try:
datasets = self.bigquery.datasets()
request = datasets.delete(projectId=project_id,
datasetId=dataset_id,
deleteContents=delete_contents)
response = request.execute(num_retries=self.num_retries)
if self.swallow_results:
return True
else:
return response
except HttpError as e:
logger.error(
'Cannot delete dataset {0}: {1}'.format(dataset_id, e))
if self.swallow_results:
return False
else:
return {}
|
python
|
def delete_dataset(self, dataset_id, delete_contents=False, project_id=None):
"""Delete a BigQuery dataset.
Parameters
----------
dataset_id : str
Unique ``str`` identifying the dataset with the project (the
referenceId of the dataset)
delete_contents : bool, optional
If True, forces the deletion of the dataset even when the dataset
contains data (Default = False)
project_id: str, optional
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
Union[bool, dict]
``bool`` indicating if the delete was successful or not, or response
from BigQuery if swallow_results is set for False
Raises
-------
HttpError
404 when dataset with dataset_id does not exist
"""
project_id = self._get_project_id(project_id)
try:
datasets = self.bigquery.datasets()
request = datasets.delete(projectId=project_id,
datasetId=dataset_id,
deleteContents=delete_contents)
response = request.execute(num_retries=self.num_retries)
if self.swallow_results:
return True
else:
return response
except HttpError as e:
logger.error(
'Cannot delete dataset {0}: {1}'.format(dataset_id, e))
if self.swallow_results:
return False
else:
return {}
|
[
"def",
"delete_dataset",
"(",
"self",
",",
"dataset_id",
",",
"delete_contents",
"=",
"False",
",",
"project_id",
"=",
"None",
")",
":",
"project_id",
"=",
"self",
".",
"_get_project_id",
"(",
"project_id",
")",
"try",
":",
"datasets",
"=",
"self",
".",
"bigquery",
".",
"datasets",
"(",
")",
"request",
"=",
"datasets",
".",
"delete",
"(",
"projectId",
"=",
"project_id",
",",
"datasetId",
"=",
"dataset_id",
",",
"deleteContents",
"=",
"delete_contents",
")",
"response",
"=",
"request",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"if",
"self",
".",
"swallow_results",
":",
"return",
"True",
"else",
":",
"return",
"response",
"except",
"HttpError",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'Cannot delete dataset {0}: {1}'",
".",
"format",
"(",
"dataset_id",
",",
"e",
")",
")",
"if",
"self",
".",
"swallow_results",
":",
"return",
"False",
"else",
":",
"return",
"{",
"}"
] |
Delete a BigQuery dataset.
Parameters
----------
dataset_id : str
Unique ``str`` identifying the dataset with the project (the
referenceId of the dataset)
delete_contents : bool, optional
If True, forces the deletion of the dataset even when the dataset
contains data (Default = False)
project_id: str, optional
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
Union[bool, dict]
``bool`` indicating if the delete was successful or not, or response
from BigQuery if swallow_results is set for False
Raises
-------
HttpError
404 when dataset with dataset_id does not exist
|
[
"Delete",
"a",
"BigQuery",
"dataset",
"."
] |
88d99de42d954d49fc281460068f0e95003da098
|
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/client.py#L1861-L1904
|
7,510
|
tylertreat/BigQuery-Python
|
bigquery/client.py
|
BigQueryClient.update_dataset
|
def update_dataset(self, dataset_id, friendly_name=None, description=None,
access=None, project_id=None):
"""Updates information in an existing dataset. The update method
replaces the entire dataset resource, whereas the patch method only
replaces fields that are provided in the submitted dataset resource.
Parameters
----------
dataset_id : str
Unique ``str`` identifying the dataset with the project (the
referenceId of the dataset)
friendly_name : str, optional
An optional descriptive name for the dataset.
description : str, optional
An optional description of the dataset.
access : list, optional
Indicating access permissions
project_id: str, optional
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
Union[bool, dict]
``bool`` indicating if the update was successful or not, or
response from BigQuery if swallow_results is set for False.
"""
project_id = self._get_project_id(project_id)
try:
datasets = self.bigquery.datasets()
body = self.dataset_resource(dataset_id,
friendly_name=friendly_name,
description=description,
access=access,
project_id=project_id)
request = datasets.update(projectId=project_id,
datasetId=dataset_id,
body=body)
response = request.execute(num_retries=self.num_retries)
if self.swallow_results:
return True
else:
return response
except HttpError as e:
logger.error(
'Cannot update dataset {0}: {1}'.format(dataset_id, e))
if self.swallow_results:
return False
else:
return {}
|
python
|
def update_dataset(self, dataset_id, friendly_name=None, description=None,
access=None, project_id=None):
"""Updates information in an existing dataset. The update method
replaces the entire dataset resource, whereas the patch method only
replaces fields that are provided in the submitted dataset resource.
Parameters
----------
dataset_id : str
Unique ``str`` identifying the dataset with the project (the
referenceId of the dataset)
friendly_name : str, optional
An optional descriptive name for the dataset.
description : str, optional
An optional description of the dataset.
access : list, optional
Indicating access permissions
project_id: str, optional
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
Union[bool, dict]
``bool`` indicating if the update was successful or not, or
response from BigQuery if swallow_results is set for False.
"""
project_id = self._get_project_id(project_id)
try:
datasets = self.bigquery.datasets()
body = self.dataset_resource(dataset_id,
friendly_name=friendly_name,
description=description,
access=access,
project_id=project_id)
request = datasets.update(projectId=project_id,
datasetId=dataset_id,
body=body)
response = request.execute(num_retries=self.num_retries)
if self.swallow_results:
return True
else:
return response
except HttpError as e:
logger.error(
'Cannot update dataset {0}: {1}'.format(dataset_id, e))
if self.swallow_results:
return False
else:
return {}
|
[
"def",
"update_dataset",
"(",
"self",
",",
"dataset_id",
",",
"friendly_name",
"=",
"None",
",",
"description",
"=",
"None",
",",
"access",
"=",
"None",
",",
"project_id",
"=",
"None",
")",
":",
"project_id",
"=",
"self",
".",
"_get_project_id",
"(",
"project_id",
")",
"try",
":",
"datasets",
"=",
"self",
".",
"bigquery",
".",
"datasets",
"(",
")",
"body",
"=",
"self",
".",
"dataset_resource",
"(",
"dataset_id",
",",
"friendly_name",
"=",
"friendly_name",
",",
"description",
"=",
"description",
",",
"access",
"=",
"access",
",",
"project_id",
"=",
"project_id",
")",
"request",
"=",
"datasets",
".",
"update",
"(",
"projectId",
"=",
"project_id",
",",
"datasetId",
"=",
"dataset_id",
",",
"body",
"=",
"body",
")",
"response",
"=",
"request",
".",
"execute",
"(",
"num_retries",
"=",
"self",
".",
"num_retries",
")",
"if",
"self",
".",
"swallow_results",
":",
"return",
"True",
"else",
":",
"return",
"response",
"except",
"HttpError",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"'Cannot update dataset {0}: {1}'",
".",
"format",
"(",
"dataset_id",
",",
"e",
")",
")",
"if",
"self",
".",
"swallow_results",
":",
"return",
"False",
"else",
":",
"return",
"{",
"}"
] |
Updates information in an existing dataset. The update method
replaces the entire dataset resource, whereas the patch method only
replaces fields that are provided in the submitted dataset resource.
Parameters
----------
dataset_id : str
Unique ``str`` identifying the dataset with the project (the
referenceId of the dataset)
friendly_name : str, optional
An optional descriptive name for the dataset.
description : str, optional
An optional description of the dataset.
access : list, optional
Indicating access permissions
project_id: str, optional
Unique ``str`` identifying the BigQuery project that contains the dataset
Returns
-------
Union[bool, dict]
``bool`` indicating if the update was successful or not, or
response from BigQuery if swallow_results is set for False.
|
[
"Updates",
"information",
"in",
"an",
"existing",
"dataset",
".",
"The",
"update",
"method",
"replaces",
"the",
"entire",
"dataset",
"resource",
"whereas",
"the",
"patch",
"method",
"only",
"replaces",
"fields",
"that",
"are",
"provided",
"in",
"the",
"submitted",
"dataset",
"resource",
"."
] |
88d99de42d954d49fc281460068f0e95003da098
|
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/client.py#L1906-L1956
|
7,511
|
tylertreat/BigQuery-Python
|
bigquery/schema_builder.py
|
schema_from_record
|
def schema_from_record(record, timestamp_parser=default_timestamp_parser):
"""Generate a BigQuery schema given an example of a record that is to be
inserted into BigQuery.
Parameters
----------
record : dict
Example of a record that is to be inserted into BigQuery
timestamp_parser : function, optional
Unary function taking a ``str`` and returning a ``bool`` that is
True if the string represents a date
Returns
-------
Schema: list
"""
return [describe_field(k, v, timestamp_parser=timestamp_parser)
for k, v in list(record.items())]
|
python
|
def schema_from_record(record, timestamp_parser=default_timestamp_parser):
"""Generate a BigQuery schema given an example of a record that is to be
inserted into BigQuery.
Parameters
----------
record : dict
Example of a record that is to be inserted into BigQuery
timestamp_parser : function, optional
Unary function taking a ``str`` and returning a ``bool`` that is
True if the string represents a date
Returns
-------
Schema: list
"""
return [describe_field(k, v, timestamp_parser=timestamp_parser)
for k, v in list(record.items())]
|
[
"def",
"schema_from_record",
"(",
"record",
",",
"timestamp_parser",
"=",
"default_timestamp_parser",
")",
":",
"return",
"[",
"describe_field",
"(",
"k",
",",
"v",
",",
"timestamp_parser",
"=",
"timestamp_parser",
")",
"for",
"k",
",",
"v",
"in",
"list",
"(",
"record",
".",
"items",
"(",
")",
")",
"]"
] |
Generate a BigQuery schema given an example of a record that is to be
inserted into BigQuery.
Parameters
----------
record : dict
Example of a record that is to be inserted into BigQuery
timestamp_parser : function, optional
Unary function taking a ``str`` and returning a ``bool`` that is
True if the string represents a date
Returns
-------
Schema: list
|
[
"Generate",
"a",
"BigQuery",
"schema",
"given",
"an",
"example",
"of",
"a",
"record",
"that",
"is",
"to",
"be",
"inserted",
"into",
"BigQuery",
"."
] |
88d99de42d954d49fc281460068f0e95003da098
|
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/schema_builder.py#L22-L39
|
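Assuming the package is installed, a usage sketch for schema_from_record with a made-up record; nested dicts become record fields and lists become repeated fields, roughly as shown in the comment:

from bigquery.schema_builder import schema_from_record

# Hypothetical record to derive a schema from.
record = {"username": "Bob", "score": 7, "friends": [{"username": "Alice"}]}
print(schema_from_record(record))
# Roughly:
# [{'name': 'username', 'type': 'string', 'mode': 'nullable'},
#  {'name': 'score', 'type': 'integer', 'mode': 'nullable'},
#  {'name': 'friends', 'type': 'record', 'mode': 'repeated',
#   'fields': [{'name': 'username', 'type': 'string', 'mode': 'nullable'}]}]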
7,512
|
tylertreat/BigQuery-Python
|
bigquery/schema_builder.py
|
describe_field
|
def describe_field(k, v, timestamp_parser=default_timestamp_parser):
"""Given a key representing a column name and value representing the value
stored in the column, return a representation of the BigQuery schema
element describing that field. Raise errors if invalid value types are
provided.
Parameters
----------
k : Union[str, unicode]
Key representing the column
v : Union[str, unicode, int, float, datetime, object]
Value mapped to by `k`
Returns
-------
object
Describing the field
Raises
------
Exception
If invalid value types are provided.
Examples
--------
>>> describe_field("username", "Bob")
{"name": "username", "type": "string", "mode": "nullable"}
>>> describe_field("users", [{"username": "Bob"}])
{"name": "users", "type": "record", "mode": "repeated",
"fields": [{"name":"username","type":"string","mode":"nullable"}]}
"""
def bq_schema_field(name, bq_type, mode):
return {"name": name, "type": bq_type, "mode": mode}
if isinstance(v, list):
if len(v) == 0:
raise Exception(
"Can't describe schema because of empty list {0}:[]".format(k))
v = v[0]
mode = "repeated"
else:
mode = "nullable"
bq_type = bigquery_type(v, timestamp_parser=timestamp_parser)
if not bq_type:
raise InvalidTypeException(k, v)
field = bq_schema_field(k, bq_type, mode)
if bq_type == "record":
try:
field['fields'] = schema_from_record(v, timestamp_parser)
except InvalidTypeException as e:
# recursively construct the key causing the error
raise InvalidTypeException("%s.%s" % (k, e.key), e.value)
return field
|
python
|
def describe_field(k, v, timestamp_parser=default_timestamp_parser):
"""Given a key representing a column name and value representing the value
stored in the column, return a representation of the BigQuery schema
element describing that field. Raise errors if invalid value types are
provided.
Parameters
----------
k : Union[str, unicode]
Key representing the column
v : Union[str, unicode, int, float, datetime, object]
Value mapped to by `k`
Returns
-------
object
Describing the field
Raises
------
Exception
If invalid value types are provided.
Examples
--------
>>> describe_field("username", "Bob")
{"name": "username", "type": "string", "mode": "nullable"}
>>> describe_field("users", [{"username": "Bob"}])
{"name": "users", "type": "record", "mode": "repeated",
"fields": [{"name":"username","type":"string","mode":"nullable"}]}
"""
def bq_schema_field(name, bq_type, mode):
return {"name": name, "type": bq_type, "mode": mode}
if isinstance(v, list):
if len(v) == 0:
raise Exception(
"Can't describe schema because of empty list {0}:[]".format(k))
v = v[0]
mode = "repeated"
else:
mode = "nullable"
bq_type = bigquery_type(v, timestamp_parser=timestamp_parser)
if not bq_type:
raise InvalidTypeException(k, v)
field = bq_schema_field(k, bq_type, mode)
if bq_type == "record":
try:
field['fields'] = schema_from_record(v, timestamp_parser)
except InvalidTypeException as e:
# recursively construct the key causing the error
raise InvalidTypeException("%s.%s" % (k, e.key), e.value)
return field
|
[
"def",
"describe_field",
"(",
"k",
",",
"v",
",",
"timestamp_parser",
"=",
"default_timestamp_parser",
")",
":",
"def",
"bq_schema_field",
"(",
"name",
",",
"bq_type",
",",
"mode",
")",
":",
"return",
"{",
"\"name\"",
":",
"name",
",",
"\"type\"",
":",
"bq_type",
",",
"\"mode\"",
":",
"mode",
"}",
"if",
"isinstance",
"(",
"v",
",",
"list",
")",
":",
"if",
"len",
"(",
"v",
")",
"==",
"0",
":",
"raise",
"Exception",
"(",
"\"Can't describe schema because of empty list {0}:[]\"",
".",
"format",
"(",
"k",
")",
")",
"v",
"=",
"v",
"[",
"0",
"]",
"mode",
"=",
"\"repeated\"",
"else",
":",
"mode",
"=",
"\"nullable\"",
"bq_type",
"=",
"bigquery_type",
"(",
"v",
",",
"timestamp_parser",
"=",
"timestamp_parser",
")",
"if",
"not",
"bq_type",
":",
"raise",
"InvalidTypeException",
"(",
"k",
",",
"v",
")",
"field",
"=",
"bq_schema_field",
"(",
"k",
",",
"bq_type",
",",
"mode",
")",
"if",
"bq_type",
"==",
"\"record\"",
":",
"try",
":",
"field",
"[",
"'fields'",
"]",
"=",
"schema_from_record",
"(",
"v",
",",
"timestamp_parser",
")",
"except",
"InvalidTypeException",
"as",
"e",
":",
"# recursively construct the key causing the error",
"raise",
"InvalidTypeException",
"(",
"\"%s.%s\"",
"%",
"(",
"k",
",",
"e",
".",
"key",
")",
",",
"e",
".",
"value",
")",
"return",
"field"
] |
Given a key representing a column name and value representing the value
stored in the column, return a representation of the BigQuery schema
element describing that field. Raise errors if invalid value types are
provided.
Parameters
----------
k : Union[str, unicode]
Key representing the column
v : Union[str, unicode, int, float, datetime, object]
Value mapped to by `k`
Returns
-------
object
Describing the field
Raises
------
Exception
If invalid value types are provided.
Examples
--------
>>> describe_field("username", "Bob")
{"name": "username", "type": "string", "mode": "nullable"}
>>> describe_field("users", [{"username": "Bob"}])
{"name": "users", "type": "record", "mode": "repeated",
"fields": [{"name":"username","type":"string","mode":"nullable"}]}
|
[
"Given",
"a",
"key",
"representing",
"a",
"column",
"name",
"and",
"value",
"representing",
"the",
"value",
"stored",
"in",
"the",
"column",
"return",
"a",
"representation",
"of",
"the",
"BigQuery",
"schema",
"element",
"describing",
"that",
"field",
".",
"Raise",
"errors",
"if",
"invalid",
"value",
"types",
"are",
"provided",
"."
] |
88d99de42d954d49fc281460068f0e95003da098
|
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/schema_builder.py#L42-L98
|
7,513
|
tylertreat/BigQuery-Python
|
bigquery/query_builder.py
|
render_query
|
def render_query(dataset, tables, select=None, conditions=None,
groupings=None, having=None, order_by=None, limit=None):
"""Render a query that will run over the given tables using the specified
parameters.
Parameters
----------
dataset : str
The BigQuery dataset to query data from
tables : Union[dict, list]
The table in `dataset` to query.
select : dict, optional
The keys function as column names and the values function as options to
apply to the select field such as alias and format. For example,
select['start_time'] might have the form
{'alias': 'StartTime', 'format': 'INTEGER-FORMAT_UTC_USEC'}, which
would be represented as 'SEC_TO_TIMESTAMP(INTEGER(start_time)) as
StartTime' in a query. Pass `None` to select all.
conditions : list, optional
a ``list`` of ``dict`` objects to filter results by. Each dict should
have the keys 'field', 'type', and 'comparators'. The first two map to
strings representing the field (e.g. 'foo') and type (e.g. 'FLOAT').
'comparators' maps to another ``dict`` containing the keys 'condition',
'negate', and 'value'.
If 'comparators' = {'condition': '>=', 'negate': False, 'value': 1},
this example will be rendered as 'foo >= FLOAT('1')' in the query.
groupings : list, optional
``list`` of field names to group by
order_by : dict, optional
Keys = {'field', 'direction'}. `dict` should be formatted as
{'field':'TimeStamp', 'direction':'desc'} or similar
limit : int, optional
Limit the amount of data needed to be returned.
Returns
-------
str
A rendered query
"""
if None in (dataset, tables):
return None
query = "%s %s %s %s %s %s %s" % (
_render_select(select),
_render_sources(dataset, tables),
_render_conditions(conditions),
_render_groupings(groupings),
_render_having(having),
_render_order(order_by),
_render_limit(limit)
)
return query
|
python
|
def render_query(dataset, tables, select=None, conditions=None,
groupings=None, having=None, order_by=None, limit=None):
"""Render a query that will run over the given tables using the specified
parameters.
Parameters
----------
dataset : str
The BigQuery dataset to query data from
tables : Union[dict, list]
The table in `dataset` to query.
select : dict, optional
The keys function as column names and the values function as options to
apply to the select field such as alias and format. For example,
select['start_time'] might have the form
{'alias': 'StartTime', 'format': 'INTEGER-FORMAT_UTC_USEC'}, which
would be represented as 'SEC_TO_TIMESTAMP(INTEGER(start_time)) as
StartTime' in a query. Pass `None` to select all.
conditions : list, optional
a ``list`` of ``dict`` objects to filter results by. Each dict should
have the keys 'field', 'type', and 'comparators'. The first two map to
strings representing the field (e.g. 'foo') and type (e.g. 'FLOAT').
'comparators' maps to another ``dict`` containing the keys 'condition',
'negate', and 'value'.
If 'comparators' = {'condition': '>=', 'negate': False, 'value': 1},
this example will be rendered as 'foo >= FLOAT('1')' in the query.
groupings : list, optional
``list`` of field names to group by
order_by : dict, optional
Keys = {'field', 'direction'}. `dict` should be formatted as
{'field':'TimeStamp', 'direction':'desc'} or similar
limit : int, optional
Limit the amount of data needed to be returned.
Returns
-------
str
A rendered query
"""
if None in (dataset, tables):
return None
query = "%s %s %s %s %s %s %s" % (
_render_select(select),
_render_sources(dataset, tables),
_render_conditions(conditions),
_render_groupings(groupings),
_render_having(having),
_render_order(order_by),
_render_limit(limit)
)
return query
|
[
"def",
"render_query",
"(",
"dataset",
",",
"tables",
",",
"select",
"=",
"None",
",",
"conditions",
"=",
"None",
",",
"groupings",
"=",
"None",
",",
"having",
"=",
"None",
",",
"order_by",
"=",
"None",
",",
"limit",
"=",
"None",
")",
":",
"if",
"None",
"in",
"(",
"dataset",
",",
"tables",
")",
":",
"return",
"None",
"query",
"=",
"\"%s %s %s %s %s %s %s\"",
"%",
"(",
"_render_select",
"(",
"select",
")",
",",
"_render_sources",
"(",
"dataset",
",",
"tables",
")",
",",
"_render_conditions",
"(",
"conditions",
")",
",",
"_render_groupings",
"(",
"groupings",
")",
",",
"_render_having",
"(",
"having",
")",
",",
"_render_order",
"(",
"order_by",
")",
",",
"_render_limit",
"(",
"limit",
")",
")",
"return",
"query"
] |
Render a query that will run over the given tables using the specified
parameters.
Parameters
----------
dataset : str
The BigQuery dataset to query data from
tables : Union[dict, list]
The table in `dataset` to query.
select : dict, optional
The keys function as column names and the values function as options to
apply to the select field such as alias and format. For example,
select['start_time'] might have the form
{'alias': 'StartTime', 'format': 'INTEGER-FORMAT_UTC_USEC'}, which
would be represented as 'SEC_TO_TIMESTAMP(INTEGER(start_time)) as
StartTime' in a query. Pass `None` to select all.
conditions : list, optional
a ``list`` of ``dict`` objects to filter results by. Each dict should
have the keys 'field', 'type', and 'comparators'. The first two map to
strings representing the field (e.g. 'foo') and type (e.g. 'FLOAT').
'comparators' maps to another ``dict`` containing the keys 'condition',
'negate', and 'value'.
If 'comparators' = {'condition': '>=', 'negate': False, 'value': 1},
this example will be rendered as 'foo >= FLOAT('1')' in the query.
groupings : list, optional
``list`` of field names to group by
order_by : dict, optional
Keys = {'field', 'direction'}. `dict` should be formatted as
{'field':'TimeStamp', 'direction':'desc'} or similar
limit : int, optional
Limit the amount of data needed to be returned.
Returns
-------
str
A rendered query
|
[
"Render",
"a",
"query",
"that",
"will",
"run",
"over",
"the",
"given",
"tables",
"using",
"the",
"specified",
"parameters",
"."
] |
88d99de42d954d49fc281460068f0e95003da098
|
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/query_builder.py#L7-L59
|
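Assuming the package is installed, a minimal usage sketch of render_query with hypothetical dataset and table names; the rendered legacy-SQL string comes out roughly as in the comment, with exact whitespace and clause rendering depending on the helper functions below:

from bigquery.query_builder import render_query

query = render_query(
    dataset='dataset',
    tables=['2017_05_appid'],
    select={'start_time': {'alias': 'StartTime'}},
    order_by={'field': 'start_time', 'direction': 'desc'},
    limit=10,
)
print(query)
# Roughly: SELECT start_time as StartTime FROM [dataset.2017_05_appid]
#          ORDER BY start_time desc LIMIT 10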
7,514
|
tylertreat/BigQuery-Python
|
bigquery/query_builder.py
|
_render_select
|
def _render_select(selections):
"""Render the selection part of a query.
Parameters
----------
selections : dict
Selections for a table
Returns
-------
str
A string for the "select" part of a query
See Also
--------
render_query : Further clarification of `selections` dict formatting
"""
if not selections:
return 'SELECT *'
rendered_selections = []
for name, options in selections.items():
if not isinstance(options, list):
options = [options]
original_name = name
for options_dict in options:
name = original_name
alias = options_dict.get('alias')
alias = "as %s" % alias if alias else ""
formatter = options_dict.get('format')
if formatter:
name = _format_select(formatter, name)
rendered_selections.append("%s %s" % (name, alias))
return "SELECT " + ", ".join(rendered_selections)
|
python
|
def _render_select(selections):
"""Render the selection part of a query.
Parameters
----------
selections : dict
Selections for a table
Returns
-------
str
A string for the "select" part of a query
See Also
--------
render_query : Further clarification of `selections` dict formatting
"""
if not selections:
return 'SELECT *'
rendered_selections = []
for name, options in selections.items():
if not isinstance(options, list):
options = [options]
original_name = name
for options_dict in options:
name = original_name
alias = options_dict.get('alias')
alias = "as %s" % alias if alias else ""
formatter = options_dict.get('format')
if formatter:
name = _format_select(formatter, name)
rendered_selections.append("%s %s" % (name, alias))
return "SELECT " + ", ".join(rendered_selections)
|
[
"def",
"_render_select",
"(",
"selections",
")",
":",
"if",
"not",
"selections",
":",
"return",
"'SELECT *'",
"rendered_selections",
"=",
"[",
"]",
"for",
"name",
",",
"options",
"in",
"selections",
".",
"items",
"(",
")",
":",
"if",
"not",
"isinstance",
"(",
"options",
",",
"list",
")",
":",
"options",
"=",
"[",
"options",
"]",
"original_name",
"=",
"name",
"for",
"options_dict",
"in",
"options",
":",
"name",
"=",
"original_name",
"alias",
"=",
"options_dict",
".",
"get",
"(",
"'alias'",
")",
"alias",
"=",
"\"as %s\"",
"%",
"alias",
"if",
"alias",
"else",
"\"\"",
"formatter",
"=",
"options_dict",
".",
"get",
"(",
"'format'",
")",
"if",
"formatter",
":",
"name",
"=",
"_format_select",
"(",
"formatter",
",",
"name",
")",
"rendered_selections",
".",
"append",
"(",
"\"%s %s\"",
"%",
"(",
"name",
",",
"alias",
")",
")",
"return",
"\"SELECT \"",
"+",
"\", \"",
".",
"join",
"(",
"rendered_selections",
")"
] |
Render the selection part of a query.
Parameters
----------
selections : dict
Selections for a table
Returns
-------
str
A string for the "select" part of a query
See Also
--------
render_query : Further clarification of `selections` dict formatting
|
[
"Render",
"the",
"selection",
"part",
"of",
"a",
"query",
"."
] |
88d99de42d954d49fc281460068f0e95003da098
|
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/query_builder.py#L62-L100
|
7,515
|
tylertreat/BigQuery-Python
|
bigquery/query_builder.py
|
_format_select
|
def _format_select(formatter, name):
"""Modify the query selector by applying any formatters to it.
Parameters
----------
formatter : str
Hyphen-delimited formatter string where formatters are
applied inside-out, e.g. the formatter string
SEC_TO_MICRO-INTEGER-FORMAT_UTC_USEC applied to the selector
foo would result in FORMAT_UTC_USEC(INTEGER(foo*1000000)).
name: str
The name of the selector to apply formatters to.
Returns
-------
str
The formatted selector
"""
for caster in formatter.split('-'):
if caster == 'SEC_TO_MICRO':
name = "%s*1000000" % name
elif ':' in caster:
caster, args = caster.split(':')
name = "%s(%s,%s)" % (caster, name, args)
else:
name = "%s(%s)" % (caster, name)
return name
|
python
|
def _format_select(formatter, name):
"""Modify the query selector by applying any formatters to it.
Parameters
----------
formatter : str
Hyphen-delimited formatter string where formatters are
applied inside-out, e.g. the formatter string
SEC_TO_MICRO-INTEGER-FORMAT_UTC_USEC applied to the selector
foo would result in FORMAT_UTC_USEC(INTEGER(foo*1000000)).
name: str
The name of the selector to apply formatters to.
Returns
-------
str
The formatted selector
"""
for caster in formatter.split('-'):
if caster == 'SEC_TO_MICRO':
name = "%s*1000000" % name
elif ':' in caster:
caster, args = caster.split(':')
name = "%s(%s,%s)" % (caster, name, args)
else:
name = "%s(%s)" % (caster, name)
return name
|
[
"def",
"_format_select",
"(",
"formatter",
",",
"name",
")",
":",
"for",
"caster",
"in",
"formatter",
".",
"split",
"(",
"'-'",
")",
":",
"if",
"caster",
"==",
"'SEC_TO_MICRO'",
":",
"name",
"=",
"\"%s*1000000\"",
"%",
"name",
"elif",
"':'",
"in",
"caster",
":",
"caster",
",",
"args",
"=",
"caster",
".",
"split",
"(",
"':'",
")",
"name",
"=",
"\"%s(%s,%s)\"",
"%",
"(",
"caster",
",",
"name",
",",
"args",
")",
"else",
":",
"name",
"=",
"\"%s(%s)\"",
"%",
"(",
"caster",
",",
"name",
")",
"return",
"name"
] |
Modify the query selector by applying any formatters to it.
Parameters
----------
formatter : str
Hyphen-delimited formatter string where formatters are
applied inside-out, e.g. the formatter string
SEC_TO_MICRO-INTEGER-FORMAT_UTC_USEC applied to the selector
foo would result in FORMAT_UTC_USEC(INTEGER(foo*1000000)).
name: str
The name of the selector to apply formatters to.
Returns
-------
str
The formatted selector
|
[
"Modify",
"the",
"query",
"selector",
"by",
"applying",
"any",
"formatters",
"to",
"it",
"."
] |
88d99de42d954d49fc281460068f0e95003da098
|
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/query_builder.py#L103-L131
|
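A quick illustrative call (not part of the source record) showing how the hyphen-delimited formatter string unrolls through _format_select as defined above; the selectors "foo"/"bar" and the LEFT:3 caster are invented examples.

    # Illustrative only: selectors and the LEFT:3 caster are made up.
    print(_format_select("SEC_TO_MICRO-INTEGER-FORMAT_UTC_USEC", "foo"))
    # FORMAT_UTC_USEC(INTEGER(foo*1000000))
    print(_format_select("LEFT:3", "bar"))
    # LEFT(bar,3)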
7,516
|
tylertreat/BigQuery-Python
|
bigquery/query_builder.py
|
_render_sources
|
def _render_sources(dataset, tables):
"""Render the source part of a query.
Parameters
----------
dataset : str
The data set to fetch log data from.
tables : Union[dict, list]
The tables to fetch log data from
Returns
-------
str
A string that represents the "from" part of a query.
"""
if isinstance(tables, dict):
if tables.get('date_range', False):
try:
dataset_table = '.'.join([dataset, tables['table']])
return "FROM (TABLE_DATE_RANGE([{}], TIMESTAMP('{}'),"\
" TIMESTAMP('{}'))) ".format(dataset_table,
tables['from_date'],
tables['to_date'])
except KeyError as exp:
logger.warn(
'Missing parameter %s in selecting sources' % (exp))
else:
return "FROM " + ", ".join(
["[%s.%s]" % (dataset, table) for table in tables])
|
python
|
def _render_sources(dataset, tables):
"""Render the source part of a query.
Parameters
----------
dataset : str
The data set to fetch log data from.
tables : Union[dict, list]
The tables to fetch log data from
Returns
-------
str
A string that represents the "from" part of a query.
"""
if isinstance(tables, dict):
if tables.get('date_range', False):
try:
dataset_table = '.'.join([dataset, tables['table']])
return "FROM (TABLE_DATE_RANGE([{}], TIMESTAMP('{}'),"\
" TIMESTAMP('{}'))) ".format(dataset_table,
tables['from_date'],
tables['to_date'])
except KeyError as exp:
logger.warn(
'Missing parameter %s in selecting sources' % (exp))
else:
return "FROM " + ", ".join(
["[%s.%s]" % (dataset, table) for table in tables])
|
[
"def",
"_render_sources",
"(",
"dataset",
",",
"tables",
")",
":",
"if",
"isinstance",
"(",
"tables",
",",
"dict",
")",
":",
"if",
"tables",
".",
"get",
"(",
"'date_range'",
",",
"False",
")",
":",
"try",
":",
"dataset_table",
"=",
"'.'",
".",
"join",
"(",
"[",
"dataset",
",",
"tables",
"[",
"'table'",
"]",
"]",
")",
"return",
"\"FROM (TABLE_DATE_RANGE([{}], TIMESTAMP('{}'),\"",
"\" TIMESTAMP('{}'))) \"",
".",
"format",
"(",
"dataset_table",
",",
"tables",
"[",
"'from_date'",
"]",
",",
"tables",
"[",
"'to_date'",
"]",
")",
"except",
"KeyError",
"as",
"exp",
":",
"logger",
".",
"warn",
"(",
"'Missing parameter %s in selecting sources'",
"%",
"(",
"exp",
")",
")",
"else",
":",
"return",
"\"FROM \"",
"+",
"\", \"",
".",
"join",
"(",
"[",
"\"[%s.%s]\"",
"%",
"(",
"dataset",
",",
"table",
")",
"for",
"table",
"in",
"tables",
"]",
")"
] |
Render the source part of a query.
Parameters
----------
dataset : str
The data set to fetch log data from.
tables : Union[dict, list]
The tables to fetch log data from
Returns
-------
str
A string that represents the "from" part of a query.
|
[
"Render",
"the",
"source",
"part",
"of",
"a",
"query",
"."
] |
88d99de42d954d49fc281460068f0e95003da098
|
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/query_builder.py#L134-L164
|
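For clarity, a small sketch of the two shapes of `tables` that _render_sources accepts; the dataset and table names are invented examples.

    # List form: each table becomes a bracketed dataset.table reference.
    _render_sources("mydataset", ["events_2015", "events_2016"])
    # 'FROM [mydataset.events_2015], [mydataset.events_2016]'

    # Dict form with a date range:
    _render_sources("mydataset", {
        "date_range": True,
        "table": "events",
        "from_date": "2016-01-01",
        "to_date": "2016-01-31",
    })
    # "FROM (TABLE_DATE_RANGE([mydataset.events], TIMESTAMP('2016-01-01'), TIMESTAMP('2016-01-31'))) "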
7,517
|
tylertreat/BigQuery-Python
|
bigquery/query_builder.py
|
_render_conditions
|
def _render_conditions(conditions):
"""Render the conditions part of a query.
Parameters
----------
conditions : list
A list of dictionary items to filter a table.
Returns
-------
str
A string that represents the "where" part of a query
See Also
--------
render_query : Further clarification of `conditions` formatting.
"""
if not conditions:
return ""
rendered_conditions = []
for condition in conditions:
field = condition.get('field')
field_type = condition.get('type')
comparators = condition.get('comparators')
if None in (field, field_type, comparators) or not comparators:
logger.warn('Invalid condition passed in: %s' % condition)
continue
rendered_conditions.append(
_render_condition(field, field_type, comparators))
if not rendered_conditions:
return ""
return "WHERE %s" % (" AND ".join(rendered_conditions))
|
python
|
def _render_conditions(conditions):
"""Render the conditions part of a query.
Parameters
----------
conditions : list
A list of dictionary items to filter a table.
Returns
-------
str
A string that represents the "where" part of a query
See Also
--------
render_query : Further clarification of `conditions` formatting.
"""
if not conditions:
return ""
rendered_conditions = []
for condition in conditions:
field = condition.get('field')
field_type = condition.get('type')
comparators = condition.get('comparators')
if None in (field, field_type, comparators) or not comparators:
logger.warn('Invalid condition passed in: %s' % condition)
continue
rendered_conditions.append(
_render_condition(field, field_type, comparators))
if not rendered_conditions:
return ""
return "WHERE %s" % (" AND ".join(rendered_conditions))
|
[
"def",
"_render_conditions",
"(",
"conditions",
")",
":",
"if",
"not",
"conditions",
":",
"return",
"\"\"",
"rendered_conditions",
"=",
"[",
"]",
"for",
"condition",
"in",
"conditions",
":",
"field",
"=",
"condition",
".",
"get",
"(",
"'field'",
")",
"field_type",
"=",
"condition",
".",
"get",
"(",
"'type'",
")",
"comparators",
"=",
"condition",
".",
"get",
"(",
"'comparators'",
")",
"if",
"None",
"in",
"(",
"field",
",",
"field_type",
",",
"comparators",
")",
"or",
"not",
"comparators",
":",
"logger",
".",
"warn",
"(",
"'Invalid condition passed in: %s'",
"%",
"condition",
")",
"continue",
"rendered_conditions",
".",
"append",
"(",
"_render_condition",
"(",
"field",
",",
"field_type",
",",
"comparators",
")",
")",
"if",
"not",
"rendered_conditions",
":",
"return",
"\"\"",
"return",
"\"WHERE %s\"",
"%",
"(",
"\" AND \"",
".",
"join",
"(",
"rendered_conditions",
")",
")"
] |
Render the conditions part of a query.
Parameters
----------
conditions : list
A list of dictionary items to filter a table.
Returns
-------
str
A string that represents the "where" part of a query
See Also
--------
render_query : Further clarification of `conditions` formatting.
|
[
"Render",
"the",
"conditions",
"part",
"of",
"a",
"query",
"."
] |
88d99de42d954d49fc281460068f0e95003da098
|
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/query_builder.py#L167-L205
|
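A minimal sketch of the condition-dict shape _render_conditions expects; the field name and value are invented examples.

    conditions = [{
        "field": "status",
        "type": "STRING",
        "comparators": [{"condition": "==", "negate": False, "value": "active"}],
    }]
    _render_conditions(conditions)
    # "WHERE (status == STRING('active'))"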
7,518
|
tylertreat/BigQuery-Python
|
bigquery/query_builder.py
|
_render_condition
|
def _render_condition(field, field_type, comparators):
"""Render a single query condition.
Parameters
----------
field : str
The field the condition applies to
field_type : str
The data type of the field.
comparators : array_like
An iterable of logic operators to use.
Returns
-------
str
a condition string.
"""
field_type = field_type.upper()
negated_conditions, normal_conditions = [], []
for comparator in comparators:
condition = comparator.get("condition").upper()
negated = "NOT " if comparator.get("negate") else ""
value = comparator.get("value")
if condition == "IN":
if isinstance(value, (list, tuple, set)):
value = ', '.join(
sorted([_render_condition_value(v, field_type)
for v in value])
)
else:
value = _render_condition_value(value, field_type)
value = "(" + value + ")"
elif condition == "IS NULL" or condition == "IS NOT NULL":
return field + " " + condition
elif condition == "BETWEEN":
if isinstance(value, (tuple, list, set)) and len(value) == 2:
value = ' AND '.join(
sorted([_render_condition_value(v, field_type)
for v in value])
)
elif isinstance(value, (tuple, list, set)) and len(value) != 2:
logger.warn('Invalid condition passed in: %s' % condition)
else:
value = _render_condition_value(value, field_type)
rendered_sub_condition = "%s%s %s %s" % (
negated, field, condition, value)
if comparator.get("negate"):
negated_conditions.append(rendered_sub_condition)
else:
normal_conditions.append(rendered_sub_condition)
rendered_normal = " AND ".join(normal_conditions)
rendered_negated = " AND ".join(negated_conditions)
if rendered_normal and rendered_negated:
return "((%s) AND (%s))" % (rendered_normal, rendered_negated)
return "(%s)" % (rendered_normal or rendered_negated)
|
python
|
def _render_condition(field, field_type, comparators):
"""Render a single query condition.
Parameters
----------
field : str
The field the condition applies to
field_type : str
The data type of the field.
comparators : array_like
An iterable of logic operators to use.
Returns
-------
str
a condition string.
"""
field_type = field_type.upper()
negated_conditions, normal_conditions = [], []
for comparator in comparators:
condition = comparator.get("condition").upper()
negated = "NOT " if comparator.get("negate") else ""
value = comparator.get("value")
if condition == "IN":
if isinstance(value, (list, tuple, set)):
value = ', '.join(
sorted([_render_condition_value(v, field_type)
for v in value])
)
else:
value = _render_condition_value(value, field_type)
value = "(" + value + ")"
elif condition == "IS NULL" or condition == "IS NOT NULL":
return field + " " + condition
elif condition == "BETWEEN":
if isinstance(value, (tuple, list, set)) and len(value) == 2:
value = ' AND '.join(
sorted([_render_condition_value(v, field_type)
for v in value])
)
elif isinstance(value, (tuple, list, set)) and len(value) != 2:
logger.warn('Invalid condition passed in: %s' % condition)
else:
value = _render_condition_value(value, field_type)
rendered_sub_condition = "%s%s %s %s" % (
negated, field, condition, value)
if comparator.get("negate"):
negated_conditions.append(rendered_sub_condition)
else:
normal_conditions.append(rendered_sub_condition)
rendered_normal = " AND ".join(normal_conditions)
rendered_negated = " AND ".join(negated_conditions)
if rendered_normal and rendered_negated:
return "((%s) AND (%s))" % (rendered_normal, rendered_negated)
return "(%s)" % (rendered_normal or rendered_negated)
|
[
"def",
"_render_condition",
"(",
"field",
",",
"field_type",
",",
"comparators",
")",
":",
"field_type",
"=",
"field_type",
".",
"upper",
"(",
")",
"negated_conditions",
",",
"normal_conditions",
"=",
"[",
"]",
",",
"[",
"]",
"for",
"comparator",
"in",
"comparators",
":",
"condition",
"=",
"comparator",
".",
"get",
"(",
"\"condition\"",
")",
".",
"upper",
"(",
")",
"negated",
"=",
"\"NOT \"",
"if",
"comparator",
".",
"get",
"(",
"\"negate\"",
")",
"else",
"\"\"",
"value",
"=",
"comparator",
".",
"get",
"(",
"\"value\"",
")",
"if",
"condition",
"==",
"\"IN\"",
":",
"if",
"isinstance",
"(",
"value",
",",
"(",
"list",
",",
"tuple",
",",
"set",
")",
")",
":",
"value",
"=",
"', '",
".",
"join",
"(",
"sorted",
"(",
"[",
"_render_condition_value",
"(",
"v",
",",
"field_type",
")",
"for",
"v",
"in",
"value",
"]",
")",
")",
"else",
":",
"value",
"=",
"_render_condition_value",
"(",
"value",
",",
"field_type",
")",
"value",
"=",
"\"(\"",
"+",
"value",
"+",
"\")\"",
"elif",
"condition",
"==",
"\"IS NULL\"",
"or",
"condition",
"==",
"\"IS NOT NULL\"",
":",
"return",
"field",
"+",
"\" \"",
"+",
"condition",
"elif",
"condition",
"==",
"\"BETWEEN\"",
":",
"if",
"isinstance",
"(",
"value",
",",
"(",
"tuple",
",",
"list",
",",
"set",
")",
")",
"and",
"len",
"(",
"value",
")",
"==",
"2",
":",
"value",
"=",
"' AND '",
".",
"join",
"(",
"sorted",
"(",
"[",
"_render_condition_value",
"(",
"v",
",",
"field_type",
")",
"for",
"v",
"in",
"value",
"]",
")",
")",
"elif",
"isinstance",
"(",
"value",
",",
"(",
"tuple",
",",
"list",
",",
"set",
")",
")",
"and",
"len",
"(",
"value",
")",
"!=",
"2",
":",
"logger",
".",
"warn",
"(",
"'Invalid condition passed in: %s'",
"%",
"condition",
")",
"else",
":",
"value",
"=",
"_render_condition_value",
"(",
"value",
",",
"field_type",
")",
"rendered_sub_condition",
"=",
"\"%s%s %s %s\"",
"%",
"(",
"negated",
",",
"field",
",",
"condition",
",",
"value",
")",
"if",
"comparator",
".",
"get",
"(",
"\"negate\"",
")",
":",
"negated_conditions",
".",
"append",
"(",
"rendered_sub_condition",
")",
"else",
":",
"normal_conditions",
".",
"append",
"(",
"rendered_sub_condition",
")",
"rendered_normal",
"=",
"\" AND \"",
".",
"join",
"(",
"normal_conditions",
")",
"rendered_negated",
"=",
"\" AND \"",
".",
"join",
"(",
"negated_conditions",
")",
"if",
"rendered_normal",
"and",
"rendered_negated",
":",
"return",
"\"((%s) AND (%s))\"",
"%",
"(",
"rendered_normal",
",",
"rendered_negated",
")",
"return",
"\"(%s)\"",
"%",
"(",
"rendered_normal",
"or",
"rendered_negated",
")"
] |
Render a single query condition.
Parameters
----------
field : str
The field the condition applies to
field_type : str
The data type of the field.
comparators : array_like
An iterable of logic operators to use.
Returns
-------
str
a condition string.
|
[
"Render",
"a",
"single",
"query",
"condition",
"."
] |
88d99de42d954d49fc281460068f0e95003da098
|
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/query_builder.py#L208-L272
|
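Two illustrative renderings of comparators through _render_condition as defined above; field names and values are invented examples.

    _render_condition("user_id", "INTEGER", [
        {"condition": "IN", "value": [1, 2], "negate": False},
    ])
    # "(user_id IN (INTEGER('1'), INTEGER('2')))"

    _render_condition("ts", "TIMESTAMP", [
        {"condition": "BETWEEN", "value": ["2016-01-01", "2016-02-01"], "negate": False},
    ])
    # "(ts BETWEEN TIMESTAMP('2016-01-01') AND TIMESTAMP('2016-02-01'))"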
7,519
|
tylertreat/BigQuery-Python
|
bigquery/query_builder.py
|
_render_condition_value
|
def _render_condition_value(value, field_type):
"""Render a query condition value.
Parameters
----------
value : Union[bool, int, float, str, datetime]
The value of the condition
field_type : str
The data type of the field
Returns
-------
str
A value string.
"""
# BigQuery cannot cast strings to booleans, convert to ints
if field_type == "BOOLEAN":
value = 1 if value else 0
elif field_type in ("STRING", "INTEGER", "FLOAT"):
value = "'%s'" % (value)
elif field_type in ("TIMESTAMP"):
value = "'%s'" % (str(value))
return "%s(%s)" % (field_type, value)
|
python
|
def _render_condition_value(value, field_type):
"""Render a query condition value.
Parameters
----------
value : Union[bool, int, float, str, datetime]
The value of the condition
field_type : str
The data type of the field
Returns
-------
str
A value string.
"""
# BigQuery cannot cast strings to booleans, convert to ints
if field_type == "BOOLEAN":
value = 1 if value else 0
elif field_type in ("STRING", "INTEGER", "FLOAT"):
value = "'%s'" % (value)
elif field_type in ("TIMESTAMP"):
value = "'%s'" % (str(value))
return "%s(%s)" % (field_type, value)
|
[
"def",
"_render_condition_value",
"(",
"value",
",",
"field_type",
")",
":",
"# BigQuery cannot cast strings to booleans, convert to ints",
"if",
"field_type",
"==",
"\"BOOLEAN\"",
":",
"value",
"=",
"1",
"if",
"value",
"else",
"0",
"elif",
"field_type",
"in",
"(",
"\"STRING\"",
",",
"\"INTEGER\"",
",",
"\"FLOAT\"",
")",
":",
"value",
"=",
"\"'%s'\"",
"%",
"(",
"value",
")",
"elif",
"field_type",
"in",
"(",
"\"TIMESTAMP\"",
")",
":",
"value",
"=",
"\"'%s'\"",
"%",
"(",
"str",
"(",
"value",
")",
")",
"return",
"\"%s(%s)\"",
"%",
"(",
"field_type",
",",
"value",
")"
] |
Render a query condition value.
Parameters
----------
value : Union[bool, int, float, str, datetime]
The value of the condition
field_type : str
The data type of the field
Returns
-------
str
A value string.
|
[
"Render",
"a",
"query",
"condition",
"value",
"."
] |
88d99de42d954d49fc281460068f0e95003da098
|
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/query_builder.py#L275-L298
|
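A few illustrative values through _render_condition_value. Note that `("TIMESTAMP")` in the source is a plain string rather than a one-element tuple, so the `in` check is a substring test; it still matches the exact value "TIMESTAMP". Inputs below are invented examples.

    _render_condition_value(True, "BOOLEAN")            # 'BOOLEAN(1)'
    _render_condition_value("bar", "STRING")            # "STRING('bar')"
    _render_condition_value("2016-01-01", "TIMESTAMP")  # "TIMESTAMP('2016-01-01')"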
7,520
|
tylertreat/BigQuery-Python
|
bigquery/query_builder.py
|
_render_having
|
def _render_having(having_conditions):
"""Render the having part of a query.
Parameters
----------
having_conditions : list
A ``list`` of ``dict``s to filter the rows
Returns
-------
str
A string that represents the "having" part of a query.
See Also
--------
render_query : Further clarification of `conditions` formatting.
"""
if not having_conditions:
return ""
rendered_conditions = []
for condition in having_conditions:
field = condition.get('field')
field_type = condition.get('type')
comparators = condition.get('comparators')
if None in (field, field_type, comparators) or not comparators:
logger.warn('Invalid condition passed in: %s' % condition)
continue
rendered_conditions.append(
_render_condition(field, field_type, comparators))
if not rendered_conditions:
return ""
return "HAVING %s" % (" AND ".join(rendered_conditions))
|
python
|
def _render_having(having_conditions):
"""Render the having part of a query.
Parameters
----------
having_conditions : list
A ``list`` of ``dict``s to filter the rows
Returns
-------
str
A string that represents the "having" part of a query.
See Also
--------
render_query : Further clarification of `conditions` formatting.
"""
if not having_conditions:
return ""
rendered_conditions = []
for condition in having_conditions:
field = condition.get('field')
field_type = condition.get('type')
comparators = condition.get('comparators')
if None in (field, field_type, comparators) or not comparators:
logger.warn('Invalid condition passed in: %s' % condition)
continue
rendered_conditions.append(
_render_condition(field, field_type, comparators))
if not rendered_conditions:
return ""
return "HAVING %s" % (" AND ".join(rendered_conditions))
|
[
"def",
"_render_having",
"(",
"having_conditions",
")",
":",
"if",
"not",
"having_conditions",
":",
"return",
"\"\"",
"rendered_conditions",
"=",
"[",
"]",
"for",
"condition",
"in",
"having_conditions",
":",
"field",
"=",
"condition",
".",
"get",
"(",
"'field'",
")",
"field_type",
"=",
"condition",
".",
"get",
"(",
"'type'",
")",
"comparators",
"=",
"condition",
".",
"get",
"(",
"'comparators'",
")",
"if",
"None",
"in",
"(",
"field",
",",
"field_type",
",",
"comparators",
")",
"or",
"not",
"comparators",
":",
"logger",
".",
"warn",
"(",
"'Invalid condition passed in: %s'",
"%",
"condition",
")",
"continue",
"rendered_conditions",
".",
"append",
"(",
"_render_condition",
"(",
"field",
",",
"field_type",
",",
"comparators",
")",
")",
"if",
"not",
"rendered_conditions",
":",
"return",
"\"\"",
"return",
"\"HAVING %s\"",
"%",
"(",
"\" AND \"",
".",
"join",
"(",
"rendered_conditions",
")",
")"
] |
Render the having part of a query.
Parameters
----------
having_conditions : list
A ``list`` of ``dict``s to filter the rows
Returns
-------
str
A string that represents the "having" part of a query.
See Also
--------
render_query : Further clarification of `conditions` formatting.
|
[
"Render",
"the",
"having",
"part",
"of",
"a",
"query",
"."
] |
88d99de42d954d49fc281460068f0e95003da098
|
https://github.com/tylertreat/BigQuery-Python/blob/88d99de42d954d49fc281460068f0e95003da098/bigquery/query_builder.py#L321-L358
|
7,521
|
stlehmann/Flask-MQTT
|
flask_mqtt/__init__.py
|
Mqtt.init_app
|
def init_app(self, app):
# type: (Flask) -> None
"""Init the Flask-MQTT addon."""
self.client_id = app.config.get("MQTT_CLIENT_ID", "")
if isinstance(self.client_id, unicode):
self.client._client_id = self.client_id.encode('utf-8')
else:
self.client._client_id = self.client_id
self.client._transport = app.config.get("MQTT_TRANSPORT", "tcp").lower()
self.client._protocol = app.config.get("MQTT_PROTOCOL_VERSION", MQTTv311)
self.client.on_connect = self._handle_connect
self.client.on_disconnect = self._handle_disconnect
self.username = app.config.get("MQTT_USERNAME")
self.password = app.config.get("MQTT_PASSWORD")
self.broker_url = app.config.get("MQTT_BROKER_URL", "localhost")
self.broker_port = app.config.get("MQTT_BROKER_PORT", 1883)
self.tls_enabled = app.config.get("MQTT_TLS_ENABLED", False)
self.keepalive = app.config.get("MQTT_KEEPALIVE", 60)
self.last_will_topic = app.config.get("MQTT_LAST_WILL_TOPIC")
self.last_will_message = app.config.get("MQTT_LAST_WILL_MESSAGE")
self.last_will_qos = app.config.get("MQTT_LAST_WILL_QOS", 0)
self.last_will_retain = app.config.get("MQTT_LAST_WILL_RETAIN", False)
if self.tls_enabled:
self.tls_ca_certs = app.config["MQTT_TLS_CA_CERTS"]
self.tls_certfile = app.config.get("MQTT_TLS_CERTFILE")
self.tls_keyfile = app.config.get("MQTT_TLS_KEYFILE")
self.tls_cert_reqs = app.config.get("MQTT_TLS_CERT_REQS",
ssl.CERT_REQUIRED)
self.tls_version = app.config.get("MQTT_TLS_VERSION",
ssl.PROTOCOL_TLSv1)
self.tls_ciphers = app.config.get("MQTT_TLS_CIPHERS")
self.tls_insecure = app.config.get("MQTT_TLS_INSECURE", False)
# set last will message
if self.last_will_topic is not None:
self.client.will_set(
self.last_will_topic,
self.last_will_message,
self.last_will_qos,
self.last_will_retain,
)
self._connect()
|
python
|
def init_app(self, app):
# type: (Flask) -> None
"""Init the Flask-MQTT addon."""
self.client_id = app.config.get("MQTT_CLIENT_ID", "")
if isinstance(self.client_id, unicode):
self.client._client_id = self.client_id.encode('utf-8')
else:
self.client._client_id = self.client_id
self.client._transport = app.config.get("MQTT_TRANSPORT", "tcp").lower()
self.client._protocol = app.config.get("MQTT_PROTOCOL_VERSION", MQTTv311)
self.client.on_connect = self._handle_connect
self.client.on_disconnect = self._handle_disconnect
self.username = app.config.get("MQTT_USERNAME")
self.password = app.config.get("MQTT_PASSWORD")
self.broker_url = app.config.get("MQTT_BROKER_URL", "localhost")
self.broker_port = app.config.get("MQTT_BROKER_PORT", 1883)
self.tls_enabled = app.config.get("MQTT_TLS_ENABLED", False)
self.keepalive = app.config.get("MQTT_KEEPALIVE", 60)
self.last_will_topic = app.config.get("MQTT_LAST_WILL_TOPIC")
self.last_will_message = app.config.get("MQTT_LAST_WILL_MESSAGE")
self.last_will_qos = app.config.get("MQTT_LAST_WILL_QOS", 0)
self.last_will_retain = app.config.get("MQTT_LAST_WILL_RETAIN", False)
if self.tls_enabled:
self.tls_ca_certs = app.config["MQTT_TLS_CA_CERTS"]
self.tls_certfile = app.config.get("MQTT_TLS_CERTFILE")
self.tls_keyfile = app.config.get("MQTT_TLS_KEYFILE")
self.tls_cert_reqs = app.config.get("MQTT_TLS_CERT_REQS",
ssl.CERT_REQUIRED)
self.tls_version = app.config.get("MQTT_TLS_VERSION",
ssl.PROTOCOL_TLSv1)
self.tls_ciphers = app.config.get("MQTT_TLS_CIPHERS")
self.tls_insecure = app.config.get("MQTT_TLS_INSECURE", False)
# set last will message
if self.last_will_topic is not None:
self.client.will_set(
self.last_will_topic,
self.last_will_message,
self.last_will_qos,
self.last_will_retain,
)
self._connect()
|
[
"def",
"init_app",
"(",
"self",
",",
"app",
")",
":",
"# type: (Flask) -> None",
"self",
".",
"client_id",
"=",
"app",
".",
"config",
".",
"get",
"(",
"\"MQTT_CLIENT_ID\"",
",",
"\"\"",
")",
"if",
"isinstance",
"(",
"self",
".",
"client_id",
",",
"unicode",
")",
":",
"self",
".",
"client",
".",
"_client_id",
"=",
"self",
".",
"client_id",
".",
"encode",
"(",
"'utf-8'",
")",
"else",
":",
"self",
".",
"client",
".",
"_client_id",
"=",
"self",
".",
"client_id",
"self",
".",
"client",
".",
"_transport",
"=",
"app",
".",
"config",
".",
"get",
"(",
"\"MQTT_TRANSPORT\"",
",",
"\"tcp\"",
")",
".",
"lower",
"(",
")",
"self",
".",
"client",
".",
"_protocol",
"=",
"app",
".",
"config",
".",
"get",
"(",
"\"MQTT_PROTOCOL_VERSION\"",
",",
"MQTTv311",
")",
"self",
".",
"client",
".",
"on_connect",
"=",
"self",
".",
"_handle_connect",
"self",
".",
"client",
".",
"on_disconnect",
"=",
"self",
".",
"_handle_disconnect",
"self",
".",
"username",
"=",
"app",
".",
"config",
".",
"get",
"(",
"\"MQTT_USERNAME\"",
")",
"self",
".",
"password",
"=",
"app",
".",
"config",
".",
"get",
"(",
"\"MQTT_PASSWORD\"",
")",
"self",
".",
"broker_url",
"=",
"app",
".",
"config",
".",
"get",
"(",
"\"MQTT_BROKER_URL\"",
",",
"\"localhost\"",
")",
"self",
".",
"broker_port",
"=",
"app",
".",
"config",
".",
"get",
"(",
"\"MQTT_BROKER_PORT\"",
",",
"1883",
")",
"self",
".",
"tls_enabled",
"=",
"app",
".",
"config",
".",
"get",
"(",
"\"MQTT_TLS_ENABLED\"",
",",
"False",
")",
"self",
".",
"keepalive",
"=",
"app",
".",
"config",
".",
"get",
"(",
"\"MQTT_KEEPALIVE\"",
",",
"60",
")",
"self",
".",
"last_will_topic",
"=",
"app",
".",
"config",
".",
"get",
"(",
"\"MQTT_LAST_WILL_TOPIC\"",
")",
"self",
".",
"last_will_message",
"=",
"app",
".",
"config",
".",
"get",
"(",
"\"MQTT_LAST_WILL_MESSAGE\"",
")",
"self",
".",
"last_will_qos",
"=",
"app",
".",
"config",
".",
"get",
"(",
"\"MQTT_LAST_WILL_QOS\"",
",",
"0",
")",
"self",
".",
"last_will_retain",
"=",
"app",
".",
"config",
".",
"get",
"(",
"\"MQTT_LAST_WILL_RETAIN\"",
",",
"False",
")",
"if",
"self",
".",
"tls_enabled",
":",
"self",
".",
"tls_ca_certs",
"=",
"app",
".",
"config",
"[",
"\"MQTT_TLS_CA_CERTS\"",
"]",
"self",
".",
"tls_certfile",
"=",
"app",
".",
"config",
".",
"get",
"(",
"\"MQTT_TLS_CERTFILE\"",
")",
"self",
".",
"tls_keyfile",
"=",
"app",
".",
"config",
".",
"get",
"(",
"\"MQTT_TLS_KEYFILE\"",
")",
"self",
".",
"tls_cert_reqs",
"=",
"app",
".",
"config",
".",
"get",
"(",
"\"MQTT_TLS_CERT_REQS\"",
",",
"ssl",
".",
"CERT_REQUIRED",
")",
"self",
".",
"tls_version",
"=",
"app",
".",
"config",
".",
"get",
"(",
"\"MQTT_TLS_VERSION\"",
",",
"ssl",
".",
"PROTOCOL_TLSv1",
")",
"self",
".",
"tls_ciphers",
"=",
"app",
".",
"config",
".",
"get",
"(",
"\"MQTT_TLS_CIPHERS\"",
")",
"self",
".",
"tls_insecure",
"=",
"app",
".",
"config",
".",
"get",
"(",
"\"MQTT_TLS_INSECURE\"",
",",
"False",
")",
"# set last will message",
"if",
"self",
".",
"last_will_topic",
"is",
"not",
"None",
":",
"self",
".",
"client",
".",
"will_set",
"(",
"self",
".",
"last_will_topic",
",",
"self",
".",
"last_will_message",
",",
"self",
".",
"last_will_qos",
",",
"self",
".",
"last_will_retain",
",",
")",
"self",
".",
"_connect",
"(",
")"
] |
Init the Flask-MQTT addon.
|
[
"Init",
"the",
"Flask",
"-",
"MQTT",
"addon",
"."
] |
77d474ab87484ae6eaef2fee3bf02406beee2e17
|
https://github.com/stlehmann/Flask-MQTT/blob/77d474ab87484ae6eaef2fee3bf02406beee2e17/flask_mqtt/__init__.py#L87-L133
|
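A minimal configuration sketch for the config keys init_app reads above. The broker address is an invented example, and the assumption that Mqtt(app) calls init_app follows the usual Flask extension pattern; the constructor itself is not shown in this record.

    from flask import Flask
    from flask_mqtt import Mqtt

    app = Flask(__name__)
    app.config["MQTT_BROKER_URL"] = "broker.example.com"  # invented example host
    app.config["MQTT_BROKER_PORT"] = 1883
    app.config["MQTT_KEEPALIVE"] = 60
    app.config["MQTT_TLS_ENABLED"] = False

    mqtt = Mqtt(app)  # assumed to call init_app(app), which then connects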
7,522
|
stlehmann/Flask-MQTT
|
flask_mqtt/__init__.py
|
Mqtt.subscribe
|
def subscribe(self, topic, qos=0):
# type: (str, int) -> Tuple[int, int]
"""
Subscribe to a certain topic.
:param topic: a string specifying the subscription topic to
subscribe to.
:param qos: the desired quality of service level for the subscription.
Defaults to 0.
:rtype: (int, int)
:result: (result, mid)
A topic is a UTF-8 string, which is used by the broker to filter
messages for each connected client. A topic consists of one or more
topic levels. Each topic level is separated by a forward slash
(topic level separator).
The function returns a tuple (result, mid), where result is
MQTT_ERR_SUCCESS to indicate success or (MQTT_ERR_NO_CONN, None) if the
client is not currently connected. mid is the message ID for the
subscribe request. The mid value can be used to track the subscribe
request by checking against the mid argument in the on_subscribe()
callback if it is defined.
**Topic example:** `myhome/groundfloor/livingroom/temperature`
"""
# TODO: add support for list of topics
# don't subscribe if already subscribed
# try to subscribe
result, mid = self.client.subscribe(topic=topic, qos=qos)
# if successful add to topics
if result == MQTT_ERR_SUCCESS:
self.topics[topic] = TopicQos(topic=topic, qos=qos)
logger.debug('Subscribed to topic: {0}, qos: {1}'
.format(topic, qos))
else:
logger.error('Error {0} subscribing to topic: {1}'
.format(result, topic))
return (result, mid)
|
python
|
def subscribe(self, topic, qos=0):
# type: (str, int) -> Tuple[int, int]
"""
Subscribe to a certain topic.
:param topic: a string specifying the subscription topic to
subscribe to.
:param qos: the desired quality of service level for the subscription.
Defaults to 0.
:rtype: (int, int)
:result: (result, mid)
A topic is a UTF-8 string, which is used by the broker to filter
messages for each connected client. A topic consists of one or more
topic levels. Each topic level is separated by a forward slash
(topic level separator).
The function returns a tuple (result, mid), where result is
MQTT_ERR_SUCCESS to indicate success or (MQTT_ERR_NO_CONN, None) if the
client is not currently connected. mid is the message ID for the
subscribe request. The mid value can be used to track the subscribe
request by checking against the mid argument in the on_subscribe()
callback if it is defined.
**Topic example:** `myhome/groundfloor/livingroom/temperature`
"""
# TODO: add support for list of topics
# don't subscribe if already subscribed
# try to subscribe
result, mid = self.client.subscribe(topic=topic, qos=qos)
# if successful add to topics
if result == MQTT_ERR_SUCCESS:
self.topics[topic] = TopicQos(topic=topic, qos=qos)
logger.debug('Subscribed to topic: {0}, qos: {1}'
.format(topic, qos))
else:
logger.error('Error {0} subscribing to topic: {1}'
.format(result, topic))
return (result, mid)
|
[
"def",
"subscribe",
"(",
"self",
",",
"topic",
",",
"qos",
"=",
"0",
")",
":",
"# type: (str, int) -> Tuple[int, int]",
"# TODO: add support for list of topics",
"# don't subscribe if already subscribed",
"# try to subscribe",
"result",
",",
"mid",
"=",
"self",
".",
"client",
".",
"subscribe",
"(",
"topic",
"=",
"topic",
",",
"qos",
"=",
"qos",
")",
"# if successful add to topics",
"if",
"result",
"==",
"MQTT_ERR_SUCCESS",
":",
"self",
".",
"topics",
"[",
"topic",
"]",
"=",
"TopicQos",
"(",
"topic",
"=",
"topic",
",",
"qos",
"=",
"qos",
")",
"logger",
".",
"debug",
"(",
"'Subscribed to topic: {0}, qos: {1}'",
".",
"format",
"(",
"topic",
",",
"qos",
")",
")",
"else",
":",
"logger",
".",
"error",
"(",
"'Error {0} subscribing to topic: {1}'",
".",
"format",
"(",
"result",
",",
"topic",
")",
")",
"return",
"(",
"result",
",",
"mid",
")"
] |
Subscribe to a certain topic.
:param topic: a string specifying the subscription topic to
subscribe to.
:param qos: the desired quality of service level for the subscription.
Defaults to 0.
:rtype: (int, int)
:result: (result, mid)
A topic is a UTF-8 string, which is used by the broker to filter
messages for each connected client. A topic consists of one or more
topic levels. Each topic level is separated by a forward slash
(topic level separator).
The function returns a tuple (result, mid), where result is
MQTT_ERR_SUCCESS to indicate success or (MQTT_ERR_NO_CONN, None) if the
client is not currently connected. mid is the message ID for the
subscribe request. The mid value can be used to track the subscribe
request by checking against the mid argument in the on_subscribe()
callback if it is defined.
**Topic example:** `myhome/groundfloor/livingroom/temperature`
|
[
"Subscribe",
"to",
"a",
"certain",
"topic",
"."
] |
77d474ab87484ae6eaef2fee3bf02406beee2e17
|
https://github.com/stlehmann/Flask-MQTT/blob/77d474ab87484ae6eaef2fee3bf02406beee2e17/flask_mqtt/__init__.py#L225-L268
|
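A short usage sketch checking the (result, mid) tuple the method returns; MQTT_ERR_SUCCESS comes from paho-mqtt, which the extension wraps, and the `mqtt` instance is assumed to be configured as in the init_app sketch above.

    from paho.mqtt.client import MQTT_ERR_SUCCESS

    # mqtt is an Mqtt instance already connected to a broker (assumption).
    result, mid = mqtt.subscribe("myhome/groundfloor/livingroom/temperature", qos=1)
    if result == MQTT_ERR_SUCCESS:
        print("subscribe request sent, mid={}".format(mid))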
7,523
|
stlehmann/Flask-MQTT
|
flask_mqtt/__init__.py
|
Mqtt.unsubscribe
|
def unsubscribe(self, topic):
# type: (str) -> Optional[Tuple[int, int]]
"""
Unsubscribe from a single topic.
:param topic: a single string that is the subscription topic to
unsubscribe from
:rtype: (int, int)
:result: (result, mid)
Returns a tuple (result, mid), where result is MQTT_ERR_SUCCESS
to indicate success or (MQTT_ERR_NO_CONN, None) if the client is not
currently connected.
mid is the message ID for the unsubscribe request. The mid value can be
used to track the unsubscribe request by checking against the mid
argument in the on_unsubscribe() callback if it is defined.
"""
# don't unsubscribe if not in topics
if topic in self.topics:
result, mid = self.client.unsubscribe(topic)
if result == MQTT_ERR_SUCCESS:
self.topics.pop(topic)
logger.debug('Unsubscribed from topic: {0}'.format(topic))
else:
logger.debug('Error {0} unsubscribing from topic: {1}'
.format(result, topic))
# if successful remove from topics
return result, mid
return None
|
python
|
def unsubscribe(self, topic):
# type: (str) -> Optional[Tuple[int, int]]
"""
Unsubscribe from a single topic.
:param topic: a single string that is the subscription topic to
unsubscribe from
:rtype: (int, int)
:result: (result, mid)
Returns a tuple (result, mid), where result is MQTT_ERR_SUCCESS
to indicate success or (MQTT_ERR_NO_CONN, None) if the client is not
currently connected.
mid is the message ID for the unsubscribe request. The mid value can be
used to track the unsubscribe request by checking against the mid
argument in the on_unsubscribe() callback if it is defined.
"""
# don't unsubscribe if not in topics
if topic in self.topics:
result, mid = self.client.unsubscribe(topic)
if result == MQTT_ERR_SUCCESS:
self.topics.pop(topic)
logger.debug('Unsubscribed from topic: {0}'.format(topic))
else:
logger.debug('Error {0} unsubscribing from topic: {1}'
.format(result, topic))
# if successful remove from topics
return result, mid
return None
|
[
"def",
"unsubscribe",
"(",
"self",
",",
"topic",
")",
":",
"# type: (str) -> Optional[Tuple[int, int]]",
"# don't unsubscribe if not in topics",
"if",
"topic",
"in",
"self",
".",
"topics",
":",
"result",
",",
"mid",
"=",
"self",
".",
"client",
".",
"unsubscribe",
"(",
"topic",
")",
"if",
"result",
"==",
"MQTT_ERR_SUCCESS",
":",
"self",
".",
"topics",
".",
"pop",
"(",
"topic",
")",
"logger",
".",
"debug",
"(",
"'Unsubscribed from topic: {0}'",
".",
"format",
"(",
"topic",
")",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"'Error {0} unsubscribing from topic: {1}'",
".",
"format",
"(",
"result",
",",
"topic",
")",
")",
"# if successful remove from topics",
"return",
"result",
",",
"mid",
"return",
"None"
] |
Unsubscribe from a single topic.
:param topic: a single string that is the subscription topic to
unsubscribe from
:rtype: (int, int)
:result: (result, mid)
Returns a tuple (result, mid), where result is MQTT_ERR_SUCCESS
to indicate success or (MQTT_ERR_NO_CONN, None) if the client is not
currently connected.
mid is the message ID for the unsubscribe request. The mid value can be
used to track the unsubscribe request by checking against the mid
argument in the on_unsubscribe() callback if it is defined.
|
[
"Unsubscribe",
"from",
"a",
"single",
"topic",
"."
] |
77d474ab87484ae6eaef2fee3bf02406beee2e17
|
https://github.com/stlehmann/Flask-MQTT/blob/77d474ab87484ae6eaef2fee3bf02406beee2e17/flask_mqtt/__init__.py#L270-L302
|
7,524
|
stlehmann/Flask-MQTT
|
flask_mqtt/__init__.py
|
Mqtt.unsubscribe_all
|
def unsubscribe_all(self):
# type: () -> None
"""Unsubscribe from all topics."""
topics = list(self.topics.keys())
for topic in topics:
self.unsubscribe(topic)
|
python
|
def unsubscribe_all(self):
# type: () -> None
"""Unsubscribe from all topics."""
topics = list(self.topics.keys())
for topic in topics:
self.unsubscribe(topic)
|
[
"def",
"unsubscribe_all",
"(",
"self",
")",
":",
"# type: () -> None",
"topics",
"=",
"list",
"(",
"self",
".",
"topics",
".",
"keys",
"(",
")",
")",
"for",
"topic",
"in",
"topics",
":",
"self",
".",
"unsubscribe",
"(",
"topic",
")"
] |
Unsubscribe from all topics.
|
[
"Unsubscribe",
"from",
"all",
"topics",
"."
] |
77d474ab87484ae6eaef2fee3bf02406beee2e17
|
https://github.com/stlehmann/Flask-MQTT/blob/77d474ab87484ae6eaef2fee3bf02406beee2e17/flask_mqtt/__init__.py#L304-L309
|
7,525
|
stlehmann/Flask-MQTT
|
flask_mqtt/__init__.py
|
Mqtt.publish
|
def publish(self, topic, payload=None, qos=0, retain=False):
# type: (str, bytes, int, bool) -> Tuple[int, int]
"""
Send a message to the broker.
:param topic: the topic that the message should be published on
:param payload: the actual message to send. If not given, or set to
None a zero length message will be used. Passing an
int or float will result in the payload being
converted to a string representing that number.
If you wish to send a true int/float, use struct.pack()
to create the payload you require.
:param qos: the quality of service level to use
:param retain: if set to True, the message will be set as the
"last known good"/retained message for the topic
:returns: Returns a tuple (result, mid), where result is
MQTT_ERR_SUCCESS to indicate success or MQTT_ERR_NO_CONN
if the client is not currently connected. mid is the message
ID for the publish request.
"""
if not self.connected:
self.client.reconnect()
result, mid = self.client.publish(topic, payload, qos, retain)
if result == MQTT_ERR_SUCCESS:
logger.debug('Published topic {0}: {1}'.format(topic, payload))
else:
logger.error('Error {0} publishing topic {1}'
.format(result, topic))
return (result, mid)
|
python
|
def publish(self, topic, payload=None, qos=0, retain=False):
# type: (str, bytes, int, bool) -> Tuple[int, int]
"""
Send a message to the broker.
:param topic: the topic that the message should be published on
:param payload: the actual message to send. If not given, or set to
None a zero length message will be used. Passing an
int or float will result in the payload being
converted to a string representing that number.
If you wish to send a true int/float, use struct.pack()
to create the payload you require.
:param qos: the quality of service level to use
:param retain: if set to True, the message will be set as the
"last known good"/retained message for the topic
:returns: Returns a tuple (result, mid), where result is
MQTT_ERR_SUCCESS to indicate success or MQTT_ERR_NO_CONN
if the client is not currently connected. mid is the message
ID for the publish request.
"""
if not self.connected:
self.client.reconnect()
result, mid = self.client.publish(topic, payload, qos, retain)
if result == MQTT_ERR_SUCCESS:
logger.debug('Published topic {0}: {1}'.format(topic, payload))
else:
logger.error('Error {0} publishing topic {1}'
.format(result, topic))
return (result, mid)
|
[
"def",
"publish",
"(",
"self",
",",
"topic",
",",
"payload",
"=",
"None",
",",
"qos",
"=",
"0",
",",
"retain",
"=",
"False",
")",
":",
"# type: (str, bytes, int, bool) -> Tuple[int, int]",
"if",
"not",
"self",
".",
"connected",
":",
"self",
".",
"client",
".",
"reconnect",
"(",
")",
"result",
",",
"mid",
"=",
"self",
".",
"client",
".",
"publish",
"(",
"topic",
",",
"payload",
",",
"qos",
",",
"retain",
")",
"if",
"result",
"==",
"MQTT_ERR_SUCCESS",
":",
"logger",
".",
"debug",
"(",
"'Published topic {0}: {1}'",
".",
"format",
"(",
"topic",
",",
"payload",
")",
")",
"else",
":",
"logger",
".",
"error",
"(",
"'Error {0} publishing topic {1}'",
".",
"format",
"(",
"result",
",",
"topic",
")",
")",
"return",
"(",
"result",
",",
"mid",
")"
] |
Send a message to the broker.
:param topic: the topic that the message should be published on
:param payload: the actual message to send. If not given, or set to
None a zero length message will be used. Passing an
int or float will result in the payload being
converted to a string representing that number.
If you wish to send a true int/float, use struct.pack()
to create the payload you require.
:param qos: the quality of service level to use
:param retain: if set to True, the message will be set as the
"last known good"/retained message for the topic
:returns: Returns a tuple (result, mid), where result is
MQTT_ERR_SUCCESS to indicate success or MQTT_ERR_NO_CONN
if the client is not currently connected. mid is the message
ID for the publish request.
|
[
"Send",
"a",
"message",
"to",
"the",
"broker",
"."
] |
77d474ab87484ae6eaef2fee3bf02406beee2e17
|
https://github.com/stlehmann/Flask-MQTT/blob/77d474ab87484ae6eaef2fee3bf02406beee2e17/flask_mqtt/__init__.py#L311-L343
|
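A sketch of publishing both a plain string payload and a packed binary float, as the docstring suggests for true int/float values; topic names are invented and `mqtt` is assumed to be a connected Mqtt instance.

    import struct

    # String payload: numbers passed directly are converted to strings.
    mqtt.publish("myhome/groundfloor/livingroom/temperature", "21.5", qos=1)

    # A genuine binary float payload via struct.pack, per the docstring's advice.
    mqtt.publish("myhome/sensors/raw", struct.pack("!f", 21.5), qos=1, retain=True)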
7,526
|
stlehmann/Flask-MQTT
|
flask_mqtt/__init__.py
|
Mqtt.on_subscribe
|
def on_subscribe(self):
# type: () -> Callable
"""Decorate a callback function to handle subscritions.
**Usage:**::
@mqtt.on_subscribe()
def handle_subscribe(client, userdata, mid, granted_qos):
print('Subscription id {} granted with qos {}.'
.format(mid, granted_qos))
"""
def decorator(handler):
# type: (Callable) -> Callable
self.client.on_subscribe = handler
return handler
return decorator
|
python
|
def on_subscribe(self):
# type: () -> Callable
"""Decorate a callback function to handle subscritions.
**Usage:**::
@mqtt.on_subscribe()
def handle_subscribe(client, userdata, mid, granted_qos):
print('Subscription id {} granted with qos {}.'
.format(mid, granted_qos))
"""
def decorator(handler):
# type: (Callable) -> Callable
self.client.on_subscribe = handler
return handler
return decorator
|
[
"def",
"on_subscribe",
"(",
"self",
")",
":",
"# type: () -> Callable",
"def",
"decorator",
"(",
"handler",
")",
":",
"# type: (Callable) -> Callable",
"self",
".",
"client",
".",
"on_subscribe",
"=",
"handler",
"return",
"handler",
"return",
"decorator"
] |
Decorate a callback function to handle subscriptions.
**Usage:**::
@mqtt.on_subscribe()
def handle_subscribe(client, userdata, mid, granted_qos):
print('Subscription id {} granted with qos {}.'
.format(mid, granted_qos))
|
[
"Decorate",
"a",
"callback",
"function",
"to",
"handle",
"subscritions",
"."
] |
77d474ab87484ae6eaef2fee3bf02406beee2e17
|
https://github.com/stlehmann/Flask-MQTT/blob/77d474ab87484ae6eaef2fee3bf02406beee2e17/flask_mqtt/__init__.py#L421-L437
|
7,527
|
stlehmann/Flask-MQTT
|
flask_mqtt/__init__.py
|
Mqtt.on_unsubscribe
|
def on_unsubscribe(self):
# type: () -> Callable
"""Decorate a callback funtion to handle unsubscribtions.
**Usage:**::
@mqtt.unsubscribe()
def handle_unsubscribe(client, userdata, mid)
print('Unsubscribed from topic (id: {})'
.format(mid)')
"""
def decorator(handler):
# type: (Callable) -> Callable
self.client.on_unsubscribe = handler
return handler
return decorator
|
python
|
def on_unsubscribe(self):
# type: () -> Callable
"""Decorate a callback funtion to handle unsubscribtions.
**Usage:**::
@mqtt.unsubscribe()
def handle_unsubscribe(client, userdata, mid)
print('Unsubscribed from topic (id: {})'
.format(mid)')
"""
def decorator(handler):
# type: (Callable) -> Callable
self.client.on_unsubscribe = handler
return handler
return decorator
|
[
"def",
"on_unsubscribe",
"(",
"self",
")",
":",
"# type: () -> Callable",
"def",
"decorator",
"(",
"handler",
")",
":",
"# type: (Callable) -> Callable",
"self",
".",
"client",
".",
"on_unsubscribe",
"=",
"handler",
"return",
"handler",
"return",
"decorator"
] |
Decorate a callback function to handle unsubscriptions.
**Usage:**::
@mqtt.on_unsubscribe()
def handle_unsubscribe(client, userdata, mid):
print('Unsubscribed from topic (id: {})'
.format(mid))
|
[
"Decorate",
"a",
"callback",
"funtion",
"to",
"handle",
"unsubscribtions",
"."
] |
77d474ab87484ae6eaef2fee3bf02406beee2e17
|
https://github.com/stlehmann/Flask-MQTT/blob/77d474ab87484ae6eaef2fee3bf02406beee2e17/flask_mqtt/__init__.py#L439-L455
|
7,528
|
stlehmann/Flask-MQTT
|
flask_mqtt/__init__.py
|
Mqtt.on_log
|
def on_log(self):
# type: () -> Callable
"""Decorate a callback function to handle MQTT logging.
**Example Usage:**
::
@mqtt.on_log()
def handle_logging(client, userdata, level, buf):
print(client, userdata, level, buf)
"""
def decorator(handler):
# type: (Callable) -> Callable
self.client.on_log = handler
return handler
return decorator
|
python
|
def on_log(self):
# type: () -> Callable
"""Decorate a callback function to handle MQTT logging.
**Example Usage:**
::
@mqtt.on_log()
def handle_logging(client, userdata, level, buf):
print(client, userdata, level, buf)
"""
def decorator(handler):
# type: (Callable) -> Callable
self.client.on_log = handler
return handler
return decorator
|
[
"def",
"on_log",
"(",
"self",
")",
":",
"# type: () -> Callable",
"def",
"decorator",
"(",
"handler",
")",
":",
"# type: (Callable) -> Callable",
"self",
".",
"client",
".",
"on_log",
"=",
"handler",
"return",
"handler",
"return",
"decorator"
] |
Decorate a callback function to handle MQTT logging.
**Example Usage:**
::
@mqtt.on_log()
def handle_logging(client, userdata, level, buf):
print(client, userdata, level, buf)
|
[
"Decorate",
"a",
"callback",
"function",
"to",
"handle",
"MQTT",
"logging",
"."
] |
77d474ab87484ae6eaef2fee3bf02406beee2e17
|
https://github.com/stlehmann/Flask-MQTT/blob/77d474ab87484ae6eaef2fee3bf02406beee2e17/flask_mqtt/__init__.py#L457-L474
|
7,529
|
kennethreitz/bucketstore
|
bucketstore.py
|
list
|
def list():
"""Lists buckets, by name."""
s3 = boto3.resource('s3')
return [b.name for b in s3.buckets.all()]
|
python
|
def list():
"""Lists buckets, by name."""
s3 = boto3.resource('s3')
return [b.name for b in s3.buckets.all()]
|
[
"def",
"list",
"(",
")",
":",
"s3",
"=",
"boto3",
".",
"resource",
"(",
"'s3'",
")",
"return",
"[",
"b",
".",
"name",
"for",
"b",
"in",
"s3",
".",
"buckets",
".",
"all",
"(",
")",
"]"
] |
Lists buckets, by name.
|
[
"Lists",
"buckets",
"by",
"name",
"."
] |
2d79584d44b9c422192d7fdf08a85a49addf83d5
|
https://github.com/kennethreitz/bucketstore/blob/2d79584d44b9c422192d7fdf08a85a49addf83d5/bucketstore.py#L6-L9
|
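A tiny usage sketch; it assumes AWS credentials are already available through boto3's normal credential chain.

    import bucketstore

    # Print every bucket name the account can see.
    for name in bucketstore.list():
        print(name)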
7,530
|
kennethreitz/bucketstore
|
bucketstore.py
|
S3Bucket.delete
|
def delete(self, key=None):
"""Deletes the given key, or the whole bucket."""
# Delete the whole bucket.
if key is None:
# Delete everything in the bucket.
for key in self.all():
key.delete()
# Delete the bucket.
return self._boto_bucket.delete()
# If a key was passed, delete they key.
k = self.key(key)
return k.delete()
|
python
|
def delete(self, key=None):
"""Deletes the given key, or the whole bucket."""
# Delete the whole bucket.
if key is None:
# Delete everything in the bucket.
for key in self.all():
key.delete()
# Delete the bucket.
return self._boto_bucket.delete()
# If a key was passed, delete they key.
k = self.key(key)
return k.delete()
|
[
"def",
"delete",
"(",
"self",
",",
"key",
"=",
"None",
")",
":",
"# Delete the whole bucket.",
"if",
"key",
"is",
"None",
":",
"# Delete everything in the bucket.",
"for",
"key",
"in",
"self",
".",
"all",
"(",
")",
":",
"key",
".",
"delete",
"(",
")",
"# Delete the bucket.",
"return",
"self",
".",
"_boto_bucket",
".",
"delete",
"(",
")",
"# If a key was passed, delete they key.",
"k",
"=",
"self",
".",
"key",
"(",
"key",
")",
"return",
"k",
".",
"delete",
"(",
")"
] |
Deletes the given key, or the whole bucket.
|
[
"Deletes",
"the",
"given",
"key",
"or",
"the",
"whole",
"bucket",
"."
] |
2d79584d44b9c422192d7fdf08a85a49addf83d5
|
https://github.com/kennethreitz/bucketstore/blob/2d79584d44b9c422192d7fdf08a85a49addf83d5/bucketstore.py#L80-L94
|
7,531
|
kennethreitz/bucketstore
|
bucketstore.py
|
S3Key.rename
|
def rename(self, new_name):
"""Renames the key to a given new name."""
# Write the new object.
self.bucket.set(new_name, self.get(), self.meta)
# Delete the current key.
self.delete()
# Set the new name.
self.name = new_name
|
python
|
def rename(self, new_name):
"""Renames the key to a given new name."""
# Write the new object.
self.bucket.set(new_name, self.get(), self.meta)
# Delete the current key.
self.delete()
# Set the new name.
self.name = new_name
|
[
"def",
"rename",
"(",
"self",
",",
"new_name",
")",
":",
"# Write the new object.",
"self",
".",
"bucket",
".",
"set",
"(",
"new_name",
",",
"self",
".",
"get",
"(",
")",
",",
"self",
".",
"meta",
")",
"# Delete the current key.",
"self",
".",
"delete",
"(",
")",
"# Set the new name.",
"self",
".",
"name",
"=",
"new_name"
] |
Renames the key to a given new name.
|
[
"Renames",
"the",
"key",
"to",
"a",
"given",
"new",
"name",
"."
] |
2d79584d44b9c422192d7fdf08a85a49addf83d5
|
https://github.com/kennethreitz/bucketstore/blob/2d79584d44b9c422192d7fdf08a85a49addf83d5/bucketstore.py#L126-L135
|
7,532
|
kennethreitz/bucketstore
|
bucketstore.py
|
S3Key.is_public
|
def is_public(self):
"""Returns True if the public-read ACL is set for the Key."""
for grant in self._boto_object.Acl().grants:
if 'AllUsers' in grant['Grantee'].get('URI', ''):
if grant['Permission'] == 'READ':
return True
return False
|
python
|
def is_public(self):
"""Returns True if the public-read ACL is set for the Key."""
for grant in self._boto_object.Acl().grants:
if 'AllUsers' in grant['Grantee'].get('URI', ''):
if grant['Permission'] == 'READ':
return True
return False
|
[
"def",
"is_public",
"(",
"self",
")",
":",
"for",
"grant",
"in",
"self",
".",
"_boto_object",
".",
"Acl",
"(",
")",
".",
"grants",
":",
"if",
"'AllUsers'",
"in",
"grant",
"[",
"'Grantee'",
"]",
".",
"get",
"(",
"'URI'",
",",
"''",
")",
":",
"if",
"grant",
"[",
"'Permission'",
"]",
"==",
"'READ'",
":",
"return",
"True",
"return",
"False"
] |
Returns True if the public-read ACL is set for the Key.
|
[
"Returns",
"True",
"if",
"the",
"public",
"-",
"read",
"ACL",
"is",
"set",
"for",
"the",
"Key",
"."
] |
2d79584d44b9c422192d7fdf08a85a49addf83d5
|
https://github.com/kennethreitz/bucketstore/blob/2d79584d44b9c422192d7fdf08a85a49addf83d5/bucketstore.py#L142-L149
|
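For context, a standalone boto3 sketch of the same ACL check the method performs; the helper, bucket, and key names are invented and not part of the library.

    import boto3

    def is_public_read(bucket_name, key_name):
        # Mirrors S3Key.is_public: look for an AllUsers grantee with READ permission.
        acl = boto3.resource("s3").Object(bucket_name, key_name).Acl()
        return any(
            "AllUsers" in grant["Grantee"].get("URI", "") and grant["Permission"] == "READ"
            for grant in acl.grants
        )

    print(is_public_read("my-example-bucket", "hello.txt"))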
7,533
|
kennethreitz/bucketstore
|
bucketstore.py
|
S3Key.url
|
def url(self):
"""Returns the public URL for the given key."""
if self.is_public:
return '{0}/{1}/{2}'.format(
self.bucket._boto_s3.meta.client.meta.endpoint_url,
self.bucket.name,
self.name
)
else:
raise ValueError('{0!r} does not have the public-read ACL set. '
'Use the make_public() method to allow for '
'public URL sharing.'.format(self.name))
|
python
|
def url(self):
"""Returns the public URL for the given key."""
if self.is_public:
return '{0}/{1}/{2}'.format(
self.bucket._boto_s3.meta.client.meta.endpoint_url,
self.bucket.name,
self.name
)
else:
raise ValueError('{0!r} does not have the public-read ACL set. '
'Use the make_public() method to allow for '
'public URL sharing.'.format(self.name))
|
[
"def",
"url",
"(",
"self",
")",
":",
"if",
"self",
".",
"is_public",
":",
"return",
"'{0}/{1}/{2}'",
".",
"format",
"(",
"self",
".",
"bucket",
".",
"_boto_s3",
".",
"meta",
".",
"client",
".",
"meta",
".",
"endpoint_url",
",",
"self",
".",
"bucket",
".",
"name",
",",
"self",
".",
"name",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'{0!r} does not have the public-read ACL set. '",
"'Use the make_public() method to allow for '",
"'public URL sharing.'",
".",
"format",
"(",
"self",
".",
"name",
")",
")"
] |
Returns the public URL for the given key.
|
[
"Returns",
"the",
"public",
"URL",
"for",
"the",
"given",
"key",
"."
] |
2d79584d44b9c422192d7fdf08a85a49addf83d5
|
https://github.com/kennethreitz/bucketstore/blob/2d79584d44b9c422192d7fdf08a85a49addf83d5/bucketstore.py#L167-L178
|
7,534
|
kennethreitz/bucketstore
|
bucketstore.py
|
S3Key.temp_url
|
def temp_url(self, duration=120):
"""Returns a temporary URL for the given key."""
return self.bucket._boto_s3.meta.client.generate_presigned_url(
'get_object',
Params={'Bucket': self.bucket.name, 'Key': self.name},
ExpiresIn=duration
)
|
python
|
def temp_url(self, duration=120):
"""Returns a temporary URL for the given key."""
return self.bucket._boto_s3.meta.client.generate_presigned_url(
'get_object',
Params={'Bucket': self.bucket.name, 'Key': self.name},
ExpiresIn=duration
)
|
[
"def",
"temp_url",
"(",
"self",
",",
"duration",
"=",
"120",
")",
":",
"return",
"self",
".",
"bucket",
".",
"_boto_s3",
".",
"meta",
".",
"client",
".",
"generate_presigned_url",
"(",
"'get_object'",
",",
"Params",
"=",
"{",
"'Bucket'",
":",
"self",
".",
"bucket",
".",
"name",
",",
"'Key'",
":",
"self",
".",
"name",
"}",
",",
"ExpiresIn",
"=",
"duration",
")"
] |
Returns a temporary URL for the given key.
|
[
"Returns",
"a",
"temporary",
"URL",
"for",
"the",
"given",
"key",
"."
] |
2d79584d44b9c422192d7fdf08a85a49addf83d5
|
https://github.com/kennethreitz/bucketstore/blob/2d79584d44b9c422192d7fdf08a85a49addf83d5/bucketstore.py#L180-L186
|
7,535
|
cs50/python-cs50
|
src/cs50/cs50.py
|
eprint
|
def eprint(*args, **kwargs):
"""
Print an error message to standard error, prefixing it with
file name and line number from which method was called.
"""
end = kwargs.get("end", "\n")
sep = kwargs.get("sep", " ")
(filename, lineno) = inspect.stack()[1][1:3]
print("{}:{}: ".format(filename, lineno), end="")
print(*args, end=end, file=sys.stderr, sep=sep)
|
python
|
def eprint(*args, **kwargs):
"""
Print an error message to standard error, prefixing it with
file name and line number from which method was called.
"""
end = kwargs.get("end", "\n")
sep = kwargs.get("sep", " ")
(filename, lineno) = inspect.stack()[1][1:3]
print("{}:{}: ".format(filename, lineno), end="")
print(*args, end=end, file=sys.stderr, sep=sep)
|
[
"def",
"eprint",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"end",
"=",
"kwargs",
".",
"get",
"(",
"\"end\"",
",",
"\"\\n\"",
")",
"sep",
"=",
"kwargs",
".",
"get",
"(",
"\"sep\"",
",",
"\" \"",
")",
"(",
"filename",
",",
"lineno",
")",
"=",
"inspect",
".",
"stack",
"(",
")",
"[",
"1",
"]",
"[",
"1",
":",
"3",
"]",
"print",
"(",
"\"{}:{}: \"",
".",
"format",
"(",
"filename",
",",
"lineno",
")",
",",
"end",
"=",
"\"\"",
")",
"print",
"(",
"*",
"args",
",",
"end",
"=",
"end",
",",
"file",
"=",
"sys",
".",
"stderr",
",",
"sep",
"=",
"sep",
")"
] |
Print an error message to standard error, prefixing it with
file name and line number from which method was called.
|
[
"Print",
"an",
"error",
"message",
"to",
"standard",
"error",
"prefixing",
"it",
"with",
"file",
"name",
"and",
"line",
"number",
"from",
"which",
"method",
"was",
"called",
"."
] |
f987e9036bcf1bf60adf50a2827cc2cd5b9fd08a
|
https://github.com/cs50/python-cs50/blob/f987e9036bcf1bf60adf50a2827cc2cd5b9fd08a/src/cs50/cs50.py#L35-L44
|
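A one-line usage sketch; the import path assumes the package re-exports eprint at the top level.

    from cs50 import eprint  # assumed top-level re-export

    eprint("invalid input")  # prints e.g. "app.py:12: invalid input" to stderr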
7,536
|
cs50/python-cs50
|
src/cs50/cs50.py
|
formatException
|
def formatException(type, value, tb):
"""
Format traceback, darkening entries from global site-packages directories
and user-specific site-packages directory.
https://stackoverflow.com/a/46071447/5156190
"""
# Absolute paths to site-packages
packages = tuple(join(abspath(p), "") for p in sys.path[1:])
# Highlight lines not referring to files in site-packages
lines = []
for line in format_exception(type, value, tb):
matches = re.search(r"^ File \"([^\"]+)\", line \d+, in .+", line)
if matches and matches.group(1).startswith(packages):
lines += line
else:
matches = re.search(r"^(\s*)(.*?)(\s*)$", line, re.DOTALL)
lines.append(matches.group(1) + colored(matches.group(2), "yellow") + matches.group(3))
return "".join(lines).rstrip()
|
python
|
def formatException(type, value, tb):
"""
Format traceback, darkening entries from global site-packages directories
and user-specific site-packages directory.
https://stackoverflow.com/a/46071447/5156190
"""
# Absolute paths to site-packages
packages = tuple(join(abspath(p), "") for p in sys.path[1:])
# Highlight lines not referring to files in site-packages
lines = []
for line in format_exception(type, value, tb):
matches = re.search(r"^ File \"([^\"]+)\", line \d+, in .+", line)
if matches and matches.group(1).startswith(packages):
lines += line
else:
matches = re.search(r"^(\s*)(.*?)(\s*)$", line, re.DOTALL)
lines.append(matches.group(1) + colored(matches.group(2), "yellow") + matches.group(3))
return "".join(lines).rstrip()
|
[
"def",
"formatException",
"(",
"type",
",",
"value",
",",
"tb",
")",
":",
"# Absolute paths to site-packages",
"packages",
"=",
"tuple",
"(",
"join",
"(",
"abspath",
"(",
"p",
")",
",",
"\"\"",
")",
"for",
"p",
"in",
"sys",
".",
"path",
"[",
"1",
":",
"]",
")",
"# Highlight lines not referring to files in site-packages",
"lines",
"=",
"[",
"]",
"for",
"line",
"in",
"format_exception",
"(",
"type",
",",
"value",
",",
"tb",
")",
":",
"matches",
"=",
"re",
".",
"search",
"(",
"r\"^ File \\\"([^\\\"]+)\\\", line \\d+, in .+\"",
",",
"line",
")",
"if",
"matches",
"and",
"matches",
".",
"group",
"(",
"1",
")",
".",
"startswith",
"(",
"packages",
")",
":",
"lines",
"+=",
"line",
"else",
":",
"matches",
"=",
"re",
".",
"search",
"(",
"r\"^(\\s*)(.*?)(\\s*)$\"",
",",
"line",
",",
"re",
".",
"DOTALL",
")",
"lines",
".",
"append",
"(",
"matches",
".",
"group",
"(",
"1",
")",
"+",
"colored",
"(",
"matches",
".",
"group",
"(",
"2",
")",
",",
"\"yellow\"",
")",
"+",
"matches",
".",
"group",
"(",
"3",
")",
")",
"return",
"\"\"",
".",
"join",
"(",
"lines",
")",
".",
"rstrip",
"(",
")"
] |
Format traceback, darkening entries from global site-packages directories
and user-specific site-packages directory.
https://stackoverflow.com/a/46071447/5156190
|
[
"Format",
"traceback",
"darkening",
"entries",
"from",
"global",
"site",
"-",
"packages",
"directories",
"and",
"user",
"-",
"specific",
"site",
"-",
"packages",
"directory",
"."
] |
f987e9036bcf1bf60adf50a2827cc2cd5b9fd08a
|
https://github.com/cs50/python-cs50/blob/f987e9036bcf1bf60adf50a2827cc2cd5b9fd08a/src/cs50/cs50.py#L47-L67
|
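A sketch of wiring formatException into sys.excepthook so uncaught tracebacks are highlighted; the hookup itself is an assumption, not something shown in this record.

    import sys

    def excepthook(type, value, tb):
        # Print the highlighted traceback instead of Python's default one.
        print(formatException(type, value, tb), file=sys.stderr)

    sys.excepthook = excepthook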
7,537
|
cs50/python-cs50
|
src/cs50/cs50.py
|
get_char
|
def get_char(prompt=None):
"""
Read a line of text from standard input and return the equivalent char;
if text is not a single char, user is prompted to retry. If line can't
be read, return None.
"""
while True:
s = get_string(prompt)
if s is None:
return None
if len(s) == 1:
return s[0]
# Temporarily here for backwards compatibility
if prompt is None:
print("Retry: ", end="")
|
python
|
def get_char(prompt=None):
"""
Read a line of text from standard input and return the equivalent char;
if text is not a single char, user is prompted to retry. If line can't
be read, return None.
"""
while True:
s = get_string(prompt)
if s is None:
return None
if len(s) == 1:
return s[0]
# Temporarily here for backwards compatibility
if prompt is None:
print("Retry: ", end="")
|
[
"def",
"get_char",
"(",
"prompt",
"=",
"None",
")",
":",
"while",
"True",
":",
"s",
"=",
"get_string",
"(",
"prompt",
")",
"if",
"s",
"is",
"None",
":",
"return",
"None",
"if",
"len",
"(",
"s",
")",
"==",
"1",
":",
"return",
"s",
"[",
"0",
"]",
"# Temporarily here for backwards compatibility",
"if",
"prompt",
"is",
"None",
":",
"print",
"(",
"\"Retry: \"",
",",
"end",
"=",
"\"\"",
")"
] |
Read a line of text from standard input and return the equivalent char;
if text is not a single char, user is prompted to retry. If line can't
be read, return None.
|
[
"Read",
"a",
"line",
"of",
"text",
"from",
"standard",
"input",
"and",
"return",
"the",
"equivalent",
"char",
";",
"if",
"text",
"is",
"not",
"a",
"single",
"char",
"user",
"is",
"prompted",
"to",
"retry",
".",
"If",
"line",
"can",
"t",
"be",
"read",
"return",
"None",
"."
] |
f987e9036bcf1bf60adf50a2827cc2cd5b9fd08a
|
https://github.com/cs50/python-cs50/blob/f987e9036bcf1bf60adf50a2827cc2cd5b9fd08a/src/cs50/cs50.py#L73-L88
|
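A quick usage sketch for get_char, assuming the cs50 package is installed; the call keeps re-prompting until the input is exactly one character, and returns None on EOF.

from cs50 import get_char

answer = get_char("Continue? (y/n) ")
if answer is None:          # EOF, e.g. the user pressed Ctrl-D
    print("no input")
elif answer == "y":
    print("continuing")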
7,538
|
cs50/python-cs50
|
src/cs50/cs50.py
|
get_float
|
def get_float(prompt=None):
"""
Read a line of text from standard input and return the equivalent float
as precisely as possible; if text does not represent a double, user is
prompted to retry. If line can't be read, return None.
"""
while True:
s = get_string(prompt)
if s is None:
return None
if len(s) > 0 and re.search(r"^[+-]?\d*(?:\.\d*)?$", s):
try:
return float(s)
except ValueError:
pass
# Temporarily here for backwards compatibility
if prompt is None:
print("Retry: ", end="")
|
python
|
def get_float(prompt=None):
"""
Read a line of text from standard input and return the equivalent float
as precisely as possible; if text does not represent a double, user is
prompted to retry. If line can't be read, return None.
"""
while True:
s = get_string(prompt)
if s is None:
return None
if len(s) > 0 and re.search(r"^[+-]?\d*(?:\.\d*)?$", s):
try:
return float(s)
except ValueError:
pass
# Temporarily here for backwards compatibility
if prompt is None:
print("Retry: ", end="")
|
[
"def",
"get_float",
"(",
"prompt",
"=",
"None",
")",
":",
"while",
"True",
":",
"s",
"=",
"get_string",
"(",
"prompt",
")",
"if",
"s",
"is",
"None",
":",
"return",
"None",
"if",
"len",
"(",
"s",
")",
">",
"0",
"and",
"re",
".",
"search",
"(",
"r\"^[+-]?\\d*(?:\\.\\d*)?$\"",
",",
"s",
")",
":",
"try",
":",
"return",
"float",
"(",
"s",
")",
"except",
"ValueError",
":",
"pass",
"# Temporarily here for backwards compatibility",
"if",
"prompt",
"is",
"None",
":",
"print",
"(",
"\"Retry: \"",
",",
"end",
"=",
"\"\"",
")"
] |
Read a line of text from standard input and return the equivalent float
as precisely as possible; if text does not represent a double, user is
prompted to retry. If line can't be read, return None.
|
[
"Read",
"a",
"line",
"of",
"text",
"from",
"standard",
"input",
"and",
"return",
"the",
"equivalent",
"float",
"as",
"precisely",
"as",
"possible",
";",
"if",
"text",
"does",
"not",
"represent",
"a",
"double",
"user",
"is",
"prompted",
"to",
"retry",
".",
"If",
"line",
"can",
"t",
"be",
"read",
"return",
"None",
"."
] |
f987e9036bcf1bf60adf50a2827cc2cd5b9fd08a
|
https://github.com/cs50/python-cs50/blob/f987e9036bcf1bf60adf50a2827cc2cd5b9fd08a/src/cs50/cs50.py#L91-L109
|
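A usage sketch for get_float under the same assumption (cs50 installed); non-numeric input triggers a re-prompt and EOF yields None.

from cs50 import get_float

price = get_float("Price: ")
if price is not None:
    print(f"with 8% tax: {price * 1.08:.2f}")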
7,539
|
cs50/python-cs50
|
src/cs50/cs50.py
|
get_int
|
def get_int(prompt=None):
"""
Read a line of text from standard input and return the equivalent int;
if text does not represent an int, user is prompted to retry. If line
can't be read, return None.
"""
while True:
s = get_string(prompt)
if s is None:
return None
if re.search(r"^[+-]?\d+$", s):
try:
i = int(s, 10)
if type(i) is int: # Could become long in Python 2
return i
except ValueError:
pass
# Temporarily here for backwards compatibility
if prompt is None:
print("Retry: ", end="")
|
python
|
def get_int(prompt=None):
"""
Read a line of text from standard input and return the equivalent int;
if text does not represent an int, user is prompted to retry. If line
can't be read, return None.
"""
while True:
s = get_string(prompt)
if s is None:
return None
if re.search(r"^[+-]?\d+$", s):
try:
i = int(s, 10)
if type(i) is int: # Could become long in Python 2
return i
except ValueError:
pass
# Temporarily here for backwards compatibility
if prompt is None:
print("Retry: ", end="")
|
[
"def",
"get_int",
"(",
"prompt",
"=",
"None",
")",
":",
"while",
"True",
":",
"s",
"=",
"get_string",
"(",
"prompt",
")",
"if",
"s",
"is",
"None",
":",
"return",
"None",
"if",
"re",
".",
"search",
"(",
"r\"^[+-]?\\d+$\"",
",",
"s",
")",
":",
"try",
":",
"i",
"=",
"int",
"(",
"s",
",",
"10",
")",
"if",
"type",
"(",
"i",
")",
"is",
"int",
":",
"# Could become long in Python 2",
"return",
"i",
"except",
"ValueError",
":",
"pass",
"# Temporarily here for backwards compatibility",
"if",
"prompt",
"is",
"None",
":",
"print",
"(",
"\"Retry: \"",
",",
"end",
"=",
"\"\"",
")"
] |
Read a line of text from standard input and return the equivalent int;
if text does not represent an int, user is prompted to retry. If line
can't be read, return None.
|
[
"Read",
"a",
"line",
"of",
"text",
"from",
"standard",
"input",
"and",
"return",
"the",
"equivalent",
"int",
";",
"if",
"text",
"does",
"not",
"represent",
"an",
"int",
"user",
"is",
"prompted",
"to",
"retry",
".",
"If",
"line",
"can",
"t",
"be",
"read",
"return",
"None",
"."
] |
f987e9036bcf1bf60adf50a2827cc2cd5b9fd08a
|
https://github.com/cs50/python-cs50/blob/f987e9036bcf1bf60adf50a2827cc2cd5b9fd08a/src/cs50/cs50.py#L112-L132
|
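And the corresponding sketch for get_int; strings such as "abc", "3.5" or "1e3" fail the regex check and the prompt repeats.

from cs50 import get_int

age = get_int("Age: ")
if age is not None and age >= 18:
    print("adult")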
7,540
|
cs50/python-cs50
|
src/cs50/sql.py
|
_connect
|
def _connect(dbapi_connection, connection_record):
"""Enables foreign key support."""
# If back end is sqlite
if type(dbapi_connection) is sqlite3.Connection:
# Respect foreign key constraints by default
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
|
python
|
def _connect(dbapi_connection, connection_record):
"""Enables foreign key support."""
# If back end is sqlite
if type(dbapi_connection) is sqlite3.Connection:
# Respect foreign key constraints by default
cursor = dbapi_connection.cursor()
cursor.execute("PRAGMA foreign_keys=ON")
cursor.close()
|
[
"def",
"_connect",
"(",
"dbapi_connection",
",",
"connection_record",
")",
":",
"# If back end is sqlite",
"if",
"type",
"(",
"dbapi_connection",
")",
"is",
"sqlite3",
".",
"Connection",
":",
"# Respect foreign key constraints by default",
"cursor",
"=",
"dbapi_connection",
".",
"cursor",
"(",
")",
"cursor",
".",
"execute",
"(",
"\"PRAGMA foreign_keys=ON\"",
")",
"cursor",
".",
"close",
"(",
")"
] |
Enables foreign key support.
|
[
"Enables",
"foreign",
"key",
"support",
"."
] |
f987e9036bcf1bf60adf50a2827cc2cd5b9fd08a
|
https://github.com/cs50/python-cs50/blob/f987e9036bcf1bf60adf50a2827cc2cd5b9fd08a/src/cs50/sql.py#L233-L242
|
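_connect is an SQLAlchemy connect-event listener; the cs50 SQL class registers it internally. A sketch of how such a listener is typically attached to an engine (the database file name here is a placeholder):

import sqlite3
from sqlalchemy import create_engine, event

engine = create_engine("sqlite:///example.db")

@event.listens_for(engine, "connect")
def enable_foreign_keys(dbapi_connection, connection_record):
    # Only SQLite needs the PRAGMA; other back ends enforce foreign keys natively.
    if isinstance(dbapi_connection, sqlite3.Connection):
        cursor = dbapi_connection.cursor()
        cursor.execute("PRAGMA foreign_keys=ON")
        cursor.close()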
7,541
|
cs50/python-cs50
|
src/cs50/sql.py
|
SQL._parse
|
def _parse(self, e):
"""Parses an exception, returns its message."""
# MySQL
matches = re.search(r"^\(_mysql_exceptions\.OperationalError\) \(\d+, \"(.+)\"\)$", str(e))
if matches:
return matches.group(1)
# PostgreSQL
matches = re.search(r"^\(psycopg2\.OperationalError\) (.+)$", str(e))
if matches:
return matches.group(1)
# SQLite
matches = re.search(r"^\(sqlite3\.OperationalError\) (.+)$", str(e))
if matches:
return matches.group(1)
# Default
return str(e)
|
python
|
def _parse(self, e):
"""Parses an exception, returns its message."""
# MySQL
matches = re.search(r"^\(_mysql_exceptions\.OperationalError\) \(\d+, \"(.+)\"\)$", str(e))
if matches:
return matches.group(1)
# PostgreSQL
matches = re.search(r"^\(psycopg2\.OperationalError\) (.+)$", str(e))
if matches:
return matches.group(1)
# SQLite
matches = re.search(r"^\(sqlite3\.OperationalError\) (.+)$", str(e))
if matches:
return matches.group(1)
# Default
return str(e)
|
[
"def",
"_parse",
"(",
"self",
",",
"e",
")",
":",
"# MySQL",
"matches",
"=",
"re",
".",
"search",
"(",
"r\"^\\(_mysql_exceptions\\.OperationalError\\) \\(\\d+, \\\"(.+)\\\"\\)$\"",
",",
"str",
"(",
"e",
")",
")",
"if",
"matches",
":",
"return",
"matches",
".",
"group",
"(",
"1",
")",
"# PostgreSQL",
"matches",
"=",
"re",
".",
"search",
"(",
"r\"^\\(psycopg2\\.OperationalError\\) (.+)$\"",
",",
"str",
"(",
"e",
")",
")",
"if",
"matches",
":",
"return",
"matches",
".",
"group",
"(",
"1",
")",
"# SQLite",
"matches",
"=",
"re",
".",
"search",
"(",
"r\"^\\(sqlite3\\.OperationalError\\) (.+)$\"",
",",
"str",
"(",
"e",
")",
")",
"if",
"matches",
":",
"return",
"matches",
".",
"group",
"(",
"1",
")",
"# Default",
"return",
"str",
"(",
"e",
")"
] |
Parses an exception, returns its message.
|
[
"Parses",
"an",
"exception",
"returns",
"its",
"message",
"."
] |
f987e9036bcf1bf60adf50a2827cc2cd5b9fd08a
|
https://github.com/cs50/python-cs50/blob/f987e9036bcf1bf60adf50a2827cc2cd5b9fd08a/src/cs50/sql.py#L68-L87
|
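The helper strips the DBAPI prefix so only the database's own message is surfaced. A small illustration with a made-up SQLite error string:

import re

raw = '(sqlite3.OperationalError) near "SELEC": syntax error'
matches = re.search(r"^\(sqlite3\.OperationalError\) (.+)$", raw)
print(matches.group(1) if matches else raw)   # near "SELEC": syntax error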
7,542
|
Azure/azure-cosmos-table-python
|
azure-cosmosdb-table/azure/cosmosdb/table/tableservice.py
|
TableService.get_table_service_stats
|
def get_table_service_stats(self, timeout=None):
'''
Retrieves statistics related to replication for the Table service. It is
only available when read-access geo-redundant replication is enabled for
the storage account.
With geo-redundant replication, Azure Storage maintains your data durable
in two locations. In both locations, Azure Storage constantly maintains
multiple healthy replicas of your data. The location where you read,
create, update, or delete data is the primary storage account location.
The primary location exists in the region you choose at the time you
create an account via the Azure Management Azure classic portal, for
example, North Central US. The location to which your data is replicated
is the secondary location. The secondary location is automatically
determined based on the location of the primary; it is in a second data
center that resides in the same region as the primary location. Read-only
access is available from the secondary location, if read-access geo-redundant
replication is enabled for your storage account.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: The table service stats.
:rtype: :class:`~azure.storage.common.models.ServiceStats`
'''
request = HTTPRequest()
request.method = 'GET'
request.host_locations = self._get_host_locations(primary=False, secondary=True)
request.path = '/'
request.query = {
'restype': 'service',
'comp': 'stats',
'timeout': _int_to_str(timeout),
}
return self._perform_request(request, _convert_xml_to_service_stats)
|
python
|
def get_table_service_stats(self, timeout=None):
'''
Retrieves statistics related to replication for the Table service. It is
only available when read-access geo-redundant replication is enabled for
the storage account.
With geo-redundant replication, Azure Storage maintains your data durable
in two locations. In both locations, Azure Storage constantly maintains
multiple healthy replicas of your data. The location where you read,
create, update, or delete data is the primary storage account location.
The primary location exists in the region you choose at the time you
create an account via the Azure Management Azure classic portal, for
example, North Central US. The location to which your data is replicated
is the secondary location. The secondary location is automatically
determined based on the location of the primary; it is in a second data
center that resides in the same region as the primary location. Read-only
access is available from the secondary location, if read-access geo-redundant
replication is enabled for your storage account.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: The table service stats.
:rtype: :class:`~azure.storage.common.models.ServiceStats`
'''
request = HTTPRequest()
request.method = 'GET'
request.host_locations = self._get_host_locations(primary=False, secondary=True)
request.path = '/'
request.query = {
'restype': 'service',
'comp': 'stats',
'timeout': _int_to_str(timeout),
}
return self._perform_request(request, _convert_xml_to_service_stats)
|
[
"def",
"get_table_service_stats",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"request",
"=",
"HTTPRequest",
"(",
")",
"request",
".",
"method",
"=",
"'GET'",
"request",
".",
"host_locations",
"=",
"self",
".",
"_get_host_locations",
"(",
"primary",
"=",
"False",
",",
"secondary",
"=",
"True",
")",
"request",
".",
"path",
"=",
"'/'",
"request",
".",
"query",
"=",
"{",
"'restype'",
":",
"'service'",
",",
"'comp'",
":",
"'stats'",
",",
"'timeout'",
":",
"_int_to_str",
"(",
"timeout",
")",
",",
"}",
"return",
"self",
".",
"_perform_request",
"(",
"request",
",",
"_convert_xml_to_service_stats",
")"
] |
Retrieves statistics related to replication for the Table service. It is
only available when read-access geo-redundant replication is enabled for
the storage account.
With geo-redundant replication, Azure Storage maintains your data durable
in two locations. In both locations, Azure Storage constantly maintains
multiple healthy replicas of your data. The location where you read,
create, update, or delete data is the primary storage account location.
The primary location exists in the region you choose at the time you
create an account via the Azure Management Azure classic portal, for
example, North Central US. The location to which your data is replicated
is the secondary location. The secondary location is automatically
determined based on the location of the primary; it is in a second data
center that resides in the same region as the primary location. Read-only
access is available from the secondary location, if read-access geo-redundant
replication is enabled for your storage account.
:param int timeout:
The timeout parameter is expressed in seconds.
:return: The table service stats.
:rtype: :class:`~azure.storage.common.models.ServiceStats`
|
[
"Retrieves",
"statistics",
"related",
"to",
"replication",
"for",
"the",
"Table",
"service",
".",
"It",
"is",
"only",
"available",
"when",
"read",
"-",
"access",
"geo",
"-",
"redundant",
"replication",
"is",
"enabled",
"for",
"the",
"storage",
"account",
"."
] |
a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0
|
https://github.com/Azure/azure-cosmos-table-python/blob/a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0/azure-cosmosdb-table/azure/cosmosdb/table/tableservice.py#L335-L369
|
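A hedged usage sketch; the account name and key are placeholders, and read-access geo-redundant replication must be enabled on the account or the call fails.

from azure.cosmosdb.table.tableservice import TableService

table_service = TableService(account_name='myaccount', account_key='mykey')
stats = table_service.get_table_service_stats()
print(stats.geo_replication.status)           # e.g. 'live'
print(stats.geo_replication.last_sync_time)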
7,543
|
Azure/azure-cosmos-table-python
|
azure-cosmosdb-table/azure/cosmosdb/table/tableservice.py
|
TableService.get_table_service_properties
|
def get_table_service_properties(self, timeout=None):
'''
Gets the properties of a storage account's Table service, including
logging, analytics and CORS rules.
:param int timeout:
The server timeout, expressed in seconds.
:return: The table service properties.
:rtype: :class:`~azure.storage.common.models.ServiceProperties`
'''
request = HTTPRequest()
request.method = 'GET'
request.host_locations = self._get_host_locations(secondary=True)
request.path = '/'
request.query = {
'restype': 'service',
'comp': 'properties',
'timeout': _int_to_str(timeout),
}
return self._perform_request(request, _convert_xml_to_service_properties)
|
python
|
def get_table_service_properties(self, timeout=None):
'''
Gets the properties of a storage account's Table service, including
logging, analytics and CORS rules.
:param int timeout:
The server timeout, expressed in seconds.
:return: The table service properties.
:rtype: :class:`~azure.storage.common.models.ServiceProperties`
'''
request = HTTPRequest()
request.method = 'GET'
request.host_locations = self._get_host_locations(secondary=True)
request.path = '/'
request.query = {
'restype': 'service',
'comp': 'properties',
'timeout': _int_to_str(timeout),
}
return self._perform_request(request, _convert_xml_to_service_properties)
|
[
"def",
"get_table_service_properties",
"(",
"self",
",",
"timeout",
"=",
"None",
")",
":",
"request",
"=",
"HTTPRequest",
"(",
")",
"request",
".",
"method",
"=",
"'GET'",
"request",
".",
"host_locations",
"=",
"self",
".",
"_get_host_locations",
"(",
"secondary",
"=",
"True",
")",
"request",
".",
"path",
"=",
"'/'",
"request",
".",
"query",
"=",
"{",
"'restype'",
":",
"'service'",
",",
"'comp'",
":",
"'properties'",
",",
"'timeout'",
":",
"_int_to_str",
"(",
"timeout",
")",
",",
"}",
"return",
"self",
".",
"_perform_request",
"(",
"request",
",",
"_convert_xml_to_service_properties",
")"
] |
Gets the properties of a storage account's Table service, including
logging, analytics and CORS rules.
:param int timeout:
The server timeout, expressed in seconds.
:return: The table service properties.
:rtype: :class:`~azure.storage.common.models.ServiceProperties`
|
[
"Gets",
"the",
"properties",
"of",
"a",
"storage",
"account",
"s",
"Table",
"service",
"including",
"logging",
"analytics",
"and",
"CORS",
"rules",
"."
] |
a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0
|
https://github.com/Azure/azure-cosmos-table-python/blob/a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0/azure-cosmosdb-table/azure/cosmosdb/table/tableservice.py#L371-L391
|
7,544
|
Azure/azure-cosmos-table-python
|
azure-cosmosdb-table/azure/cosmosdb/table/tableservice.py
|
TableService.delete_table
|
def delete_table(self, table_name, fail_not_exist=False, timeout=None):
'''
Deletes the specified table and any data it contains.
When a table is successfully deleted, it is immediately marked for deletion
and is no longer accessible to clients. The table is later removed from
the Table service during garbage collection.
Note that deleting a table is likely to take at least 40 seconds to complete.
If an operation is attempted against the table while it was being deleted,
an :class:`AzureConflictHttpError` will be thrown.
:param str table_name:
The name of the table to delete.
:param bool fail_not_exist:
Specifies whether to throw an exception if the table doesn't exist.
:param int timeout:
The server timeout, expressed in seconds.
:return:
A boolean indicating whether the table was deleted. If fail_not_exist
was set to True, this will throw instead of returning false.
:rtype: bool
'''
_validate_not_none('table_name', table_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host_locations = self._get_host_locations()
request.path = '/Tables(\'' + _to_str(table_name) + '\')'
request.query = {'timeout': _int_to_str(timeout)}
request.headers = {_DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1]}
if not fail_not_exist:
try:
self._perform_request(request)
return True
except AzureHttpError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
|
python
|
def delete_table(self, table_name, fail_not_exist=False, timeout=None):
'''
Deletes the specified table and any data it contains.
When a table is successfully deleted, it is immediately marked for deletion
and is no longer accessible to clients. The table is later removed from
the Table service during garbage collection.
Note that deleting a table is likely to take at least 40 seconds to complete.
If an operation is attempted against the table while it was being deleted,
an :class:`AzureConflictHttpError` will be thrown.
:param str table_name:
The name of the table to delete.
:param bool fail_not_exist:
Specifies whether to throw an exception if the table doesn't exist.
:param int timeout:
The server timeout, expressed in seconds.
:return:
A boolean indicating whether the table was deleted. If fail_not_exist
was set to True, this will throw instead of returning false.
:rtype: bool
'''
_validate_not_none('table_name', table_name)
request = HTTPRequest()
request.method = 'DELETE'
request.host_locations = self._get_host_locations()
request.path = '/Tables(\'' + _to_str(table_name) + '\')'
request.query = {'timeout': _int_to_str(timeout)}
request.headers = {_DEFAULT_ACCEPT_HEADER[0]: _DEFAULT_ACCEPT_HEADER[1]}
if not fail_not_exist:
try:
self._perform_request(request)
return True
except AzureHttpError as ex:
_dont_fail_not_exist(ex)
return False
else:
self._perform_request(request)
return True
|
[
"def",
"delete_table",
"(",
"self",
",",
"table_name",
",",
"fail_not_exist",
"=",
"False",
",",
"timeout",
"=",
"None",
")",
":",
"_validate_not_none",
"(",
"'table_name'",
",",
"table_name",
")",
"request",
"=",
"HTTPRequest",
"(",
")",
"request",
".",
"method",
"=",
"'DELETE'",
"request",
".",
"host_locations",
"=",
"self",
".",
"_get_host_locations",
"(",
")",
"request",
".",
"path",
"=",
"'/Tables(\\''",
"+",
"_to_str",
"(",
"table_name",
")",
"+",
"'\\')'",
"request",
".",
"query",
"=",
"{",
"'timeout'",
":",
"_int_to_str",
"(",
"timeout",
")",
"}",
"request",
".",
"headers",
"=",
"{",
"_DEFAULT_ACCEPT_HEADER",
"[",
"0",
"]",
":",
"_DEFAULT_ACCEPT_HEADER",
"[",
"1",
"]",
"}",
"if",
"not",
"fail_not_exist",
":",
"try",
":",
"self",
".",
"_perform_request",
"(",
"request",
")",
"return",
"True",
"except",
"AzureHttpError",
"as",
"ex",
":",
"_dont_fail_not_exist",
"(",
"ex",
")",
"return",
"False",
"else",
":",
"self",
".",
"_perform_request",
"(",
"request",
")",
"return",
"True"
] |
Deletes the specified table and any data it contains.
When a table is successfully deleted, it is immediately marked for deletion
and is no longer accessible to clients. The table is later removed from
the Table service during garbage collection.
Note that deleting a table is likely to take at least 40 seconds to complete.
If an operation is attempted against the table while it was being deleted,
an :class:`AzureConflictHttpError` will be thrown.
:param str table_name:
The name of the table to delete.
:param bool fail_not_exist:
Specifies whether to throw an exception if the table doesn't exist.
:param int timeout:
The server timeout, expressed in seconds.
:return:
A boolean indicating whether the table was deleted. If fail_not_exist
was set to True, this will throw instead of returning false.
:rtype: bool
|
[
"Deletes",
"the",
"specified",
"table",
"and",
"any",
"data",
"it",
"contains",
"."
] |
a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0
|
https://github.com/Azure/azure-cosmos-table-python/blob/a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0/azure-cosmosdb-table/azure/cosmosdb/table/tableservice.py#L571-L611
|
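Usage sketch (placeholder credentials): with fail_not_exist left at False, a missing table is reported by returning False rather than by raising.

from azure.cosmosdb.table.tableservice import TableService

table_service = TableService(account_name='myaccount', account_key='mykey')
deleted = table_service.delete_table('tasktable')
print('deleted' if deleted else 'table did not exist')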
7,545
|
Azure/azure-cosmos-table-python
|
azure-cosmosdb-table/azure/cosmosdb/table/tableservice.py
|
TableService.query_entities
|
def query_entities(self, table_name, filter=None, select=None, num_results=None,
marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA,
property_resolver=None, timeout=None):
'''
Returns a generator to list the entities in the table specified. The
generator will lazily follow the continuation tokens returned by the
service and stop when all entities have been returned or num_results is
reached.
If num_results is specified and the account has more than that number of
entities, the generator will have a populated next_marker field once it
finishes. This marker can be used to create a new generator if more
results are desired.
:param str table_name:
The name of the table to query.
:param str filter:
Returns only entities that satisfy the specified filter. Note that
no more than 15 discrete comparisons are permitted within a $filter
string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
for more information on constructing filters.
:param str select:
Returns only the desired properties of an entity from the set.
:param int num_results:
The maximum number of entities to return.
:param marker:
An opaque continuation object. This value can be retrieved from the
next_marker field of a previous generator object if max_results was
specified and that generator has finished enumerating results. If
specified, this generator will begin returning results from the point
where the previous generator stopped.
:type marker: obj
:param str accept:
Specifies the accepted content type of the response payload. See
:class:`~azure.storage.table.models.TablePayloadFormat` for possible
values.
:param property_resolver:
A function which given the partition key, row key, property name,
property value, and the property EdmType if returned by the service,
returns the EdmType of the property. Generally used if accept is set
to JSON_NO_METADATA.
:type property_resolver: func(pk, rk, prop_name, prop_value, service_edm_type)
:param int timeout:
The server timeout, expressed in seconds. This function may make multiple
calls to the service in which case the timeout value specified will be
applied to each individual call.
:return: A generator which produces :class:`~azure.storage.table.models.Entity` objects.
:rtype: :class:`~azure.storage.common.models.ListGenerator`
'''
operation_context = _OperationContext(location_lock=True)
if self.key_encryption_key is not None or self.key_resolver_function is not None:
# If query already requests all properties, no need to add the metadata columns
if select is not None and select != '*':
select += ',_ClientEncryptionMetadata1,_ClientEncryptionMetadata2'
args = (table_name,)
kwargs = {'filter': filter, 'select': select, 'max_results': num_results, 'marker': marker,
'accept': accept, 'property_resolver': property_resolver, 'timeout': timeout,
'_context': operation_context}
resp = self._query_entities(*args, **kwargs)
return ListGenerator(resp, self._query_entities, args, kwargs)
|
python
|
def query_entities(self, table_name, filter=None, select=None, num_results=None,
marker=None, accept=TablePayloadFormat.JSON_MINIMAL_METADATA,
property_resolver=None, timeout=None):
'''
Returns a generator to list the entities in the table specified. The
generator will lazily follow the continuation tokens returned by the
service and stop when all entities have been returned or num_results is
reached.
If num_results is specified and the account has more than that number of
entities, the generator will have a populated next_marker field once it
finishes. This marker can be used to create a new generator if more
results are desired.
:param str table_name:
The name of the table to query.
:param str filter:
Returns only entities that satisfy the specified filter. Note that
no more than 15 discrete comparisons are permitted within a $filter
string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
for more information on constructing filters.
:param str select:
Returns only the desired properties of an entity from the set.
:param int num_results:
The maximum number of entities to return.
:param marker:
An opaque continuation object. This value can be retrieved from the
next_marker field of a previous generator object if max_results was
specified and that generator has finished enumerating results. If
specified, this generator will begin returning results from the point
where the previous generator stopped.
:type marker: obj
:param str accept:
Specifies the accepted content type of the response payload. See
:class:`~azure.storage.table.models.TablePayloadFormat` for possible
values.
:param property_resolver:
A function which given the partition key, row key, property name,
property value, and the property EdmType if returned by the service,
returns the EdmType of the property. Generally used if accept is set
to JSON_NO_METADATA.
:type property_resolver: func(pk, rk, prop_name, prop_value, service_edm_type)
:param int timeout:
The server timeout, expressed in seconds. This function may make multiple
calls to the service in which case the timeout value specified will be
applied to each individual call.
:return: A generator which produces :class:`~azure.storage.table.models.Entity` objects.
:rtype: :class:`~azure.storage.common.models.ListGenerator`
'''
operation_context = _OperationContext(location_lock=True)
if self.key_encryption_key is not None or self.key_resolver_function is not None:
# If query already requests all properties, no need to add the metadata columns
if select is not None and select != '*':
select += ',_ClientEncryptionMetadata1,_ClientEncryptionMetadata2'
args = (table_name,)
kwargs = {'filter': filter, 'select': select, 'max_results': num_results, 'marker': marker,
'accept': accept, 'property_resolver': property_resolver, 'timeout': timeout,
'_context': operation_context}
resp = self._query_entities(*args, **kwargs)
return ListGenerator(resp, self._query_entities, args, kwargs)
|
[
"def",
"query_entities",
"(",
"self",
",",
"table_name",
",",
"filter",
"=",
"None",
",",
"select",
"=",
"None",
",",
"num_results",
"=",
"None",
",",
"marker",
"=",
"None",
",",
"accept",
"=",
"TablePayloadFormat",
".",
"JSON_MINIMAL_METADATA",
",",
"property_resolver",
"=",
"None",
",",
"timeout",
"=",
"None",
")",
":",
"operation_context",
"=",
"_OperationContext",
"(",
"location_lock",
"=",
"True",
")",
"if",
"self",
".",
"key_encryption_key",
"is",
"not",
"None",
"or",
"self",
".",
"key_resolver_function",
"is",
"not",
"None",
":",
"# If query already requests all properties, no need to add the metadata columns",
"if",
"select",
"is",
"not",
"None",
"and",
"select",
"!=",
"'*'",
":",
"select",
"+=",
"',_ClientEncryptionMetadata1,_ClientEncryptionMetadata2'",
"args",
"=",
"(",
"table_name",
",",
")",
"kwargs",
"=",
"{",
"'filter'",
":",
"filter",
",",
"'select'",
":",
"select",
",",
"'max_results'",
":",
"num_results",
",",
"'marker'",
":",
"marker",
",",
"'accept'",
":",
"accept",
",",
"'property_resolver'",
":",
"property_resolver",
",",
"'timeout'",
":",
"timeout",
",",
"'_context'",
":",
"operation_context",
"}",
"resp",
"=",
"self",
".",
"_query_entities",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"ListGenerator",
"(",
"resp",
",",
"self",
".",
"_query_entities",
",",
"args",
",",
"kwargs",
")"
] |
Returns a generator to list the entities in the table specified. The
generator will lazily follow the continuation tokens returned by the
service and stop when all entities have been returned or num_results is
reached.
If num_results is specified and the account has more than that number of
entities, the generator will have a populated next_marker field once it
finishes. This marker can be used to create a new generator if more
results are desired.
:param str table_name:
The name of the table to query.
:param str filter:
Returns only entities that satisfy the specified filter. Note that
no more than 15 discrete comparisons are permitted within a $filter
string. See http://msdn.microsoft.com/en-us/library/windowsazure/dd894031.aspx
for more information on constructing filters.
:param str select:
Returns only the desired properties of an entity from the set.
:param int num_results:
The maximum number of entities to return.
:param marker:
An opaque continuation object. This value can be retrieved from the
next_marker field of a previous generator object if max_results was
specified and that generator has finished enumerating results. If
specified, this generator will begin returning results from the point
where the previous generator stopped.
:type marker: obj
:param str accept:
Specifies the accepted content type of the response payload. See
:class:`~azure.storage.table.models.TablePayloadFormat` for possible
values.
:param property_resolver:
A function which given the partition key, row key, property name,
property value, and the property EdmType if returned by the service,
returns the EdmType of the property. Generally used if accept is set
to JSON_NO_METADATA.
:type property_resolver: func(pk, rk, prop_name, prop_value, service_edm_type)
:param int timeout:
The server timeout, expressed in seconds. This function may make multiple
calls to the service in which case the timeout value specified will be
applied to each individual call.
:return: A generator which produces :class:`~azure.storage.table.models.Entity` objects.
:rtype: :class:`~azure.storage.common.models.ListGenerator`
|
[
"Returns",
"a",
"generator",
"to",
"list",
"the",
"entities",
"in",
"the",
"table",
"specified",
".",
"The",
"generator",
"will",
"lazily",
"follow",
"the",
"continuation",
"tokens",
"returned",
"by",
"the",
"service",
"and",
"stop",
"when",
"all",
"entities",
"have",
"been",
"returned",
"or",
"num_results",
"is",
"reached",
"."
] |
a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0
|
https://github.com/Azure/azure-cosmos-table-python/blob/a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0/azure-cosmosdb-table/azure/cosmosdb/table/tableservice.py#L678-L740
|
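Usage sketch (placeholder credentials and table): the returned generator follows continuation tokens on its own, so iterating it may issue several service calls.

from azure.cosmosdb.table.tableservice import TableService

table_service = TableService(account_name='myaccount', account_key='mykey')
tasks = table_service.query_entities(
    'tasktable',
    filter="PartitionKey eq 'seattle'",
    select='description,priority',
    num_results=100)
for task in tasks:
    print(task.description, task.priority)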
7,546
|
Azure/azure-cosmos-table-python
|
azure-cosmosdb-table/azure/cosmosdb/table/tableservice.py
|
TableService.merge_entity
|
def merge_entity(self, table_name, entity, if_match='*', timeout=None):
'''
Updates an existing entity by merging the entity's properties. Throws
if the entity does not exist.
This operation does not replace the existing entity as the update_entity
operation does. A property cannot be removed with merge_entity.
Any properties with null values are ignored. All other properties will be
updated or added.
:param str table_name:
The name of the table containing the entity to merge.
:param entity:
The entity to merge. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: dict or :class:`~azure.storage.table.models.Entity`
:param str if_match:
The client may specify the ETag for the entity on the
request in order to compare to the ETag maintained by the service
for the purpose of optimistic concurrency. The merge operation
will be performed only if the ETag sent by the client matches the
value maintained by the server, indicating that the entity has
not been modified since it was retrieved by the client. To force
an unconditional merge, set If-Match to the wildcard character (*).
:param int timeout:
The server timeout, expressed in seconds.
:return: The etag of the entity.
:rtype: str
'''
_validate_not_none('table_name', table_name)
request = _merge_entity(entity, if_match, self.require_encryption,
self.key_encryption_key)
request.host_locations = self._get_host_locations()
request.query['timeout'] = _int_to_str(timeout)
request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey'])
return self._perform_request(request, _extract_etag)
|
python
|
def merge_entity(self, table_name, entity, if_match='*', timeout=None):
'''
Updates an existing entity by merging the entity's properties. Throws
if the entity does not exist.
This operation does not replace the existing entity as the update_entity
operation does. A property cannot be removed with merge_entity.
Any properties with null values are ignored. All other properties will be
updated or added.
:param str table_name:
The name of the table containing the entity to merge.
:param entity:
The entity to merge. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: dict or :class:`~azure.storage.table.models.Entity`
:param str if_match:
The client may specify the ETag for the entity on the
request in order to compare to the ETag maintained by the service
for the purpose of optimistic concurrency. The merge operation
will be performed only if the ETag sent by the client matches the
value maintained by the server, indicating that the entity has
not been modified since it was retrieved by the client. To force
an unconditional merge, set If-Match to the wildcard character (*).
:param int timeout:
The server timeout, expressed in seconds.
:return: The etag of the entity.
:rtype: str
'''
_validate_not_none('table_name', table_name)
request = _merge_entity(entity, if_match, self.require_encryption,
self.key_encryption_key)
request.host_locations = self._get_host_locations()
request.query['timeout'] = _int_to_str(timeout)
request.path = _get_entity_path(table_name, entity['PartitionKey'], entity['RowKey'])
return self._perform_request(request, _extract_etag)
|
[
"def",
"merge_entity",
"(",
"self",
",",
"table_name",
",",
"entity",
",",
"if_match",
"=",
"'*'",
",",
"timeout",
"=",
"None",
")",
":",
"_validate_not_none",
"(",
"'table_name'",
",",
"table_name",
")",
"request",
"=",
"_merge_entity",
"(",
"entity",
",",
"if_match",
",",
"self",
".",
"require_encryption",
",",
"self",
".",
"key_encryption_key",
")",
"request",
".",
"host_locations",
"=",
"self",
".",
"_get_host_locations",
"(",
")",
"request",
".",
"query",
"[",
"'timeout'",
"]",
"=",
"_int_to_str",
"(",
"timeout",
")",
"request",
".",
"path",
"=",
"_get_entity_path",
"(",
"table_name",
",",
"entity",
"[",
"'PartitionKey'",
"]",
",",
"entity",
"[",
"'RowKey'",
"]",
")",
"return",
"self",
".",
"_perform_request",
"(",
"request",
",",
"_extract_etag",
")"
] |
Updates an existing entity by merging the entity's properties. Throws
if the entity does not exist.
This operation does not replace the existing entity as the update_entity
operation does. A property cannot be removed with merge_entity.
Any properties with null values are ignored. All other properties will be
updated or added.
:param str table_name:
The name of the table containing the entity to merge.
:param entity:
The entity to merge. Could be a dict or an entity object.
Must contain a PartitionKey and a RowKey.
:type entity: dict or :class:`~azure.storage.table.models.Entity`
:param str if_match:
The client may specify the ETag for the entity on the
request in order to compare to the ETag maintained by the service
for the purpose of optimistic concurrency. The merge operation
will be performed only if the ETag sent by the client matches the
value maintained by the server, indicating that the entity has
not been modified since it was retrieved by the client. To force
an unconditional merge, set If-Match to the wildcard character (*).
:param int timeout:
The server timeout, expressed in seconds.
:return: The etag of the entity.
:rtype: str
|
[
"Updates",
"an",
"existing",
"entity",
"by",
"merging",
"the",
"entity",
"s",
"properties",
".",
"Throws",
"if",
"the",
"entity",
"does",
"not",
"exist",
".",
"This",
"operation",
"does",
"not",
"replace",
"the",
"existing",
"entity",
"as",
"the",
"update_entity",
"operation",
"does",
".",
"A",
"property",
"cannot",
"be",
"removed",
"with",
"merge_entity",
".",
"Any",
"properties",
"with",
"null",
"values",
"are",
"ignored",
".",
"All",
"other",
"properties",
"will",
"be",
"updated",
"or",
"added",
"."
] |
a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0
|
https://github.com/Azure/azure-cosmos-table-python/blob/a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0/azure-cosmosdb-table/azure/cosmosdb/table/tableservice.py#L969-L1008
|
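Usage sketch (placeholder credentials): only the properties present in the patch are written; everything else already stored on the entity is preserved, unlike update_entity.

from azure.cosmosdb.table.tableservice import TableService

table_service = TableService(account_name='myaccount', account_key='mykey')
patch = {'PartitionKey': 'seattle', 'RowKey': '001', 'priority': 2}
etag = table_service.merge_entity('tasktable', patch)   # if_match='*' forces the merge
print(etag)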
7,547
|
Azure/azure-cosmos-table-python
|
azure-cosmosdb-table/samples/table/table_usage.py
|
TableSamples.create_entity_class
|
def create_entity_class(self):
'''
Creates a class-based entity with fixed values, using all of the supported data types.
'''
entity = Entity()
# Partition key and row key must be strings and are required
entity.PartitionKey = 'pk{}'.format(str(uuid.uuid4()).replace('-', ''))
entity.RowKey = 'rk{}'.format(str(uuid.uuid4()).replace('-', ''))
# Some basic types are inferred
entity.age = 39 # EdmType.INT64
entity.large = 933311100 # EdmType.INT64
entity.sex = 'male' # EdmType.STRING
entity.married = True # EdmType.BOOLEAN
entity.ratio = 3.1 # EdmType.DOUBLE
entity.birthday = datetime(1970, 10, 4) # EdmType.DATETIME
# Binary, Int32 and GUID must be explicitly typed
entity.binary = EntityProperty(EdmType.BINARY, b'xyz')
entity.other = EntityProperty(EdmType.INT32, 20)
entity.clsid = EntityProperty(EdmType.GUID, 'c9da6455-213d-42c9-9a79-3e9149a57833')
return entity
|
python
|
def create_entity_class(self):
'''
Creates a class-based entity with fixed values, using all of the supported data types.
'''
entity = Entity()
# Partition key and row key must be strings and are required
entity.PartitionKey = 'pk{}'.format(str(uuid.uuid4()).replace('-', ''))
entity.RowKey = 'rk{}'.format(str(uuid.uuid4()).replace('-', ''))
# Some basic types are inferred
entity.age = 39 # EdmType.INT64
entity.large = 933311100 # EdmType.INT64
entity.sex = 'male' # EdmType.STRING
entity.married = True # EdmType.BOOLEAN
entity.ratio = 3.1 # EdmType.DOUBLE
entity.birthday = datetime(1970, 10, 4) # EdmType.DATETIME
# Binary, Int32 and GUID must be explicitly typed
entity.binary = EntityProperty(EdmType.BINARY, b'xyz')
entity.other = EntityProperty(EdmType.INT32, 20)
entity.clsid = EntityProperty(EdmType.GUID, 'c9da6455-213d-42c9-9a79-3e9149a57833')
return entity
|
[
"def",
"create_entity_class",
"(",
"self",
")",
":",
"entity",
"=",
"Entity",
"(",
")",
"# Partition key and row key must be strings and are required",
"entity",
".",
"PartitionKey",
"=",
"'pk{}'",
".",
"format",
"(",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
".",
"replace",
"(",
"'-'",
",",
"''",
")",
")",
"entity",
".",
"RowKey",
"=",
"'rk{}'",
".",
"format",
"(",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
".",
"replace",
"(",
"'-'",
",",
"''",
")",
")",
"# Some basic types are inferred",
"entity",
".",
"age",
"=",
"39",
"# EdmType.INT64",
"entity",
".",
"large",
"=",
"933311100",
"# EdmType.INT64",
"entity",
".",
"sex",
"=",
"'male'",
"# EdmType.STRING",
"entity",
".",
"married",
"=",
"True",
"# EdmType.BOOLEAN",
"entity",
".",
"ratio",
"=",
"3.1",
"# EdmType.DOUBLE",
"entity",
".",
"birthday",
"=",
"datetime",
"(",
"1970",
",",
"10",
",",
"4",
")",
"# EdmType.DATETIME",
"# Binary, Int32 and GUID must be explicitly typed",
"entity",
".",
"binary",
"=",
"EntityProperty",
"(",
"EdmType",
".",
"BINARY",
",",
"b'xyz'",
")",
"entity",
".",
"other",
"=",
"EntityProperty",
"(",
"EdmType",
".",
"INT32",
",",
"20",
")",
"entity",
".",
"clsid",
"=",
"EntityProperty",
"(",
"EdmType",
".",
"GUID",
",",
"'c9da6455-213d-42c9-9a79-3e9149a57833'",
")",
"return",
"entity"
] |
Creates a class-based entity with fixed values, using all of the supported data types.
|
[
"Creates",
"a",
"class",
"-",
"based",
"entity",
"with",
"fixed",
"values",
"using",
"all",
"of",
"the",
"supported",
"data",
"types",
"."
] |
a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0
|
https://github.com/Azure/azure-cosmos-table-python/blob/a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0/azure-cosmosdb-table/samples/table/table_usage.py#L203-L225
|
7,548
|
Azure/azure-cosmos-table-python
|
azure-cosmosdb-table/samples/table/table_usage.py
|
TableSamples.create_entity_dict
|
def create_entity_dict(self):
'''
Creates a dict-based entity with fixed values, using all of the supported data types.
'''
entity = {}
# Partition key and row key must be strings and are required
entity['PartitionKey'] = 'pk{}'.format(str(uuid.uuid4()).replace('-', ''))
entity['RowKey'] = 'rk{}'.format(str(uuid.uuid4()).replace('-', ''))
# Some basic types are inferred
entity['age'] = 39 # EdmType.INT64
entity['large'] = 933311100 # EdmType.INT64
entity['sex'] = 'male' # EdmType.STRING
entity['married'] = True # EdmType.BOOLEAN
entity['ratio'] = 3.1 # EdmType.DOUBLE
entity['birthday'] = datetime(1970, 10, 4) # EdmType.DATETIME
# Binary, Int32 and GUID must be explicitly typed
entity['binary'] = EntityProperty(EdmType.BINARY, b'xyz')
entity['other'] = EntityProperty(EdmType.INT32, 20)
entity['clsid'] = EntityProperty(EdmType.GUID, 'c9da6455-213d-42c9-9a79-3e9149a57833')
return entity
|
python
|
def create_entity_dict(self):
'''
Creates a dict-based entity with fixed values, using all of the supported data types.
'''
entity = {}
# Partition key and row key must be strings and are required
entity['PartitionKey'] = 'pk{}'.format(str(uuid.uuid4()).replace('-', ''))
entity['RowKey'] = 'rk{}'.format(str(uuid.uuid4()).replace('-', ''))
# Some basic types are inferred
entity['age'] = 39 # EdmType.INT64
entity['large'] = 933311100 # EdmType.INT64
entity['sex'] = 'male' # EdmType.STRING
entity['married'] = True # EdmType.BOOLEAN
entity['ratio'] = 3.1 # EdmType.DOUBLE
entity['birthday'] = datetime(1970, 10, 4) # EdmType.DATETIME
# Binary, Int32 and GUID must be explicitly typed
entity['binary'] = EntityProperty(EdmType.BINARY, b'xyz')
entity['other'] = EntityProperty(EdmType.INT32, 20)
entity['clsid'] = EntityProperty(EdmType.GUID, 'c9da6455-213d-42c9-9a79-3e9149a57833')
return entity
|
[
"def",
"create_entity_dict",
"(",
"self",
")",
":",
"entity",
"=",
"{",
"}",
"# Partition key and row key must be strings and are required",
"entity",
"[",
"'PartitionKey'",
"]",
"=",
"'pk{}'",
".",
"format",
"(",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
".",
"replace",
"(",
"'-'",
",",
"''",
")",
")",
"entity",
"[",
"'RowKey'",
"]",
"=",
"'rk{}'",
".",
"format",
"(",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
".",
"replace",
"(",
"'-'",
",",
"''",
")",
")",
"# Some basic types are inferred",
"entity",
"[",
"'age'",
"]",
"=",
"39",
"# EdmType.INT64",
"entity",
"[",
"'large'",
"]",
"=",
"933311100",
"# EdmType.INT64",
"entity",
"[",
"'sex'",
"]",
"=",
"'male'",
"# EdmType.STRING",
"entity",
"[",
"'married'",
"]",
"=",
"True",
"# EdmType.BOOLEAN",
"entity",
"[",
"'ratio'",
"]",
"=",
"3.1",
"# EdmType.DOUBLE",
"entity",
"[",
"'birthday'",
"]",
"=",
"datetime",
"(",
"1970",
",",
"10",
",",
"4",
")",
"# EdmType.DATETIME",
"# Binary, Int32 and GUID must be explicitly typed",
"entity",
"[",
"'binary'",
"]",
"=",
"EntityProperty",
"(",
"EdmType",
".",
"BINARY",
",",
"b'xyz'",
")",
"entity",
"[",
"'other'",
"]",
"=",
"EntityProperty",
"(",
"EdmType",
".",
"INT32",
",",
"20",
")",
"entity",
"[",
"'clsid'",
"]",
"=",
"EntityProperty",
"(",
"EdmType",
".",
"GUID",
",",
"'c9da6455-213d-42c9-9a79-3e9149a57833'",
")",
"return",
"entity"
] |
Creates a dict-based entity with fixed values, using all of the supported data types.
|
[
"Creates",
"a",
"dict",
"-",
"based",
"entity",
"with",
"fixed",
"values",
"using",
"all",
"of",
"the",
"supported",
"data",
"types",
"."
] |
a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0
|
https://github.com/Azure/azure-cosmos-table-python/blob/a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0/azure-cosmosdb-table/samples/table/table_usage.py#L227-L249
|
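Either sample entity is meant to be handed straight to insert_entity; a minimal sketch with placeholder credentials and an inline dict entity:

from azure.cosmosdb.table.models import EntityProperty, EdmType
from azure.cosmosdb.table.tableservice import TableService

table_service = TableService(account_name='myaccount', account_key='mykey')
table_service.create_table('sampletable')

entity = {'PartitionKey': 'pk001', 'RowKey': 'rk001',
          'age': 39,                                    # inferred as EdmType.INT64
          'other': EntityProperty(EdmType.INT32, 20)}   # explicitly typed
table_service.insert_entity('sampletable', entity)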
7,549
|
Azure/azure-cosmos-table-python
|
azure-cosmosdb-table/azure/cosmosdb/table/_serialization.py
|
_convert_batch_to_json
|
def _convert_batch_to_json(batch_requests):
'''
Create json to send for an array of batch requests.
batch_requests:
an array of requests
'''
batch_boundary = b'batch_' + _new_boundary()
changeset_boundary = b'changeset_' + _new_boundary()
body = [b'--' + batch_boundary + b'\n',
b'Content-Type: multipart/mixed; boundary=',
changeset_boundary + b'\n\n']
content_id = 1
# Adds each request body to the POST data.
for _, request in batch_requests:
body.append(b'--' + changeset_boundary + b'\n')
body.append(b'Content-Type: application/http\n')
body.append(b'Content-Transfer-Encoding: binary\n\n')
body.append(request.method.encode('utf-8'))
body.append(b' ')
body.append(request.path.encode('utf-8'))
body.append(b' HTTP/1.1\n')
body.append(b'Content-ID: ')
body.append(str(content_id).encode('utf-8') + b'\n')
content_id += 1
for name, value in request.headers.items():
if name in _SUB_HEADERS:
body.append(name.encode('utf-8') + b': ')
body.append(value.encode('utf-8') + b'\n')
# Add different headers for different request types.
if not request.method == 'DELETE':
body.append(b'Content-Length: ')
body.append(str(len(request.body)).encode('utf-8'))
body.append(b'\n\n')
body.append(request.body + b'\n')
body.append(b'\n')
body.append(b'--' + changeset_boundary + b'--' + b'\n')
body.append(b'--' + batch_boundary + b'--')
return b''.join(body), 'multipart/mixed; boundary=' + batch_boundary.decode('utf-8')
|
python
|
def _convert_batch_to_json(batch_requests):
'''
Create json to send for an array of batch requests.
batch_requests:
an array of requests
'''
batch_boundary = b'batch_' + _new_boundary()
changeset_boundary = b'changeset_' + _new_boundary()
body = [b'--' + batch_boundary + b'\n',
b'Content-Type: multipart/mixed; boundary=',
changeset_boundary + b'\n\n']
content_id = 1
# Adds each request body to the POST data.
for _, request in batch_requests:
body.append(b'--' + changeset_boundary + b'\n')
body.append(b'Content-Type: application/http\n')
body.append(b'Content-Transfer-Encoding: binary\n\n')
body.append(request.method.encode('utf-8'))
body.append(b' ')
body.append(request.path.encode('utf-8'))
body.append(b' HTTP/1.1\n')
body.append(b'Content-ID: ')
body.append(str(content_id).encode('utf-8') + b'\n')
content_id += 1
for name, value in request.headers.items():
if name in _SUB_HEADERS:
body.append(name.encode('utf-8') + b': ')
body.append(value.encode('utf-8') + b'\n')
# Add different headers for different request types.
if not request.method == 'DELETE':
body.append(b'Content-Length: ')
body.append(str(len(request.body)).encode('utf-8'))
body.append(b'\n\n')
body.append(request.body + b'\n')
body.append(b'\n')
body.append(b'--' + changeset_boundary + b'--' + b'\n')
body.append(b'--' + batch_boundary + b'--')
return b''.join(body), 'multipart/mixed; boundary=' + batch_boundary.decode('utf-8')
|
[
"def",
"_convert_batch_to_json",
"(",
"batch_requests",
")",
":",
"batch_boundary",
"=",
"b'batch_'",
"+",
"_new_boundary",
"(",
")",
"changeset_boundary",
"=",
"b'changeset_'",
"+",
"_new_boundary",
"(",
")",
"body",
"=",
"[",
"b'--'",
"+",
"batch_boundary",
"+",
"b'\\n'",
",",
"b'Content-Type: multipart/mixed; boundary='",
",",
"changeset_boundary",
"+",
"b'\\n\\n'",
"]",
"content_id",
"=",
"1",
"# Adds each request body to the POST data.",
"for",
"_",
",",
"request",
"in",
"batch_requests",
":",
"body",
".",
"append",
"(",
"b'--'",
"+",
"changeset_boundary",
"+",
"b'\\n'",
")",
"body",
".",
"append",
"(",
"b'Content-Type: application/http\\n'",
")",
"body",
".",
"append",
"(",
"b'Content-Transfer-Encoding: binary\\n\\n'",
")",
"body",
".",
"append",
"(",
"request",
".",
"method",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"body",
".",
"append",
"(",
"b' '",
")",
"body",
".",
"append",
"(",
"request",
".",
"path",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"body",
".",
"append",
"(",
"b' HTTP/1.1\\n'",
")",
"body",
".",
"append",
"(",
"b'Content-ID: '",
")",
"body",
".",
"append",
"(",
"str",
"(",
"content_id",
")",
".",
"encode",
"(",
"'utf-8'",
")",
"+",
"b'\\n'",
")",
"content_id",
"+=",
"1",
"for",
"name",
",",
"value",
"in",
"request",
".",
"headers",
".",
"items",
"(",
")",
":",
"if",
"name",
"in",
"_SUB_HEADERS",
":",
"body",
".",
"append",
"(",
"name",
".",
"encode",
"(",
"'utf-8'",
")",
"+",
"b': '",
")",
"body",
".",
"append",
"(",
"value",
".",
"encode",
"(",
"'utf-8'",
")",
"+",
"b'\\n'",
")",
"# Add different headers for different request types.",
"if",
"not",
"request",
".",
"method",
"==",
"'DELETE'",
":",
"body",
".",
"append",
"(",
"b'Content-Length: '",
")",
"body",
".",
"append",
"(",
"str",
"(",
"len",
"(",
"request",
".",
"body",
")",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"body",
".",
"append",
"(",
"b'\\n\\n'",
")",
"body",
".",
"append",
"(",
"request",
".",
"body",
"+",
"b'\\n'",
")",
"body",
".",
"append",
"(",
"b'\\n'",
")",
"body",
".",
"append",
"(",
"b'--'",
"+",
"changeset_boundary",
"+",
"b'--'",
"+",
"b'\\n'",
")",
"body",
".",
"append",
"(",
"b'--'",
"+",
"batch_boundary",
"+",
"b'--'",
")",
"return",
"b''",
".",
"join",
"(",
"body",
")",
",",
"'multipart/mixed; boundary='",
"+",
"batch_boundary",
".",
"decode",
"(",
"'utf-8'",
")"
] |
Create json to send for an array of batch requests.
batch_requests:
an array of requests
|
[
"Create",
"json",
"to",
"send",
"for",
"an",
"array",
"of",
"batch",
"requests",
"."
] |
a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0
|
https://github.com/Azure/azure-cosmos-table-python/blob/a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0/azure-cosmosdb-table/azure/cosmosdb/table/_serialization.py#L220-L266
|
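The multipart body assembled above is what a committed batch becomes on the wire. A sketch of the public batching API that drives it (placeholder credentials; all operations in one batch must share a partition key):

from azure.cosmosdb.table.tableservice import TableService

table_service = TableService(account_name='myaccount', account_key='mykey')
with table_service.batch('tasktable') as batch:
    for i in range(3):
        batch.insert_entity({'PartitionKey': 'seattle',
                             'RowKey': 'task{}'.format(i),
                             'description': 'take out trash'})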
7,550
|
Azure/azure-cosmos-table-python
|
azure-cosmosdb-table/azure/cosmosdb/table/_encryption.py
|
_decrypt_entity
|
def _decrypt_entity(entity, encrypted_properties_list, content_encryption_key, entityIV, isJavaV1):
'''
Decrypts the specified entity using AES256 in CBC mode with 128 bit padding. Unwraps the CEK
using either the specified KEK or the key returned by the key_resolver. Properties
specified in the encrypted_properties_list, will be decrypted and decoded to utf-8 strings.
:param entity:
The entity being retrieved and decrypted. Could be a dict or an entity object.
:param list encrypted_properties_list:
The encrypted list of all the properties that are encrypted.
:param bytes[] content_encryption_key:
        The key used internally to encrypt the entity. Extracted from the entity metadata.
    :param bytes[] entityIV:
        The initialization vector used to seed the encryption algorithm. Extracted from the
entity metadata.
:return: The decrypted entity
:rtype: Entity
'''
_validate_not_none('entity', entity)
decrypted_entity = deepcopy(entity)
try:
for property in entity.keys():
if property in encrypted_properties_list:
value = entity[property]
propertyIV = _generate_property_iv(entityIV,
entity['PartitionKey'], entity['RowKey'],
property, isJavaV1)
cipher = _generate_AES_CBC_cipher(content_encryption_key,
propertyIV)
# Decrypt the property.
decryptor = cipher.decryptor()
decrypted_data = (decryptor.update(value.value) + decryptor.finalize())
# Unpad the data.
unpadder = PKCS7(128).unpadder()
decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
decrypted_data = decrypted_data.decode('utf-8')
decrypted_entity[property] = decrypted_data
decrypted_entity.pop('_ClientEncryptionMetadata1')
decrypted_entity.pop('_ClientEncryptionMetadata2')
return decrypted_entity
except:
raise AzureException(_ERROR_DECRYPTION_FAILURE)
|
python
|
def _decrypt_entity(entity, encrypted_properties_list, content_encryption_key, entityIV, isJavaV1):
'''
Decrypts the specified entity using AES256 in CBC mode with 128 bit padding. Unwraps the CEK
using either the specified KEK or the key returned by the key_resolver. Properties
specified in the encrypted_properties_list, will be decrypted and decoded to utf-8 strings.
:param entity:
The entity being retrieved and decrypted. Could be a dict or an entity object.
:param list encrypted_properties_list:
The encrypted list of all the properties that are encrypted.
:param bytes[] content_encryption_key:
        The key used internally to encrypt the entity. Extracted from the entity metadata.
    :param bytes[] entityIV:
        The initialization vector used to seed the encryption algorithm. Extracted from the
entity metadata.
:return: The decrypted entity
:rtype: Entity
'''
_validate_not_none('entity', entity)
decrypted_entity = deepcopy(entity)
try:
for property in entity.keys():
if property in encrypted_properties_list:
value = entity[property]
propertyIV = _generate_property_iv(entityIV,
entity['PartitionKey'], entity['RowKey'],
property, isJavaV1)
cipher = _generate_AES_CBC_cipher(content_encryption_key,
propertyIV)
# Decrypt the property.
decryptor = cipher.decryptor()
decrypted_data = (decryptor.update(value.value) + decryptor.finalize())
# Unpad the data.
unpadder = PKCS7(128).unpadder()
decrypted_data = (unpadder.update(decrypted_data) + unpadder.finalize())
decrypted_data = decrypted_data.decode('utf-8')
decrypted_entity[property] = decrypted_data
decrypted_entity.pop('_ClientEncryptionMetadata1')
decrypted_entity.pop('_ClientEncryptionMetadata2')
return decrypted_entity
except:
raise AzureException(_ERROR_DECRYPTION_FAILURE)
|
[
"def",
"_decrypt_entity",
"(",
"entity",
",",
"encrypted_properties_list",
",",
"content_encryption_key",
",",
"entityIV",
",",
"isJavaV1",
")",
":",
"_validate_not_none",
"(",
"'entity'",
",",
"entity",
")",
"decrypted_entity",
"=",
"deepcopy",
"(",
"entity",
")",
"try",
":",
"for",
"property",
"in",
"entity",
".",
"keys",
"(",
")",
":",
"if",
"property",
"in",
"encrypted_properties_list",
":",
"value",
"=",
"entity",
"[",
"property",
"]",
"propertyIV",
"=",
"_generate_property_iv",
"(",
"entityIV",
",",
"entity",
"[",
"'PartitionKey'",
"]",
",",
"entity",
"[",
"'RowKey'",
"]",
",",
"property",
",",
"isJavaV1",
")",
"cipher",
"=",
"_generate_AES_CBC_cipher",
"(",
"content_encryption_key",
",",
"propertyIV",
")",
"# Decrypt the property.",
"decryptor",
"=",
"cipher",
".",
"decryptor",
"(",
")",
"decrypted_data",
"=",
"(",
"decryptor",
".",
"update",
"(",
"value",
".",
"value",
")",
"+",
"decryptor",
".",
"finalize",
"(",
")",
")",
"# Unpad the data.",
"unpadder",
"=",
"PKCS7",
"(",
"128",
")",
".",
"unpadder",
"(",
")",
"decrypted_data",
"=",
"(",
"unpadder",
".",
"update",
"(",
"decrypted_data",
")",
"+",
"unpadder",
".",
"finalize",
"(",
")",
")",
"decrypted_data",
"=",
"decrypted_data",
".",
"decode",
"(",
"'utf-8'",
")",
"decrypted_entity",
"[",
"property",
"]",
"=",
"decrypted_data",
"decrypted_entity",
".",
"pop",
"(",
"'_ClientEncryptionMetadata1'",
")",
"decrypted_entity",
".",
"pop",
"(",
"'_ClientEncryptionMetadata2'",
")",
"return",
"decrypted_entity",
"except",
":",
"raise",
"AzureException",
"(",
"_ERROR_DECRYPTION_FAILURE",
")"
] |
Decrypts the specified entity using AES256 in CBC mode with 128 bit padding. Unwraps the CEK
using either the specified KEK or the key returned by the key_resolver. Properties
specified in the encrypted_properties_list, will be decrypted and decoded to utf-8 strings.
:param entity:
The entity being retrieved and decrypted. Could be a dict or an entity object.
:param list encrypted_properties_list:
The encrypted list of all the properties that are encrypted.
:param bytes[] content_encryption_key:
        The key used internally to encrypt the entity. Extracted from the entity metadata.
    :param bytes[] entityIV:
        The initialization vector used to seed the encryption algorithm. Extracted from the
entity metadata.
:return: The decrypted entity
:rtype: Entity
|
[
"Decrypts",
"the",
"specified",
"entity",
"using",
"AES256",
"in",
"CBC",
"mode",
"with",
"128",
"bit",
"padding",
".",
"Unwraps",
"the",
"CEK",
"using",
"either",
"the",
"specified",
"KEK",
"or",
"the",
"key",
"returned",
"by",
"the",
"key_resolver",
".",
"Properties",
"specified",
"in",
"the",
"encrypted_properties_list",
"will",
"be",
"decrypted",
"and",
"decoded",
"to",
"utf",
"-",
"8",
"strings",
"."
] |
a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0
|
https://github.com/Azure/azure-cosmos-table-python/blob/a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0/azure-cosmosdb-table/azure/cosmosdb/table/_encryption.py#L163-L212
|
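For readers skimming the `_decrypt_entity` record above, here is a minimal, self-contained sketch of its per-property decryption path: derive the property IV, AES-256-CBC decrypt, strip PKCS7 padding, decode UTF-8. The helper name `decrypt_property` and its argument list are hypothetical and not part of the SDK; the sketch only assumes that `_generate_AES_CBC_cipher` builds a standard AES/CBC cipher from the `cryptography` package, as the calls in the record suggest.

from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.ciphers import Cipher, algorithms, modes
from cryptography.hazmat.primitives.hashes import Hash, SHA256
from cryptography.hazmat.primitives.padding import PKCS7

def decrypt_property(cek, entity_iv, pk, rk, name, ciphertext, is_java_v1=False):
    # Per-property IV: SHA256(entity IV + ordered PK/RK + property name), first 16 bytes,
    # mirroring _generate_property_iv in the same module.
    digest = Hash(SHA256(), default_backend())
    order = (pk + rk) if is_java_v1 else (rk + pk)
    digest.update(entity_iv + (order + name).encode('utf-8'))
    iv = digest.finalize()[:16]

    # AES-256-CBC decrypt (assumed equivalent to _generate_AES_CBC_cipher), then unpad.
    decryptor = Cipher(algorithms.AES(cek), modes.CBC(iv), default_backend()).decryptor()
    padded = decryptor.update(ciphertext) + decryptor.finalize()
    unpadder = PKCS7(128).unpadder()
    return (unpadder.update(padded) + unpadder.finalize()).decode('utf-8')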
7,551
|
Azure/azure-cosmos-table-python
|
azure-cosmosdb-table/azure/cosmosdb/table/_encryption.py
|
_generate_property_iv
|
def _generate_property_iv(entity_iv, pk, rk, property_name, isJavaV1):
'''
Uses the entity_iv, partition key, and row key to generate and return
the iv for the specified property.
'''
digest = Hash(SHA256(), default_backend())
if not isJavaV1:
digest.update(entity_iv +
(rk + pk + property_name).encode('utf-8'))
else:
digest.update(entity_iv +
(pk + rk + property_name).encode('utf-8'))
propertyIV = digest.finalize()
return propertyIV[:16]
|
python
|
def _generate_property_iv(entity_iv, pk, rk, property_name, isJavaV1):
'''
Uses the entity_iv, partition key, and row key to generate and return
the iv for the specified property.
'''
digest = Hash(SHA256(), default_backend())
if not isJavaV1:
digest.update(entity_iv +
(rk + pk + property_name).encode('utf-8'))
else:
digest.update(entity_iv +
(pk + rk + property_name).encode('utf-8'))
propertyIV = digest.finalize()
return propertyIV[:16]
|
[
"def",
"_generate_property_iv",
"(",
"entity_iv",
",",
"pk",
",",
"rk",
",",
"property_name",
",",
"isJavaV1",
")",
":",
"digest",
"=",
"Hash",
"(",
"SHA256",
"(",
")",
",",
"default_backend",
"(",
")",
")",
"if",
"not",
"isJavaV1",
":",
"digest",
".",
"update",
"(",
"entity_iv",
"+",
"(",
"rk",
"+",
"pk",
"+",
"property_name",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"else",
":",
"digest",
".",
"update",
"(",
"entity_iv",
"+",
"(",
"pk",
"+",
"rk",
"+",
"property_name",
")",
".",
"encode",
"(",
"'utf-8'",
")",
")",
"propertyIV",
"=",
"digest",
".",
"finalize",
"(",
")",
"return",
"propertyIV",
"[",
":",
"16",
"]"
] |
Uses the entity_iv, partition key, and row key to generate and return
the iv for the specified property.
|
[
"Uses",
"the",
"entity_iv",
"partition",
"key",
"and",
"row",
"key",
"to",
"generate",
"and",
"return",
"the",
"iv",
"for",
"the",
"specified",
"property",
"."
] |
a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0
|
https://github.com/Azure/azure-cosmos-table-python/blob/a7b618f6bddc465c9fdf899ea2971dfe4d04fcf0/azure-cosmosdb-table/azure/cosmosdb/table/_encryption.py#L287-L300
|
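As a quick illustration of the `_generate_property_iv` record above, the same derivation can be written with `hashlib`: the hash input ordering flips for Java V1 payloads, and the digest is truncated to the 16-byte AES block size. The partition key, row key, and property name below are made-up placeholders.

import hashlib

def property_iv(entity_iv, pk, rk, property_name, is_java_v1=False):
    # Same SHA256-and-truncate scheme as _generate_property_iv.
    order = (pk + rk) if is_java_v1 else (rk + pk)
    return hashlib.sha256(entity_iv + (order + property_name).encode('utf-8')).digest()[:16]

iv = property_iv(b'\x00' * 16, 'partition1', 'row1', 'ssn')
assert len(iv) == 16  # used as the CBC IV for this one property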
7,552
|
fuhrysteve/marshmallow-jsonschema
|
marshmallow_jsonschema/base.py
|
JSONSchema._get_default_mapping
|
def _get_default_mapping(self, obj):
"""Return default mapping if there are no special needs."""
mapping = {v: k for k, v in obj.TYPE_MAPPING.items()}
mapping.update({
fields.Email: text_type,
fields.Dict: dict,
fields.Url: text_type,
fields.List: list,
fields.LocalDateTime: datetime.datetime,
fields.Nested: '_from_nested_schema',
})
return mapping
|
python
|
def _get_default_mapping(self, obj):
"""Return default mapping if there are no special needs."""
mapping = {v: k for k, v in obj.TYPE_MAPPING.items()}
mapping.update({
fields.Email: text_type,
fields.Dict: dict,
fields.Url: text_type,
fields.List: list,
fields.LocalDateTime: datetime.datetime,
fields.Nested: '_from_nested_schema',
})
return mapping
|
[
"def",
"_get_default_mapping",
"(",
"self",
",",
"obj",
")",
":",
"mapping",
"=",
"{",
"v",
":",
"k",
"for",
"k",
",",
"v",
"in",
"obj",
".",
"TYPE_MAPPING",
".",
"items",
"(",
")",
"}",
"mapping",
".",
"update",
"(",
"{",
"fields",
".",
"Email",
":",
"text_type",
",",
"fields",
".",
"Dict",
":",
"dict",
",",
"fields",
".",
"Url",
":",
"text_type",
",",
"fields",
".",
"List",
":",
"list",
",",
"fields",
".",
"LocalDateTime",
":",
"datetime",
".",
"datetime",
",",
"fields",
".",
"Nested",
":",
"'_from_nested_schema'",
",",
"}",
")",
"return",
"mapping"
] |
Return default mapping if there are no special needs.
|
[
"Return",
"default",
"mapping",
"if",
"there",
"are",
"no",
"special",
"needs",
"."
] |
3e0891a79d586c49deb75188d9ee1728597d093b
|
https://github.com/fuhrysteve/marshmallow-jsonschema/blob/3e0891a79d586c49deb75188d9ee1728597d093b/marshmallow_jsonschema/base.py#L96-L107
|
7,553
|
fuhrysteve/marshmallow-jsonschema
|
marshmallow_jsonschema/base.py
|
JSONSchema.get_properties
|
def get_properties(self, obj):
"""Fill out properties field."""
properties = {}
for field_name, field in sorted(obj.fields.items()):
schema = self._get_schema_for_field(obj, field)
properties[field.name] = schema
return properties
|
python
|
def get_properties(self, obj):
"""Fill out properties field."""
properties = {}
for field_name, field in sorted(obj.fields.items()):
schema = self._get_schema_for_field(obj, field)
properties[field.name] = schema
return properties
|
[
"def",
"get_properties",
"(",
"self",
",",
"obj",
")",
":",
"properties",
"=",
"{",
"}",
"for",
"field_name",
",",
"field",
"in",
"sorted",
"(",
"obj",
".",
"fields",
".",
"items",
"(",
")",
")",
":",
"schema",
"=",
"self",
".",
"_get_schema_for_field",
"(",
"obj",
",",
"field",
")",
"properties",
"[",
"field",
".",
"name",
"]",
"=",
"schema",
"return",
"properties"
] |
Fill out properties field.
|
[
"Fill",
"out",
"properties",
"field",
"."
] |
3e0891a79d586c49deb75188d9ee1728597d093b
|
https://github.com/fuhrysteve/marshmallow-jsonschema/blob/3e0891a79d586c49deb75188d9ee1728597d093b/marshmallow_jsonschema/base.py#L109-L117
|
7,554
|
fuhrysteve/marshmallow-jsonschema
|
marshmallow_jsonschema/base.py
|
JSONSchema.get_required
|
def get_required(self, obj):
"""Fill out required field."""
required = []
for field_name, field in sorted(obj.fields.items()):
if field.required:
required.append(field.name)
return required or missing
|
python
|
def get_required(self, obj):
"""Fill out required field."""
required = []
for field_name, field in sorted(obj.fields.items()):
if field.required:
required.append(field.name)
return required or missing
|
[
"def",
"get_required",
"(",
"self",
",",
"obj",
")",
":",
"required",
"=",
"[",
"]",
"for",
"field_name",
",",
"field",
"in",
"sorted",
"(",
"obj",
".",
"fields",
".",
"items",
"(",
")",
")",
":",
"if",
"field",
".",
"required",
":",
"required",
".",
"append",
"(",
"field",
".",
"name",
")",
"return",
"required",
"or",
"missing"
] |
Fill out required field.
|
[
"Fill",
"out",
"required",
"field",
"."
] |
3e0891a79d586c49deb75188d9ee1728597d093b
|
https://github.com/fuhrysteve/marshmallow-jsonschema/blob/3e0891a79d586c49deb75188d9ee1728597d093b/marshmallow_jsonschema/base.py#L119-L127
|
7,555
|
fuhrysteve/marshmallow-jsonschema
|
marshmallow_jsonschema/base.py
|
JSONSchema._from_python_type
|
def _from_python_type(self, obj, field, pytype):
"""Get schema definition from python type."""
json_schema = {
'title': field.attribute or field.name,
}
for key, val in TYPE_MAP[pytype].items():
json_schema[key] = val
if field.dump_only:
json_schema['readonly'] = True
if field.default is not missing:
json_schema['default'] = field.default
# NOTE: doubled up to maintain backwards compatibility
metadata = field.metadata.get('metadata', {})
metadata.update(field.metadata)
for md_key, md_val in metadata.items():
if md_key == 'metadata':
continue
json_schema[md_key] = md_val
if isinstance(field, fields.List):
json_schema['items'] = self._get_schema_for_field(
obj, field.container
)
return json_schema
|
python
|
def _from_python_type(self, obj, field, pytype):
"""Get schema definition from python type."""
json_schema = {
'title': field.attribute or field.name,
}
for key, val in TYPE_MAP[pytype].items():
json_schema[key] = val
if field.dump_only:
json_schema['readonly'] = True
if field.default is not missing:
json_schema['default'] = field.default
# NOTE: doubled up to maintain backwards compatibility
metadata = field.metadata.get('metadata', {})
metadata.update(field.metadata)
for md_key, md_val in metadata.items():
if md_key == 'metadata':
continue
json_schema[md_key] = md_val
if isinstance(field, fields.List):
json_schema['items'] = self._get_schema_for_field(
obj, field.container
)
return json_schema
|
[
"def",
"_from_python_type",
"(",
"self",
",",
"obj",
",",
"field",
",",
"pytype",
")",
":",
"json_schema",
"=",
"{",
"'title'",
":",
"field",
".",
"attribute",
"or",
"field",
".",
"name",
",",
"}",
"for",
"key",
",",
"val",
"in",
"TYPE_MAP",
"[",
"pytype",
"]",
".",
"items",
"(",
")",
":",
"json_schema",
"[",
"key",
"]",
"=",
"val",
"if",
"field",
".",
"dump_only",
":",
"json_schema",
"[",
"'readonly'",
"]",
"=",
"True",
"if",
"field",
".",
"default",
"is",
"not",
"missing",
":",
"json_schema",
"[",
"'default'",
"]",
"=",
"field",
".",
"default",
"# NOTE: doubled up to maintain backwards compatibility",
"metadata",
"=",
"field",
".",
"metadata",
".",
"get",
"(",
"'metadata'",
",",
"{",
"}",
")",
"metadata",
".",
"update",
"(",
"field",
".",
"metadata",
")",
"for",
"md_key",
",",
"md_val",
"in",
"metadata",
".",
"items",
"(",
")",
":",
"if",
"md_key",
"==",
"'metadata'",
":",
"continue",
"json_schema",
"[",
"md_key",
"]",
"=",
"md_val",
"if",
"isinstance",
"(",
"field",
",",
"fields",
".",
"List",
")",
":",
"json_schema",
"[",
"'items'",
"]",
"=",
"self",
".",
"_get_schema_for_field",
"(",
"obj",
",",
"field",
".",
"container",
")",
"return",
"json_schema"
] |
Get schema definition from python type.
|
[
"Get",
"schema",
"definition",
"from",
"python",
"type",
"."
] |
3e0891a79d586c49deb75188d9ee1728597d093b
|
https://github.com/fuhrysteve/marshmallow-jsonschema/blob/3e0891a79d586c49deb75188d9ee1728597d093b/marshmallow_jsonschema/base.py#L129-L157
|
7,556
|
fuhrysteve/marshmallow-jsonschema
|
marshmallow_jsonschema/base.py
|
JSONSchema._get_schema_for_field
|
def _get_schema_for_field(self, obj, field):
"""Get schema and validators for field."""
mapping = self._get_default_mapping(obj)
if hasattr(field, '_jsonschema_type_mapping'):
schema = field._jsonschema_type_mapping()
elif '_jsonschema_type_mapping' in field.metadata:
schema = field.metadata['_jsonschema_type_mapping']
elif field.__class__ in mapping:
pytype = mapping[field.__class__]
if isinstance(pytype, basestring):
schema = getattr(self, pytype)(obj, field)
else:
schema = self._from_python_type(
obj, field, pytype
)
else:
raise ValueError('unsupported field type %s' % field)
# Apply any and all validators that field may have
for validator in field.validators:
if validator.__class__ in FIELD_VALIDATORS:
schema = FIELD_VALIDATORS[validator.__class__](
schema, field, validator, obj
)
return schema
|
python
|
def _get_schema_for_field(self, obj, field):
"""Get schema and validators for field."""
mapping = self._get_default_mapping(obj)
if hasattr(field, '_jsonschema_type_mapping'):
schema = field._jsonschema_type_mapping()
elif '_jsonschema_type_mapping' in field.metadata:
schema = field.metadata['_jsonschema_type_mapping']
elif field.__class__ in mapping:
pytype = mapping[field.__class__]
if isinstance(pytype, basestring):
schema = getattr(self, pytype)(obj, field)
else:
schema = self._from_python_type(
obj, field, pytype
)
else:
raise ValueError('unsupported field type %s' % field)
# Apply any and all validators that field may have
for validator in field.validators:
if validator.__class__ in FIELD_VALIDATORS:
schema = FIELD_VALIDATORS[validator.__class__](
schema, field, validator, obj
)
return schema
|
[
"def",
"_get_schema_for_field",
"(",
"self",
",",
"obj",
",",
"field",
")",
":",
"mapping",
"=",
"self",
".",
"_get_default_mapping",
"(",
"obj",
")",
"if",
"hasattr",
"(",
"field",
",",
"'_jsonschema_type_mapping'",
")",
":",
"schema",
"=",
"field",
".",
"_jsonschema_type_mapping",
"(",
")",
"elif",
"'_jsonschema_type_mapping'",
"in",
"field",
".",
"metadata",
":",
"schema",
"=",
"field",
".",
"metadata",
"[",
"'_jsonschema_type_mapping'",
"]",
"elif",
"field",
".",
"__class__",
"in",
"mapping",
":",
"pytype",
"=",
"mapping",
"[",
"field",
".",
"__class__",
"]",
"if",
"isinstance",
"(",
"pytype",
",",
"basestring",
")",
":",
"schema",
"=",
"getattr",
"(",
"self",
",",
"pytype",
")",
"(",
"obj",
",",
"field",
")",
"else",
":",
"schema",
"=",
"self",
".",
"_from_python_type",
"(",
"obj",
",",
"field",
",",
"pytype",
")",
"else",
":",
"raise",
"ValueError",
"(",
"'unsupported field type %s'",
"%",
"field",
")",
"# Apply any and all validators that field may have",
"for",
"validator",
"in",
"field",
".",
"validators",
":",
"if",
"validator",
".",
"__class__",
"in",
"FIELD_VALIDATORS",
":",
"schema",
"=",
"FIELD_VALIDATORS",
"[",
"validator",
".",
"__class__",
"]",
"(",
"schema",
",",
"field",
",",
"validator",
",",
"obj",
")",
"return",
"schema"
] |
Get schema and validators for field.
|
[
"Get",
"schema",
"and",
"validators",
"for",
"field",
"."
] |
3e0891a79d586c49deb75188d9ee1728597d093b
|
https://github.com/fuhrysteve/marshmallow-jsonschema/blob/3e0891a79d586c49deb75188d9ee1728597d093b/marshmallow_jsonschema/base.py#L159-L183
|
7,557
|
fuhrysteve/marshmallow-jsonschema
|
marshmallow_jsonschema/base.py
|
JSONSchema._from_nested_schema
|
def _from_nested_schema(self, obj, field):
"""Support nested field."""
if isinstance(field.nested, basestring):
nested = get_class(field.nested)
else:
nested = field.nested
name = nested.__name__
outer_name = obj.__class__.__name__
only = field.only
exclude = field.exclude
# If this is not a schema we've seen, and it's not this schema,
# put it in our list of schema defs
if name not in self._nested_schema_classes and name != outer_name:
wrapped_nested = self.__class__(nested=True)
wrapped_dumped = wrapped_nested.dump(
nested(only=only, exclude=exclude)
)
# Handle change in return value type between Marshmallow
# versions 2 and 3.
if marshmallow.__version__.split('.', 1)[0] >= '3':
self._nested_schema_classes[name] = wrapped_dumped
else:
self._nested_schema_classes[name] = wrapped_dumped.data
self._nested_schema_classes.update(
wrapped_nested._nested_schema_classes
)
# and the schema is just a reference to the def
schema = {
'type': 'object',
'$ref': '#/definitions/{}'.format(name)
}
# NOTE: doubled up to maintain backwards compatibility
metadata = field.metadata.get('metadata', {})
metadata.update(field.metadata)
for md_key, md_val in metadata.items():
if md_key == 'metadata':
continue
schema[md_key] = md_val
if field.many:
schema = {
'type': ["array"] if field.required else ['array', 'null'],
'items': schema,
}
return schema
|
python
|
def _from_nested_schema(self, obj, field):
"""Support nested field."""
if isinstance(field.nested, basestring):
nested = get_class(field.nested)
else:
nested = field.nested
name = nested.__name__
outer_name = obj.__class__.__name__
only = field.only
exclude = field.exclude
# If this is not a schema we've seen, and it's not this schema,
# put it in our list of schema defs
if name not in self._nested_schema_classes and name != outer_name:
wrapped_nested = self.__class__(nested=True)
wrapped_dumped = wrapped_nested.dump(
nested(only=only, exclude=exclude)
)
# Handle change in return value type between Marshmallow
# versions 2 and 3.
if marshmallow.__version__.split('.', 1)[0] >= '3':
self._nested_schema_classes[name] = wrapped_dumped
else:
self._nested_schema_classes[name] = wrapped_dumped.data
self._nested_schema_classes.update(
wrapped_nested._nested_schema_classes
)
# and the schema is just a reference to the def
schema = {
'type': 'object',
'$ref': '#/definitions/{}'.format(name)
}
# NOTE: doubled up to maintain backwards compatibility
metadata = field.metadata.get('metadata', {})
metadata.update(field.metadata)
for md_key, md_val in metadata.items():
if md_key == 'metadata':
continue
schema[md_key] = md_val
if field.many:
schema = {
'type': ["array"] if field.required else ['array', 'null'],
'items': schema,
}
return schema
|
[
"def",
"_from_nested_schema",
"(",
"self",
",",
"obj",
",",
"field",
")",
":",
"if",
"isinstance",
"(",
"field",
".",
"nested",
",",
"basestring",
")",
":",
"nested",
"=",
"get_class",
"(",
"field",
".",
"nested",
")",
"else",
":",
"nested",
"=",
"field",
".",
"nested",
"name",
"=",
"nested",
".",
"__name__",
"outer_name",
"=",
"obj",
".",
"__class__",
".",
"__name__",
"only",
"=",
"field",
".",
"only",
"exclude",
"=",
"field",
".",
"exclude",
"# If this is not a schema we've seen, and it's not this schema,",
"# put it in our list of schema defs",
"if",
"name",
"not",
"in",
"self",
".",
"_nested_schema_classes",
"and",
"name",
"!=",
"outer_name",
":",
"wrapped_nested",
"=",
"self",
".",
"__class__",
"(",
"nested",
"=",
"True",
")",
"wrapped_dumped",
"=",
"wrapped_nested",
".",
"dump",
"(",
"nested",
"(",
"only",
"=",
"only",
",",
"exclude",
"=",
"exclude",
")",
")",
"# Handle change in return value type between Marshmallow",
"# versions 2 and 3.",
"if",
"marshmallow",
".",
"__version__",
".",
"split",
"(",
"'.'",
",",
"1",
")",
"[",
"0",
"]",
">=",
"'3'",
":",
"self",
".",
"_nested_schema_classes",
"[",
"name",
"]",
"=",
"wrapped_dumped",
"else",
":",
"self",
".",
"_nested_schema_classes",
"[",
"name",
"]",
"=",
"wrapped_dumped",
".",
"data",
"self",
".",
"_nested_schema_classes",
".",
"update",
"(",
"wrapped_nested",
".",
"_nested_schema_classes",
")",
"# and the schema is just a reference to the def",
"schema",
"=",
"{",
"'type'",
":",
"'object'",
",",
"'$ref'",
":",
"'#/definitions/{}'",
".",
"format",
"(",
"name",
")",
"}",
"# NOTE: doubled up to maintain backwards compatibility",
"metadata",
"=",
"field",
".",
"metadata",
".",
"get",
"(",
"'metadata'",
",",
"{",
"}",
")",
"metadata",
".",
"update",
"(",
"field",
".",
"metadata",
")",
"for",
"md_key",
",",
"md_val",
"in",
"metadata",
".",
"items",
"(",
")",
":",
"if",
"md_key",
"==",
"'metadata'",
":",
"continue",
"schema",
"[",
"md_key",
"]",
"=",
"md_val",
"if",
"field",
".",
"many",
":",
"schema",
"=",
"{",
"'type'",
":",
"[",
"\"array\"",
"]",
"if",
"field",
".",
"required",
"else",
"[",
"'array'",
",",
"'null'",
"]",
",",
"'items'",
":",
"schema",
",",
"}",
"return",
"schema"
] |
Support nested field.
|
[
"Support",
"nested",
"field",
"."
] |
3e0891a79d586c49deb75188d9ee1728597d093b
|
https://github.com/fuhrysteve/marshmallow-jsonschema/blob/3e0891a79d586c49deb75188d9ee1728597d093b/marshmallow_jsonschema/base.py#L185-L236
|
7,558
|
fuhrysteve/marshmallow-jsonschema
|
marshmallow_jsonschema/base.py
|
JSONSchema.wrap
|
def wrap(self, data):
"""Wrap this with the root schema definitions."""
if self.nested: # no need to wrap, will be in outer defs
return data
name = self.obj.__class__.__name__
self._nested_schema_classes[name] = data
root = {
'definitions': self._nested_schema_classes,
'$ref': '#/definitions/{name}'.format(name=name)
}
return root
|
python
|
def wrap(self, data):
"""Wrap this with the root schema definitions."""
if self.nested: # no need to wrap, will be in outer defs
return data
name = self.obj.__class__.__name__
self._nested_schema_classes[name] = data
root = {
'definitions': self._nested_schema_classes,
'$ref': '#/definitions/{name}'.format(name=name)
}
return root
|
[
"def",
"wrap",
"(",
"self",
",",
"data",
")",
":",
"if",
"self",
".",
"nested",
":",
"# no need to wrap, will be in outer defs",
"return",
"data",
"name",
"=",
"self",
".",
"obj",
".",
"__class__",
".",
"__name__",
"self",
".",
"_nested_schema_classes",
"[",
"name",
"]",
"=",
"data",
"root",
"=",
"{",
"'definitions'",
":",
"self",
".",
"_nested_schema_classes",
",",
"'$ref'",
":",
"'#/definitions/{name}'",
".",
"format",
"(",
"name",
"=",
"name",
")",
"}",
"return",
"root"
] |
Wrap this with the root schema definitions.
|
[
"Wrap",
"this",
"with",
"the",
"root",
"schema",
"definitions",
"."
] |
3e0891a79d586c49deb75188d9ee1728597d093b
|
https://github.com/fuhrysteve/marshmallow-jsonschema/blob/3e0891a79d586c49deb75188d9ee1728597d093b/marshmallow_jsonschema/base.py#L244-L255
|
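Taken together, `get_properties`, `_get_schema_for_field`, `_from_nested_schema`, and `wrap` produce a `definitions` block plus a `$ref` to the top-level schema. A minimal usage sketch with a made-up `UserSchema` follows; the exact return type differs between marshmallow 2 (a result object with `.data`) and marshmallow 3 (a plain dict), as the records above note.

from marshmallow import Schema, fields
from marshmallow_jsonschema import JSONSchema

class UserSchema(Schema):
    name = fields.String(required=True)
    age = fields.Integer()

dumped = JSONSchema().dump(UserSchema())
# Roughly: {'definitions': {'UserSchema': {...}}, '$ref': '#/definitions/UserSchema'}
# (on marshmallow 2.x the same dict sits under dumped.data)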
7,559
|
fuhrysteve/marshmallow-jsonschema
|
marshmallow_jsonschema/validation.py
|
handle_length
|
def handle_length(schema, field, validator, parent_schema):
"""Adds validation logic for ``marshmallow.validate.Length``, setting the
values appropriately for ``fields.List``, ``fields.Nested``, and
``fields.String``.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-processor belongs to.
validator (marshmallow.validate.Length): The validator attached to the
passed in field.
parent_schema (marshmallow.Schema): The Schema instance that the field
belongs to.
Returns:
dict: A, possibly, new JSON Schema that has been post processed and
altered.
Raises:
ValueError: Raised if the `field` is something other than
`fields.List`, `fields.Nested`, or `fields.String`
"""
if isinstance(field, fields.String):
minKey = 'minLength'
maxKey = 'maxLength'
elif isinstance(field, (fields.List, fields.Nested)):
minKey = 'minItems'
maxKey = 'maxItems'
else:
raise ValueError("In order to set the Length validator for JSON "
"schema, the field must be either a List or a String")
if validator.min:
schema[minKey] = validator.min
if validator.max:
schema[maxKey] = validator.max
if validator.equal:
schema[minKey] = validator.equal
schema[maxKey] = validator.equal
return schema
|
python
|
def handle_length(schema, field, validator, parent_schema):
"""Adds validation logic for ``marshmallow.validate.Length``, setting the
values appropriately for ``fields.List``, ``fields.Nested``, and
``fields.String``.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-processor belongs to.
validator (marshmallow.validate.Length): The validator attached to the
passed in field.
parent_schema (marshmallow.Schema): The Schema instance that the field
belongs to.
Returns:
dict: A, possibly, new JSON Schema that has been post processed and
altered.
Raises:
ValueError: Raised if the `field` is something other than
`fields.List`, `fields.Nested`, or `fields.String`
"""
if isinstance(field, fields.String):
minKey = 'minLength'
maxKey = 'maxLength'
elif isinstance(field, (fields.List, fields.Nested)):
minKey = 'minItems'
maxKey = 'maxItems'
else:
raise ValueError("In order to set the Length validator for JSON "
"schema, the field must be either a List or a String")
if validator.min:
schema[minKey] = validator.min
if validator.max:
schema[maxKey] = validator.max
if validator.equal:
schema[minKey] = validator.equal
schema[maxKey] = validator.equal
return schema
|
[
"def",
"handle_length",
"(",
"schema",
",",
"field",
",",
"validator",
",",
"parent_schema",
")",
":",
"if",
"isinstance",
"(",
"field",
",",
"fields",
".",
"String",
")",
":",
"minKey",
"=",
"'minLength'",
"maxKey",
"=",
"'maxLength'",
"elif",
"isinstance",
"(",
"field",
",",
"(",
"fields",
".",
"List",
",",
"fields",
".",
"Nested",
")",
")",
":",
"minKey",
"=",
"'minItems'",
"maxKey",
"=",
"'maxItems'",
"else",
":",
"raise",
"ValueError",
"(",
"\"In order to set the Length validator for JSON \"",
"\"schema, the field must be either a List or a String\"",
")",
"if",
"validator",
".",
"min",
":",
"schema",
"[",
"minKey",
"]",
"=",
"validator",
".",
"min",
"if",
"validator",
".",
"max",
":",
"schema",
"[",
"maxKey",
"]",
"=",
"validator",
".",
"max",
"if",
"validator",
".",
"equal",
":",
"schema",
"[",
"minKey",
"]",
"=",
"validator",
".",
"equal",
"schema",
"[",
"maxKey",
"]",
"=",
"validator",
".",
"equal",
"return",
"schema"
] |
Adds validation logic for ``marshmallow.validate.Length``, setting the
values appropriately for ``fields.List``, ``fields.Nested``, and
``fields.String``.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-processor belongs to.
validator (marshmallow.validate.Length): The validator attached to the
passed in field.
parent_schema (marshmallow.Schema): The Schema instance that the field
belongs to.
Returns:
dict: A, possibly, new JSON Schema that has been post processed and
altered.
Raises:
ValueError: Raised if the `field` is something other than
`fields.List`, `fields.Nested`, or `fields.String`
|
[
"Adds",
"validation",
"logic",
"for",
"marshmallow",
".",
"validate",
".",
"Length",
"setting",
"the",
"values",
"appropriately",
"for",
"fields",
".",
"List",
"fields",
".",
"Nested",
"and",
"fields",
".",
"String",
"."
] |
3e0891a79d586c49deb75188d9ee1728597d093b
|
https://github.com/fuhrysteve/marshmallow-jsonschema/blob/3e0891a79d586c49deb75188d9ee1728597d093b/marshmallow_jsonschema/validation.py#L4-L47
|
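A small sketch of `handle_length` in action, using a made-up string field: a `Length` validator on a `String` maps to `minLength`/`maxLength`, while on `List`/`Nested` it would map to `minItems`/`maxItems` instead.

from marshmallow import fields, validate

field = fields.String(validate=validate.Length(min=3, max=32))
schema = handle_length({'title': 'username', 'type': 'string'},
                       field, field.validators[0], parent_schema=None)
# -> {'title': 'username', 'type': 'string', 'minLength': 3, 'maxLength': 32}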
7,560
|
fuhrysteve/marshmallow-jsonschema
|
marshmallow_jsonschema/validation.py
|
handle_one_of
|
def handle_one_of(schema, field, validator, parent_schema):
"""Adds the validation logic for ``marshmallow.validate.OneOf`` by setting
the JSONSchema `enum` property to the allowed choices in the validator.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-processor belongs to.
validator (marshmallow.validate.OneOf): The validator attached to the
passed in field.
parent_schema (marshmallow.Schema): The Schema instance that the field
belongs to.
Returns:
dict: A, possibly, new JSON Schema that has been post processed and
altered.
"""
if validator.choices:
schema['enum'] = list(validator.choices)
schema['enumNames'] = list(validator.labels)
return schema
|
python
|
def handle_one_of(schema, field, validator, parent_schema):
"""Adds the validation logic for ``marshmallow.validate.OneOf`` by setting
the JSONSchema `enum` property to the allowed choices in the validator.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-processor belongs to.
validator (marshmallow.validate.OneOf): The validator attached to the
passed in field.
parent_schema (marshmallow.Schema): The Schema instance that the field
belongs to.
Returns:
dict: A, possibly, new JSON Schema that has been post processed and
altered.
"""
if validator.choices:
schema['enum'] = list(validator.choices)
schema['enumNames'] = list(validator.labels)
return schema
|
[
"def",
"handle_one_of",
"(",
"schema",
",",
"field",
",",
"validator",
",",
"parent_schema",
")",
":",
"if",
"validator",
".",
"choices",
":",
"schema",
"[",
"'enum'",
"]",
"=",
"list",
"(",
"validator",
".",
"choices",
")",
"schema",
"[",
"'enumNames'",
"]",
"=",
"list",
"(",
"validator",
".",
"labels",
")",
"return",
"schema"
] |
Adds the validation logic for ``marshmallow.validate.OneOf`` by setting
the JSONSchema `enum` property to the allowed choices in the validator.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-processor belongs to.
validator (marshmallow.validate.OneOf): The validator attached to the
passed in field.
parent_schema (marshmallow.Schema): The Schema instance that the field
belongs to.
Returns:
dict: A, possibly, new JSON Schema that has been post processed and
altered.
|
[
"Adds",
"the",
"validation",
"logic",
"for",
"marshmallow",
".",
"validate",
".",
"OneOf",
"by",
"setting",
"the",
"JSONSchema",
"enum",
"property",
"to",
"the",
"allowed",
"choices",
"in",
"the",
"validator",
"."
] |
3e0891a79d586c49deb75188d9ee1728597d093b
|
https://github.com/fuhrysteve/marshmallow-jsonschema/blob/3e0891a79d586c49deb75188d9ee1728597d093b/marshmallow_jsonschema/validation.py#L50-L72
|
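Likewise for `handle_one_of`: the validator's choices become the JSON-schema `enum`, and its labels the non-standard `enumNames`. The choices below are placeholders.

from marshmallow import fields, validate

field = fields.String(validate=validate.OneOf(['red', 'green'], labels=['Red', 'Green']))
schema = handle_one_of({'type': 'string'}, field, field.validators[0], parent_schema=None)
# -> {'type': 'string', 'enum': ['red', 'green'], 'enumNames': ['Red', 'Green']}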
7,561
|
fuhrysteve/marshmallow-jsonschema
|
marshmallow_jsonschema/validation.py
|
handle_range
|
def handle_range(schema, field, validator, parent_schema):
"""Adds validation logic for ``marshmallow.validate.Range``, setting the
values appropriately for ``fields.Number`` and its subclasses.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-processor belongs to.
validator (marshmallow.validate.Length): The validator attached to the
passed in field.
parent_schema (marshmallow.Schema): The Schema instance that the field
belongs to.
Returns:
dict: A, possibly, new JSON Schema that has been post processed and
altered.
"""
if not isinstance(field, fields.Number):
return schema
if validator.min:
schema['minimum'] = validator.min
schema['exclusiveMinimum'] = True
else:
schema['minimum'] = 0
schema['exclusiveMinimum'] = False
if validator.max:
schema['maximum'] = validator.max
schema['exclusiveMaximum'] = True
return schema
|
python
|
def handle_range(schema, field, validator, parent_schema):
"""Adds validation logic for ``marshmallow.validate.Range``, setting the
values appropriately for ``fields.Number`` and its subclasses.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-processor belongs to.
validator (marshmallow.validate.Length): The validator attached to the
passed in field.
parent_schema (marshmallow.Schema): The Schema instance that the field
belongs to.
Returns:
dict: A, possibly, new JSON Schema that has been post processed and
altered.
"""
if not isinstance(field, fields.Number):
return schema
if validator.min:
schema['minimum'] = validator.min
schema['exclusiveMinimum'] = True
else:
schema['minimum'] = 0
schema['exclusiveMinimum'] = False
if validator.max:
schema['maximum'] = validator.max
schema['exclusiveMaximum'] = True
return schema
|
[
"def",
"handle_range",
"(",
"schema",
",",
"field",
",",
"validator",
",",
"parent_schema",
")",
":",
"if",
"not",
"isinstance",
"(",
"field",
",",
"fields",
".",
"Number",
")",
":",
"return",
"schema",
"if",
"validator",
".",
"min",
":",
"schema",
"[",
"'minimum'",
"]",
"=",
"validator",
".",
"min",
"schema",
"[",
"'exclusiveMinimum'",
"]",
"=",
"True",
"else",
":",
"schema",
"[",
"'minimum'",
"]",
"=",
"0",
"schema",
"[",
"'exclusiveMinimum'",
"]",
"=",
"False",
"if",
"validator",
".",
"max",
":",
"schema",
"[",
"'maximum'",
"]",
"=",
"validator",
".",
"max",
"schema",
"[",
"'exclusiveMaximum'",
"]",
"=",
"True",
"return",
"schema"
] |
Adds validation logic for ``marshmallow.validate.Range``, setting the
values appropriately for ``fields.Number`` and its subclasses.
Args:
schema (dict): The original JSON schema we generated. This is what we
want to post-process.
field (fields.Field): The field that generated the original schema and
who this post-processor belongs to.
validator (marshmallow.validate.Length): The validator attached to the
passed in field.
parent_schema (marshmallow.Schema): The Schema instance that the field
belongs to.
Returns:
dict: A, possibly, new JSON Schema that has been post processed and
altered.
|
[
"Adds",
"validation",
"logic",
"for",
"marshmallow",
".",
"validate",
".",
"Range",
"setting",
"the",
"values",
"appropriately",
"fields",
".",
"Number",
"and",
"it",
"s",
"subclasses",
"."
] |
3e0891a79d586c49deb75188d9ee1728597d093b
|
https://github.com/fuhrysteve/marshmallow-jsonschema/blob/3e0891a79d586c49deb75188d9ee1728597d093b/marshmallow_jsonschema/validation.py#L75-L107
|
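A sketch of `handle_range` with a made-up integer field. Note from the code above that a falsy `min` (0 or None) takes the `else` branch and pins `minimum` to 0 with `exclusiveMinimum` False, and that a missing `max` leaves `maximum` unset.

from marshmallow import fields, validate

field = fields.Integer(validate=validate.Range(min=1, max=10))
schema = handle_range({'type': 'integer'}, field, field.validators[0], parent_schema=None)
# -> {'type': 'integer', 'minimum': 1, 'exclusiveMinimum': True,
#     'maximum': 10, 'exclusiveMaximum': True}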
7,562
|
mmp2/megaman
|
megaman/utils/eigendecomp.py
|
check_eigen_solver
|
def check_eigen_solver(eigen_solver, solver_kwds, size=None, nvec=None):
"""Check that the selected eigensolver is valid
Parameters
----------
eigen_solver : string
string value to validate
size, nvec : int (optional)
if both provided, use the specified problem size and number of vectors
to determine the optimal method to use with eigen_solver='auto'
Returns
-------
eigen_solver : string
The eigen solver. This only differs from the input if
eigen_solver == 'auto' and `size` is specified.
"""
if eigen_solver in BAD_EIGEN_SOLVERS:
raise ValueError(BAD_EIGEN_SOLVERS[eigen_solver])
elif eigen_solver not in EIGEN_SOLVERS:
raise ValueError("Unrecognized eigen_solver: '{0}'."
"Should be one of: {1}".format(eigen_solver,
EIGEN_SOLVERS))
if size is not None and nvec is not None:
# do some checks of the eigensolver
if eigen_solver == 'lobpcg' and size < 5 * nvec + 1:
warnings.warn("lobpcg does not perform well with small matrices or "
"with large numbers of vectors. Switching to 'dense'")
eigen_solver = 'dense'
solver_kwds = None
elif eigen_solver == 'auto':
if size > 200 and nvec < 10:
if PYAMG_LOADED:
eigen_solver = 'amg'
solver_kwds = None
else:
eigen_solver = 'arpack'
solver_kwds = None
else:
eigen_solver = 'dense'
solver_kwds = None
return eigen_solver, solver_kwds
|
python
|
def check_eigen_solver(eigen_solver, solver_kwds, size=None, nvec=None):
"""Check that the selected eigensolver is valid
Parameters
----------
eigen_solver : string
string value to validate
size, nvec : int (optional)
if both provided, use the specified problem size and number of vectors
to determine the optimal method to use with eigen_solver='auto'
Returns
-------
eigen_solver : string
The eigen solver. This only differs from the input if
eigen_solver == 'auto' and `size` is specified.
"""
if eigen_solver in BAD_EIGEN_SOLVERS:
raise ValueError(BAD_EIGEN_SOLVERS[eigen_solver])
elif eigen_solver not in EIGEN_SOLVERS:
raise ValueError("Unrecognized eigen_solver: '{0}'."
"Should be one of: {1}".format(eigen_solver,
EIGEN_SOLVERS))
if size is not None and nvec is not None:
# do some checks of the eigensolver
if eigen_solver == 'lobpcg' and size < 5 * nvec + 1:
warnings.warn("lobpcg does not perform well with small matrices or "
"with large numbers of vectors. Switching to 'dense'")
eigen_solver = 'dense'
solver_kwds = None
elif eigen_solver == 'auto':
if size > 200 and nvec < 10:
if PYAMG_LOADED:
eigen_solver = 'amg'
solver_kwds = None
else:
eigen_solver = 'arpack'
solver_kwds = None
else:
eigen_solver = 'dense'
solver_kwds = None
return eigen_solver, solver_kwds
|
[
"def",
"check_eigen_solver",
"(",
"eigen_solver",
",",
"solver_kwds",
",",
"size",
"=",
"None",
",",
"nvec",
"=",
"None",
")",
":",
"if",
"eigen_solver",
"in",
"BAD_EIGEN_SOLVERS",
":",
"raise",
"ValueError",
"(",
"BAD_EIGEN_SOLVERS",
"[",
"eigen_solver",
"]",
")",
"elif",
"eigen_solver",
"not",
"in",
"EIGEN_SOLVERS",
":",
"raise",
"ValueError",
"(",
"\"Unrecognized eigen_solver: '{0}'.\"",
"\"Should be one of: {1}\"",
".",
"format",
"(",
"eigen_solver",
",",
"EIGEN_SOLVERS",
")",
")",
"if",
"size",
"is",
"not",
"None",
"and",
"nvec",
"is",
"not",
"None",
":",
"# do some checks of the eigensolver",
"if",
"eigen_solver",
"==",
"'lobpcg'",
"and",
"size",
"<",
"5",
"*",
"nvec",
"+",
"1",
":",
"warnings",
".",
"warn",
"(",
"\"lobpcg does not perform well with small matrices or \"",
"\"with large numbers of vectors. Switching to 'dense'\"",
")",
"eigen_solver",
"=",
"'dense'",
"solver_kwds",
"=",
"None",
"elif",
"eigen_solver",
"==",
"'auto'",
":",
"if",
"size",
">",
"200",
"and",
"nvec",
"<",
"10",
":",
"if",
"PYAMG_LOADED",
":",
"eigen_solver",
"=",
"'amg'",
"solver_kwds",
"=",
"None",
"else",
":",
"eigen_solver",
"=",
"'arpack'",
"solver_kwds",
"=",
"None",
"else",
":",
"eigen_solver",
"=",
"'dense'",
"solver_kwds",
"=",
"None",
"return",
"eigen_solver",
",",
"solver_kwds"
] |
Check that the selected eigensolver is valid
Parameters
----------
eigen_solver : string
string value to validate
size, nvec : int (optional)
if both provided, use the specified problem size and number of vectors
to determine the optimal method to use with eigen_solver='auto'
Returns
-------
eigen_solver : string
The eigen solver. This only differs from the input if
eigen_solver == 'auto' and `size` is specified.
|
[
"Check",
"that",
"the",
"selected",
"eigensolver",
"is",
"valid"
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/utils/eigendecomp.py#L28-L72
|
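To make the `'auto'` branch of `check_eigen_solver` concrete: large problems with few requested eigenvectors prefer `'amg'` when pyamg imports (else `'arpack'`), and everything else falls back to `'dense'`. The sizes below are arbitrary, and this assumes `'auto'` is among the accepted `EIGEN_SOLVERS`, as the docstring implies.

solver, kwds = check_eigen_solver('auto', solver_kwds=None, size=500, nvec=5)
# -> ('amg', None) if pyamg is installed, otherwise ('arpack', None)

solver, kwds = check_eigen_solver('auto', solver_kwds=None, size=100, nvec=5)
# -> ('dense', None)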
7,563
|
mmp2/megaman
|
megaman/relaxation/precomputed.py
|
precompute_optimzation_Y
|
def precompute_optimzation_Y(laplacian_matrix, n_samples, relaxation_kwds):
"""compute Lk, neighbors and subset to index map for projected == False"""
relaxation_kwds.setdefault('presave',False)
relaxation_kwds.setdefault('presave_name','pre_comp_current.npy')
relaxation_kwds.setdefault('verbose',False)
if relaxation_kwds['verbose']:
print ('Making Lk and nbhds')
Lk_tensor, nbk, si_map = \
compute_Lk(laplacian_matrix, n_samples, relaxation_kwds['subset'])
if relaxation_kwds['presave']:
raise NotImplementedError('Not yet implemented presave')
return { 'Lk': Lk_tensor, 'nbk': nbk, 'si_map': si_map }
|
python
|
def precompute_optimzation_Y(laplacian_matrix, n_samples, relaxation_kwds):
"""compute Lk, neighbors and subset to index map for projected == False"""
relaxation_kwds.setdefault('presave',False)
relaxation_kwds.setdefault('presave_name','pre_comp_current.npy')
relaxation_kwds.setdefault('verbose',False)
if relaxation_kwds['verbose']:
print ('Making Lk and nbhds')
Lk_tensor, nbk, si_map = \
compute_Lk(laplacian_matrix, n_samples, relaxation_kwds['subset'])
if relaxation_kwds['presave']:
raise NotImplementedError('Not yet implemented presave')
return { 'Lk': Lk_tensor, 'nbk': nbk, 'si_map': si_map }
|
[
"def",
"precompute_optimzation_Y",
"(",
"laplacian_matrix",
",",
"n_samples",
",",
"relaxation_kwds",
")",
":",
"relaxation_kwds",
".",
"setdefault",
"(",
"'presave'",
",",
"False",
")",
"relaxation_kwds",
".",
"setdefault",
"(",
"'presave_name'",
",",
"'pre_comp_current.npy'",
")",
"relaxation_kwds",
".",
"setdefault",
"(",
"'verbose'",
",",
"False",
")",
"if",
"relaxation_kwds",
"[",
"'verbose'",
"]",
":",
"print",
"(",
"'Making Lk and nbhds'",
")",
"Lk_tensor",
",",
"nbk",
",",
"si_map",
"=",
"compute_Lk",
"(",
"laplacian_matrix",
",",
"n_samples",
",",
"relaxation_kwds",
"[",
"'subset'",
"]",
")",
"if",
"relaxation_kwds",
"[",
"'presave'",
"]",
":",
"raise",
"NotImplementedError",
"(",
"'Not yet implemented presave'",
")",
"return",
"{",
"'Lk'",
":",
"Lk_tensor",
",",
"'nbk'",
":",
"nbk",
",",
"'si_map'",
":",
"si_map",
"}"
] |
compute Lk, neighbors and subset to index map for projected == False
|
[
"compute",
"Lk",
"neighbors",
"and",
"subset",
"to",
"index",
"map",
"for",
"projected",
"==",
"False"
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/relaxation/precomputed.py#L8-L19
|
7,564
|
mmp2/megaman
|
megaman/relaxation/precomputed.py
|
compute_Lk
|
def compute_Lk(laplacian_matrix,n_samples,subset):
"""
Compute sparse L matrix, neighbors and subset to L matrix index map.
Returns
-------
Lk_tensor : array-like. Length = n
each component corresponds to the sparse matrix of Lk, which is
generated by extracting the kth row of laplacian and removing zeros.
nbk : array-like. Length = n
each component corresponds to the neighbor index of point k, which is
used in slicing the gradient, Y or S arrays.
si_map : dictionary.
subset index to Lk_tensor (or nbk) index mapping.
"""
Lk_tensor = []
nbk = []
row,column = laplacian_matrix.T.nonzero()
nnz_val = np.squeeze(np.asarray(laplacian_matrix.T[(row,column)]))
sorted_col_args = np.argsort(column)
sorted_col_vals = column[sorted_col_args]
breaks_row = np.diff(row).nonzero()[0]
breaks_col = np.diff(sorted_col_vals).nonzero()[0]
si_map = {}
for idx,k in enumerate(subset):
if k == 0:
nbk.append( column[:breaks_row[k]+1].T )
lk = nnz_val[np.sort(sorted_col_args[:breaks_col[k]+1])]
elif k == n_samples-1:
nbk.append( column[breaks_row[k-1]+1:].T )
lk = nnz_val[np.sort(sorted_col_args[breaks_col[k-1]+1:])]
else:
nbk.append( column[breaks_row[k-1]+1:breaks_row[k]+1].T )
lk = nnz_val[np.sort(
sorted_col_args[breaks_col[k-1]+1:breaks_col[k]+1])]
npair = nbk[idx].shape[0]
rk = (nbk[idx] == k).nonzero()[0]
Lk = sp.sparse.lil_matrix((npair,npair))
Lk.setdiag(lk)
Lk[:,rk] = -(lk.reshape(-1,1))
Lk[rk,:] = -(lk.reshape(1,-1))
Lk_tensor.append(sp.sparse.csr_matrix(Lk))
si_map[k] = idx
assert len(Lk_tensor) == subset.shape[0], \
'Size of Lk_tensor should be the same as subset.'
return Lk_tensor, nbk, si_map
|
python
|
def compute_Lk(laplacian_matrix,n_samples,subset):
"""
Compute sparse L matrix, neighbors and subset to L matrix index map.
Returns
-------
Lk_tensor : array-like. Length = n
each component corresponds to the sparse matrix of Lk, which is
generated by extracting the kth row of laplacian and removing zeros.
nbk : array-like. Length = n
each component corresponds to the neighbor index of point k, which is
used in slicing the gradient, Y or S arrays.
si_map : dictionary.
subset index to Lk_tensor (or nbk) index mapping.
"""
Lk_tensor = []
nbk = []
row,column = laplacian_matrix.T.nonzero()
nnz_val = np.squeeze(np.asarray(laplacian_matrix.T[(row,column)]))
sorted_col_args = np.argsort(column)
sorted_col_vals = column[sorted_col_args]
breaks_row = np.diff(row).nonzero()[0]
breaks_col = np.diff(sorted_col_vals).nonzero()[0]
si_map = {}
for idx,k in enumerate(subset):
if k == 0:
nbk.append( column[:breaks_row[k]+1].T )
lk = nnz_val[np.sort(sorted_col_args[:breaks_col[k]+1])]
elif k == n_samples-1:
nbk.append( column[breaks_row[k-1]+1:].T )
lk = nnz_val[np.sort(sorted_col_args[breaks_col[k-1]+1:])]
else:
nbk.append( column[breaks_row[k-1]+1:breaks_row[k]+1].T )
lk = nnz_val[np.sort(
sorted_col_args[breaks_col[k-1]+1:breaks_col[k]+1])]
npair = nbk[idx].shape[0]
rk = (nbk[idx] == k).nonzero()[0]
Lk = sp.sparse.lil_matrix((npair,npair))
Lk.setdiag(lk)
Lk[:,rk] = -(lk.reshape(-1,1))
Lk[rk,:] = -(lk.reshape(1,-1))
Lk_tensor.append(sp.sparse.csr_matrix(Lk))
si_map[k] = idx
assert len(Lk_tensor) == subset.shape[0], \
'Size of Lk_tensor should be the same as subset.'
return Lk_tensor, nbk, si_map
|
[
"def",
"compute_Lk",
"(",
"laplacian_matrix",
",",
"n_samples",
",",
"subset",
")",
":",
"Lk_tensor",
"=",
"[",
"]",
"nbk",
"=",
"[",
"]",
"row",
",",
"column",
"=",
"laplacian_matrix",
".",
"T",
".",
"nonzero",
"(",
")",
"nnz_val",
"=",
"np",
".",
"squeeze",
"(",
"np",
".",
"asarray",
"(",
"laplacian_matrix",
".",
"T",
"[",
"(",
"row",
",",
"column",
")",
"]",
")",
")",
"sorted_col_args",
"=",
"np",
".",
"argsort",
"(",
"column",
")",
"sorted_col_vals",
"=",
"column",
"[",
"sorted_col_args",
"]",
"breaks_row",
"=",
"np",
".",
"diff",
"(",
"row",
")",
".",
"nonzero",
"(",
")",
"[",
"0",
"]",
"breaks_col",
"=",
"np",
".",
"diff",
"(",
"sorted_col_vals",
")",
".",
"nonzero",
"(",
")",
"[",
"0",
"]",
"si_map",
"=",
"{",
"}",
"for",
"idx",
",",
"k",
"in",
"enumerate",
"(",
"subset",
")",
":",
"if",
"k",
"==",
"0",
":",
"nbk",
".",
"append",
"(",
"column",
"[",
":",
"breaks_row",
"[",
"k",
"]",
"+",
"1",
"]",
".",
"T",
")",
"lk",
"=",
"nnz_val",
"[",
"np",
".",
"sort",
"(",
"sorted_col_args",
"[",
":",
"breaks_col",
"[",
"k",
"]",
"+",
"1",
"]",
")",
"]",
"elif",
"k",
"==",
"n_samples",
"-",
"1",
":",
"nbk",
".",
"append",
"(",
"column",
"[",
"breaks_row",
"[",
"k",
"-",
"1",
"]",
"+",
"1",
":",
"]",
".",
"T",
")",
"lk",
"=",
"nnz_val",
"[",
"np",
".",
"sort",
"(",
"sorted_col_args",
"[",
"breaks_col",
"[",
"k",
"-",
"1",
"]",
"+",
"1",
":",
"]",
")",
"]",
"else",
":",
"nbk",
".",
"append",
"(",
"column",
"[",
"breaks_row",
"[",
"k",
"-",
"1",
"]",
"+",
"1",
":",
"breaks_row",
"[",
"k",
"]",
"+",
"1",
"]",
".",
"T",
")",
"lk",
"=",
"nnz_val",
"[",
"np",
".",
"sort",
"(",
"sorted_col_args",
"[",
"breaks_col",
"[",
"k",
"-",
"1",
"]",
"+",
"1",
":",
"breaks_col",
"[",
"k",
"]",
"+",
"1",
"]",
")",
"]",
"npair",
"=",
"nbk",
"[",
"idx",
"]",
".",
"shape",
"[",
"0",
"]",
"rk",
"=",
"(",
"nbk",
"[",
"idx",
"]",
"==",
"k",
")",
".",
"nonzero",
"(",
")",
"[",
"0",
"]",
"Lk",
"=",
"sp",
".",
"sparse",
".",
"lil_matrix",
"(",
"(",
"npair",
",",
"npair",
")",
")",
"Lk",
".",
"setdiag",
"(",
"lk",
")",
"Lk",
"[",
":",
",",
"rk",
"]",
"=",
"-",
"(",
"lk",
".",
"reshape",
"(",
"-",
"1",
",",
"1",
")",
")",
"Lk",
"[",
"rk",
",",
":",
"]",
"=",
"-",
"(",
"lk",
".",
"reshape",
"(",
"1",
",",
"-",
"1",
")",
")",
"Lk_tensor",
".",
"append",
"(",
"sp",
".",
"sparse",
".",
"csr_matrix",
"(",
"Lk",
")",
")",
"si_map",
"[",
"k",
"]",
"=",
"idx",
"assert",
"len",
"(",
"Lk_tensor",
")",
"==",
"subset",
".",
"shape",
"[",
"0",
"]",
",",
"'Size of Lk_tensor should be the same as subset.'",
"return",
"Lk_tensor",
",",
"nbk",
",",
"si_map"
] |
Compute sparse L matrix, neighbors and subset to L matrix index map.
Returns
-------
Lk_tensor : array-like. Length = n
each component corresponds to the sparse matrix of Lk, which is
generated by extracting the kth row of laplacian and removing zeros.
nbk : array-like. Length = n
each component corresponds to the neighbor index of point k, which is
used in slicing the gradient, Y or S arrays.
si_map : dictionary.
subset index to Lk_tensor (or nbk) index mapping.
|
[
"Compute",
"sparse",
"L",
"matrix",
"neighbors",
"and",
"subset",
"to",
"L",
"matrix",
"index",
"map",
"."
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/relaxation/precomputed.py#L21-L71
|
7,565
|
mmp2/megaman
|
megaman/relaxation/precomputed.py
|
precompute_optimzation_S
|
def precompute_optimzation_S(laplacian_matrix,n_samples,relaxation_kwds):
"""compute Rk, A, ATAinv, neighbors and pairs for projected mode"""
relaxation_kwds.setdefault('presave',False)
relaxation_kwds.setdefault('presave_name','pre_comp_current.npy')
relaxation_kwds.setdefault('verbose',False)
if relaxation_kwds['verbose']:
print ('Pre-computing quantities Y to S conversions')
print ('Making A and Pairs')
A, pairs = makeA(laplacian_matrix)
if relaxation_kwds['verbose']:
print ('Making Rk and nbhds')
Rk_tensor, nbk = compute_Rk(laplacian_matrix,A,n_samples)
# TODO: not quite sure what is ATAinv? why we need this?
ATAinv = np.linalg.pinv(A.T.dot(A).todense())
if relaxation_kwds['verbose']:
print ('Finish calculating pseudo inverse')
if relaxation_kwds['presave']:
raise NotImplementedError('Not yet implemented presave')
return { 'RK': Rk_tensor, 'nbk': nbk,
'ATAinv': ATAinv, 'pairs': pairs, 'A': A }
|
python
|
def precompute_optimzation_S(laplacian_matrix,n_samples,relaxation_kwds):
"""compute Rk, A, ATAinv, neighbors and pairs for projected mode"""
relaxation_kwds.setdefault('presave',False)
relaxation_kwds.setdefault('presave_name','pre_comp_current.npy')
relaxation_kwds.setdefault('verbose',False)
if relaxation_kwds['verbose']:
print ('Pre-computing quantities Y to S conversions')
print ('Making A and Pairs')
A, pairs = makeA(laplacian_matrix)
if relaxation_kwds['verbose']:
print ('Making Rk and nbhds')
Rk_tensor, nbk = compute_Rk(laplacian_matrix,A,n_samples)
# TODO: not quite sure what is ATAinv? why we need this?
ATAinv = np.linalg.pinv(A.T.dot(A).todense())
if relaxation_kwds['verbose']:
print ('Finish calculating pseudo inverse')
if relaxation_kwds['presave']:
raise NotImplementedError('Not yet implemented presave')
return { 'RK': Rk_tensor, 'nbk': nbk,
'ATAinv': ATAinv, 'pairs': pairs, 'A': A }
|
[
"def",
"precompute_optimzation_S",
"(",
"laplacian_matrix",
",",
"n_samples",
",",
"relaxation_kwds",
")",
":",
"relaxation_kwds",
".",
"setdefault",
"(",
"'presave'",
",",
"False",
")",
"relaxation_kwds",
".",
"setdefault",
"(",
"'presave_name'",
",",
"'pre_comp_current.npy'",
")",
"relaxation_kwds",
".",
"setdefault",
"(",
"'verbose'",
",",
"False",
")",
"if",
"relaxation_kwds",
"[",
"'verbose'",
"]",
":",
"print",
"(",
"'Pre-computing quantities Y to S conversions'",
")",
"print",
"(",
"'Making A and Pairs'",
")",
"A",
",",
"pairs",
"=",
"makeA",
"(",
"laplacian_matrix",
")",
"if",
"relaxation_kwds",
"[",
"'verbose'",
"]",
":",
"print",
"(",
"'Making Rk and nbhds'",
")",
"Rk_tensor",
",",
"nbk",
"=",
"compute_Rk",
"(",
"laplacian_matrix",
",",
"A",
",",
"n_samples",
")",
"# TODO: not quite sure what is ATAinv? why we need this?",
"ATAinv",
"=",
"np",
".",
"linalg",
".",
"pinv",
"(",
"A",
".",
"T",
".",
"dot",
"(",
"A",
")",
".",
"todense",
"(",
")",
")",
"if",
"relaxation_kwds",
"[",
"'verbose'",
"]",
":",
"print",
"(",
"'Finish calculating pseudo inverse'",
")",
"if",
"relaxation_kwds",
"[",
"'presave'",
"]",
":",
"raise",
"NotImplementedError",
"(",
"'Not yet implemented presave'",
")",
"return",
"{",
"'RK'",
":",
"Rk_tensor",
",",
"'nbk'",
":",
"nbk",
",",
"'ATAinv'",
":",
"ATAinv",
",",
"'pairs'",
":",
"pairs",
",",
"'A'",
":",
"A",
"}"
] |
compute Rk, A, ATAinv, neighbors and pairs for projected mode
|
[
"compute",
"Rk",
"A",
"ATAinv",
"neighbors",
"and",
"pairs",
"for",
"projected",
"mode"
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/relaxation/precomputed.py#L73-L92
|
7,566
|
mmp2/megaman
|
megaman/relaxation/precomputed.py
|
compute_Rk
|
def compute_Rk(L,A,n_samples):
# TODO: need to inspect more into compute Rk.
"""
Compute sparse L matrix and neighbors.
Returns
-------
Rk_tensor : array-like. Length = n
each component corresponds to the sparse matrix of Lk, which is
generated by extracting the kth row of laplacian and removing zeros.
nbk : array-like. Length = n
each component corresponds to the neighbor index of point k, which is
used in slicing the gradient, Y or S arrays.
"""
laplacian_matrix = L.copy()
laplacian_matrix.setdiag(0)
laplacian_matrix.eliminate_zeros()
n = n_samples
Rk_tensor = []
nbk = []
row_A,column_A = A.T.nonzero()
row,column = laplacian_matrix.nonzero()
nnz_val = np.squeeze(np.asarray(laplacian_matrix.T[(row,column)]))
sorted_col_args = np.argsort(column)
sorted_col_vals = column[sorted_col_args]
breaks_row_A = np.diff(row_A).nonzero()[0]
breaks_col = np.diff(sorted_col_vals).nonzero()[0]
for k in range(n_samples):
if k == 0:
nbk.append( column_A[:breaks_row_A[k]+1].T )
Rk_tensor.append(
nnz_val[np.sort(sorted_col_args[:breaks_col[k]+1])])
elif k == n_samples-1:
nbk.append( column_A[breaks_row_A[k-1]+1:].T )
Rk_tensor.append(
nnz_val[np.sort(sorted_col_args[breaks_col[k-1]+1:])])
else:
nbk.append( column_A[breaks_row_A[k-1]+1:breaks_row_A[k]+1].T )
Rk_tensor.append(nnz_val[np.sort(
sorted_col_args[breaks_col[k-1]+1:breaks_col[k]+1])])
return Rk_tensor, nbk
|
python
|
def compute_Rk(L,A,n_samples):
# TODO: need to inspect more into compute Rk.
"""
Compute sparse L matrix and neighbors.
Returns
-------
Rk_tensor : array-like. Length = n
each component corresponds to the sparse matrix of Lk, which is
generated by extracting the kth row of laplacian and removing zeros.
nbk : array-like. Length = n
each component corresponds to the neighbor index of point k, which is
used in slicing the gradient, Y or S arrays.
"""
laplacian_matrix = L.copy()
laplacian_matrix.setdiag(0)
laplacian_matrix.eliminate_zeros()
n = n_samples
Rk_tensor = []
nbk = []
row_A,column_A = A.T.nonzero()
row,column = laplacian_matrix.nonzero()
nnz_val = np.squeeze(np.asarray(laplacian_matrix.T[(row,column)]))
sorted_col_args = np.argsort(column)
sorted_col_vals = column[sorted_col_args]
breaks_row_A = np.diff(row_A).nonzero()[0]
breaks_col = np.diff(sorted_col_vals).nonzero()[0]
for k in range(n_samples):
if k == 0:
nbk.append( column_A[:breaks_row_A[k]+1].T )
Rk_tensor.append(
nnz_val[np.sort(sorted_col_args[:breaks_col[k]+1])])
elif k == n_samples-1:
nbk.append( column_A[breaks_row_A[k-1]+1:].T )
Rk_tensor.append(
nnz_val[np.sort(sorted_col_args[breaks_col[k-1]+1:])])
else:
nbk.append( column_A[breaks_row_A[k-1]+1:breaks_row_A[k]+1].T )
Rk_tensor.append(nnz_val[np.sort(
sorted_col_args[breaks_col[k-1]+1:breaks_col[k]+1])])
return Rk_tensor, nbk
|
[
"def",
"compute_Rk",
"(",
"L",
",",
"A",
",",
"n_samples",
")",
":",
"# TODO: need to inspect more into compute Rk.",
"laplacian_matrix",
"=",
"L",
".",
"copy",
"(",
")",
"laplacian_matrix",
".",
"setdiag",
"(",
"0",
")",
"laplacian_matrix",
".",
"eliminate_zeros",
"(",
")",
"n",
"=",
"n_samples",
"Rk_tensor",
"=",
"[",
"]",
"nbk",
"=",
"[",
"]",
"row_A",
",",
"column_A",
"=",
"A",
".",
"T",
".",
"nonzero",
"(",
")",
"row",
",",
"column",
"=",
"laplacian_matrix",
".",
"nonzero",
"(",
")",
"nnz_val",
"=",
"np",
".",
"squeeze",
"(",
"np",
".",
"asarray",
"(",
"laplacian_matrix",
".",
"T",
"[",
"(",
"row",
",",
"column",
")",
"]",
")",
")",
"sorted_col_args",
"=",
"np",
".",
"argsort",
"(",
"column",
")",
"sorted_col_vals",
"=",
"column",
"[",
"sorted_col_args",
"]",
"breaks_row_A",
"=",
"np",
".",
"diff",
"(",
"row_A",
")",
".",
"nonzero",
"(",
")",
"[",
"0",
"]",
"breaks_col",
"=",
"np",
".",
"diff",
"(",
"sorted_col_vals",
")",
".",
"nonzero",
"(",
")",
"[",
"0",
"]",
"for",
"k",
"in",
"range",
"(",
"n_samples",
")",
":",
"if",
"k",
"==",
"0",
":",
"nbk",
".",
"append",
"(",
"column_A",
"[",
":",
"breaks_row_A",
"[",
"k",
"]",
"+",
"1",
"]",
".",
"T",
")",
"Rk_tensor",
".",
"append",
"(",
"nnz_val",
"[",
"np",
".",
"sort",
"(",
"sorted_col_args",
"[",
":",
"breaks_col",
"[",
"k",
"]",
"+",
"1",
"]",
")",
"]",
")",
"elif",
"k",
"==",
"n_samples",
"-",
"1",
":",
"nbk",
".",
"append",
"(",
"column_A",
"[",
"breaks_row_A",
"[",
"k",
"-",
"1",
"]",
"+",
"1",
":",
"]",
".",
"T",
")",
"Rk_tensor",
".",
"append",
"(",
"nnz_val",
"[",
"np",
".",
"sort",
"(",
"sorted_col_args",
"[",
"breaks_col",
"[",
"k",
"-",
"1",
"]",
"+",
"1",
":",
"]",
")",
"]",
")",
"else",
":",
"nbk",
".",
"append",
"(",
"column_A",
"[",
"breaks_row_A",
"[",
"k",
"-",
"1",
"]",
"+",
"1",
":",
"breaks_row_A",
"[",
"k",
"]",
"+",
"1",
"]",
".",
"T",
")",
"Rk_tensor",
".",
"append",
"(",
"nnz_val",
"[",
"np",
".",
"sort",
"(",
"sorted_col_args",
"[",
"breaks_col",
"[",
"k",
"-",
"1",
"]",
"+",
"1",
":",
"breaks_col",
"[",
"k",
"]",
"+",
"1",
"]",
")",
"]",
")",
"return",
"Rk_tensor",
",",
"nbk"
] |
Compute sparse L matrix and neighbors.
Returns
-------
Rk_tensor : array-like. Length = n
each component corresponds to the sparse matrix of Lk, which is
generated by extracting the kth row of laplacian and removing zeros.
nbk : array-like. Length = n
each component corresponds to the neighbor index of point k, which is
used in slicing the gradient, Y or S arrays.
|
[
"Compute",
"sparse",
"L",
"matrix",
"and",
"neighbors",
"."
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/relaxation/precomputed.py#L116-L161
|
7,567
|
mmp2/megaman
|
doc/sphinxext/numpy_ext/automodapi.py
|
_mod_info
|
def _mod_info(modname, toskip=[], onlylocals=True):
"""
Determines if a module is a module or a package and whether or not
it has classes or functions.
"""
hascls = hasfunc = False
for localnm, fqnm, obj in zip(*find_mod_objs(modname, onlylocals=onlylocals)):
if localnm not in toskip:
hascls = hascls or inspect.isclass(obj)
hasfunc = hasfunc or inspect.isroutine(obj)
if hascls and hasfunc:
break
# find_mod_objs has already imported modname
# TODO: There is probably a cleaner way to do this, though this is pretty
# reliable for all Python versions for most cases that we care about.
pkg = sys.modules[modname]
ispkg = (hasattr(pkg, '__file__') and isinstance(pkg.__file__, str) and
os.path.split(pkg.__file__)[1].startswith('__init__.py'))
return ispkg, hascls, hasfunc
|
python
|
def _mod_info(modname, toskip=[], onlylocals=True):
"""
Determines if a module is a module or a package and whether or not
it has classes or functions.
"""
hascls = hasfunc = False
for localnm, fqnm, obj in zip(*find_mod_objs(modname, onlylocals=onlylocals)):
if localnm not in toskip:
hascls = hascls or inspect.isclass(obj)
hasfunc = hasfunc or inspect.isroutine(obj)
if hascls and hasfunc:
break
# find_mod_objs has already imported modname
# TODO: There is probably a cleaner way to do this, though this is pretty
# reliable for all Python versions for most cases that we care about.
pkg = sys.modules[modname]
ispkg = (hasattr(pkg, '__file__') and isinstance(pkg.__file__, str) and
os.path.split(pkg.__file__)[1].startswith('__init__.py'))
return ispkg, hascls, hasfunc
|
[
"def",
"_mod_info",
"(",
"modname",
",",
"toskip",
"=",
"[",
"]",
",",
"onlylocals",
"=",
"True",
")",
":",
"hascls",
"=",
"hasfunc",
"=",
"False",
"for",
"localnm",
",",
"fqnm",
",",
"obj",
"in",
"zip",
"(",
"*",
"find_mod_objs",
"(",
"modname",
",",
"onlylocals",
"=",
"onlylocals",
")",
")",
":",
"if",
"localnm",
"not",
"in",
"toskip",
":",
"hascls",
"=",
"hascls",
"or",
"inspect",
".",
"isclass",
"(",
"obj",
")",
"hasfunc",
"=",
"hasfunc",
"or",
"inspect",
".",
"isroutine",
"(",
"obj",
")",
"if",
"hascls",
"and",
"hasfunc",
":",
"break",
"# find_mod_objs has already imported modname",
"# TODO: There is probably a cleaner way to do this, though this is pretty",
"# reliable for all Python versions for most cases that we care about.",
"pkg",
"=",
"sys",
".",
"modules",
"[",
"modname",
"]",
"ispkg",
"=",
"(",
"hasattr",
"(",
"pkg",
",",
"'__file__'",
")",
"and",
"isinstance",
"(",
"pkg",
".",
"__file__",
",",
"str",
")",
"and",
"os",
".",
"path",
".",
"split",
"(",
"pkg",
".",
"__file__",
")",
"[",
"1",
"]",
".",
"startswith",
"(",
"'__init__.py'",
")",
")",
"return",
"ispkg",
",",
"hascls",
",",
"hasfunc"
] |
Determines if a module is a module or a package and whether or not
it has classes or functions.
|
[
"Determines",
"if",
"a",
"module",
"is",
"a",
"module",
"or",
"a",
"package",
"and",
"whether",
"or",
"not",
"it",
"has",
"classes",
"or",
"functions",
"."
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/doc/sphinxext/numpy_ext/automodapi.py#L328-L350
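Editor's note: as a usage-style illustration only (a standard-library module, not the sphinx machinery documented above), the class/function detection that _mod_info performs boils down to inspect.isclass and inspect.isroutine over a module's members.
import inspect
import json

members = [obj for _, obj in inspect.getmembers(json)]
hascls = any(inspect.isclass(obj) for obj in members)     # e.g. json.JSONDecoder
hasfunc = any(inspect.isroutine(obj) for obj in members)  # e.g. json.loads
print(hascls, hasfunc)  # True True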
|
7,568
|
mmp2/megaman
|
megaman/geometry/affinity.py
|
compute_affinity_matrix
|
def compute_affinity_matrix(adjacency_matrix, method='auto', **kwargs):
"""Compute the affinity matrix with the given method"""
if method == 'auto':
method = 'gaussian'
return Affinity.init(method, **kwargs).affinity_matrix(adjacency_matrix)
|
python
|
def compute_affinity_matrix(adjacency_matrix, method='auto', **kwargs):
"""Compute the affinity matrix with the given method"""
if method == 'auto':
method = 'gaussian'
return Affinity.init(method, **kwargs).affinity_matrix(adjacency_matrix)
|
[
"def",
"compute_affinity_matrix",
"(",
"adjacency_matrix",
",",
"method",
"=",
"'auto'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"method",
"==",
"'auto'",
":",
"method",
"=",
"'gaussian'",
"return",
"Affinity",
".",
"init",
"(",
"method",
",",
"*",
"*",
"kwargs",
")",
".",
"affinity_matrix",
"(",
"adjacency_matrix",
")"
] |
Compute the affinity matrix with the given method
|
[
"Compute",
"the",
"affinity",
"matrix",
"with",
"the",
"given",
"method"
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/geometry/affinity.py#L11-L15
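Editor's note: for context, a minimal sketch of what the 'gaussian' affinity amounts to; the hand-written distance matrix and the radius value are assumptions for illustration, not library defaults. Each pairwise distance d_ij is mapped to exp(-d_ij**2 / radius**2).
import numpy as np

distances = np.array([[0.0, 1.0, 2.0],
                      [1.0, 0.0, 1.5],
                      [2.0, 1.5, 0.0]])
radius = 1.0
affinity = np.exp(-(distances ** 2) / radius ** 2)  # Gaussian kernel applied entrywise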
|
7,569
|
mmp2/megaman
|
megaman/embedding/locally_linear.py
|
barycenter_graph
|
def barycenter_graph(distance_matrix, X, reg=1e-3):
"""
Computes the barycenter weighted graph for points in X
Parameters
----------
distance_matrix: sparse Ndarray, (N_obs, N_obs) pairwise distance matrix.
X : Ndarray (N_obs, N_dim) observed data matrix.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
W : sparse matrix in CSR format, shape = [n_samples, n_samples]
W[i, j] is assigned the weight of edge that connects i to j.
"""
(N, d_in) = X.shape
(rows, cols) = distance_matrix.nonzero()
W = sparse.lil_matrix((N, N)) # best for W[i, nbrs_i] = w/np.sum(w)
for i in range(N):
nbrs_i = cols[rows == i]
n_neighbors_i = len(nbrs_i)
v = np.ones(n_neighbors_i, dtype=X.dtype)
C = X[nbrs_i] - X[i]
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::n_neighbors_i + 1] += R
w = solve(G, v, sym_pos = True)
W[i, nbrs_i] = w / np.sum(w)
return W
|
python
|
def barycenter_graph(distance_matrix, X, reg=1e-3):
"""
Computes the barycenter weighted graph for points in X
Parameters
----------
distance_matrix: sparse Ndarray, (N_obs, N_obs) pairwise distance matrix.
X : Ndarray (N_obs, N_dim) observed data matrix.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
W : sparse matrix in CSR format, shape = [n_samples, n_samples]
W[i, j] is assigned the weight of edge that connects i to j.
"""
(N, d_in) = X.shape
(rows, cols) = distance_matrix.nonzero()
W = sparse.lil_matrix((N, N)) # best for W[i, nbrs_i] = w/np.sum(w)
for i in range(N):
nbrs_i = cols[rows == i]
n_neighbors_i = len(nbrs_i)
v = np.ones(n_neighbors_i, dtype=X.dtype)
C = X[nbrs_i] - X[i]
G = np.dot(C, C.T)
trace = np.trace(G)
if trace > 0:
R = reg * trace
else:
R = reg
G.flat[::n_neighbors_i + 1] += R
w = solve(G, v, sym_pos = True)
W[i, nbrs_i] = w / np.sum(w)
return W
|
[
"def",
"barycenter_graph",
"(",
"distance_matrix",
",",
"X",
",",
"reg",
"=",
"1e-3",
")",
":",
"(",
"N",
",",
"d_in",
")",
"=",
"X",
".",
"shape",
"(",
"rows",
",",
"cols",
")",
"=",
"distance_matrix",
".",
"nonzero",
"(",
")",
"W",
"=",
"sparse",
".",
"lil_matrix",
"(",
"(",
"N",
",",
"N",
")",
")",
"# best for W[i, nbrs_i] = w/np.sum(w)",
"for",
"i",
"in",
"range",
"(",
"N",
")",
":",
"nbrs_i",
"=",
"cols",
"[",
"rows",
"==",
"i",
"]",
"n_neighbors_i",
"=",
"len",
"(",
"nbrs_i",
")",
"v",
"=",
"np",
".",
"ones",
"(",
"n_neighbors_i",
",",
"dtype",
"=",
"X",
".",
"dtype",
")",
"C",
"=",
"X",
"[",
"nbrs_i",
"]",
"-",
"X",
"[",
"i",
"]",
"G",
"=",
"np",
".",
"dot",
"(",
"C",
",",
"C",
".",
"T",
")",
"trace",
"=",
"np",
".",
"trace",
"(",
"G",
")",
"if",
"trace",
">",
"0",
":",
"R",
"=",
"reg",
"*",
"trace",
"else",
":",
"R",
"=",
"reg",
"G",
".",
"flat",
"[",
":",
":",
"n_neighbors_i",
"+",
"1",
"]",
"+=",
"R",
"w",
"=",
"solve",
"(",
"G",
",",
"v",
",",
"sym_pos",
"=",
"True",
")",
"W",
"[",
"i",
",",
"nbrs_i",
"]",
"=",
"w",
"/",
"np",
".",
"sum",
"(",
"w",
")",
"return",
"W"
] |
Computes the barycenter weighted graph for points in X
Parameters
----------
distance_matrix: sparse Ndarray, (N_obs, N_obs) pairwise distance matrix.
X : Ndarray (N_obs, N_dim) observed data matrix.
reg : float, optional
Amount of regularization when solving the least-squares
problem. Only relevant if mode='barycenter'. If None, use the
default.
Returns
-------
W : sparse matrix in CSR format, shape = [n_samples, n_samples]
W[i, j] is assigned the weight of edge that connects i to j.
|
[
"Computes",
"the",
"barycenter",
"weighted",
"graph",
"for",
"points",
"in",
"X"
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/embedding/locally_linear.py#L22-L57
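Editor's note: a worked sketch (random toy data; scipy's current assume_a='pos' is used in place of the deprecated sym_pos flag seen above) of the per-point step inside barycenter_graph: form the local Gram matrix, regularize its diagonal by reg times the trace, solve G w = 1, and normalize the weights.
import numpy as np
from scipy.linalg import solve

X = np.random.RandomState(0).rand(10, 3)
i, nbrs_i = 0, np.array([1, 2, 3, 4])
C = X[nbrs_i] - X[i]                                # neighborhood centered on point i
G = C @ C.T                                         # local Gram matrix
G.flat[::len(nbrs_i) + 1] += 1e-3 * np.trace(G)     # regularize the diagonal (reg * trace)
w = solve(G, np.ones(len(nbrs_i)), assume_a='pos')
w /= w.sum()                                        # barycenter weights summing to one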
|
7,570
|
mmp2/megaman
|
megaman/embedding/locally_linear.py
|
locally_linear_embedding
|
def locally_linear_embedding(geom, n_components, reg=1e-3,
eigen_solver='auto', random_state=None,
solver_kwds=None):
"""
Perform a Locally Linear Embedding analysis on the data.
Parameters
----------
geom : a Geometry object from megaman.geometry.geometry
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).
"""
if geom.X is None:
raise ValueError("Must pass data matrix X to Geometry")
if geom.adjacency_matrix is None:
geom.compute_adjacency_matrix()
W = barycenter_graph(geom.adjacency_matrix, geom.X, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
eigen_solver, solver_kwds = check_eigen_solver(eigen_solver, solver_kwds,
size=W.shape[0],
nvec=n_components + 1)
if eigen_solver != 'dense':
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
M.flat[::M.shape[0] + 1] += 1 # W = W - I = W - I
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
random_state=random_state)
|
python
|
def locally_linear_embedding(geom, n_components, reg=1e-3,
eigen_solver='auto', random_state=None,
solver_kwds=None):
"""
Perform a Locally Linear Embedding analysis on the data.
Parameters
----------
geom : a Geometry object from megaman.geometry.geometry
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).
"""
if geom.X is None:
raise ValueError("Must pass data matrix X to Geometry")
if geom.adjacency_matrix is None:
geom.compute_adjacency_matrix()
W = barycenter_graph(geom.adjacency_matrix, geom.X, reg=reg)
# we'll compute M = (I-W)'(I-W)
# depending on the solver, we'll do this differently
eigen_solver, solver_kwds = check_eigen_solver(eigen_solver, solver_kwds,
size=W.shape[0],
nvec=n_components + 1)
if eigen_solver != 'dense':
M = eye(*W.shape, format=W.format) - W
M = (M.T * M).tocsr()
else:
M = (W.T * W - W.T - W).toarray()
M.flat[::M.shape[0] + 1] += 1 # W = W - I = W - I
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
random_state=random_state)
|
[
"def",
"locally_linear_embedding",
"(",
"geom",
",",
"n_components",
",",
"reg",
"=",
"1e-3",
",",
"eigen_solver",
"=",
"'auto'",
",",
"random_state",
"=",
"None",
",",
"solver_kwds",
"=",
"None",
")",
":",
"if",
"geom",
".",
"X",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Must pass data matrix X to Geometry\"",
")",
"if",
"geom",
".",
"adjacency_matrix",
"is",
"None",
":",
"geom",
".",
"compute_adjacency_matrix",
"(",
")",
"W",
"=",
"barycenter_graph",
"(",
"geom",
".",
"adjacency_matrix",
",",
"geom",
".",
"X",
",",
"reg",
"=",
"reg",
")",
"# we'll compute M = (I-W)'(I-W)",
"# depending on the solver, we'll do this differently",
"eigen_solver",
",",
"solver_kwds",
"=",
"check_eigen_solver",
"(",
"eigen_solver",
",",
"solver_kwds",
",",
"size",
"=",
"W",
".",
"shape",
"[",
"0",
"]",
",",
"nvec",
"=",
"n_components",
"+",
"1",
")",
"if",
"eigen_solver",
"!=",
"'dense'",
":",
"M",
"=",
"eye",
"(",
"*",
"W",
".",
"shape",
",",
"format",
"=",
"W",
".",
"format",
")",
"-",
"W",
"M",
"=",
"(",
"M",
".",
"T",
"*",
"M",
")",
".",
"tocsr",
"(",
")",
"else",
":",
"M",
"=",
"(",
"W",
".",
"T",
"*",
"W",
"-",
"W",
".",
"T",
"-",
"W",
")",
".",
"toarray",
"(",
")",
"M",
".",
"flat",
"[",
":",
":",
"M",
".",
"shape",
"[",
"0",
"]",
"+",
"1",
"]",
"+=",
"1",
"# W = W - I = W - I",
"return",
"null_space",
"(",
"M",
",",
"n_components",
",",
"k_skip",
"=",
"1",
",",
"eigen_solver",
"=",
"eigen_solver",
",",
"random_state",
"=",
"random_state",
")"
] |
Perform a Locally Linear Embedding analysis on the data.
Parameters
----------
geom : a Geometry object from megaman.geometry.geometry
n_components : integer
number of coordinates for the manifold.
reg : float
regularization constant, multiplies the trace of the local covariance
matrix of the distances.
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
Returns
-------
Y : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
.. [1] Roweis, S. & Saul, L. Nonlinear dimensionality reduction
by locally linear embedding. Science 290:2323 (2000).
|
[
"Perform",
"a",
"Locally",
"Linear",
"Embedding",
"analysis",
"on",
"the",
"data",
"."
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/embedding/locally_linear.py#L60-L128
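Editor's note: the heart of the sparse branch above is M = (I - W)^T (I - W). A self-contained toy sketch (random W and a dense eigendecomposition standing in for the repo's null_space helper) of that step:
import numpy as np
import scipy.sparse as sparse

W = sparse.random(20, 20, density=0.2, format='csr', random_state=0)
I = sparse.identity(20, format='csr')
M = ((I - W).T @ (I - W)).toarray()
vals, vecs = np.linalg.eigh(M)
embedding = vecs[:, 1:3]   # drop the first eigenvector (k_skip=1), keep 2 components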
|
7,571
|
mmp2/megaman
|
megaman/utils/validation.py
|
_num_samples
|
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensemble's length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
|
python
|
def _num_samples(x):
"""Return number of samples in array-like x."""
if hasattr(x, 'fit'):
# Don't get num_samples from an ensemble's length!
raise TypeError('Expected sequence or array-like, got '
'estimator %s' % x)
if not hasattr(x, '__len__') and not hasattr(x, 'shape'):
if hasattr(x, '__array__'):
x = np.asarray(x)
else:
raise TypeError("Expected sequence or array-like, got %s" %
type(x))
if hasattr(x, 'shape'):
if len(x.shape) == 0:
raise TypeError("Singleton array %r cannot be considered"
" a valid collection." % x)
return x.shape[0]
else:
return len(x)
|
[
"def",
"_num_samples",
"(",
"x",
")",
":",
"if",
"hasattr",
"(",
"x",
",",
"'fit'",
")",
":",
"# Don't get num_samples from an ensembles length!",
"raise",
"TypeError",
"(",
"'Expected sequence or array-like, got '",
"'estimator %s'",
"%",
"x",
")",
"if",
"not",
"hasattr",
"(",
"x",
",",
"'__len__'",
")",
"and",
"not",
"hasattr",
"(",
"x",
",",
"'shape'",
")",
":",
"if",
"hasattr",
"(",
"x",
",",
"'__array__'",
")",
":",
"x",
"=",
"np",
".",
"asarray",
"(",
"x",
")",
"else",
":",
"raise",
"TypeError",
"(",
"\"Expected sequence or array-like, got %s\"",
"%",
"type",
"(",
"x",
")",
")",
"if",
"hasattr",
"(",
"x",
",",
"'shape'",
")",
":",
"if",
"len",
"(",
"x",
".",
"shape",
")",
"==",
"0",
":",
"raise",
"TypeError",
"(",
"\"Singleton array %r cannot be considered\"",
"\" a valid collection.\"",
"%",
"x",
")",
"return",
"x",
".",
"shape",
"[",
"0",
"]",
"else",
":",
"return",
"len",
"(",
"x",
")"
] |
Return number of samples in array-like x.
|
[
"Return",
"number",
"of",
"samples",
"in",
"array",
"-",
"like",
"x",
"."
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/utils/validation.py#L68-L86
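Editor's note: a behavioural sketch, assuming the _num_samples definition above has been executed in the same session: lists go through len(), arrays through shape[0], and zero-dimensional arrays or scalars raise TypeError.
import numpy as np

print(_num_samples([1, 2, 3]))          # 3, via len()
print(_num_samples(np.zeros((5, 2))))   # 5, via shape[0]
# _num_samples(np.float64(1.0))         # would raise TypeError: singleton array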
|
7,572
|
mmp2/megaman
|
megaman/utils/spectral_clustering.py
|
spectral_clustering
|
def spectral_clustering(geom, K, eigen_solver = 'dense', random_state = None, solver_kwds = None,
renormalize = True, stabalize = True, additional_vectors = 0):
"""
Spectral clustering for finding K clusters by using the eigenvectors of a
matrix which is derived from a set of similarities S.
Parameters
-----------
S: array-like,shape(n_sample,n_sample)
similarity matrix
K: integer
number of K clusters
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.RandomState
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
renormalize : (bool) whether or not to set the rows of the eigenvectors to have norm 1
this can improve label quality
stabalize : (bool) whether or not to compute the (more stable) eigenvectors of L = D^-1/2*S*D^-1/2
instead of P = D^-1*S
additional_vectors : (int) compute additional eigen vectors when computing eigen decomposition.
When eigen_solver = 'amg' or 'lobpcg', often if a small number of eigenvalues is sought the
largest eigenvalue returned is *not* equal to 1 (it should be). This can usually be fixed
by requesting more than K eigenvalues until the first eigenvalue is close to 1 and then
omitted. The remaining K-1 eigenvectors should be informative.
Returns
-------
labels: array-like, shape (1,n_samples)
"""
# Step 1: get similarity matrix
if geom.affinity_matrix is None:
S = geom.compute_affinity_matrix()
else:
S = geom.affinity_matrix
# Check for stability method, symmetric solvers require this
if eigen_solver in ['lobpcg', 'amg']:
stabalize = True
if stabalize:
geom.laplacian_type = 'symmetricnormalized'
return_lapsym = True
else:
geom.laplacian_type = 'randomwalk'
return_lapsym = False
# Step 2: get the Laplacian matrix
P = geom.compute_laplacian_matrix(return_lapsym = return_lapsym)
# by default the Laplacian is subtracted from the Identity matrix (this step may not be needed)
P += identity(P.shape[0])
# Step 3: Compute the top K eigenvectors and drop the first
if eigen_solver in ['auto', 'amg', 'lobpcg']:
n_components = 2*int(np.log(P.shape[0]))*K + 1
n_components += int(additional_vectors)
else:
n_components = K
n_components = min(n_components, P.shape[0])
(lambdas, eigen_vectors) = eigen_decomposition(P, n_components=n_components, eigen_solver=eigen_solver,
random_state=random_state, drop_first = True,
solver_kwds=solver_kwds)
# the first vector is usually uninformative
if eigen_solver in ['auto', 'lobpcg', 'amg']:
if np.abs(lambdas[0] - 1) > 1e-4:
warnings.warn("largest eigenvalue not equal to 1. Results may be poor. Try increasing additional_vectors parameter")
eigen_vectors = eigen_vectors[:, 1:K]
lambdas = lambdas[1:K]
# If stability method chosen, adjust eigenvectors
if stabalize:
w = np.array(geom.laplacian_weights)
eigen_vectors /= np.sqrt(w[:,np.newaxis])
eigen_vectors /= np.linalg.norm(eigen_vectors, axis = 0)
# If renormalize: set each data point to unit length
if renormalize:
norms = np.linalg.norm(eigen_vectors, axis=1)
eigen_vectors /= norms[:,np.newaxis]
# Step 4: run k-means clustering
labels = k_means_clustering(eigen_vectors,K)
return labels, eigen_vectors, P
|
python
|
def spectral_clustering(geom, K, eigen_solver = 'dense', random_state = None, solver_kwds = None,
renormalize = True, stabalize = True, additional_vectors = 0):
"""
Spectral clustering for finding K clusters by using the eigenvectors of a
matrix which is derived from a set of similarities S.
Parameters
-----------
S: array-like,shape(n_sample,n_sample)
similarity matrix
K: integer
number of K clusters
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.RandomState
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
renormalize : (bool) whether or not to set the rows of the eigenvectors to have norm 1
this can improve label quality
stabalize : (bool) whether or not to compute the (more stable) eigenvectors of L = D^-1/2*S*D^-1/2
instead of P = D^-1*S
additional_vectors : (int) compute additional eigen vectors when computing eigen decomposition.
When eigen_solver = 'amg' or 'lobpcg', often if a small number of eigenvalues is sought the
largest eigenvalue returned is *not* equal to 1 (it should be). This can usually be fixed
by requesting more than K eigenvalues until the first eigenvalue is close to 1 and then
omitted. The remaining K-1 eigenvectors should be informative.
Returns
-------
labels: array-like, shape (1,n_samples)
"""
# Step 1: get similarity matrix
if geom.affinity_matrix is None:
S = geom.compute_affinity_matrix()
else:
S = geom.affinity_matrix
# Check for stability method, symmetric solvers require this
if eigen_solver in ['lobpcg', 'amg']:
stabalize = True
if stabalize:
geom.laplacian_type = 'symmetricnormalized'
return_lapsym = True
else:
geom.laplacian_type = 'randomwalk'
return_lapsym = False
# Step 2: get the Laplacian matrix
P = geom.compute_laplacian_matrix(return_lapsym = return_lapsym)
# by default the Laplacian is subtracted from the Identity matrix (this step may not be needed)
P += identity(P.shape[0])
# Step 3: Compute the top K eigenvectors and drop the first
if eigen_solver in ['auto', 'amg', 'lobpcg']:
n_components = 2*int(np.log(P.shape[0]))*K + 1
n_components += int(additional_vectors)
else:
n_components = K
n_components = min(n_components, P.shape[0])
(lambdas, eigen_vectors) = eigen_decomposition(P, n_components=n_components, eigen_solver=eigen_solver,
random_state=random_state, drop_first = True,
solver_kwds=solver_kwds)
# the first vector is usually uninformative
if eigen_solver in ['auto', 'lobpcg', 'amg']:
if np.abs(lambdas[0] - 1) > 1e-4:
warnings.warn("largest eigenvalue not equal to 1. Results may be poor. Try increasing additional_vectors parameter")
eigen_vectors = eigen_vectors[:, 1:K]
lambdas = lambdas[1:K]
# If stability method chosen, adjust eigenvectors
if stabalize:
w = np.array(geom.laplacian_weights)
eigen_vectors /= np.sqrt(w[:,np.newaxis])
eigen_vectors /= np.linalg.norm(eigen_vectors, axis = 0)
# If renormalize: set each data point to unit length
if renormalize:
norms = np.linalg.norm(eigen_vectors, axis=1)
eigen_vectors /= norms[:,np.newaxis]
# Step 4: run k-means clustering
labels = k_means_clustering(eigen_vectors,K)
return labels, eigen_vectors, P
|
[
"def",
"spectral_clustering",
"(",
"geom",
",",
"K",
",",
"eigen_solver",
"=",
"'dense'",
",",
"random_state",
"=",
"None",
",",
"solver_kwds",
"=",
"None",
",",
"renormalize",
"=",
"True",
",",
"stabalize",
"=",
"True",
",",
"additional_vectors",
"=",
"0",
")",
":",
"# Step 1: get similarity matrix",
"if",
"geom",
".",
"affinity_matrix",
"is",
"None",
":",
"S",
"=",
"geom",
".",
"compute_affinity_matrix",
"(",
")",
"else",
":",
"S",
"=",
"geom",
".",
"affinity_matrix",
"# Check for stability method, symmetric solvers require this",
"if",
"eigen_solver",
"in",
"[",
"'lobpcg'",
",",
"'amg'",
"]",
":",
"stabalize",
"=",
"True",
"if",
"stabalize",
":",
"geom",
".",
"laplacian_type",
"=",
"'symmetricnormalized'",
"return_lapsym",
"=",
"True",
"else",
":",
"geom",
".",
"laplacian_type",
"=",
"'randomwalk'",
"return_lapsym",
"=",
"False",
"# Step 2: get the Laplacian matrix",
"P",
"=",
"geom",
".",
"compute_laplacian_matrix",
"(",
"return_lapsym",
"=",
"return_lapsym",
")",
"# by default the Laplacian is subtracted from the Identify matrix (this step may not be needed)",
"P",
"+=",
"identity",
"(",
"P",
".",
"shape",
"[",
"0",
"]",
")",
"# Step 3: Compute the top K eigenvectors and drop the first ",
"if",
"eigen_solver",
"in",
"[",
"'auto'",
",",
"'amg'",
",",
"'lobpcg'",
"]",
":",
"n_components",
"=",
"2",
"*",
"int",
"(",
"np",
".",
"log",
"(",
"P",
".",
"shape",
"[",
"0",
"]",
")",
")",
"*",
"K",
"+",
"1",
"n_components",
"+=",
"int",
"(",
"additional_vectors",
")",
"else",
":",
"n_components",
"=",
"K",
"n_components",
"=",
"min",
"(",
"n_components",
",",
"P",
".",
"shape",
"[",
"0",
"]",
")",
"(",
"lambdas",
",",
"eigen_vectors",
")",
"=",
"eigen_decomposition",
"(",
"P",
",",
"n_components",
"=",
"n_components",
",",
"eigen_solver",
"=",
"eigen_solver",
",",
"random_state",
"=",
"random_state",
",",
"drop_first",
"=",
"True",
",",
"solver_kwds",
"=",
"solver_kwds",
")",
"# the first vector is usually uninformative ",
"if",
"eigen_solver",
"in",
"[",
"'auto'",
",",
"'lobpcg'",
",",
"'amg'",
"]",
":",
"if",
"np",
".",
"abs",
"(",
"lambdas",
"[",
"0",
"]",
"-",
"1",
")",
">",
"1e-4",
":",
"warnings",
".",
"warn",
"(",
"\"largest eigenvalue not equal to 1. Results may be poor. Try increasing additional_vectors parameter\"",
")",
"eigen_vectors",
"=",
"eigen_vectors",
"[",
":",
",",
"1",
":",
"K",
"]",
"lambdas",
"=",
"lambdas",
"[",
"1",
":",
"K",
"]",
"# If stability method chosen, adjust eigenvectors",
"if",
"stabalize",
":",
"w",
"=",
"np",
".",
"array",
"(",
"geom",
".",
"laplacian_weights",
")",
"eigen_vectors",
"/=",
"np",
".",
"sqrt",
"(",
"w",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
")",
"eigen_vectors",
"/=",
"np",
".",
"linalg",
".",
"norm",
"(",
"eigen_vectors",
",",
"axis",
"=",
"0",
")",
"# If renormalize: set each data point to unit length",
"if",
"renormalize",
":",
"norms",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"eigen_vectors",
",",
"axis",
"=",
"1",
")",
"eigen_vectors",
"/=",
"norms",
"[",
":",
",",
"np",
".",
"newaxis",
"]",
"# Step 4: run k-means clustering",
"labels",
"=",
"k_means_clustering",
"(",
"eigen_vectors",
",",
"K",
")",
"return",
"labels",
",",
"eigen_vectors",
",",
"P"
] |
Spectral clustering for finding K clusters by using the eigenvectors of a
matrix which is derived from a set of similarities S.
Parameters
-----------
S: array-like,shape(n_sample,n_sample)
similarity matrix
K: integer
number of K clusters
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.RandomState
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
renormalize : (bool) whether or not to set the rows of the eigenvectors to have norm 1
this can improve label quality
stabalize : (bool) whether or not to compute the (more stable) eigenvectors of L = D^-1/2*S*D^-1/2
instead of P = D^-1*S
additional_vectors : (int) compute additional eigen vectors when computing eigen decomposition.
When eigen_solver = 'amg' or 'lobpcg', often if a small number of eigenvalues is sought the
largest eigenvalue returned is *not* equal to 1 (it should be). This can usually be fixed
by requesting more than K eigenvalues until the first eigenvalue is close to 1 and then
omitted. The remaining K-1 eigenvectors should be informative.
Returns
-------
labels: array-like, shape (1,n_samples)
|
[
"Spectral",
"clustering",
"for",
"find",
"K",
"clusters",
"by",
"using",
"the",
"eigenvectors",
"of",
"a",
"matrix",
"which",
"is",
"derived",
"from",
"a",
"set",
"of",
"similarities",
"S",
"."
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/utils/spectral_clustering.py#L94-L193
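Editor's note: a hedged sketch of the post-processing steps described above, with toy eigenvectors and scikit-learn's KMeans standing in for the repo's k_means_clustering helper (an assumption, not the library's actual call): renormalize each eigenvector row to unit length, then cluster the rows.
import numpy as np
from sklearn.cluster import KMeans

eigen_vectors = np.random.RandomState(0).rand(30, 3)
eigen_vectors /= np.linalg.norm(eigen_vectors, axis=1)[:, np.newaxis]   # unit-length rows
labels = KMeans(n_clusters=3, n_init=10, random_state=0).fit_predict(eigen_vectors)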
|
7,573
|
mmp2/megaman
|
megaman/plotter/covar_plotter3.py
|
pathpatch_2d_to_3d
|
def pathpatch_2d_to_3d(pathpatch, z = 0, normal = 'z'):
"""
Transforms a 2D Patch to a 3D patch using the given normal vector.
The patch is projected into the XY plane, rotated about the origin
and finally translated by z.
"""
if type(normal) is str: #Translate strings to normal vectors
index = "xyz".index(normal)
normal = np.roll((1.0,0,0), index)
normal /= np.linalg.norm(normal) #Make sure the vector is normalised
path = pathpatch.get_path() #Get the path and the associated transform
trans = pathpatch.get_patch_transform()
path = trans.transform_path(path) #Apply the transform
pathpatch.__class__ = art3d.PathPatch3D #Change the class
pathpatch._code3d = path.codes #Copy the codes
pathpatch._facecolor3d = pathpatch.get_facecolor #Get the face color
verts = path.vertices #Get the vertices in 2D
d = np.cross(normal, (0, 0, 1)) #Obtain the rotation vector
M = rotation_matrix(d) #Get the rotation matrix
pathpatch._segment3d = \
np.array([np.dot(M, (x, y, 0)) + (0, 0, z) for x, y in verts])
return pathpatch
|
python
|
def pathpatch_2d_to_3d(pathpatch, z = 0, normal = 'z'):
"""
Transforms a 2D Patch to a 3D patch using the given normal vector.
The patch is projected into the XY plane, rotated about the origin
and finally translated by z.
"""
if type(normal) is str: #Translate strings to normal vectors
index = "xyz".index(normal)
normal = np.roll((1.0,0,0), index)
normal /= np.linalg.norm(normal) #Make sure the vector is normalised
path = pathpatch.get_path() #Get the path and the associated transform
trans = pathpatch.get_patch_transform()
path = trans.transform_path(path) #Apply the transform
pathpatch.__class__ = art3d.PathPatch3D #Change the class
pathpatch._code3d = path.codes #Copy the codes
pathpatch._facecolor3d = pathpatch.get_facecolor #Get the face color
verts = path.vertices #Get the vertices in 2D
d = np.cross(normal, (0, 0, 1)) #Obtain the rotation vector
M = rotation_matrix(d) #Get the rotation matrix
pathpatch._segment3d = \
np.array([np.dot(M, (x, y, 0)) + (0, 0, z) for x, y in verts])
return pathpatch
|
[
"def",
"pathpatch_2d_to_3d",
"(",
"pathpatch",
",",
"z",
"=",
"0",
",",
"normal",
"=",
"'z'",
")",
":",
"if",
"type",
"(",
"normal",
")",
"is",
"str",
":",
"#Translate strings to normal vectors",
"index",
"=",
"\"xyz\"",
".",
"index",
"(",
"normal",
")",
"normal",
"=",
"np",
".",
"roll",
"(",
"(",
"1.0",
",",
"0",
",",
"0",
")",
",",
"index",
")",
"normal",
"/=",
"np",
".",
"linalg",
".",
"norm",
"(",
"normal",
")",
"#Make sure the vector is normalised",
"path",
"=",
"pathpatch",
".",
"get_path",
"(",
")",
"#Get the path and the associated transform",
"trans",
"=",
"pathpatch",
".",
"get_patch_transform",
"(",
")",
"path",
"=",
"trans",
".",
"transform_path",
"(",
"path",
")",
"#Apply the transform",
"pathpatch",
".",
"__class__",
"=",
"art3d",
".",
"PathPatch3D",
"#Change the class",
"pathpatch",
".",
"_code3d",
"=",
"path",
".",
"codes",
"#Copy the codes",
"pathpatch",
".",
"_facecolor3d",
"=",
"pathpatch",
".",
"get_facecolor",
"#Get the face color",
"verts",
"=",
"path",
".",
"vertices",
"#Get the vertices in 2D",
"d",
"=",
"np",
".",
"cross",
"(",
"normal",
",",
"(",
"0",
",",
"0",
",",
"1",
")",
")",
"#Obtain the rotation vector",
"M",
"=",
"rotation_matrix",
"(",
"d",
")",
"#Get the rotation matrix",
"pathpatch",
".",
"_segment3d",
"=",
"np",
".",
"array",
"(",
"[",
"np",
".",
"dot",
"(",
"M",
",",
"(",
"x",
",",
"y",
",",
"0",
")",
")",
"+",
"(",
"0",
",",
"0",
",",
"z",
")",
"for",
"x",
",",
"y",
"in",
"verts",
"]",
")",
"return",
"pathpatch"
] |
Transforms a 2D Patch to a 3D patch using the given normal vector.
The patch is projected into the XY plane, rotated about the origin
and finally translated by z.
|
[
"Transforms",
"a",
"2D",
"Patch",
"to",
"a",
"3D",
"patch",
"using",
"the",
"given",
"normal",
"vector",
"."
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/plotter/covar_plotter3.py#L44-L73
|
7,574
|
mmp2/megaman
|
megaman/plotter/covar_plotter3.py
|
calc_2d_ellipse_properties
|
def calc_2d_ellipse_properties(cov,nstd=2):
"""Calculate the properties for 2d ellipse given the covariance matrix."""
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:,order]
vals, vecs = eigsorted(cov)
width, height = 2 * nstd * np.sqrt(vals[:2])
normal = vecs[:,2] if vecs[2,2] > 0 else -vecs[:,2]
d = np.cross(normal, (0, 0, 1))
M = rotation_matrix(d)
x_trans = np.dot(M,(1,0,0))
cos_val = np.dot(vecs[:,0],x_trans)/np.linalg.norm(vecs[:,0])/np.linalg.norm(x_trans)
theta = np.degrees(np.arccos(np.clip(cos_val, -1, 1))) # if you really want the angle
return { 'width': width, 'height': height, 'angle': theta }, normal
|
python
|
def calc_2d_ellipse_properties(cov,nstd=2):
"""Calculate the properties for 2d ellipse given the covariance matrix."""
def eigsorted(cov):
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
return vals[order], vecs[:,order]
vals, vecs = eigsorted(cov)
width, height = 2 * nstd * np.sqrt(vals[:2])
normal = vecs[:,2] if vecs[2,2] > 0 else -vecs[:,2]
d = np.cross(normal, (0, 0, 1))
M = rotation_matrix(d)
x_trans = np.dot(M,(1,0,0))
cos_val = np.dot(vecs[:,0],x_trans)/np.linalg.norm(vecs[:,0])/np.linalg.norm(x_trans)
theta = np.degrees(np.arccos(np.clip(cos_val, -1, 1))) # if you really want the angle
return { 'width': width, 'height': height, 'angle': theta }, normal
|
[
"def",
"calc_2d_ellipse_properties",
"(",
"cov",
",",
"nstd",
"=",
"2",
")",
":",
"def",
"eigsorted",
"(",
"cov",
")",
":",
"vals",
",",
"vecs",
"=",
"np",
".",
"linalg",
".",
"eigh",
"(",
"cov",
")",
"order",
"=",
"vals",
".",
"argsort",
"(",
")",
"[",
":",
":",
"-",
"1",
"]",
"return",
"vals",
"[",
"order",
"]",
",",
"vecs",
"[",
":",
",",
"order",
"]",
"vals",
",",
"vecs",
"=",
"eigsorted",
"(",
"cov",
")",
"width",
",",
"height",
"=",
"2",
"*",
"nstd",
"*",
"np",
".",
"sqrt",
"(",
"vals",
"[",
":",
"2",
"]",
")",
"normal",
"=",
"vecs",
"[",
":",
",",
"2",
"]",
"if",
"vecs",
"[",
"2",
",",
"2",
"]",
">",
"0",
"else",
"-",
"vecs",
"[",
":",
",",
"2",
"]",
"d",
"=",
"np",
".",
"cross",
"(",
"normal",
",",
"(",
"0",
",",
"0",
",",
"1",
")",
")",
"M",
"=",
"rotation_matrix",
"(",
"d",
")",
"x_trans",
"=",
"np",
".",
"dot",
"(",
"M",
",",
"(",
"1",
",",
"0",
",",
"0",
")",
")",
"cos_val",
"=",
"np",
".",
"dot",
"(",
"vecs",
"[",
":",
",",
"0",
"]",
",",
"x_trans",
")",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"vecs",
"[",
":",
",",
"0",
"]",
")",
"/",
"np",
".",
"linalg",
".",
"norm",
"(",
"x_trans",
")",
"theta",
"=",
"np",
".",
"degrees",
"(",
"np",
".",
"arccos",
"(",
"np",
".",
"clip",
"(",
"cos_val",
",",
"-",
"1",
",",
"1",
")",
")",
")",
"# if you really want the angle",
"return",
"{",
"'width'",
":",
"width",
",",
"'height'",
":",
"height",
",",
"'angle'",
":",
"theta",
"}",
",",
"normal"
] |
Calculate the properties for 2d ellipse given the covariance matrix.
|
[
"Calculate",
"the",
"properties",
"for",
"2d",
"ellipse",
"given",
"the",
"covariance",
"matrix",
"."
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/plotter/covar_plotter3.py#L101-L116
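Editor's note: the same recipe in its plain 2-D form (covariance values are assumed), which may make the width/height/angle computation above easier to follow: sort the eigenpairs, scale the square roots of the eigenvalues by 2*nstd, and read the orientation off the leading eigenvector.
import numpy as np

cov = np.array([[3.0, 1.0],
                [1.0, 2.0]])
vals, vecs = np.linalg.eigh(cov)
order = vals.argsort()[::-1]
vals, vecs = vals[order], vecs[:, order]          # largest eigenvalue first
nstd = 2
width, height = 2 * nstd * np.sqrt(vals)
angle = np.degrees(np.arctan2(vecs[1, 0], vecs[0, 0]))   # orientation of the major axis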
|
7,575
|
mmp2/megaman
|
megaman/plotter/covar_plotter3.py
|
rotation_matrix
|
def rotation_matrix(d):
"""
Calculates a rotation matrix given a vector d. The direction of d
corresponds to the rotation axis. The length of d corresponds to
the sin of the angle of rotation.
Variant of: http://mail.scipy.org/pipermail/numpy-discussion/2009-March/040806.html
"""
sin_angle = np.linalg.norm(d)
if sin_angle == 0:
return np.identity(3)
d /= sin_angle
eye = np.eye(3)
ddt = np.outer(d, d)
skew = np.array([[ 0, d[2], -d[1]],
[-d[2], 0, d[0]],
[ d[1], -d[0], 0]], dtype=np.float64)
M = ddt + np.sqrt(1 - sin_angle**2) * (eye - ddt) + sin_angle * skew
return M
|
python
|
def rotation_matrix(d):
"""
Calculates a rotation matrix given a vector d. The direction of d
corresponds to the rotation axis. The length of d corresponds to
the sin of the angle of rotation.
Variant of: http://mail.scipy.org/pipermail/numpy-discussion/2009-March/040806.html
"""
sin_angle = np.linalg.norm(d)
if sin_angle == 0:
return np.identity(3)
d /= sin_angle
eye = np.eye(3)
ddt = np.outer(d, d)
skew = np.array([[ 0, d[2], -d[1]],
[-d[2], 0, d[0]],
[ d[1], -d[0], 0]], dtype=np.float64)
M = ddt + np.sqrt(1 - sin_angle**2) * (eye - ddt) + sin_angle * skew
return M
|
[
"def",
"rotation_matrix",
"(",
"d",
")",
":",
"sin_angle",
"=",
"np",
".",
"linalg",
".",
"norm",
"(",
"d",
")",
"if",
"sin_angle",
"==",
"0",
":",
"return",
"np",
".",
"identity",
"(",
"3",
")",
"d",
"/=",
"sin_angle",
"eye",
"=",
"np",
".",
"eye",
"(",
"3",
")",
"ddt",
"=",
"np",
".",
"outer",
"(",
"d",
",",
"d",
")",
"skew",
"=",
"np",
".",
"array",
"(",
"[",
"[",
"0",
",",
"d",
"[",
"2",
"]",
",",
"-",
"d",
"[",
"1",
"]",
"]",
",",
"[",
"-",
"d",
"[",
"2",
"]",
",",
"0",
",",
"d",
"[",
"0",
"]",
"]",
",",
"[",
"d",
"[",
"1",
"]",
",",
"-",
"d",
"[",
"0",
"]",
",",
"0",
"]",
"]",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
"M",
"=",
"ddt",
"+",
"np",
".",
"sqrt",
"(",
"1",
"-",
"sin_angle",
"**",
"2",
")",
"*",
"(",
"eye",
"-",
"ddt",
")",
"+",
"sin_angle",
"*",
"skew",
"return",
"M"
] |
Calculates a rotation matrix given a vector d. The direction of d
corresponds to the rotation axis. The length of d corresponds to
the sin of the angle of rotation.
Variant of: http://mail.scipy.org/pipermail/numpy-discussion/2009-March/040806.html
|
[
"Calculates",
"a",
"rotation",
"matrix",
"given",
"a",
"vector",
"d",
".",
"The",
"direction",
"of",
"d",
"corresponds",
"to",
"the",
"rotation",
"axis",
".",
"The",
"length",
"of",
"d",
"corresponds",
"to",
"the",
"sin",
"of",
"the",
"angle",
"of",
"rotation",
"."
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/plotter/covar_plotter3.py#L118-L140
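Editor's note: a quick numerical check, illustrative only; it assumes the rotation_matrix definition above is in scope and passes a copy of d because the function normalizes d in place. The result should be an orthogonal matrix with determinant 1.
import numpy as np

d = np.array([0.0, 0.0, 0.5])             # rotate about z by asin(0.5) = 30 degrees
M = rotation_matrix(d.copy())
print(np.allclose(M @ M.T, np.eye(3)))    # True: orthogonal
print(np.isclose(np.linalg.det(M), 1.0))  # True: proper rotation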
|
7,576
|
mmp2/megaman
|
megaman/plotter/covar_plotter3.py
|
create_ellipse
|
def create_ellipse(width,height,angle):
"""Create parametric ellipse from 200 points."""
angle = angle / 180.0 * np.pi
thetas = np.linspace(0,2*np.pi,200)
a = width / 2.0
b = height / 2.0
x = a*np.cos(thetas)*np.cos(angle) - b*np.sin(thetas)*np.sin(angle)
y = a*np.cos(thetas)*np.sin(angle) + b*np.sin(thetas)*np.cos(angle)
z = np.zeros(thetas.shape)
return np.vstack((x,y,z)).T
|
python
|
def create_ellipse(width,height,angle):
"""Create parametric ellipse from 200 points."""
angle = angle / 180.0 * np.pi
thetas = np.linspace(0,2*np.pi,200)
a = width / 2.0
b = height / 2.0
x = a*np.cos(thetas)*np.cos(angle) - b*np.sin(thetas)*np.sin(angle)
y = a*np.cos(thetas)*np.sin(angle) + b*np.sin(thetas)*np.cos(angle)
z = np.zeros(thetas.shape)
return np.vstack((x,y,z)).T
|
[
"def",
"create_ellipse",
"(",
"width",
",",
"height",
",",
"angle",
")",
":",
"angle",
"=",
"angle",
"/",
"180.0",
"*",
"np",
".",
"pi",
"thetas",
"=",
"np",
".",
"linspace",
"(",
"0",
",",
"2",
"*",
"np",
".",
"pi",
",",
"200",
")",
"a",
"=",
"width",
"/",
"2.0",
"b",
"=",
"height",
"/",
"2.0",
"x",
"=",
"a",
"*",
"np",
".",
"cos",
"(",
"thetas",
")",
"*",
"np",
".",
"cos",
"(",
"angle",
")",
"-",
"b",
"*",
"np",
".",
"sin",
"(",
"thetas",
")",
"*",
"np",
".",
"sin",
"(",
"angle",
")",
"y",
"=",
"a",
"*",
"np",
".",
"cos",
"(",
"thetas",
")",
"*",
"np",
".",
"sin",
"(",
"angle",
")",
"+",
"b",
"*",
"np",
".",
"sin",
"(",
"thetas",
")",
"*",
"np",
".",
"cos",
"(",
"angle",
")",
"z",
"=",
"np",
".",
"zeros",
"(",
"thetas",
".",
"shape",
")",
"return",
"np",
".",
"vstack",
"(",
"(",
"x",
",",
"y",
",",
"z",
")",
")",
".",
"T"
] |
Create parametric ellipse from 200 points.
|
[
"Create",
"parametric",
"ellipse",
"from",
"200",
"points",
"."
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/plotter/covar_plotter3.py#L142-L152
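Editor's note: a small sanity-check sketch (assumes the create_ellipse definition above is in scope): with angle=0 the returned points should satisfy (x/a)^2 + (y/b)^2 = 1 for a = width/2 and b = height/2.
import numpy as np

pts = create_ellipse(width=4.0, height=2.0, angle=0.0)
x, y, z = pts.T
print(np.allclose((x / 2.0) ** 2 + (y / 1.0) ** 2, 1.0))  # True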
|
7,577
|
mmp2/megaman
|
megaman/plotter/covar_plotter3.py
|
transform_to_3d
|
def transform_to_3d(points,normal,z=0):
"""Project points into 3d from 2d points."""
d = np.cross(normal, (0, 0, 1))
M = rotation_matrix(d)
transformed_points = M.dot(points.T).T + z
return transformed_points
|
python
|
def transform_to_3d(points,normal,z=0):
"""Project points into 3d from 2d points."""
d = np.cross(normal, (0, 0, 1))
M = rotation_matrix(d)
transformed_points = M.dot(points.T).T + z
return transformed_points
|
[
"def",
"transform_to_3d",
"(",
"points",
",",
"normal",
",",
"z",
"=",
"0",
")",
":",
"d",
"=",
"np",
".",
"cross",
"(",
"normal",
",",
"(",
"0",
",",
"0",
",",
"1",
")",
")",
"M",
"=",
"rotation_matrix",
"(",
"d",
")",
"transformed_points",
"=",
"M",
".",
"dot",
"(",
"points",
".",
"T",
")",
".",
"T",
"+",
"z",
"return",
"transformed_points"
] |
Project points into 3d from 2d points.
|
[
"Project",
"points",
"into",
"3d",
"from",
"2d",
"points",
"."
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/plotter/covar_plotter3.py#L154-L159
|
7,578
|
mmp2/megaman
|
megaman/plotter/covar_plotter3.py
|
create_ellipse_mesh
|
def create_ellipse_mesh(points,**kwargs):
"""Visualize the ellipse by using the mesh of the points."""
import plotly.graph_objs as go
x,y,z = points.T
return (go.Mesh3d(x=x,y=y,z=z,**kwargs),
go.Scatter3d(x=x, y=y, z=z,
marker=dict(size=0.01),
line=dict(width=2,color='#000000'),
showlegend=False,
hoverinfo='none'
)
)
|
python
|
def create_ellipse_mesh(points,**kwargs):
"""Visualize the ellipse by using the mesh of the points."""
import plotly.graph_objs as go
x,y,z = points.T
return (go.Mesh3d(x=x,y=y,z=z,**kwargs),
go.Scatter3d(x=x, y=y, z=z,
marker=dict(size=0.01),
line=dict(width=2,color='#000000'),
showlegend=False,
hoverinfo='none'
)
)
|
[
"def",
"create_ellipse_mesh",
"(",
"points",
",",
"*",
"*",
"kwargs",
")",
":",
"import",
"plotly",
".",
"graph_objs",
"as",
"go",
"x",
",",
"y",
",",
"z",
"=",
"points",
".",
"T",
"return",
"(",
"go",
".",
"Mesh3d",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"z",
"=",
"z",
",",
"*",
"*",
"kwargs",
")",
",",
"go",
".",
"Scatter3d",
"(",
"x",
"=",
"x",
",",
"y",
"=",
"y",
",",
"z",
"=",
"z",
",",
"marker",
"=",
"dict",
"(",
"size",
"=",
"0.01",
")",
",",
"line",
"=",
"dict",
"(",
"width",
"=",
"2",
",",
"color",
"=",
"'#000000'",
")",
",",
"showlegend",
"=",
"False",
",",
"hoverinfo",
"=",
"'none'",
")",
")"
] |
Visualize the ellipse by using the mesh of the points.
|
[
"Visualize",
"the",
"ellipse",
"by",
"using",
"the",
"mesh",
"of",
"the",
"points",
"."
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/plotter/covar_plotter3.py#L166-L177
|
7,579
|
mmp2/megaman
|
megaman/embedding/ltsa.py
|
ltsa
|
def ltsa(geom, n_components, eigen_solver='auto',
random_state=None, solver_kwds=None):
"""
Perform a Local Tangent Space Alignment analysis on the data.
Parameters
----------
geom : a Geometry object from megaman.geometry.geometry
n_components : integer
number of coordinates for the manifold.
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
Returns
-------
embedding : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
* Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)
"""
if geom.X is None:
raise ValueError("Must pass data matrix X to Geometry")
(N, d_in) = geom.X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
# get the distance matrix and neighbors list
if geom.adjacency_matrix is None:
geom.compute_adjacency_matrix()
(rows, cols) = geom.adjacency_matrix.nonzero()
eigen_solver, solver_kwds = check_eigen_solver(eigen_solver, solver_kwds,
size=geom.adjacency_matrix.shape[0],
nvec=n_components + 1)
if eigen_solver != 'dense':
M = sparse.csr_matrix((N, N))
else:
M = np.zeros((N, N))
for i in range(N):
neighbors_i = cols[rows == i]
n_neighbors_i = len(neighbors_i)
use_svd = (n_neighbors_i > d_in)
Xi = geom.X[neighbors_i]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors_i, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors_i)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors_i, neighbors_i)
with warnings.catch_warnings():
# sparse will complain this is better with lil_matrix but it doesn't work
warnings.simplefilter("ignore")
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors_i, neighbors_i] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
random_state=random_state,solver_kwds=solver_kwds)
|
python
|
def ltsa(geom, n_components, eigen_solver='auto',
random_state=None, solver_kwds=None):
"""
Perform a Local Tangent Space Alignment analysis on the data.
Parameters
----------
geom : a Geometry object from megaman.geometry.geometry
n_components : integer
number of coordinates for the manifold.
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
Returns
-------
embedding : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
* Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)
"""
if geom.X is None:
raise ValueError("Must pass data matrix X to Geometry")
(N, d_in) = geom.X.shape
if n_components > d_in:
raise ValueError("output dimension must be less than or equal "
"to input dimension")
# get the distance matrix and neighbors list
if geom.adjacency_matrix is None:
geom.compute_adjacency_matrix()
(rows, cols) = geom.adjacency_matrix.nonzero()
eigen_solver, solver_kwds = check_eigen_solver(eigen_solver, solver_kwds,
size=geom.adjacency_matrix.shape[0],
nvec=n_components + 1)
if eigen_solver != 'dense':
M = sparse.csr_matrix((N, N))
else:
M = np.zeros((N, N))
for i in range(N):
neighbors_i = cols[rows == i]
n_neighbors_i = len(neighbors_i)
use_svd = (n_neighbors_i > d_in)
Xi = geom.X[neighbors_i]
Xi -= Xi.mean(0)
# compute n_components largest eigenvalues of Xi * Xi^T
if use_svd:
v = svd(Xi, full_matrices=True)[0]
else:
Ci = np.dot(Xi, Xi.T)
v = eigh(Ci)[1][:, ::-1]
Gi = np.zeros((n_neighbors_i, n_components + 1))
Gi[:, 1:] = v[:, :n_components]
Gi[:, 0] = 1. / np.sqrt(n_neighbors_i)
GiGiT = np.dot(Gi, Gi.T)
nbrs_x, nbrs_y = np.meshgrid(neighbors_i, neighbors_i)
with warnings.catch_warnings():
# sparse will complain this is better with lil_matrix but it doesn't work
warnings.simplefilter("ignore")
M[nbrs_x, nbrs_y] -= GiGiT
M[neighbors_i, neighbors_i] += 1
return null_space(M, n_components, k_skip=1, eigen_solver=eigen_solver,
random_state=random_state,solver_kwds=solver_kwds)
|
[
"def",
"ltsa",
"(",
"geom",
",",
"n_components",
",",
"eigen_solver",
"=",
"'auto'",
",",
"random_state",
"=",
"None",
",",
"solver_kwds",
"=",
"None",
")",
":",
"if",
"geom",
".",
"X",
"is",
"None",
":",
"raise",
"ValueError",
"(",
"\"Must pass data matrix X to Geometry\"",
")",
"(",
"N",
",",
"d_in",
")",
"=",
"geom",
".",
"X",
".",
"shape",
"if",
"n_components",
">",
"d_in",
":",
"raise",
"ValueError",
"(",
"\"output dimension must be less than or equal \"",
"\"to input dimension\"",
")",
"# get the distance matrix and neighbors list",
"if",
"geom",
".",
"adjacency_matrix",
"is",
"None",
":",
"geom",
".",
"compute_adjacency_matrix",
"(",
")",
"(",
"rows",
",",
"cols",
")",
"=",
"geom",
".",
"adjacency_matrix",
".",
"nonzero",
"(",
")",
"eigen_solver",
",",
"solver_kwds",
"=",
"check_eigen_solver",
"(",
"eigen_solver",
",",
"solver_kwds",
",",
"size",
"=",
"geom",
".",
"adjacency_matrix",
".",
"shape",
"[",
"0",
"]",
",",
"nvec",
"=",
"n_components",
"+",
"1",
")",
"if",
"eigen_solver",
"!=",
"'dense'",
":",
"M",
"=",
"sparse",
".",
"csr_matrix",
"(",
"(",
"N",
",",
"N",
")",
")",
"else",
":",
"M",
"=",
"np",
".",
"zeros",
"(",
"(",
"N",
",",
"N",
")",
")",
"for",
"i",
"in",
"range",
"(",
"N",
")",
":",
"neighbors_i",
"=",
"cols",
"[",
"rows",
"==",
"i",
"]",
"n_neighbors_i",
"=",
"len",
"(",
"neighbors_i",
")",
"use_svd",
"=",
"(",
"n_neighbors_i",
">",
"d_in",
")",
"Xi",
"=",
"geom",
".",
"X",
"[",
"neighbors_i",
"]",
"Xi",
"-=",
"Xi",
".",
"mean",
"(",
"0",
")",
"# compute n_components largest eigenvalues of Xi * Xi^T",
"if",
"use_svd",
":",
"v",
"=",
"svd",
"(",
"Xi",
",",
"full_matrices",
"=",
"True",
")",
"[",
"0",
"]",
"else",
":",
"Ci",
"=",
"np",
".",
"dot",
"(",
"Xi",
",",
"Xi",
".",
"T",
")",
"v",
"=",
"eigh",
"(",
"Ci",
")",
"[",
"1",
"]",
"[",
":",
",",
":",
":",
"-",
"1",
"]",
"Gi",
"=",
"np",
".",
"zeros",
"(",
"(",
"n_neighbors_i",
",",
"n_components",
"+",
"1",
")",
")",
"Gi",
"[",
":",
",",
"1",
":",
"]",
"=",
"v",
"[",
":",
",",
":",
"n_components",
"]",
"Gi",
"[",
":",
",",
"0",
"]",
"=",
"1.",
"/",
"np",
".",
"sqrt",
"(",
"n_neighbors_i",
")",
"GiGiT",
"=",
"np",
".",
"dot",
"(",
"Gi",
",",
"Gi",
".",
"T",
")",
"nbrs_x",
",",
"nbrs_y",
"=",
"np",
".",
"meshgrid",
"(",
"neighbors_i",
",",
"neighbors_i",
")",
"with",
"warnings",
".",
"catch_warnings",
"(",
")",
":",
"# sparse will complain this is better with lil_matrix but it doesn't work",
"warnings",
".",
"simplefilter",
"(",
"\"ignore\"",
")",
"M",
"[",
"nbrs_x",
",",
"nbrs_y",
"]",
"-=",
"GiGiT",
"M",
"[",
"neighbors_i",
",",
"neighbors_i",
"]",
"+=",
"1",
"return",
"null_space",
"(",
"M",
",",
"n_components",
",",
"k_skip",
"=",
"1",
",",
"eigen_solver",
"=",
"eigen_solver",
",",
"random_state",
"=",
"random_state",
",",
"solver_kwds",
"=",
"solver_kwds",
")"
] |
Perform a Local Tangent Space Alignment analysis on the data.
Parameters
----------
geom : a Geometry object from megaman.geometry.geometry
n_components : integer
number of coordinates for the manifold.
eigen_solver : {'auto', 'dense', 'arpack', 'lobpcg', or 'amg'}
'auto' :
algorithm will attempt to choose the best method for input data
'dense' :
use standard dense matrix operations for the eigenvalue decomposition.
For this method, M must be an array or matrix type. This method should be avoided for large problems.
'arpack' :
use arnoldi iteration in shift-invert mode. For this method,
M may be a dense matrix, sparse matrix, or general linear operator.
Warning: ARPACK can be unstable for some problems. It is best to
try several random seeds in order to check results.
'lobpcg' :
Locally Optimal Block Preconditioned Conjugate Gradient Method.
A preconditioned eigensolver for large symmetric positive definite
(SPD) generalized eigenproblems.
'amg' :
AMG requires pyamg to be installed. It can be faster on very large,
sparse problems, but may also lead to instabilities.
random_state : numpy.RandomState or int, optional
The generator or seed used to determine the starting vector for arpack
iterations. Defaults to numpy.random.
solver_kwds : any additional keyword arguments to pass to the selected eigen_solver
Returns
-------
embedding : array-like, shape [n_samples, n_components]
Embedding vectors.
squared_error : float
Reconstruction error for the embedding vectors. Equivalent to
``norm(Y - W Y, 'fro')**2``, where W are the reconstruction weights.
References
----------
* Zhang, Z. & Zha, H. Principal manifolds and nonlinear
dimensionality reduction via tangent space alignment.
Journal of Shanghai Univ. 8:406 (2004)
|
[
"Perform",
"a",
"Local",
"Tangent",
"Space",
"Alignment",
"analysis",
"on",
"the",
"data",
"."
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/embedding/ltsa.py#L24-L111
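Editor's note: a hedged sketch (random neighborhood data, scipy's svd as in the function above) of the per-point construction in ltsa: center the neighborhood, take its leading left singular vectors, and assemble the local alignment block Gi whose first column is the constant vector.
import numpy as np
from scipy.linalg import svd

Xi = np.random.RandomState(0).rand(7, 3)    # 7 neighbors in 3 input dimensions
Xi = Xi - Xi.mean(0)                        # center the neighborhood
n_components = 2
U = svd(Xi, full_matrices=True)[0]          # left singular vectors
Gi = np.zeros((Xi.shape[0], n_components + 1))
Gi[:, 0] = 1.0 / np.sqrt(Xi.shape[0])       # constant column
Gi[:, 1:] = U[:, :n_components]
GiGiT = Gi @ Gi.T                           # the block subtracted from M for this neighborhood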
|
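A minimal usage sketch for the LTSA solver documented in the record above; it goes no further than what the docstring states. The module-level function name (assumed to be ltsa), the Geometry constructor keywords, and the radius value are assumptions inferred from the file path megaman/embedding/ltsa.py and the docstring, not taken from this record.

# Illustrative only: import paths and the ltsa() call signature are assumptions.
import numpy as np
from megaman.geometry.geometry import Geometry
from megaman.embedding.ltsa import ltsa

X = np.random.random((100, 3))                    # 100 points in 3 dimensions
geom = Geometry(adjacency_kwds={'radius': 0.5},   # constructor kwds assumed
                affinity_kwds={'radius': 0.5})
geom.set_matrix(X, 'data')                        # set_matrix appears in fit_geometry later in this dump
embedding, error = ltsa(geom, n_components=2, eigen_solver='dense')
print(embedding.shape)                            # expected: (100, 2)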
7,580
|
mmp2/megaman
|
megaman/relaxation/riemannian_relaxation.py
|
run_riemannian_relaxation
|
def run_riemannian_relaxation(laplacian, initial_guess,
intrinsic_dim, relaxation_kwds):
"""Helper function for creating a RiemannianRelaxation class."""
n, s = initial_guess.shape
relaxation_kwds = initialize_kwds(relaxation_kwds, n, s, intrinsic_dim)
if relaxation_kwds['save_init']:
directory = relaxation_kwds['backup_dir']
np.save(os.path.join(directory, 'Y0.npy'),initial_guess)
sp.io.mmwrite(os.path.join(directory, 'L_used.mtx'),
sp.sparse.csc_matrix(laplacian))
lossf = relaxation_kwds['lossf']
return RiemannianRelaxation.init(lossf, laplacian, initial_guess,
intrinsic_dim, relaxation_kwds)
|
python
|
def run_riemannian_relaxation(laplacian, initial_guess,
intrinsic_dim, relaxation_kwds):
"""Helper function for creating a RiemannianRelaxation class."""
n, s = initial_guess.shape
relaxation_kwds = initialize_kwds(relaxation_kwds, n, s, intrinsic_dim)
if relaxation_kwds['save_init']:
directory = relaxation_kwds['backup_dir']
np.save(os.path.join(directory, 'Y0.npy'),initial_guess)
sp.io.mmwrite(os.path.join(directory, 'L_used.mtx'),
sp.sparse.csc_matrix(laplacian))
lossf = relaxation_kwds['lossf']
return RiemannianRelaxation.init(lossf, laplacian, initial_guess,
intrinsic_dim, relaxation_kwds)
|
[
"def",
"run_riemannian_relaxation",
"(",
"laplacian",
",",
"initial_guess",
",",
"intrinsic_dim",
",",
"relaxation_kwds",
")",
":",
"n",
",",
"s",
"=",
"initial_guess",
".",
"shape",
"relaxation_kwds",
"=",
"initialize_kwds",
"(",
"relaxation_kwds",
",",
"n",
",",
"s",
",",
"intrinsic_dim",
")",
"if",
"relaxation_kwds",
"[",
"'save_init'",
"]",
":",
"directory",
"=",
"relaxation_kwds",
"[",
"'backup_dir'",
"]",
"np",
".",
"save",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"'Y0.npy'",
")",
",",
"initial_guess",
")",
"sp",
".",
"io",
".",
"mmwrite",
"(",
"os",
".",
"path",
".",
"join",
"(",
"directory",
",",
"'L_used.mtx'",
")",
",",
"sp",
".",
"sparse",
".",
"csc_matrix",
"(",
"laplacian",
")",
")",
"lossf",
"=",
"relaxation_kwds",
"[",
"'lossf'",
"]",
"return",
"RiemannianRelaxation",
".",
"init",
"(",
"lossf",
",",
"laplacian",
",",
"initial_guess",
",",
"intrinsic_dim",
",",
"relaxation_kwds",
")"
] |
Helper function for creating a RiemannianRelaxation class.
|
[
"Helper",
"function",
"for",
"creating",
"a",
"RiemannianRelaxation",
"class",
"."
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/relaxation/riemannian_relaxation.py#L19-L32
|
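A hedged end-to-end sketch for run_riemannian_relaxation, combining functions that appear elsewhere in this dump (compute_adjacency_matrix, compute_laplacian_matrix, relax_isometry). The compute_affinity_matrix import path, the 'gaussian' affinity method, and the trace_var.Yh access are assumptions; note that the keyword initialization creates a backup directory as a side effect.

# Illustrative only: several import paths and keyword values are assumptions.
import numpy as np
from megaman.geometry.adjacency import compute_adjacency_matrix
from megaman.geometry.affinity import compute_affinity_matrix    # module path assumed
from megaman.geometry.laplacian import compute_laplacian_matrix
from megaman.relaxation.riemannian_relaxation import run_riemannian_relaxation

X = np.random.random((200, 3))
A = compute_adjacency_matrix(X, method='kd_tree', radius=0.4)
W = compute_affinity_matrix(A, method='gaussian', radius=0.4)    # method name assumed
L = compute_laplacian_matrix(W, method='geometric')

Y0 = np.random.random((200, 2))                  # initial 2-d embedding guess
rr = run_riemannian_relaxation(L, Y0, intrinsic_dim=2,
                               relaxation_kwds={'niter': 50})
rr.relax_isometry()                              # optimization loop shown in the next record
Y_relaxed = rr.trace_var.Yh                      # best embedding seen, per TracingVariable.update()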
7,581
|
mmp2/megaman
|
megaman/relaxation/riemannian_relaxation.py
|
RiemannianRelaxation.relax_isometry
|
def relax_isometry(self):
"""Main function for doing riemannian relaxation."""
for ii in range(self.relaxation_kwds['niter']):
self.H = self.compute_dual_rmetric()
self.loss = self.rieman_loss()
self.trace_var.update(ii,self.H,self.Y,self.eta,self.loss)
self.trace_var.print_report(ii)
self.trace_var.save_backup(ii)
self.compute_gradient()
self.make_optimization_step(first_iter=(ii == 0))
self.H = self.compute_dual_rmetric()
self.trace_var.update(-1,self.H,self.Y,self.eta,self.loss)
self.trace_var.print_report(ii)
tracevar_path = os.path.join(self.trace_var.backup_dir, 'results.pyc')
TracingVariable.save(self.trace_var,tracevar_path)
|
python
|
def relax_isometry(self):
"""Main function for doing riemannian relaxation."""
for ii in range(self.relaxation_kwds['niter']):
self.H = self.compute_dual_rmetric()
self.loss = self.rieman_loss()
self.trace_var.update(ii,self.H,self.Y,self.eta,self.loss)
self.trace_var.print_report(ii)
self.trace_var.save_backup(ii)
self.compute_gradient()
self.make_optimization_step(first_iter=(ii == 0))
self.H = self.compute_dual_rmetric()
self.trace_var.update(-1,self.H,self.Y,self.eta,self.loss)
self.trace_var.print_report(ii)
tracevar_path = os.path.join(self.trace_var.backup_dir, 'results.pyc')
TracingVariable.save(self.trace_var,tracevar_path)
|
[
"def",
"relax_isometry",
"(",
"self",
")",
":",
"for",
"ii",
"in",
"range",
"(",
"self",
".",
"relaxation_kwds",
"[",
"'niter'",
"]",
")",
":",
"self",
".",
"H",
"=",
"self",
".",
"compute_dual_rmetric",
"(",
")",
"self",
".",
"loss",
"=",
"self",
".",
"rieman_loss",
"(",
")",
"self",
".",
"trace_var",
".",
"update",
"(",
"ii",
",",
"self",
".",
"H",
",",
"self",
".",
"Y",
",",
"self",
".",
"eta",
",",
"self",
".",
"loss",
")",
"self",
".",
"trace_var",
".",
"print_report",
"(",
"ii",
")",
"self",
".",
"trace_var",
".",
"save_backup",
"(",
"ii",
")",
"self",
".",
"compute_gradient",
"(",
")",
"self",
".",
"make_optimization_step",
"(",
"first_iter",
"=",
"(",
"ii",
"==",
"0",
")",
")",
"self",
".",
"H",
"=",
"self",
".",
"compute_dual_rmetric",
"(",
")",
"self",
".",
"trace_var",
".",
"update",
"(",
"-",
"1",
",",
"self",
".",
"H",
",",
"self",
".",
"Y",
",",
"self",
".",
"eta",
",",
"self",
".",
"loss",
")",
"self",
".",
"trace_var",
".",
"print_report",
"(",
"ii",
")",
"tracevar_path",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"trace_var",
".",
"backup_dir",
",",
"'results.pyc'",
")",
"TracingVariable",
".",
"save",
"(",
"self",
".",
"trace_var",
",",
"tracevar_path",
")"
] |
Main function for doing riemannian relaxation.
|
[
"Main",
"function",
"for",
"doing",
"riemannian",
"relaxation",
"."
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/relaxation/riemannian_relaxation.py#L77-L96
|
7,582
|
mmp2/megaman
|
megaman/relaxation/riemannian_relaxation.py
|
RiemannianRelaxation.calc_loss
|
def calc_loss(self, embedding):
"""Helper function to calculate rieman loss given new embedding"""
Hnew = self.compute_dual_rmetric(Ynew=embedding)
return self.rieman_loss(Hnew=Hnew)
|
python
|
def calc_loss(self, embedding):
"""Helper function to calculate rieman loss given new embedding"""
Hnew = self.compute_dual_rmetric(Ynew=embedding)
return self.rieman_loss(Hnew=Hnew)
|
[
"def",
"calc_loss",
"(",
"self",
",",
"embedding",
")",
":",
"Hnew",
"=",
"self",
".",
"compute_dual_rmetric",
"(",
"Ynew",
"=",
"embedding",
")",
"return",
"self",
".",
"rieman_loss",
"(",
"Hnew",
"=",
"Hnew",
")"
] |
Helper function to calculate rieman loss given new embedding
|
[
"Helper",
"function",
"to",
"calculate",
"rieman",
"loss",
"given",
"new",
"embedding"
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/relaxation/riemannian_relaxation.py#L98-L101
|
7,583
|
mmp2/megaman
|
megaman/relaxation/riemannian_relaxation.py
|
RiemannianRelaxation.compute_dual_rmetric
|
def compute_dual_rmetric(self,Ynew=None):
"""Helper function to calculate the """
usedY = self.Y if Ynew is None else Ynew
rieman_metric = RiemannMetric(usedY, self.laplacian_matrix)
return rieman_metric.get_dual_rmetric()
|
python
|
def compute_dual_rmetric(self,Ynew=None):
"""Helper function to calculate the """
usedY = self.Y if Ynew is None else Ynew
rieman_metric = RiemannMetric(usedY, self.laplacian_matrix)
return rieman_metric.get_dual_rmetric()
|
[
"def",
"compute_dual_rmetric",
"(",
"self",
",",
"Ynew",
"=",
"None",
")",
":",
"usedY",
"=",
"self",
".",
"Y",
"if",
"Ynew",
"is",
"None",
"else",
"Ynew",
"rieman_metric",
"=",
"RiemannMetric",
"(",
"usedY",
",",
"self",
".",
"laplacian_matrix",
")",
"return",
"rieman_metric",
".",
"get_dual_rmetric",
"(",
")"
] |
Helper function to calculate the
|
[
"Helper",
"function",
"to",
"calculate",
"the"
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/relaxation/riemannian_relaxation.py#L103-L107
|
7,584
|
mmp2/megaman
|
doc/sphinxext/numpy_ext/automodsumm.py
|
automodsumm_to_autosummary_lines
|
def automodsumm_to_autosummary_lines(fn, app):
"""
Generates lines from a file with an "automodsumm" entry suitable for
feeding into "autosummary".
Searches the provided file for `automodsumm` directives and returns
a list of lines specifying the `autosummary` commands for the modules
requested. This does *not* return the whole file contents - just an
autosummary section in place of any :automodsumm: entries. Note that
any options given for `automodsumm` are also included in the
generated `autosummary` section.
Parameters
----------
fn : str
The name of the file to search for `automodsumm` entries.
app : sphinx.application.Application
The sphinx Application object
Return
------
lines : list of str
Lines for all `automodsumm` entries with the entries replaced by
`autosummary` and the module's members added.
"""
fullfn = os.path.join(app.builder.env.srcdir, fn)
with open(fullfn) as fr:
if 'astropy_helpers.sphinx.ext.automodapi' in app._extensions:
from astropy_helpers.sphinx.ext.automodapi import automodapi_replace
# Must do the automodapi on the source to get the automodsumm
# that might be in there
docname = os.path.splitext(fn)[0]
filestr = automodapi_replace(fr.read(), app, True, docname, False)
else:
filestr = fr.read()
spl = _automodsummrex.split(filestr)
#0th entry is the stuff before the first automodsumm line
indent1s = spl[1::5]
mods = spl[2::5]
opssecs = spl[3::5]
indent2s = spl[4::5]
remainders = spl[5::5]
# only grab automodsumm sections and convert them to autosummary with the
# entries for all the public objects
newlines = []
#loop over all automodsumms in this document
for i, (i1, i2, modnm, ops, rem) in enumerate(zip(indent1s, indent2s, mods,
opssecs, remainders)):
allindent = i1 + ('' if i2 is None else i2)
#filter out functions-only and classes-only options if present
oplines = ops.split('\n')
toskip = []
allowedpkgnms = []
funcsonly = clssonly = False
for i, ln in reversed(list(enumerate(oplines))):
if ':functions-only:' in ln:
funcsonly = True
del oplines[i]
if ':classes-only:' in ln:
clssonly = True
del oplines[i]
if ':skip:' in ln:
toskip.extend(_str_list_converter(ln.replace(':skip:', '')))
del oplines[i]
if ':allowed-package-names:' in ln:
allowedpkgnms.extend(_str_list_converter(ln.replace(':allowed-package-names:', '')))
del oplines[i]
if funcsonly and clssonly:
msg = ('Defined both functions-only and classes-only options. '
'Skipping this directive.')
lnnum = sum([spl[j].count('\n') for j in range(i * 5 + 1)])
app.warn('[automodsumm]' + msg, (fn, lnnum))
continue
# Use the currentmodule directive so we can just put the local names
# in the autosummary table. Note that this doesn't always seem to
# actually "take" in Sphinx's eyes, so in `Automodsumm.run`, we have to
# force it internally, as well.
newlines.extend([i1 + '.. currentmodule:: ' + modnm,
'',
'.. autosummary::'])
newlines.extend(oplines)
ols = True if len(allowedpkgnms) == 0 else allowedpkgnms
for nm, fqn, obj in zip(*find_mod_objs(modnm, onlylocals=ols)):
if nm in toskip:
continue
if funcsonly and not inspect.isroutine(obj):
continue
if clssonly and not inspect.isclass(obj):
continue
newlines.append(allindent + nm)
# add one newline at the end of the autosummary block
newlines.append('')
return newlines
|
python
|
def automodsumm_to_autosummary_lines(fn, app):
"""
Generates lines from a file with an "automodsumm" entry suitable for
feeding into "autosummary".
Searches the provided file for `automodsumm` directives and returns
a list of lines specifying the `autosummary` commands for the modules
requested. This does *not* return the whole file contents - just an
autosummary section in place of any :automodsumm: entries. Note that
any options given for `automodsumm` are also included in the
generated `autosummary` section.
Parameters
----------
fn : str
The name of the file to search for `automodsumm` entries.
app : sphinx.application.Application
The sphinx Application object
Return
------
lines : list of str
Lines for all `automodsumm` entries with the entries replaced by
`autosummary` and the module's members added.
"""
fullfn = os.path.join(app.builder.env.srcdir, fn)
with open(fullfn) as fr:
if 'astropy_helpers.sphinx.ext.automodapi' in app._extensions:
from astropy_helpers.sphinx.ext.automodapi import automodapi_replace
# Must do the automodapi on the source to get the automodsumm
# that might be in there
docname = os.path.splitext(fn)[0]
filestr = automodapi_replace(fr.read(), app, True, docname, False)
else:
filestr = fr.read()
spl = _automodsummrex.split(filestr)
#0th entry is the stuff before the first automodsumm line
indent1s = spl[1::5]
mods = spl[2::5]
opssecs = spl[3::5]
indent2s = spl[4::5]
remainders = spl[5::5]
# only grab automodsumm sections and convert them to autosummary with the
# entries for all the public objects
newlines = []
#loop over all automodsumms in this document
for i, (i1, i2, modnm, ops, rem) in enumerate(zip(indent1s, indent2s, mods,
opssecs, remainders)):
allindent = i1 + ('' if i2 is None else i2)
#filter out functions-only and classes-only options if present
oplines = ops.split('\n')
toskip = []
allowedpkgnms = []
funcsonly = clssonly = False
for i, ln in reversed(list(enumerate(oplines))):
if ':functions-only:' in ln:
funcsonly = True
del oplines[i]
if ':classes-only:' in ln:
clssonly = True
del oplines[i]
if ':skip:' in ln:
toskip.extend(_str_list_converter(ln.replace(':skip:', '')))
del oplines[i]
if ':allowed-package-names:' in ln:
allowedpkgnms.extend(_str_list_converter(ln.replace(':allowed-package-names:', '')))
del oplines[i]
if funcsonly and clssonly:
msg = ('Defined both functions-only and classes-only options. '
'Skipping this directive.')
lnnum = sum([spl[j].count('\n') for j in range(i * 5 + 1)])
app.warn('[automodsumm]' + msg, (fn, lnnum))
continue
# Use the currentmodule directive so we can just put the local names
# in the autosummary table. Note that this doesn't always seem to
# actually "take" in Sphinx's eyes, so in `Automodsumm.run`, we have to
# force it internally, as well.
newlines.extend([i1 + '.. currentmodule:: ' + modnm,
'',
'.. autosummary::'])
newlines.extend(oplines)
ols = True if len(allowedpkgnms) == 0 else allowedpkgnms
for nm, fqn, obj in zip(*find_mod_objs(modnm, onlylocals=ols)):
if nm in toskip:
continue
if funcsonly and not inspect.isroutine(obj):
continue
if clssonly and not inspect.isclass(obj):
continue
newlines.append(allindent + nm)
# add one newline at the end of the autosummary block
newlines.append('')
return newlines
|
[
"def",
"automodsumm_to_autosummary_lines",
"(",
"fn",
",",
"app",
")",
":",
"fullfn",
"=",
"os",
".",
"path",
".",
"join",
"(",
"app",
".",
"builder",
".",
"env",
".",
"srcdir",
",",
"fn",
")",
"with",
"open",
"(",
"fullfn",
")",
"as",
"fr",
":",
"if",
"'astropy_helpers.sphinx.ext.automodapi'",
"in",
"app",
".",
"_extensions",
":",
"from",
"astropy_helpers",
".",
"sphinx",
".",
"ext",
".",
"automodapi",
"import",
"automodapi_replace",
"# Must do the automodapi on the source to get the automodsumm",
"# that might be in there",
"docname",
"=",
"os",
".",
"path",
".",
"splitext",
"(",
"fn",
")",
"[",
"0",
"]",
"filestr",
"=",
"automodapi_replace",
"(",
"fr",
".",
"read",
"(",
")",
",",
"app",
",",
"True",
",",
"docname",
",",
"False",
")",
"else",
":",
"filestr",
"=",
"fr",
".",
"read",
"(",
")",
"spl",
"=",
"_automodsummrex",
".",
"split",
"(",
"filestr",
")",
"#0th entry is the stuff before the first automodsumm line",
"indent1s",
"=",
"spl",
"[",
"1",
":",
":",
"5",
"]",
"mods",
"=",
"spl",
"[",
"2",
":",
":",
"5",
"]",
"opssecs",
"=",
"spl",
"[",
"3",
":",
":",
"5",
"]",
"indent2s",
"=",
"spl",
"[",
"4",
":",
":",
"5",
"]",
"remainders",
"=",
"spl",
"[",
"5",
":",
":",
"5",
"]",
"# only grab automodsumm sections and convert them to autosummary with the",
"# entries for all the public objects",
"newlines",
"=",
"[",
"]",
"#loop over all automodsumms in this document",
"for",
"i",
",",
"(",
"i1",
",",
"i2",
",",
"modnm",
",",
"ops",
",",
"rem",
")",
"in",
"enumerate",
"(",
"zip",
"(",
"indent1s",
",",
"indent2s",
",",
"mods",
",",
"opssecs",
",",
"remainders",
")",
")",
":",
"allindent",
"=",
"i1",
"+",
"(",
"''",
"if",
"i2",
"is",
"None",
"else",
"i2",
")",
"#filter out functions-only and classes-only options if present",
"oplines",
"=",
"ops",
".",
"split",
"(",
"'\\n'",
")",
"toskip",
"=",
"[",
"]",
"allowedpkgnms",
"=",
"[",
"]",
"funcsonly",
"=",
"clssonly",
"=",
"False",
"for",
"i",
",",
"ln",
"in",
"reversed",
"(",
"list",
"(",
"enumerate",
"(",
"oplines",
")",
")",
")",
":",
"if",
"':functions-only:'",
"in",
"ln",
":",
"funcsonly",
"=",
"True",
"del",
"oplines",
"[",
"i",
"]",
"if",
"':classes-only:'",
"in",
"ln",
":",
"clssonly",
"=",
"True",
"del",
"oplines",
"[",
"i",
"]",
"if",
"':skip:'",
"in",
"ln",
":",
"toskip",
".",
"extend",
"(",
"_str_list_converter",
"(",
"ln",
".",
"replace",
"(",
"':skip:'",
",",
"''",
")",
")",
")",
"del",
"oplines",
"[",
"i",
"]",
"if",
"':allowed-package-names:'",
"in",
"ln",
":",
"allowedpkgnms",
".",
"extend",
"(",
"_str_list_converter",
"(",
"ln",
".",
"replace",
"(",
"':allowed-package-names:'",
",",
"''",
")",
")",
")",
"del",
"oplines",
"[",
"i",
"]",
"if",
"funcsonly",
"and",
"clssonly",
":",
"msg",
"=",
"(",
"'Defined both functions-only and classes-only options. '",
"'Skipping this directive.'",
")",
"lnnum",
"=",
"sum",
"(",
"[",
"spl",
"[",
"j",
"]",
".",
"count",
"(",
"'\\n'",
")",
"for",
"j",
"in",
"range",
"(",
"i",
"*",
"5",
"+",
"1",
")",
"]",
")",
"app",
".",
"warn",
"(",
"'[automodsumm]'",
"+",
"msg",
",",
"(",
"fn",
",",
"lnnum",
")",
")",
"continue",
"# Use the currentmodule directive so we can just put the local names",
"# in the autosummary table. Note that this doesn't always seem to",
"# actually \"take\" in Sphinx's eyes, so in `Automodsumm.run`, we have to",
"# force it internally, as well.",
"newlines",
".",
"extend",
"(",
"[",
"i1",
"+",
"'.. currentmodule:: '",
"+",
"modnm",
",",
"''",
",",
"'.. autosummary::'",
"]",
")",
"newlines",
".",
"extend",
"(",
"oplines",
")",
"ols",
"=",
"True",
"if",
"len",
"(",
"allowedpkgnms",
")",
"==",
"0",
"else",
"allowedpkgnms",
"for",
"nm",
",",
"fqn",
",",
"obj",
"in",
"zip",
"(",
"*",
"find_mod_objs",
"(",
"modnm",
",",
"onlylocals",
"=",
"ols",
")",
")",
":",
"if",
"nm",
"in",
"toskip",
":",
"continue",
"if",
"funcsonly",
"and",
"not",
"inspect",
".",
"isroutine",
"(",
"obj",
")",
":",
"continue",
"if",
"clssonly",
"and",
"not",
"inspect",
".",
"isclass",
"(",
"obj",
")",
":",
"continue",
"newlines",
".",
"append",
"(",
"allindent",
"+",
"nm",
")",
"# add one newline at the end of the autosummary block",
"newlines",
".",
"append",
"(",
"''",
")",
"return",
"newlines"
] |
Generates lines from a file with an "automodsumm" entry suitable for
feeding into "autosummary".
Searches the provided file for `automodsumm` directives and returns
a list of lines specifying the `autosummary` commands for the modules
requested. This does *not* return the whole file contents - just an
autosummary section in place of any :automodsumm: entries. Note that
any options given for `automodsumm` are also included in the
generated `autosummary` section.
Parameters
----------
fn : str
The name of the file to search for `automodsumm` entries.
app : sphinx.application.Application
The sphinx Application object
Return
------
lines : list of str
Lines for all `automodsumm` entries with the entries replaced by
`autosummary` and the module's members added.
|
[
"Generates",
"lines",
"from",
"a",
"file",
"with",
"an",
"automodsumm",
"entry",
"suitable",
"for",
"feeding",
"into",
"autosummary",
"."
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/doc/sphinxext/numpy_ext/automodsumm.py#L265-L369
|
7,585
|
mmp2/megaman
|
megaman/geometry/adjacency.py
|
compute_adjacency_matrix
|
def compute_adjacency_matrix(X, method='auto', **kwargs):
"""Compute an adjacency matrix with the given method"""
if method == 'auto':
if X.shape[0] > 10000:
method = 'cyflann'
else:
method = 'kd_tree'
return Adjacency.init(method, **kwargs).adjacency_graph(X.astype('float'))
|
python
|
def compute_adjacency_matrix(X, method='auto', **kwargs):
"""Compute an adjacency matrix with the given method"""
if method == 'auto':
if X.shape[0] > 10000:
method = 'cyflann'
else:
method = 'kd_tree'
return Adjacency.init(method, **kwargs).adjacency_graph(X.astype('float'))
|
[
"def",
"compute_adjacency_matrix",
"(",
"X",
",",
"method",
"=",
"'auto'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"method",
"==",
"'auto'",
":",
"if",
"X",
".",
"shape",
"[",
"0",
"]",
">",
"10000",
":",
"method",
"=",
"'cyflann'",
"else",
":",
"method",
"=",
"'kd_tree'",
"return",
"Adjacency",
".",
"init",
"(",
"method",
",",
"*",
"*",
"kwargs",
")",
".",
"adjacency_graph",
"(",
"X",
".",
"astype",
"(",
"'float'",
")",
")"
] |
Compute an adjacency matrix with the given method
|
[
"Compute",
"an",
"adjacency",
"matrix",
"with",
"the",
"given",
"method"
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/geometry/adjacency.py#L17-L24
|
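A short usage sketch for compute_adjacency_matrix as defined in the record above; the radius keyword is an assumption about what the kd_tree / cyflann backends accept.

# Illustrative only: 'radius' is an assumed keyword for the adjacency backends.
import numpy as np
from megaman.geometry.adjacency import compute_adjacency_matrix

X = np.random.random((500, 3))
# fewer than 10000 samples, so method='auto' resolves to 'kd_tree'
A = compute_adjacency_matrix(X, method='auto', radius=0.3)
print(A.shape)    # (500, 500), typically a sparse matrix of neighbor distances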
7,586
|
mmp2/megaman
|
megaman/relaxation/utils.py
|
split_kwargs
|
def split_kwargs(relaxation_kwds):
"""Split relaxation keywords to keywords for optimizer and others"""
optimizer_keys_list = [
'step_method',
'linesearch',
'eta_max',
'eta',
'm',
'linesearch_first'
]
optimizer_kwargs = { k:relaxation_kwds.pop(k) for k in optimizer_keys_list if k in relaxation_kwds }
if 'm' in optimizer_kwargs:
optimizer_kwargs['momentum'] = optimizer_kwargs.pop('m')
return optimizer_kwargs, relaxation_kwds
|
python
|
def split_kwargs(relaxation_kwds):
"""Split relaxation keywords to keywords for optimizer and others"""
optimizer_keys_list = [
'step_method',
'linesearch',
'eta_max',
'eta',
'm',
'linesearch_first'
]
optimizer_kwargs = { k:relaxation_kwds.pop(k) for k in optimizer_keys_list if k in relaxation_kwds }
if 'm' in optimizer_kwargs:
optimizer_kwargs['momentum'] = optimizer_kwargs.pop('m')
return optimizer_kwargs, relaxation_kwds
|
[
"def",
"split_kwargs",
"(",
"relaxation_kwds",
")",
":",
"optimizer_keys_list",
"=",
"[",
"'step_method'",
",",
"'linesearch'",
",",
"'eta_max'",
",",
"'eta'",
",",
"'m'",
",",
"'linesearch_first'",
"]",
"optimizer_kwargs",
"=",
"{",
"k",
":",
"relaxation_kwds",
".",
"pop",
"(",
"k",
")",
"for",
"k",
"in",
"optimizer_keys_list",
"if",
"k",
"in",
"relaxation_kwds",
"}",
"if",
"'m'",
"in",
"optimizer_kwargs",
":",
"optimizer_kwargs",
"[",
"'momentum'",
"]",
"=",
"optimizer_kwargs",
".",
"pop",
"(",
"'m'",
")",
"return",
"optimizer_kwargs",
",",
"relaxation_kwds"
] |
Split relaxation keywords to keywords for optimizer and others
|
[
"Split",
"relaxation",
"keywords",
"to",
"keywords",
"for",
"optimizer",
"and",
"others"
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/relaxation/utils.py#L10-L23
|
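A small worked example of split_kwargs, following directly from the code above: optimizer-specific keys are popped out of the input dict and 'm' is renamed to 'momentum'.

from megaman.relaxation.utils import split_kwargs

kwds = {'niter': 100, 'eta': 0.5, 'm': 0.05, 'projected': False}
optimizer_kwargs, remaining = split_kwargs(kwds)
# optimizer_kwargs == {'eta': 0.5, 'momentum': 0.05}
# remaining == {'niter': 100, 'projected': False}  (the same dict object, mutated in place)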
7,587
|
mmp2/megaman
|
megaman/relaxation/utils.py
|
initialize_kwds
|
def initialize_kwds(relaxation_kwds, n_samples, n_components, intrinsic_dim):
"""
Initialize relaxation keywords.
Parameters
----------
relaxation_kwds : dict
weights : numpy array, the weights
step_method : string { 'fixed', 'momentum' }
which optimizers to use
linesearch : bool
whether to do linesearch in search for eta in optimization
verbose : bool
whether to print reports to I/O when doing relaxation
niter : int
number of iterations to run.
niter_trace : int
number of iterations to be traced.
presave : bool
whether to store precomputed keywords to files or not.
sqrd : bool
whether to use squared norm in loss function. Default : True
alpha : float
shrinkage rate for previous gradient. Default : 0
projected : bool
whether or not to optimize via projected gradient descent on differences S
lossf : string { 'epsilon', 'rloss' }
which loss function to optimize.
Default : 'rloss' if n == d, otherwise 'epsilon'
subset : numpy array
Subset to do relaxation on.
sub_dir : string
sub_dir used to store the outputs.
backup_base_dir : string
base directory used to store outputs
Final path will be backup_base_dir/sub_dir
saveiter : int
save backup on every saveiter iterations
printiter : int
print report on every printiter iterations
save_init : bool
whether to save Y0 and L before running relaxation.
"""
new_relaxation_kwds = {
'weights': np.array([],dtype=np.float64),
'step_method': 'fixed',
'linesearch': True,
'verbose': False,
'niter': 2000,
'niter_trace': 0,
'presave': False,
'sqrd': True,
'alpha': 0,
'projected': False,
'lossf': 'epsilon' if n_components > intrinsic_dim else 'rloss',
'subset': np.arange(n_samples),
'sub_dir': current_time_str(),
'backup_base_dir': default_basedir,
'saveiter': 10,
'printiter': 1,
'save_init': False,
}
new_relaxation_kwds.update(relaxation_kwds)
backup_dir = os.path.join(new_relaxation_kwds['backup_base_dir'], new_relaxation_kwds['sub_dir'])
new_relaxation_kwds['backup_dir'] = backup_dir
create_output_dir(backup_dir)
new_relaxation_kwds = convert_to_int(new_relaxation_kwds)
if new_relaxation_kwds['weights'].shape[0] != 0:
weights = np.absolute(new_relaxation_kwds['weights']).astype(np.float64)
new_relaxation_kwds['weights'] = weights / np.sum(weights)
if new_relaxation_kwds['lossf'] == 'epsilon':
new_relaxation_kwds.setdefault('eps_orth', 0.1)
if n_components != intrinsic_dim and new_relaxation_kwds['lossf'] == 'rloss':
raise ValueError('loss function rloss is for n_components equal intrinsic_dim')
if n_components == intrinsic_dim and new_relaxation_kwds['lossf'] == 'epsilon':
raise ValueError('loss function rloss is for n_components equal intrinsic_dim')
if new_relaxation_kwds['projected'] and new_relaxation_kwds['subset'].shape[0] < n_samples:
raise ValueError('Projection derivative not working for subset methods.')
prefix = 'projected' if new_relaxation_kwds['projected'] else 'nonprojected'
new_relaxation_kwds['lossf'] = '{}_{}'.format(prefix,new_relaxation_kwds['lossf'])
step_method = new_relaxation_kwds['step_method']
if new_relaxation_kwds['linesearch'] == True:
new_relaxation_kwds.setdefault('linesearch_first', False)
init_eta_max = 2**11 if new_relaxation_kwds['projected'] else 2**4
new_relaxation_kwds.setdefault('eta_max',init_eta_max)
else:
new_relaxation_kwds.setdefault('eta', 1.0)
if step_method == 'momentum':
new_relaxation_kwds.setdefault('m', 0.05)
return new_relaxation_kwds
|
python
|
def initialize_kwds(relaxation_kwds, n_samples, n_components, intrinsic_dim):
"""
Initialize relaxation keywords.
Parameters
----------
relaxation_kwds : dict
weights : numpy array, the weights
step_method : string { 'fixed', 'momentum' }
which optimizers to use
linesearch : bool
whether to do linesearch in search for eta in optimization
verbose : bool
whether to print reports to I/O when doing relaxation
niter : int
number of iterations to run.
niter_trace : int
number of iterations to be traced.
presave : bool
whether to store precomputed keywords to files or not.
sqrd : bool
whether to use squared norm in loss function. Default : True
alpha : float
shrinkage rate for previous gradient. Default : 0
projected : bool
whether or not to optimize via projected gradient descent on differences S
lossf : string { 'epsilon', 'rloss' }
which loss function to optimize.
Default : 'rloss' if n == d, otherwise 'epsilon'
subset : numpy array
Subset to do relaxation on.
sub_dir : string
sub_dir used to store the outputs.
backup_base_dir : string
base directory used to store outputs
Final path will be backup_base_dir/sub_dir
saveiter : int
save backup on every saveiter iterations
printiter : int
print report on every printiter iterations
save_init : bool
whether to save Y0 and L before running relaxation.
"""
new_relaxation_kwds = {
'weights': np.array([],dtype=np.float64),
'step_method': 'fixed',
'linesearch': True,
'verbose': False,
'niter': 2000,
'niter_trace': 0,
'presave': False,
'sqrd': True,
'alpha': 0,
'projected': False,
'lossf': 'epsilon' if n_components > intrinsic_dim else 'rloss',
'subset': np.arange(n_samples),
'sub_dir': current_time_str(),
'backup_base_dir': default_basedir,
'saveiter': 10,
'printiter': 1,
'save_init': False,
}
new_relaxation_kwds.update(relaxation_kwds)
backup_dir = os.path.join(new_relaxation_kwds['backup_base_dir'], new_relaxation_kwds['sub_dir'])
new_relaxation_kwds['backup_dir'] = backup_dir
create_output_dir(backup_dir)
new_relaxation_kwds = convert_to_int(new_relaxation_kwds)
if new_relaxation_kwds['weights'].shape[0] != 0:
weights = np.absolute(new_relaxation_kwds['weights']).astype(np.float64)
new_relaxation_kwds['weights'] = weights / np.sum(weights)
if new_relaxation_kwds['lossf'] == 'epsilon':
new_relaxation_kwds.setdefault('eps_orth', 0.1)
if n_components != intrinsic_dim and new_relaxation_kwds['lossf'] == 'rloss':
raise ValueError('loss function rloss is for n_components equal intrinsic_dim')
if n_components == intrinsic_dim and new_relaxation_kwds['lossf'] == 'epsilon':
raise ValueError('loss function rloss is for n_components equal intrinsic_dim')
if new_relaxation_kwds['projected'] and new_relaxation_kwds['subset'].shape[0] < n_samples:
raise ValueError('Projection derivative not working for subset methods.')
prefix = 'projected' if new_relaxation_kwds['projected'] else 'nonprojected'
new_relaxation_kwds['lossf'] = '{}_{}'.format(prefix,new_relaxation_kwds['lossf'])
step_method = new_relaxation_kwds['step_method']
if new_relaxation_kwds['linesearch'] == True:
new_relaxation_kwds.setdefault('linesearch_first', False)
init_eta_max = 2**11 if new_relaxation_kwds['projected'] else 2**4
new_relaxation_kwds.setdefault('eta_max',init_eta_max)
else:
new_relaxation_kwds.setdefault('eta', 1.0)
if step_method == 'momentum':
new_relaxation_kwds.setdefault('m', 0.05)
return new_relaxation_kwds
|
[
"def",
"initialize_kwds",
"(",
"relaxation_kwds",
",",
"n_samples",
",",
"n_components",
",",
"intrinsic_dim",
")",
":",
"new_relaxation_kwds",
"=",
"{",
"'weights'",
":",
"np",
".",
"array",
"(",
"[",
"]",
",",
"dtype",
"=",
"np",
".",
"float64",
")",
",",
"'step_method'",
":",
"'fixed'",
",",
"'linesearch'",
":",
"True",
",",
"'verbose'",
":",
"False",
",",
"'niter'",
":",
"2000",
",",
"'niter_trace'",
":",
"0",
",",
"'presave'",
":",
"False",
",",
"'sqrd'",
":",
"True",
",",
"'alpha'",
":",
"0",
",",
"'projected'",
":",
"False",
",",
"'lossf'",
":",
"'epsilon'",
"if",
"n_components",
">",
"intrinsic_dim",
"else",
"'rloss'",
",",
"'subset'",
":",
"np",
".",
"arange",
"(",
"n_samples",
")",
",",
"'sub_dir'",
":",
"current_time_str",
"(",
")",
",",
"'backup_base_dir'",
":",
"default_basedir",
",",
"'saveiter'",
":",
"10",
",",
"'printiter'",
":",
"1",
",",
"'save_init'",
":",
"False",
",",
"}",
"new_relaxation_kwds",
".",
"update",
"(",
"relaxation_kwds",
")",
"backup_dir",
"=",
"os",
".",
"path",
".",
"join",
"(",
"new_relaxation_kwds",
"[",
"'backup_base_dir'",
"]",
",",
"new_relaxation_kwds",
"[",
"'sub_dir'",
"]",
")",
"new_relaxation_kwds",
"[",
"'backup_dir'",
"]",
"=",
"backup_dir",
"create_output_dir",
"(",
"backup_dir",
")",
"new_relaxation_kwds",
"=",
"convert_to_int",
"(",
"new_relaxation_kwds",
")",
"if",
"new_relaxation_kwds",
"[",
"'weights'",
"]",
".",
"shape",
"[",
"0",
"]",
"!=",
"0",
":",
"weights",
"=",
"np",
".",
"absolute",
"(",
"new_relaxation_kwds",
"[",
"'weights'",
"]",
")",
".",
"astype",
"(",
"np",
".",
"float64",
")",
"new_relaxation_kwds",
"[",
"'weights'",
"]",
"=",
"weights",
"/",
"np",
".",
"sum",
"(",
"weights",
")",
"if",
"new_relaxation_kwds",
"[",
"'lossf'",
"]",
"==",
"'epsilon'",
":",
"new_relaxation_kwds",
".",
"setdefault",
"(",
"'eps_orth'",
",",
"0.1",
")",
"if",
"n_components",
"!=",
"intrinsic_dim",
"and",
"new_relaxation_kwds",
"[",
"'lossf'",
"]",
"==",
"'rloss'",
":",
"raise",
"ValueError",
"(",
"'loss function rloss is for n_components equal intrinsic_dim'",
")",
"if",
"n_components",
"==",
"intrinsic_dim",
"and",
"new_relaxation_kwds",
"[",
"'lossf'",
"]",
"==",
"'epsilon'",
":",
"raise",
"ValueError",
"(",
"'loss function rloss is for n_components equal intrinsic_dim'",
")",
"if",
"new_relaxation_kwds",
"[",
"'projected'",
"]",
"and",
"new_relaxation_kwds",
"[",
"'subset'",
"]",
".",
"shape",
"[",
"0",
"]",
"<",
"n_samples",
":",
"raise",
"ValueError",
"(",
"'Projection derivative not working for subset methods.'",
")",
"prefix",
"=",
"'projected'",
"if",
"new_relaxation_kwds",
"[",
"'projected'",
"]",
"else",
"'nonprojected'",
"new_relaxation_kwds",
"[",
"'lossf'",
"]",
"=",
"'{}_{}'",
".",
"format",
"(",
"prefix",
",",
"new_relaxation_kwds",
"[",
"'lossf'",
"]",
")",
"step_method",
"=",
"new_relaxation_kwds",
"[",
"'step_method'",
"]",
"if",
"new_relaxation_kwds",
"[",
"'linesearch'",
"]",
"==",
"True",
":",
"new_relaxation_kwds",
".",
"setdefault",
"(",
"'linesearch_first'",
",",
"False",
")",
"init_eta_max",
"=",
"2",
"**",
"11",
"if",
"new_relaxation_kwds",
"[",
"'projected'",
"]",
"else",
"2",
"**",
"4",
"new_relaxation_kwds",
".",
"setdefault",
"(",
"'eta_max'",
",",
"init_eta_max",
")",
"else",
":",
"new_relaxation_kwds",
".",
"setdefault",
"(",
"'eta'",
",",
"1.0",
")",
"if",
"step_method",
"==",
"'momentum'",
":",
"new_relaxation_kwds",
".",
"setdefault",
"(",
"'m'",
",",
"0.05",
")",
"return",
"new_relaxation_kwds"
] |
Initialize relaxation keywords.
Parameters
----------
relaxation_kwds : dict
weights : numpy array, the weights
step_method : string { 'fixed', 'momentum' }
which optimizers to use
linesearch : bool
whether to do linesearch in search for eta in optimization
verbose : bool
whether to print reports to I/O when doing relaxation
niter : int
number of iterations to run.
niter_trace : int
number of iterations to be traced.
presave : bool
whether to store precomputed keywords to files or not.
sqrd : bool
whether to use squared norm in loss function. Default : True
alpha : float
shrinkage rate for previous gradient. Default : 0
projected : bool
whether or not to optimize via projected gradient descent on differences S
lossf : string { 'epsilon', 'rloss' }
which loss function to optimize.
Default : 'rloss' if n == d, otherwise 'epsilon'
subset : numpy array
Subset to do relaxation on.
sub_dir : string
sub_dir used to store the outputs.
backup_base_dir : string
base directory used to store outputs
Final path will be backup_base_dir/sub_dir
saveiter : int
save backup on every saveiter iterations
printiter : int
print report on every printiter iterations
save_init : bool
whether to save Y0 and L before running relaxation.
|
[
"Initialize",
"relaxation",
"keywords",
"."
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/relaxation/utils.py#L26-L127
|
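A brief example of initialize_kwds based on the defaults shown above; note that it creates a backup directory (backup_base_dir/sub_dir) as a side effect via create_output_dir.

from megaman.relaxation.utils import initialize_kwds

kwds = initialize_kwds({'niter': 100, 'verbose': True},
                       n_samples=500, n_components=2, intrinsic_dim=2)
print(kwds['lossf'])     # 'nonprojected_rloss', since n_components == intrinsic_dim
print(kwds['eta_max'])   # 16 (2**4), since linesearch defaults to True and projected is False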
7,588
|
mmp2/megaman
|
megaman/embedding/spectral_embedding.py
|
_graph_connected_component
|
def _graph_connected_component(graph, node_id):
"""
Find the largest graph connected components the contains one
given node
Parameters
----------
graph : array-like, shape: (n_samples, n_samples)
adjacency matrix of the graph, non-zero weight means an edge
between the nodes
node_id : int
The index of the query node of the graph
Returns
-------
connected_components : array-like, shape: (n_samples,)
An array of bool value indicates the indexes of the nodes
belong to the largest connected components of the given query
node
"""
connected_components = np.zeros(shape=(graph.shape[0]), dtype=np.bool)
connected_components[node_id] = True
n_node = graph.shape[0]
for i in range(n_node):
last_num_component = connected_components.sum()
_, node_to_add = np.where(graph[connected_components] != 0)
connected_components[node_to_add] = True
if last_num_component >= connected_components.sum():
break
return connected_components
|
python
|
def _graph_connected_component(graph, node_id):
"""
Find the largest graph connected components the contains one
given node
Parameters
----------
graph : array-like, shape: (n_samples, n_samples)
adjacency matrix of the graph, non-zero weight means an edge
between the nodes
node_id : int
The index of the query node of the graph
Returns
-------
connected_components : array-like, shape: (n_samples,)
An array of bool value indicates the indexes of the nodes
belong to the largest connected components of the given query
node
"""
connected_components = np.zeros(shape=(graph.shape[0]), dtype=np.bool)
connected_components[node_id] = True
n_node = graph.shape[0]
for i in range(n_node):
last_num_component = connected_components.sum()
_, node_to_add = np.where(graph[connected_components] != 0)
connected_components[node_to_add] = True
if last_num_component >= connected_components.sum():
break
return connected_components
|
[
"def",
"_graph_connected_component",
"(",
"graph",
",",
"node_id",
")",
":",
"connected_components",
"=",
"np",
".",
"zeros",
"(",
"shape",
"=",
"(",
"graph",
".",
"shape",
"[",
"0",
"]",
")",
",",
"dtype",
"=",
"np",
".",
"bool",
")",
"connected_components",
"[",
"node_id",
"]",
"=",
"True",
"n_node",
"=",
"graph",
".",
"shape",
"[",
"0",
"]",
"for",
"i",
"in",
"range",
"(",
"n_node",
")",
":",
"last_num_component",
"=",
"connected_components",
".",
"sum",
"(",
")",
"_",
",",
"node_to_add",
"=",
"np",
".",
"where",
"(",
"graph",
"[",
"connected_components",
"]",
"!=",
"0",
")",
"connected_components",
"[",
"node_to_add",
"]",
"=",
"True",
"if",
"last_num_component",
">=",
"connected_components",
".",
"sum",
"(",
")",
":",
"break",
"return",
"connected_components"
] |
Find the largest graph connected components the contains one
given node
Parameters
----------
graph : array-like, shape: (n_samples, n_samples)
adjacency matrix of the graph, non-zero weight means an edge
between the nodes
node_id : int
The index of the query node of the graph
Returns
-------
connected_components : array-like, shape: (n_samples,)
An array of bool value indicates the indexes of the nodes
belong to the largest connected components of the given query
node
|
[
"Find",
"the",
"largest",
"graph",
"connected",
"components",
"the",
"contains",
"one",
"given",
"node"
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/embedding/spectral_embedding.py#L28-L58
|
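A tiny worked example of _graph_connected_component on a four-node graph with two components, following the code above. Note that the helper uses np.bool, which is deprecated and removed in recent NumPy releases, so current environments may need an older NumPy (or a patched dtype) for this to run.

# Illustrative only: nodes {0, 1} and {2, 3} form separate components.
import numpy as np
from megaman.embedding.spectral_embedding import _graph_connected_component

graph = np.array([[0, 1, 0, 0],
                  [1, 0, 0, 0],
                  [0, 0, 0, 1],
                  [0, 0, 1, 0]])
mask = _graph_connected_component(graph, node_id=0)
# mask == array([ True,  True, False, False])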
7,589
|
mmp2/megaman
|
megaman/embedding/spectral_embedding.py
|
SpectralEmbedding.predict
|
def predict(self, X_test, y=None):
"""
Predict embedding on new data X_test given the existing embedding on training data
Uses the Nystrom Extension to estimate the eigenvectors.
Currently only works with input_type data (i.e. not affinity or distance)
"""
if not hasattr(self, 'geom_'):
raise RuntimeError('the .fit() function must be called before the .predict() function')
if self.geom_.X is None:
raise NotImplementedError('method only implemented when X passed as data')
# Complete the adjacency matrix
adjacency_kwds = self.geom_.adjacency_kwds
if self.geom_.adjacency_method == 'cyflann':
if 'cyflann_kwds' in adjacency_kwds.keys():
cyflann_kwds = adjacency_kwds['cyflann_kwds']
else:
cyflann_kwds = {}
total_adjacency_matrix = complete_adjacency_matrix(self.geom_.adjacency_matrix,
self.geom_.X,
X_test,adjacency_kwds)
# Compute the affinity matrix, check method and kwds
if self.geom_.affinity_kwds is not None:
affinity_kwds = self.geom_.affinity_kwds
else:
affinity_kwds = {}
if self.geom_.affinity_method is not None:
affinity_method = self.geom_.affinity_method
else:
affinity_method = 'auto'
total_affinity_matrix = compute_affinity_matrix(total_adjacency_matrix, affinity_method,
**affinity_kwds)
# Compute the affinity matrix, check method and kwds
if self.geom_.laplacian_kwds is not None:
laplacian_kwds = self.geom_.laplacian_kwds
else:
laplacian_kwds = {}
if self.geom_.laplacian_method is not None:
laplacian_method = self.geom_.laplacian_method
else:
self.laplacian_method = 'auto'
total_laplacian_matrix = compute_laplacian_matrix(total_affinity_matrix, laplacian_method,
**laplacian_kwds)
# Take the columns of Laplacian and existing embedding and pass to Nystrom Extension
(n_sample_train) = self.geom_.adjacency_matrix.shape[0]
total_laplacian_matrix = total_laplacian_matrix.tocsr()
C = total_laplacian_matrix[:, :n_sample_train]
# warnings.warn(str(C.shape))
eigenvalues, eigenvectors = nystrom_extension(C, self.eigenvectors_, self.eigenvalues_)
# If diffusion maps compute diffusion time etc
if self.diffusion_maps:
embedding = compute_diffusion_maps(laplacian_method, eigenvectors, eigenvalues, self.diffusion_time)
else:
embedding = eigenvectors
(n_sample_test) = X_test.shape[0]
embedding_test=embedding[-n_sample_test:, :]
return embedding_test, embedding
|
python
|
def predict(self, X_test, y=None):
"""
Predict embedding on new data X_test given the existing embedding on training data
Uses the Nystrom Extension to estimate the eigenvectors.
Currently only works with input_type data (i.e. not affinity or distance)
"""
if not hasattr(self, 'geom_'):
raise RuntimeError('the .fit() function must be called before the .predict() function')
if self.geom_.X is None:
raise NotImplementedError('method only implemented when X passed as data')
# Complete the adjacency matrix
adjacency_kwds = self.geom_.adjacency_kwds
if self.geom_.adjacency_method == 'cyflann':
if 'cyflann_kwds' in adjacency_kwds.keys():
cyflann_kwds = adjacency_kwds['cyflann_kwds']
else:
cyflann_kwds = {}
total_adjacency_matrix = complete_adjacency_matrix(self.geom_.adjacency_matrix,
self.geom_.X,
X_test,adjacency_kwds)
# Compute the affinity matrix, check method and kwds
if self.geom_.affinity_kwds is not None:
affinity_kwds = self.geom_.affinity_kwds
else:
affinity_kwds = {}
if self.geom_.affinity_method is not None:
affinity_method = self.geom_.affinity_method
else:
affinity_method = 'auto'
total_affinity_matrix = compute_affinity_matrix(total_adjacency_matrix, affinity_method,
**affinity_kwds)
# Compute the affinity matrix, check method and kwds
if self.geom_.laplacian_kwds is not None:
laplacian_kwds = self.geom_.laplacian_kwds
else:
laplacian_kwds = {}
if self.geom_.laplacian_method is not None:
laplacian_method = self.geom_.laplacian_method
else:
self.laplacian_method = 'auto'
total_laplacian_matrix = compute_laplacian_matrix(total_affinity_matrix, laplacian_method,
**laplacian_kwds)
# Take the columns of Laplacian and existing embedding and pass to Nystrom Extension
(n_sample_train) = self.geom_.adjacency_matrix.shape[0]
total_laplacian_matrix = total_laplacian_matrix.tocsr()
C = total_laplacian_matrix[:, :n_sample_train]
# warnings.warn(str(C.shape))
eigenvalues, eigenvectors = nystrom_extension(C, self.eigenvectors_, self.eigenvalues_)
# If diffusion maps compute diffusion time etc
if self.diffusion_maps:
embedding = compute_diffusion_maps(laplacian_method, eigenvectors, eigenvalues, self.diffusion_time)
else:
embedding = eigenvectors
(n_sample_test) = X_test.shape[0]
embedding_test=embedding[-n_sample_test:, :]
return embedding_test, embedding
|
[
"def",
"predict",
"(",
"self",
",",
"X_test",
",",
"y",
"=",
"None",
")",
":",
"if",
"not",
"hasattr",
"(",
"self",
",",
"'geom_'",
")",
":",
"raise",
"RuntimeError",
"(",
"'the .fit() function must be called before the .predict() function'",
")",
"if",
"self",
".",
"geom_",
".",
"X",
"is",
"None",
":",
"raise",
"NotImplementedError",
"(",
"'method only implemented when X passed as data'",
")",
"# Complete the adjacency matrix",
"adjacency_kwds",
"=",
"self",
".",
"geom_",
".",
"adjacency_kwds",
"if",
"self",
".",
"geom_",
".",
"adjacency_method",
"==",
"'cyflann'",
":",
"if",
"'cyflann_kwds'",
"in",
"adjacency_kwds",
".",
"keys",
"(",
")",
":",
"cyflann_kwds",
"=",
"adjacency_kwds",
"[",
"'cyflann_kwds'",
"]",
"else",
":",
"cyflann_kwds",
"=",
"{",
"}",
"total_adjacency_matrix",
"=",
"complete_adjacency_matrix",
"(",
"self",
".",
"geom_",
".",
"adjacency_matrix",
",",
"self",
".",
"geom_",
".",
"X",
",",
"X_test",
",",
"adjacency_kwds",
")",
"# Compute the affinity matrix, check method and kwds",
"if",
"self",
".",
"geom_",
".",
"affinity_kwds",
"is",
"not",
"None",
":",
"affinity_kwds",
"=",
"self",
".",
"geom_",
".",
"affinity_kwds",
"else",
":",
"affinity_kwds",
"=",
"{",
"}",
"if",
"self",
".",
"geom_",
".",
"affinity_method",
"is",
"not",
"None",
":",
"affinity_method",
"=",
"self",
".",
"geom_",
".",
"affinity_method",
"else",
":",
"affinity_method",
"=",
"'auto'",
"total_affinity_matrix",
"=",
"compute_affinity_matrix",
"(",
"total_adjacency_matrix",
",",
"affinity_method",
",",
"*",
"*",
"affinity_kwds",
")",
"# Compute the affinity matrix, check method and kwds",
"if",
"self",
".",
"geom_",
".",
"laplacian_kwds",
"is",
"not",
"None",
":",
"laplacian_kwds",
"=",
"self",
".",
"geom_",
".",
"laplacian_kwds",
"else",
":",
"laplacian_kwds",
"=",
"{",
"}",
"if",
"self",
".",
"geom_",
".",
"laplacian_method",
"is",
"not",
"None",
":",
"laplacian_method",
"=",
"self",
".",
"geom_",
".",
"laplacian_method",
"else",
":",
"self",
".",
"laplacian_method",
"=",
"'auto'",
"total_laplacian_matrix",
"=",
"compute_laplacian_matrix",
"(",
"total_affinity_matrix",
",",
"laplacian_method",
",",
"*",
"*",
"laplacian_kwds",
")",
"# Take the columns of Laplacian and existing embedding and pass to Nystrom Extension",
"(",
"n_sample_train",
")",
"=",
"self",
".",
"geom_",
".",
"adjacency_matrix",
".",
"shape",
"[",
"0",
"]",
"total_laplacian_matrix",
"=",
"total_laplacian_matrix",
".",
"tocsr",
"(",
")",
"C",
"=",
"total_laplacian_matrix",
"[",
":",
",",
":",
"n_sample_train",
"]",
"# warnings.warn(str(C.shape))",
"eigenvalues",
",",
"eigenvectors",
"=",
"nystrom_extension",
"(",
"C",
",",
"self",
".",
"eigenvectors_",
",",
"self",
".",
"eigenvalues_",
")",
"# If diffusion maps compute diffusion time etc",
"if",
"self",
".",
"diffusion_maps",
":",
"embedding",
"=",
"compute_diffusion_maps",
"(",
"laplacian_method",
",",
"eigenvectors",
",",
"eigenvalues",
",",
"self",
".",
"diffusion_time",
")",
"else",
":",
"embedding",
"=",
"eigenvectors",
"(",
"n_sample_test",
")",
"=",
"X_test",
".",
"shape",
"[",
"0",
"]",
"embedding_test",
"=",
"embedding",
"[",
"-",
"n_sample_test",
":",
",",
":",
"]",
"return",
"embedding_test",
",",
"embedding"
] |
Predict embedding on new data X_test given the existing embedding on training data
Uses the Nystrom Extension to estimate the eigenvectors.
Currently only works with input_type data (i.e. not affinity or distance)
|
[
"Predict",
"embedding",
"on",
"new",
"data",
"X_test",
"given",
"the",
"existing",
"embedding",
"on",
"training",
"data"
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/embedding/spectral_embedding.py#L408-L465
|
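A hedged end-to-end sketch for the predict method above: fit a SpectralEmbedding on training data, then extend the embedding to held-out points via the Nystrom-based predict. The constructor arguments (n_components, radius) are assumptions about the class interface, which is not included in this record.

# Illustrative only: constructor signature assumed; predict requires that fit()
# was called with raw data (not an affinity/distance matrix), per the checks above.
import numpy as np
from megaman.embedding.spectral_embedding import SpectralEmbedding

X = np.random.random((300, 3))
X_train, X_test = X[:250], X[250:]

se = SpectralEmbedding(n_components=2, radius=0.5)   # keyword names assumed
se.fit(X_train)
emb_test, emb_all = se.predict(X_test)
print(emb_test.shape)    # (50, number of eigenvectors kept)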
7,590
|
mmp2/megaman
|
megaman/geometry/laplacian.py
|
compute_laplacian_matrix
|
def compute_laplacian_matrix(affinity_matrix, method='auto', **kwargs):
"""Compute the laplacian matrix with the given method"""
if method == 'auto':
method = 'geometric'
return Laplacian.init(method, **kwargs).laplacian_matrix(affinity_matrix)
|
python
|
def compute_laplacian_matrix(affinity_matrix, method='auto', **kwargs):
"""Compute the laplacian matrix with the given method"""
if method == 'auto':
method = 'geometric'
return Laplacian.init(method, **kwargs).laplacian_matrix(affinity_matrix)
|
[
"def",
"compute_laplacian_matrix",
"(",
"affinity_matrix",
",",
"method",
"=",
"'auto'",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"method",
"==",
"'auto'",
":",
"method",
"=",
"'geometric'",
"return",
"Laplacian",
".",
"init",
"(",
"method",
",",
"*",
"*",
"kwargs",
")",
".",
"laplacian_matrix",
"(",
"affinity_matrix",
")"
] |
Compute the laplacian matrix with the given method
|
[
"Compute",
"the",
"laplacian",
"matrix",
"with",
"the",
"given",
"method"
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/geometry/laplacian.py#L10-L14
|
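A minimal sketch for compute_laplacian_matrix using a small hand-built affinity matrix; per the code above, method='auto' resolves to the 'geometric' Laplacian. Passing a SciPy sparse matrix is an assumption about the accepted input type.

# Illustrative only: a tiny symmetric affinity matrix.
import numpy as np
from scipy import sparse
from megaman.geometry.laplacian import compute_laplacian_matrix

W = sparse.csr_matrix(np.array([[0.0, 0.8, 0.1],
                                [0.8, 0.0, 0.5],
                                [0.1, 0.5, 0.0]]))
L = compute_laplacian_matrix(W, method='auto')   # geometric Laplacian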
7,591
|
mmp2/megaman
|
megaman/embedding/base.py
|
BaseEmbedding.fit_geometry
|
def fit_geometry(self, X=None, input_type='data'):
"""Inputs self.geom, and produces the fitted geometry self.geom_"""
if self.geom is None:
self.geom_ = Geometry()
elif isinstance(self.geom, Geometry):
self.geom_ = self.geom
else:
try:
kwds = dict(**self.geom)
except TypeError:
raise ValueError("geom must be a Geometry instance or "
"a mappable/dictionary")
self.geom_ = Geometry(**kwds)
if self.radius is not None:
self.geom_.set_radius(self.radius, override=False)
# if self.radius == 'auto':
# if X is not None and input_type != 'affinity':
# self.geom_.set_radius(self.estimate_radius(X, input_type),
# override=False)
# else:
# self.geom_.set_radius(self.radius,
# override=False)
if X is not None:
self.geom_.set_matrix(X, input_type)
return self
|
python
|
def fit_geometry(self, X=None, input_type='data'):
"""Inputs self.geom, and produces the fitted geometry self.geom_"""
if self.geom is None:
self.geom_ = Geometry()
elif isinstance(self.geom, Geometry):
self.geom_ = self.geom
else:
try:
kwds = dict(**self.geom)
except TypeError:
raise ValueError("geom must be a Geometry instance or "
"a mappable/dictionary")
self.geom_ = Geometry(**kwds)
if self.radius is not None:
self.geom_.set_radius(self.radius, override=False)
# if self.radius == 'auto':
# if X is not None and input_type != 'affinity':
# self.geom_.set_radius(self.estimate_radius(X, input_type),
# override=False)
# else:
# self.geom_.set_radius(self.radius,
# override=False)
if X is not None:
self.geom_.set_matrix(X, input_type)
return self
|
[
"def",
"fit_geometry",
"(",
"self",
",",
"X",
"=",
"None",
",",
"input_type",
"=",
"'data'",
")",
":",
"if",
"self",
".",
"geom",
"is",
"None",
":",
"self",
".",
"geom_",
"=",
"Geometry",
"(",
")",
"elif",
"isinstance",
"(",
"self",
".",
"geom",
",",
"Geometry",
")",
":",
"self",
".",
"geom_",
"=",
"self",
".",
"geom",
"else",
":",
"try",
":",
"kwds",
"=",
"dict",
"(",
"*",
"*",
"self",
".",
"geom",
")",
"except",
"TypeError",
":",
"raise",
"ValueError",
"(",
"\"geom must be a Geometry instance or \"",
"\"a mappable/dictionary\"",
")",
"self",
".",
"geom_",
"=",
"Geometry",
"(",
"*",
"*",
"kwds",
")",
"if",
"self",
".",
"radius",
"is",
"not",
"None",
":",
"self",
".",
"geom_",
".",
"set_radius",
"(",
"self",
".",
"radius",
",",
"override",
"=",
"False",
")",
"# if self.radius == 'auto':",
"# if X is not None and input_type != 'affinity':",
"# self.geom_.set_radius(self.estimate_radius(X, input_type),",
"# override=False)",
"# else:",
"# self.geom_.set_radius(self.radius,",
"# override=False)",
"if",
"X",
"is",
"not",
"None",
":",
"self",
".",
"geom_",
".",
"set_matrix",
"(",
"X",
",",
"input_type",
")",
"return",
"self"
] |
Inputs self.geom, and produces the fitted geometry self.geom_
|
[
"Inputs",
"self",
".",
"geom",
"and",
"produces",
"the",
"fitted",
"geometry",
"self",
".",
"geom_"
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/embedding/base.py#L87-L115
|
7,592
|
mmp2/megaman
|
megaman/geometry/geometry.py
|
Geometry.set_radius
|
def set_radius(self, radius, override=True, X=None, n_components=2):
"""Set the radius for the adjacency and affinity computation
By default, this will override keyword arguments provided on
initialization.
Parameters
----------
radius : float
radius to set for adjacency and affinity.
override : bool (default: True)
if False, then only set radius if not already defined in
`adjacency_args` and `affinity_args`.
X : ndarray or sparse (optional)
if provided, estimate a suitable radius from this data.
n_components : int (default=2)
the number of components to use when estimating the radius
"""
if radius < 0:
raise ValueError("radius must be non-negative")
if override or ('radius' not in self.adjacency_kwds and
'n_neighbors' not in self.adjacency_kwds):
self.adjacency_kwds['radius'] = radius
if override or ('radius' not in self.affinity_kwds):
self.affinity_kwds['radius'] = radius
|
python
|
def set_radius(self, radius, override=True, X=None, n_components=2):
"""Set the radius for the adjacency and affinity computation
By default, this will override keyword arguments provided on
initialization.
Parameters
----------
radius : float
radius to set for adjacency and affinity.
override : bool (default: True)
if False, then only set radius if not already defined in
`adjacency_args` and `affinity_args`.
X : ndarray or sparse (optional)
if provided, estimate a suitable radius from this data.
n_components : int (default=2)
the number of components to use when estimating the radius
"""
if radius < 0:
raise ValueError("radius must be non-negative")
if override or ('radius' not in self.adjacency_kwds and
'n_neighbors' not in self.adjacency_kwds):
self.adjacency_kwds['radius'] = radius
if override or ('radius' not in self.affinity_kwds):
self.affinity_kwds['radius'] = radius
|
[
"def",
"set_radius",
"(",
"self",
",",
"radius",
",",
"override",
"=",
"True",
",",
"X",
"=",
"None",
",",
"n_components",
"=",
"2",
")",
":",
"if",
"radius",
"<",
"0",
":",
"raise",
"ValueError",
"(",
"\"radius must be non-negative\"",
")",
"if",
"override",
"or",
"(",
"'radius'",
"not",
"in",
"self",
".",
"adjacency_kwds",
"and",
"'n_neighbors'",
"not",
"in",
"self",
".",
"adjacency_kwds",
")",
":",
"self",
".",
"adjacency_kwds",
"[",
"'radius'",
"]",
"=",
"radius",
"if",
"override",
"or",
"(",
"'radius'",
"not",
"in",
"self",
".",
"affinity_kwds",
")",
":",
"self",
".",
"affinity_kwds",
"[",
"'radius'",
"]",
"=",
"radius"
] |
Set the radius for the adjacency and affinity computation
By default, this will override keyword arguments provided on
initialization.
Parameters
----------
radius : float
radius to set for adjacency and affinity.
override : bool (default: True)
if False, then only set radius if not already defined in
`adjacency_args` and `affinity_args`.
X : ndarray or sparse (optional)
if provided, estimate a suitable radius from this data.
n_components : int (default=2)
the number of components to use when estimating the radius
|
[
"Set",
"the",
"radius",
"for",
"the",
"adjacency",
"and",
"affinity",
"computation"
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/geometry/geometry.py#L114-L140
|
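A short sketch of set_radius in context, based on the set_radius and fit_geometry records in this dump; the Geometry constructor keyword is an assumption.

# Illustrative only: override=False respects an existing n_neighbors setting
# in adjacency_kwds, exactly as in the condition shown above.
from megaman.geometry.geometry import Geometry

geom = Geometry(adjacency_kwds={'n_neighbors': 10})   # constructor kwds assumed
geom.set_radius(0.5, override=False)  # adjacency radius skipped (n_neighbors present);
                                      # affinity radius set to 0.5
geom.set_radius(0.5, override=True)   # forces radius into both kwds dicts
print(geom.adjacency_kwds, geom.affinity_kwds)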
7,593
|
mmp2/megaman
|
megaman/geometry/rmetric.py
|
RiemannMetric.get_rmetric
|
def get_rmetric( self, mode_inv = 'svd', return_svd = False ):
"""
Compute the Reimannian Metric
"""
if self.H is None:
self.H, self.G, self.Hvv, self.Hsval = riemann_metric(self.Y, self.L, self.mdimG, invert_h = True, mode_inv = mode_inv)
if self.G is None:
self.G, self.Hvv, self.Hsvals, self.Gsvals = compute_G_from_H( self.H, mode_inv = self.mode_inv )
if mode_inv is 'svd' and return_svd:
return self.G, self.Hvv, self.Hsvals, self.Gsvals
else:
return self.G
|
python
|
def get_rmetric( self, mode_inv = 'svd', return_svd = False ):
"""
Compute the Reimannian Metric
"""
if self.H is None:
self.H, self.G, self.Hvv, self.Hsval = riemann_metric(self.Y, self.L, self.mdimG, invert_h = True, mode_inv = mode_inv)
if self.G is None:
self.G, self.Hvv, self.Hsvals, self.Gsvals = compute_G_from_H( self.H, mode_inv = self.mode_inv )
if mode_inv is 'svd' and return_svd:
return self.G, self.Hvv, self.Hsvals, self.Gsvals
else:
return self.G
|
[
"def",
"get_rmetric",
"(",
"self",
",",
"mode_inv",
"=",
"'svd'",
",",
"return_svd",
"=",
"False",
")",
":",
"if",
"self",
".",
"H",
"is",
"None",
":",
"self",
".",
"H",
",",
"self",
".",
"G",
",",
"self",
".",
"Hvv",
",",
"self",
".",
"Hsval",
"=",
"riemann_metric",
"(",
"self",
".",
"Y",
",",
"self",
".",
"L",
",",
"self",
".",
"mdimG",
",",
"invert_h",
"=",
"True",
",",
"mode_inv",
"=",
"mode_inv",
")",
"if",
"self",
".",
"G",
"is",
"None",
":",
"self",
".",
"G",
",",
"self",
".",
"Hvv",
",",
"self",
".",
"Hsvals",
",",
"self",
".",
"Gsvals",
"=",
"compute_G_from_H",
"(",
"self",
".",
"H",
",",
"mode_inv",
"=",
"self",
".",
"mode_inv",
")",
"if",
"mode_inv",
"is",
"'svd'",
"and",
"return_svd",
":",
"return",
"self",
".",
"G",
",",
"self",
".",
"Hvv",
",",
"self",
".",
"Hsvals",
",",
"self",
".",
"Gsvals",
"else",
":",
"return",
"self",
".",
"G"
] |
Compute the Reimannian Metric
|
[
"Compute",
"the",
"Reimannian",
"Metric"
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/geometry/rmetric.py#L270-L281
|
7,594
|
mmp2/megaman
|
megaman/relaxation/trace_variable.py
|
TracingVariable.report_and_save_keywords
|
def report_and_save_keywords(self,relaxation_kwds,precomputed_kwds):
"""Save relaxation keywords to .txt and .pyc file"""
report_name = os.path.join(self.backup_dir,'relaxation_keywords.txt')
pretty_relax_kwds = pprint.pformat(relaxation_kwds,indent=4)
with open(report_name,'w') as wf:
wf.write(pretty_relax_kwds)
wf.close()
origin_name = os.path.join(self.backup_dir,'relaxation_keywords.pyc')
with open(origin_name,'wb') as ro:
pickle.dump(relaxation_kwds,ro,protocol=pickle.HIGHEST_PROTOCOL)
ro.close()
if relaxation_kwds['presave']:
precomp_kwds_name = os.path.join(self.backup_dir,
'precomputed_keywords.pyc')
with open(precomp_kwds_name, 'wb') as po:
pickle.dump(precomputed_kwds, po,
protocol=pickle.HIGHEST_PROTOCOL)
po.close()
|
python
|
def report_and_save_keywords(self,relaxation_kwds,precomputed_kwds):
"""Save relaxation keywords to .txt and .pyc file"""
report_name = os.path.join(self.backup_dir,'relaxation_keywords.txt')
pretty_relax_kwds = pprint.pformat(relaxation_kwds,indent=4)
with open(report_name,'w') as wf:
wf.write(pretty_relax_kwds)
wf.close()
origin_name = os.path.join(self.backup_dir,'relaxation_keywords.pyc')
with open(origin_name,'wb') as ro:
pickle.dump(relaxation_kwds,ro,protocol=pickle.HIGHEST_PROTOCOL)
ro.close()
if relaxation_kwds['presave']:
precomp_kwds_name = os.path.join(self.backup_dir,
'precomputed_keywords.pyc')
with open(precomp_kwds_name, 'wb') as po:
pickle.dump(precomputed_kwds, po,
protocol=pickle.HIGHEST_PROTOCOL)
po.close()
|
[
"def",
"report_and_save_keywords",
"(",
"self",
",",
"relaxation_kwds",
",",
"precomputed_kwds",
")",
":",
"report_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"backup_dir",
",",
"'relaxation_keywords.txt'",
")",
"pretty_relax_kwds",
"=",
"pprint",
".",
"pformat",
"(",
"relaxation_kwds",
",",
"indent",
"=",
"4",
")",
"with",
"open",
"(",
"report_name",
",",
"'w'",
")",
"as",
"wf",
":",
"wf",
".",
"write",
"(",
"pretty_relax_kwds",
")",
"wf",
".",
"close",
"(",
")",
"origin_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"backup_dir",
",",
"'relaxation_keywords.pyc'",
")",
"with",
"open",
"(",
"origin_name",
",",
"'wb'",
")",
"as",
"ro",
":",
"pickle",
".",
"dump",
"(",
"relaxation_kwds",
",",
"ro",
",",
"protocol",
"=",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
"ro",
".",
"close",
"(",
")",
"if",
"relaxation_kwds",
"[",
"'presave'",
"]",
":",
"precomp_kwds_name",
"=",
"os",
".",
"path",
".",
"join",
"(",
"self",
".",
"backup_dir",
",",
"'precomputed_keywords.pyc'",
")",
"with",
"open",
"(",
"precomp_kwds_name",
",",
"'wb'",
")",
"as",
"po",
":",
"pickle",
".",
"dump",
"(",
"precomputed_kwds",
",",
"po",
",",
"protocol",
"=",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
"po",
".",
"close",
"(",
")"
] |
Save relaxation keywords to .txt and .pyc file
|
[
"Save",
"relaxation",
"keywords",
"to",
".",
"txt",
"and",
".",
"pyc",
"file"
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/relaxation/trace_variable.py#L36-L55
|
7,595
|
mmp2/megaman
|
megaman/relaxation/trace_variable.py
|
TracingVariable.update
|
def update(self,iiter,H,Y,eta,loss):
"""Update the trace_var in new iteration"""
if iiter <= self.niter_trace+1:
self.H[iiter] = H
self.Y[iiter] = Y
elif iiter >self.niter - self.niter_trace + 1:
self.H[self.ltrace+iiter-self.niter-1] = H
self.Y[self.ltrace+iiter-self.niter-1] = Y
self.etas[iiter] = eta
self.loss[iiter] = loss
if self.loss[iiter] < self.lmin:
self.Yh = Y
self.lmin = self.loss[iiter]
self.miniter = iiter if not iiter == -1 else self.niter + 1
|
python
|
def update(self,iiter,H,Y,eta,loss):
"""Update the trace_var in new iteration"""
if iiter <= self.niter_trace+1:
self.H[iiter] = H
self.Y[iiter] = Y
elif iiter >self.niter - self.niter_trace + 1:
self.H[self.ltrace+iiter-self.niter-1] = H
self.Y[self.ltrace+iiter-self.niter-1] = Y
self.etas[iiter] = eta
self.loss[iiter] = loss
if self.loss[iiter] < self.lmin:
self.Yh = Y
self.lmin = self.loss[iiter]
self.miniter = iiter if not iiter == -1 else self.niter + 1
|
[
"def",
"update",
"(",
"self",
",",
"iiter",
",",
"H",
",",
"Y",
",",
"eta",
",",
"loss",
")",
":",
"if",
"iiter",
"<=",
"self",
".",
"niter_trace",
"+",
"1",
":",
"self",
".",
"H",
"[",
"iiter",
"]",
"=",
"H",
"self",
".",
"Y",
"[",
"iiter",
"]",
"=",
"Y",
"elif",
"iiter",
">",
"self",
".",
"niter",
"-",
"self",
".",
"niter_trace",
"+",
"1",
":",
"self",
".",
"H",
"[",
"self",
".",
"ltrace",
"+",
"iiter",
"-",
"self",
".",
"niter",
"-",
"1",
"]",
"=",
"H",
"self",
".",
"Y",
"[",
"self",
".",
"ltrace",
"+",
"iiter",
"-",
"self",
".",
"niter",
"-",
"1",
"]",
"=",
"Y",
"self",
".",
"etas",
"[",
"iiter",
"]",
"=",
"eta",
"self",
".",
"loss",
"[",
"iiter",
"]",
"=",
"loss",
"if",
"self",
".",
"loss",
"[",
"iiter",
"]",
"<",
"self",
".",
"lmin",
":",
"self",
".",
"Yh",
"=",
"Y",
"self",
".",
"lmin",
"=",
"self",
".",
"loss",
"[",
"iiter",
"]",
"self",
".",
"miniter",
"=",
"iiter",
"if",
"not",
"iiter",
"==",
"-",
"1",
"else",
"self",
".",
"niter",
"+",
"1"
] |
Update the trace_var in new iteration
|
[
"Update",
"the",
"trace_var",
"in",
"new",
"iteration"
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/relaxation/trace_variable.py#L57-L71
|
7,596
|
mmp2/megaman
|
megaman/relaxation/trace_variable.py
|
TracingVariable.save
|
def save(cls,instance,filename):
"""Class method save for saving TracingVariable."""
filename = cls.correct_file_extension(filename)
try:
with open(filename,'wb') as f:
pickle.dump(instance,f,protocol=pickle.HIGHEST_PROTOCOL)
except MemoryError as e:
            print ('{} occurred, will downsample the saved file by 20.'
.format(type(e).__name__))
copy_instance = instance.copy()
copy_instance.H = copy_instance.H[::20,:,:]
copy_instance.Y = copy_instance.Y[::20,:]
with open(filename,'wb') as f:
pickle.dump(copy_instance,f,protocol=pickle.HIGHEST_PROTOCOL)
|
python
|
def save(cls,instance,filename):
"""Class method save for saving TracingVariable."""
filename = cls.correct_file_extension(filename)
try:
with open(filename,'wb') as f:
pickle.dump(instance,f,protocol=pickle.HIGHEST_PROTOCOL)
except MemoryError as e:
            print ('{} occurred, will downsample the saved file by 20.'
.format(type(e).__name__))
copy_instance = instance.copy()
copy_instance.H = copy_instance.H[::20,:,:]
copy_instance.Y = copy_instance.Y[::20,:]
with open(filename,'wb') as f:
pickle.dump(copy_instance,f,protocol=pickle.HIGHEST_PROTOCOL)
|
[
"def",
"save",
"(",
"cls",
",",
"instance",
",",
"filename",
")",
":",
"filename",
"=",
"cls",
".",
"correct_file_extension",
"(",
"filename",
")",
"try",
":",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"instance",
",",
"f",
",",
"protocol",
"=",
"pickle",
".",
"HIGHEST_PROTOCOL",
")",
"except",
"MemoryError",
"as",
"e",
":",
"print",
"(",
"'{} occurred, will downsampled the saved file by 20.'",
".",
"format",
"(",
"type",
"(",
"e",
")",
".",
"__name__",
")",
")",
"copy_instance",
"=",
"instance",
".",
"copy",
"(",
")",
"copy_instance",
".",
"H",
"=",
"copy_instance",
".",
"H",
"[",
":",
":",
"20",
",",
":",
",",
":",
"]",
"copy_instance",
".",
"Y",
"=",
"copy_instance",
".",
"Y",
"[",
":",
":",
"20",
",",
":",
"]",
"with",
"open",
"(",
"filename",
",",
"'wb'",
")",
"as",
"f",
":",
"pickle",
".",
"dump",
"(",
"copy_instance",
",",
"f",
",",
"protocol",
"=",
"pickle",
".",
"HIGHEST_PROTOCOL",
")"
] |
Class method save for saving TracingVariable.
|
[
"Class",
"method",
"save",
"for",
"saving",
"TracingVariable",
"."
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/relaxation/trace_variable.py#L93-L106
|
7,597
|
mmp2/megaman
|
megaman/relaxation/trace_variable.py
|
TracingVariable.load
|
def load(cls,filename):
"""Load from stored files"""
filename = cls.correct_file_extension(filename)
with open(filename,'rb') as f:
return pickle.load(f)
|
python
|
def load(cls,filename):
"""Load from stored files"""
filename = cls.correct_file_extension(filename)
with open(filename,'rb') as f:
return pickle.load(f)
|
[
"def",
"load",
"(",
"cls",
",",
"filename",
")",
":",
"filename",
"=",
"cls",
".",
"correct_file_extension",
"(",
"filename",
")",
"with",
"open",
"(",
"filename",
",",
"'rb'",
")",
"as",
"f",
":",
"return",
"pickle",
".",
"load",
"(",
"f",
")"
] |
Load from stored files
|
[
"Load",
"from",
"stored",
"files"
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/relaxation/trace_variable.py#L109-L113
|
7,598
|
mmp2/megaman
|
doc/sphinxext/numpy_ext/utils.py
|
find_mod_objs
|
def find_mod_objs(modname, onlylocals=False):
""" Returns all the public attributes of a module referenced by name.
.. note::
        The returned list does *not* include subpackages or modules of
        `modname`, nor does it include private attributes (those that
        begin with '_' or are not in `__all__`).
Parameters
----------
modname : str
The name of the module to search.
onlylocals : bool
If True, only attributes that are either members of `modname` OR one of
its modules or subpackages will be included.
Returns
-------
localnames : list of str
A list of the names of the attributes as they are named in the
module `modname` .
fqnames : list of str
        A list of the fully qualified names of the attributes (e.g.,
``astropy.utils.misc.find_mod_objs``). For attributes that are
simple variables, this is based on the local name, but for
functions or classes it can be different if they are actually
defined elsewhere and just referenced in `modname`.
objs : list of objects
A list of the actual attributes themselves (in the same order as
the other arguments)
"""
__import__(modname)
mod = sys.modules[modname]
if hasattr(mod, '__all__'):
pkgitems = [(k, mod.__dict__[k]) for k in mod.__all__]
else:
pkgitems = [(k, mod.__dict__[k]) for k in dir(mod) if k[0] != '_']
# filter out modules and pull the names and objs out
ismodule = inspect.ismodule
localnames = [k for k, v in pkgitems if not ismodule(v)]
objs = [v for k, v in pkgitems if not ismodule(v)]
# fully qualified names can be determined from the object's module
fqnames = []
for obj, lnm in zip(objs, localnames):
if hasattr(obj, '__module__') and hasattr(obj, '__name__'):
fqnames.append(obj.__module__ + '.' + obj.__name__)
else:
fqnames.append(modname + '.' + lnm)
if onlylocals:
valids = [fqn.startswith(modname) for fqn in fqnames]
localnames = [e for i, e in enumerate(localnames) if valids[i]]
fqnames = [e for i, e in enumerate(fqnames) if valids[i]]
objs = [e for i, e in enumerate(objs) if valids[i]]
return localnames, fqnames, objs
|
python
|
def find_mod_objs(modname, onlylocals=False):
""" Returns all the public attributes of a module referenced by name.
.. note::
        The returned list does *not* include subpackages or modules of
        `modname`, nor does it include private attributes (those that
        begin with '_' or are not in `__all__`).
Parameters
----------
modname : str
The name of the module to search.
onlylocals : bool
If True, only attributes that are either members of `modname` OR one of
its modules or subpackages will be included.
Returns
-------
localnames : list of str
A list of the names of the attributes as they are named in the
module `modname` .
fqnames : list of str
        A list of the fully qualified names of the attributes (e.g.,
``astropy.utils.misc.find_mod_objs``). For attributes that are
simple variables, this is based on the local name, but for
functions or classes it can be different if they are actually
defined elsewhere and just referenced in `modname`.
objs : list of objects
A list of the actual attributes themselves (in the same order as
the other arguments)
"""
__import__(modname)
mod = sys.modules[modname]
if hasattr(mod, '__all__'):
pkgitems = [(k, mod.__dict__[k]) for k in mod.__all__]
else:
pkgitems = [(k, mod.__dict__[k]) for k in dir(mod) if k[0] != '_']
# filter out modules and pull the names and objs out
ismodule = inspect.ismodule
localnames = [k for k, v in pkgitems if not ismodule(v)]
objs = [v for k, v in pkgitems if not ismodule(v)]
# fully qualified names can be determined from the object's module
fqnames = []
for obj, lnm in zip(objs, localnames):
if hasattr(obj, '__module__') and hasattr(obj, '__name__'):
fqnames.append(obj.__module__ + '.' + obj.__name__)
else:
fqnames.append(modname + '.' + lnm)
if onlylocals:
valids = [fqn.startswith(modname) for fqn in fqnames]
localnames = [e for i, e in enumerate(localnames) if valids[i]]
fqnames = [e for i, e in enumerate(fqnames) if valids[i]]
objs = [e for i, e in enumerate(objs) if valids[i]]
return localnames, fqnames, objs
|
[
"def",
"find_mod_objs",
"(",
"modname",
",",
"onlylocals",
"=",
"False",
")",
":",
"__import__",
"(",
"modname",
")",
"mod",
"=",
"sys",
".",
"modules",
"[",
"modname",
"]",
"if",
"hasattr",
"(",
"mod",
",",
"'__all__'",
")",
":",
"pkgitems",
"=",
"[",
"(",
"k",
",",
"mod",
".",
"__dict__",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"mod",
".",
"__all__",
"]",
"else",
":",
"pkgitems",
"=",
"[",
"(",
"k",
",",
"mod",
".",
"__dict__",
"[",
"k",
"]",
")",
"for",
"k",
"in",
"dir",
"(",
"mod",
")",
"if",
"k",
"[",
"0",
"]",
"!=",
"'_'",
"]",
"# filter out modules and pull the names and objs out",
"ismodule",
"=",
"inspect",
".",
"ismodule",
"localnames",
"=",
"[",
"k",
"for",
"k",
",",
"v",
"in",
"pkgitems",
"if",
"not",
"ismodule",
"(",
"v",
")",
"]",
"objs",
"=",
"[",
"v",
"for",
"k",
",",
"v",
"in",
"pkgitems",
"if",
"not",
"ismodule",
"(",
"v",
")",
"]",
"# fully qualified names can be determined from the object's module",
"fqnames",
"=",
"[",
"]",
"for",
"obj",
",",
"lnm",
"in",
"zip",
"(",
"objs",
",",
"localnames",
")",
":",
"if",
"hasattr",
"(",
"obj",
",",
"'__module__'",
")",
"and",
"hasattr",
"(",
"obj",
",",
"'__name__'",
")",
":",
"fqnames",
".",
"append",
"(",
"obj",
".",
"__module__",
"+",
"'.'",
"+",
"obj",
".",
"__name__",
")",
"else",
":",
"fqnames",
".",
"append",
"(",
"modname",
"+",
"'.'",
"+",
"lnm",
")",
"if",
"onlylocals",
":",
"valids",
"=",
"[",
"fqn",
".",
"startswith",
"(",
"modname",
")",
"for",
"fqn",
"in",
"fqnames",
"]",
"localnames",
"=",
"[",
"e",
"for",
"i",
",",
"e",
"in",
"enumerate",
"(",
"localnames",
")",
"if",
"valids",
"[",
"i",
"]",
"]",
"fqnames",
"=",
"[",
"e",
"for",
"i",
",",
"e",
"in",
"enumerate",
"(",
"fqnames",
")",
"if",
"valids",
"[",
"i",
"]",
"]",
"objs",
"=",
"[",
"e",
"for",
"i",
",",
"e",
"in",
"enumerate",
"(",
"objs",
")",
"if",
"valids",
"[",
"i",
"]",
"]",
"return",
"localnames",
",",
"fqnames",
",",
"objs"
] |
Returns all the public attributes of a module referenced by name.
.. note::
        The returned list does *not* include subpackages or modules of
        `modname`, nor does it include private attributes (those that
        begin with '_' or are not in `__all__`).
Parameters
----------
modname : str
The name of the module to search.
onlylocals : bool
If True, only attributes that are either members of `modname` OR one of
its modules or subpackages will be included.
Returns
-------
localnames : list of str
A list of the names of the attributes as they are named in the
module `modname` .
fqnames : list of str
        A list of the fully qualified names of the attributes (e.g.,
``astropy.utils.misc.find_mod_objs``). For attributes that are
simple variables, this is based on the local name, but for
functions or classes it can be different if they are actually
defined elsewhere and just referenced in `modname`.
objs : list of objects
A list of the actual attributes themselves (in the same order as
the other arguments)
|
[
"Returns",
"all",
"the",
"public",
"attributes",
"of",
"a",
"module",
"referenced",
"by",
"name",
"."
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/doc/sphinxext/numpy_ext/utils.py#L5-L65
|
7,599
|
mmp2/megaman
|
megaman/datasets/datasets.py
|
get_megaman_image
|
def get_megaman_image(factor=1):
"""Return an RGBA representation of the megaman icon"""
imfile = os.path.join(os.path.dirname(__file__), 'megaman.png')
data = ndimage.imread(imfile) / 255
if factor > 1:
data = data.repeat(factor, axis=0).repeat(factor, axis=1)
return data
|
python
|
def get_megaman_image(factor=1):
"""Return an RGBA representation of the megaman icon"""
imfile = os.path.join(os.path.dirname(__file__), 'megaman.png')
data = ndimage.imread(imfile) / 255
if factor > 1:
data = data.repeat(factor, axis=0).repeat(factor, axis=1)
return data
|
[
"def",
"get_megaman_image",
"(",
"factor",
"=",
"1",
")",
":",
"imfile",
"=",
"os",
".",
"path",
".",
"join",
"(",
"os",
".",
"path",
".",
"dirname",
"(",
"__file__",
")",
",",
"'megaman.png'",
")",
"data",
"=",
"ndimage",
".",
"imread",
"(",
"imfile",
")",
"/",
"255",
"if",
"factor",
">",
"1",
":",
"data",
"=",
"data",
".",
"repeat",
"(",
"factor",
",",
"axis",
"=",
"0",
")",
".",
"repeat",
"(",
"factor",
",",
"axis",
"=",
"1",
")",
"return",
"data"
] |
Return an RGBA representation of the megaman icon
|
[
"Return",
"an",
"RGBA",
"representation",
"of",
"the",
"megaman",
"icon"
] |
faccaf267aad0a8b18ec8a705735fd9dd838ca1e
|
https://github.com/mmp2/megaman/blob/faccaf267aad0a8b18ec8a705735fd9dd838ca1e/megaman/datasets/datasets.py#L12-L18
|