| column | type |
|---|---|
| id | int32 (0 to 252k) |
| repo | string (7 to 55 chars) |
| path | string (4 to 127 chars) |
| func_name | string (1 to 88 chars) |
| original_string | string (75 to 19.8k chars) |
| language | string (1 class) |
| code | string (75 to 19.8k chars) |
| code_tokens | list |
| docstring | string (3 to 17.3k chars) |
| docstring_tokens | list |
| sha | string (40 chars) |
| url | string (87 to 242 chars) |

21,400 | apache/incubator-superset | superset/tasks/schedules.py | destroy_webdriver

```python
def destroy_webdriver(driver):
"""
Destroy a driver
"""
# This is some very flaky code in selenium. Hence the retries
# and catch-all exceptions
try:
retry_call(driver.close, tries=2)
except Exception:
pass
try:
driver.quit()
except Exception:
pass
```

language: python
docstring: Destroy a driver
sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/tasks/schedules.py#L190-L204

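For context on the `retry_call(driver.close, tries=2)` call above: a minimal, self-contained sketch of how `retry_call` from the `retry` package retries a flaky callable. The `flaky_close` function is a made-up stand-in for `driver.close`, not Selenium code.

```python
# Sketch of retry_call behaviour (assumes the `retry` package is installed).
# flaky_close() fails once, then succeeds, so tries=2 absorbs the first failure.
from retry.api import retry_call

attempts = {"count": 0}

def flaky_close():
    attempts["count"] += 1
    if attempts["count"] == 1:
        raise RuntimeError("simulated flaky WebDriver call")
    return "closed"

result = retry_call(flaky_close, tries=2)
print(result)             # closed
print(attempts["count"])  # 2
```
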
21,401 | apache/incubator-superset | superset/tasks/schedules.py | deliver_dashboard

```python
def deliver_dashboard(schedule):
"""
Given a schedule, delivery the dashboard as an email report
"""
dashboard = schedule.dashboard
dashboard_url = _get_url_path(
'Superset.dashboard',
dashboard_id=dashboard.id,
)
# Create a driver, fetch the page, wait for the page to render
driver = create_webdriver()
window = config.get('WEBDRIVER_WINDOW')['dashboard']
driver.set_window_size(*window)
driver.get(dashboard_url)
time.sleep(PAGE_RENDER_WAIT)
# Set up a function to retry once for the element.
# This is buggy in certain selenium versions with firefox driver
get_element = getattr(driver, 'find_element_by_class_name')
element = retry_call(
get_element,
fargs=['grid-container'],
tries=2,
delay=PAGE_RENDER_WAIT,
)
try:
screenshot = element.screenshot_as_png
except WebDriverException:
# Some webdrivers do not support screenshots for elements.
# In such cases, take a screenshot of the entire page.
screenshot = driver.screenshot() # pylint: disable=no-member
finally:
destroy_webdriver(driver)
# Generate the email body and attachments
email = _generate_mail_content(
schedule,
screenshot,
dashboard.dashboard_title,
dashboard_url,
)
subject = __(
'%(prefix)s %(title)s',
prefix=config.get('EMAIL_REPORTS_SUBJECT_PREFIX'),
title=dashboard.dashboard_title,
)
_deliver_email(schedule, subject, email)
```

language: python
docstring: Given a schedule, delivery the dashboard as an email report
sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/tasks/schedules.py#L207-L258

21,402 | apache/incubator-superset | superset/tasks/schedules.py | deliver_slice

```python
def deliver_slice(schedule):
"""
Given a schedule, delivery the slice as an email report
"""
if schedule.email_format == SliceEmailReportFormat.data:
email = _get_slice_data(schedule)
elif schedule.email_format == SliceEmailReportFormat.visualization:
email = _get_slice_visualization(schedule)
else:
raise RuntimeError('Unknown email report format')
subject = __(
'%(prefix)s %(title)s',
prefix=config.get('EMAIL_REPORTS_SUBJECT_PREFIX'),
title=schedule.slice.slice_name,
)
_deliver_email(schedule, subject, email)
```

language: python
docstring: Given a schedule, delivery the slice as an email report
sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/tasks/schedules.py#L356-L373

21,403 | apache/incubator-superset | superset/tasks/schedules.py | schedule_hourly

```python
def schedule_hourly():
""" Celery beat job meant to be invoked hourly """
if not config.get('ENABLE_SCHEDULED_EMAIL_REPORTS'):
logging.info('Scheduled email reports not enabled in config')
return
resolution = config.get('EMAIL_REPORTS_CRON_RESOLUTION', 0) * 60
# Get the top of the hour
start_at = datetime.now(tzlocal()).replace(microsecond=0, second=0, minute=0)
stop_at = start_at + timedelta(seconds=3600)
schedule_window(ScheduleType.dashboard.value, start_at, stop_at, resolution)
schedule_window(ScheduleType.slice.value, start_at, stop_at, resolution)
```

language: python
docstring: Celery beat job meant to be invoked hourly
sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/tasks/schedules.py#L444-L457

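As a small illustration of the window that `schedule_hourly` scans, this hedged sketch reproduces only the start/stop computation (top of the current hour plus 3600 seconds); it does not touch Celery or the schedule models.

```python
# Recompute the hourly scheduling window exactly as schedule_hourly does above.
from datetime import datetime, timedelta
from dateutil.tz import tzlocal

now = datetime.now(tzlocal())
start_at = now.replace(microsecond=0, second=0, minute=0)  # top of the current hour
stop_at = start_at + timedelta(seconds=3600)               # one hour forward
print(start_at, "->", stop_at)  # e.g. 2019-06-01 14:00:00+02:00 -> 2019-06-01 15:00:00+02:00
```
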
21,404 | apache/incubator-superset | superset/dataframe.py | dedup

```python
def dedup(l, suffix='__', case_sensitive=True):
"""De-duplicates a list of string by suffixing a counter
Always returns the same number of entries as provided, and always returns
unique values. Case sensitive comparison by default.
>>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar'])))
foo,bar,bar__1,bar__2,Bar
>>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar'], case_sensitive=False)))
foo,bar,bar__1,bar__2,Bar__3
"""
new_l = []
seen = {}
for s in l:
s_fixed_case = s if case_sensitive else s.lower()
if s_fixed_case in seen:
seen[s_fixed_case] += 1
s += suffix + str(seen[s_fixed_case])
else:
seen[s_fixed_case] = 0
new_l.append(s)
return new_l
```

language: python
docstring: De-duplicates a list of string by suffixing a counter
Always returns the same number of entries as provided, and always returns
unique values. Case sensitive comparison by default.
>>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar'])))
foo,bar,bar__1,bar__2,Bar
>>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar'], case_sensitive=False)))
foo,bar,bar__1,bar__2,Bar__3
sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/dataframe.py#L39-L60

21,405 | apache/incubator-superset | superset/dataframe.py | SupersetDataFrame.db_type

```python
def db_type(cls, dtype):
"""Given a numpy dtype, Returns a generic database type"""
if isinstance(dtype, ExtensionDtype):
return cls.type_map.get(dtype.kind)
elif hasattr(dtype, 'char'):
return cls.type_map.get(dtype.char)
```

language: python
docstring: Given a numpy dtype, Returns a generic database type
sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/dataframe.py#L122-L127

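The `db_type` method above keys its `type_map` on a dtype's `kind` or `char` code. A short illustrative snippet (not part of the dataset) printing those attributes for a few common NumPy dtypes:

```python
# Inspect the .kind / .char codes that db_type's type_map is keyed on.
import numpy as np

for dt in (np.dtype(np.int64), np.dtype(np.float64), np.dtype("datetime64[ns]"), np.dtype(object)):
    print(dt, "kind:", dt.kind, "char:", dt.char)
# int64 has kind 'i', float64 kind 'f', datetime64[ns] kind 'M', object kind 'O';
# the one-letter char codes can vary by platform and NumPy version.
```
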
21,406 | apache/incubator-superset | superset/dataframe.py | SupersetDataFrame.columns

```python
def columns(self):
"""Provides metadata about columns for data visualization.
:return: dict, with the fields name, type, is_date, is_dim and agg.
"""
if self.df.empty:
return None
columns = []
sample_size = min(INFER_COL_TYPES_SAMPLE_SIZE, len(self.df.index))
sample = self.df
if sample_size:
sample = self.df.sample(sample_size)
for col in self.df.dtypes.keys():
db_type_str = (
self._type_dict.get(col) or
self.db_type(self.df.dtypes[col])
)
column = {
'name': col,
'agg': self.agg_func(self.df.dtypes[col], col),
'type': db_type_str,
'is_date': self.is_date(self.df.dtypes[col], db_type_str),
'is_dim': self.is_dimension(self.df.dtypes[col], col),
}
if not db_type_str or db_type_str.upper() == 'OBJECT':
v = sample[col].iloc[0] if not sample[col].empty else None
if isinstance(v, str):
column['type'] = 'STRING'
elif isinstance(v, int):
column['type'] = 'INT'
elif isinstance(v, float):
column['type'] = 'FLOAT'
elif isinstance(v, (datetime, date)):
column['type'] = 'DATETIME'
column['is_date'] = True
column['is_dim'] = False
# check if encoded datetime
if (
column['type'] == 'STRING' and
self.datetime_conversion_rate(sample[col]) >
INFER_COL_TYPES_THRESHOLD):
column.update({
'is_date': True,
'is_dim': False,
'agg': None,
})
# 'agg' is optional attribute
if not column['agg']:
column.pop('agg', None)
columns.append(column)
return columns
```

language: python
docstring: Provides metadata about columns for data visualization.
:return: dict, with the fields name, type, is_date, is_dim and agg.
sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/dataframe.py#L177-L229

21,407 | apache/incubator-superset | superset/connectors/sqla/models.py | TableColumn.get_timestamp_expression

```python
def get_timestamp_expression(self, time_grain):
"""Getting the time component of the query"""
label = utils.DTTM_ALIAS
db = self.table.database
pdf = self.python_date_format
is_epoch = pdf in ('epoch_s', 'epoch_ms')
if not self.expression and not time_grain and not is_epoch:
sqla_col = column(self.column_name, type_=DateTime)
return self.table.make_sqla_column_compatible(sqla_col, label)
grain = None
if time_grain:
grain = db.grains_dict().get(time_grain)
if not grain:
raise NotImplementedError(
f'No grain spec for {time_grain} for database {db.database_name}')
col = db.db_engine_spec.get_timestamp_column(self.expression, self.column_name)
expr = db.db_engine_spec.get_time_expr(col, pdf, time_grain, grain)
sqla_col = literal_column(expr, type_=DateTime)
return self.table.make_sqla_column_compatible(sqla_col, label)
```

language: python
docstring: Getting the time component of the query
sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/sqla/models.py#L143-L162

21,408 | apache/incubator-superset | superset/connectors/sqla/models.py | TableColumn.dttm_sql_literal

```python
def dttm_sql_literal(self, dttm, is_epoch_in_utc):
"""Convert datetime object to a SQL expression string
If database_expression is empty, the internal dttm
will be parsed as the string with the pattern that
the user inputted (python_date_format)
If database_expression is not empty, the internal dttm
will be parsed as the sql sentence for the database to convert
"""
tf = self.python_date_format
if self.database_expression:
return self.database_expression.format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
elif tf:
if is_epoch_in_utc:
seconds_since_epoch = dttm.timestamp()
else:
seconds_since_epoch = (dttm - datetime(1970, 1, 1)).total_seconds()
seconds_since_epoch = int(seconds_since_epoch)
if tf == 'epoch_s':
return str(seconds_since_epoch)
elif tf == 'epoch_ms':
return str(seconds_since_epoch * 1000)
return "'{}'".format(dttm.strftime(tf))
else:
s = self.table.database.db_engine_spec.convert_dttm(
self.type or '', dttm)
return s or "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S.%f'))
```

language: python
docstring: Convert datetime object to a SQL expression string
If database_expression is empty, the internal dttm
will be parsed as the string with the pattern that
the user inputted (python_date_format)
If database_expression is not empty, the internal dttm
will be parsed as the sql sentence for the database to convert
sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/sqla/models.py#L172-L198

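A worked example (illustrative only) of the epoch branch in `dttm_sql_literal` above: a naive datetime is offset from 1970-01-01 and rendered as seconds for `epoch_s` or milliseconds for `epoch_ms`.

```python
# Reproduce only the epoch arithmetic from dttm_sql_literal above.
from datetime import datetime

dttm = datetime(2019, 1, 1)
seconds_since_epoch = int((dttm - datetime(1970, 1, 1)).total_seconds())
print(seconds_since_epoch)         # 1546300800    -> returned for python_date_format == 'epoch_s'
print(seconds_since_epoch * 1000)  # 1546300800000 -> returned for 'epoch_ms'
```
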
21,409 | apache/incubator-superset | superset/connectors/sqla/models.py | SqlaTable.values_for_column

```python
def values_for_column(self, column_name, limit=10000):
"""Runs query against sqla to retrieve some
sample values for the given column.
"""
cols = {col.column_name: col for col in self.columns}
target_col = cols[column_name]
tp = self.get_template_processor()
qry = (
select([target_col.get_sqla_col()])
.select_from(self.get_from_clause(tp))
.distinct()
)
if limit:
qry = qry.limit(limit)
if self.fetch_values_predicate:
tp = self.get_template_processor()
qry = qry.where(tp.process_template(self.fetch_values_predicate))
engine = self.database.get_sqla_engine()
sql = '{}'.format(
qry.compile(engine, compile_kwargs={'literal_binds': True}),
)
sql = self.mutate_query_from_config(sql)
df = pd.read_sql_query(sql=sql, con=engine)
return [row[0] for row in df.to_records(index=False)]
```

language: python
docstring: Runs query against sqla to retrieve some
sample values for the given column.
sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/sqla/models.py#L437-L464

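The SQL string in `values_for_column` comes from compiling the query with `literal_binds`. A minimal sketch using plain SQLAlchemy Core in the same 1.x style as the code above; the table and column names here are hypothetical.

```python
# Compile a SELECT DISTINCT with literals inlined, as values_for_column does.
import sqlalchemy as sa

qry = (
    sa.select([sa.column("my_col")])        # 1.x-style column list, matching the code above
    .select_from(sa.table("my_table"))
    .where(sa.column("my_col") > 5)
    .distinct()
)
sql = str(qry.compile(compile_kwargs={"literal_binds": True}))
print(sql)  # SELECT DISTINCT my_col FROM my_table WHERE my_col > 5 (whitespace varies by dialect)
```
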
21,410 | apache/incubator-superset | superset/connectors/sqla/models.py | SqlaTable.mutate_query_from_config

```python
def mutate_query_from_config(self, sql):
"""Apply config's SQL_QUERY_MUTATOR
Typically adds comments to the query with context"""
SQL_QUERY_MUTATOR = config.get('SQL_QUERY_MUTATOR')
if SQL_QUERY_MUTATOR:
username = utils.get_username()
sql = SQL_QUERY_MUTATOR(sql, username, security_manager, self.database)
return sql
```

language: python
docstring: Apply config's SQL_QUERY_MUTATOR
Typically adds comments to the query with context
sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/sqla/models.py#L466-L474

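A hedged sketch of what a `SQL_QUERY_MUTATOR` config callable might look like, matching the `(sql, username, security_manager, database)` call signature used in `mutate_query_from_config` above; the comment format is only an example, not required behaviour.

```python
# Example mutator: prepend a trace comment so queries can be attributed to a user.
def sql_query_mutator(sql, username, security_manager, database):
    return "-- superset user: {}\n{}".format(username, sql)

print(sql_query_mutator("SELECT 1", "alice", None, None))
# -- superset user: alice
# SELECT 1
```
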
21,411 | apache/incubator-superset | superset/connectors/sqla/models.py | SqlaTable.adhoc_metric_to_sqla

```python
def adhoc_metric_to_sqla(self, metric, cols):
"""
Turn an adhoc metric into a sqlalchemy column.
:param dict metric: Adhoc metric definition
:param dict cols: Columns for the current table
:returns: The metric defined as a sqlalchemy column
:rtype: sqlalchemy.sql.column
"""
expression_type = metric.get('expressionType')
label = utils.get_metric_name(metric)
if expression_type == utils.ADHOC_METRIC_EXPRESSION_TYPES['SIMPLE']:
column_name = metric.get('column').get('column_name')
table_column = cols.get(column_name)
if table_column:
sqla_column = table_column.get_sqla_col()
else:
sqla_column = column(column_name)
sqla_metric = self.sqla_aggregations[metric.get('aggregate')](sqla_column)
elif expression_type == utils.ADHOC_METRIC_EXPRESSION_TYPES['SQL']:
sqla_metric = literal_column(metric.get('sqlExpression'))
else:
return None
return self.make_sqla_column_compatible(sqla_metric, label)
```

language: python
docstring: Turn an adhoc metric into a sqlalchemy column.
:param dict metric: Adhoc metric definition
:param dict cols: Columns for the current table
:returns: The metric defined as a sqlalchemy column
:rtype: sqlalchemy.sql.column
sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/sqla/models.py#L509-L534

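To make the SIMPLE branch of `adhoc_metric_to_sqla` concrete, a small sketch with plain SQLAlchemy; the `sqla_aggregations` dict here is a stand-in for the mapping the class holds, and the metric dict is hypothetical.

```python
# A SIMPLE adhoc metric becomes aggregate(column) in SQLAlchemy terms.
import sqlalchemy as sa

sqla_aggregations = {"SUM": sa.func.sum, "COUNT": sa.func.count}  # stand-in mapping
metric = {"expressionType": "SIMPLE", "column": {"column_name": "num"}, "aggregate": "SUM"}

sqla_column = sa.column(metric["column"]["column_name"])
sqla_metric = sqla_aggregations[metric["aggregate"]](sqla_column)
print(str(sqla_metric))  # sum(num)
```
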
21,412 | apache/incubator-superset | superset/connectors/sqla/models.py | SqlaTable.fetch_metadata

```python
def fetch_metadata(self):
"""Fetches the metadata for the table and merges it in"""
try:
table = self.get_sqla_table_object()
except Exception as e:
logging.exception(e)
raise Exception(_(
"Table [{}] doesn't seem to exist in the specified database, "
"couldn't fetch column information").format(self.table_name))
M = SqlMetric # noqa
metrics = []
any_date_col = None
db_engine_spec = self.database.db_engine_spec
db_dialect = self.database.get_dialect()
dbcols = (
db.session.query(TableColumn)
.filter(TableColumn.table == self)
.filter(or_(TableColumn.column_name == col.name
for col in table.columns)))
dbcols = {dbcol.column_name: dbcol for dbcol in dbcols}
for col in table.columns:
try:
datatype = col.type.compile(dialect=db_dialect).upper()
except Exception as e:
datatype = 'UNKNOWN'
logging.error(
'Unrecognized data type in {}.{}'.format(table, col.name))
logging.exception(e)
dbcol = dbcols.get(col.name, None)
if not dbcol:
dbcol = TableColumn(column_name=col.name, type=datatype)
dbcol.sum = dbcol.is_num
dbcol.avg = dbcol.is_num
dbcol.is_dttm = dbcol.is_time
db_engine_spec.alter_new_orm_column(dbcol)
else:
dbcol.type = datatype
dbcol.groupby = True
dbcol.filterable = True
self.columns.append(dbcol)
if not any_date_col and dbcol.is_time:
any_date_col = col.name
metrics.append(M(
metric_name='count',
verbose_name='COUNT(*)',
metric_type='count',
expression='COUNT(*)',
))
if not self.main_dttm_col:
self.main_dttm_col = any_date_col
self.add_missing_metrics(metrics)
db.session.merge(self)
db.session.commit()
```

language: python
docstring: Fetches the metadata for the table and merges it in
sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/sqla/models.py#L875-L930

21,413 | apache/incubator-superset | superset/views/datasource.py | Datasource.external_metadata

```python
def external_metadata(self, datasource_type=None, datasource_id=None):
"""Gets column info from the source system"""
if datasource_type == 'druid':
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session)
elif datasource_type == 'table':
database = (
db.session
.query(Database)
.filter_by(id=request.args.get('db_id'))
.one()
)
Table = ConnectorRegistry.sources['table']
datasource = Table(
database=database,
table_name=request.args.get('table_name'),
schema=request.args.get('schema') or None,
)
external_metadata = datasource.external_metadata()
return self.json_response(external_metadata)
```

language: python
docstring: Gets column info from the source system
sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/datasource.py#L70-L89

21,414 | apache/incubator-superset | superset/forms.py | filter_not_empty_values

```python
def filter_not_empty_values(value):
"""Returns a list of non empty values or None"""
if not value:
return None
data = [x for x in value if x]
if not data:
return None
return data
```

language: python
docstring: Returns a list of non empty values or None
sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/forms.py#L50-L57

21,415 | apache/incubator-superset | superset/forms.py | CsvToDatabaseForm.at_least_one_schema_is_allowed

```python
def at_least_one_schema_is_allowed(database):
"""
If the user has access to the database or all datasource
1. if schemas_allowed_for_csv_upload is empty
a) if database does not support schema
user is able to upload csv without specifying schema name
b) if database supports schema
user is able to upload csv to any schema
2. if schemas_allowed_for_csv_upload is not empty
a) if database does not support schema
This situation is impossible and upload will fail
b) if database supports schema
user is able to upload to schema in schemas_allowed_for_csv_upload
elif the user does not access to the database or all datasource
1. if schemas_allowed_for_csv_upload is empty
a) if database does not support schema
user is unable to upload csv
b) if database supports schema
user is unable to upload csv
2. if schemas_allowed_for_csv_upload is not empty
a) if database does not support schema
This situation is impossible and user is unable to upload csv
b) if database supports schema
user is able to upload to schema in schemas_allowed_for_csv_upload
"""
if (security_manager.database_access(database) or
security_manager.all_datasource_access()):
return True
schemas = database.get_schema_access_for_csv_upload()
if (schemas and
security_manager.schemas_accessible_by_user(
database, schemas, False)):
return True
return False
```

language: python
docstring: If the user has access to the database or all datasource
1. if schemas_allowed_for_csv_upload is empty
a) if database does not support schema
user is able to upload csv without specifying schema name
b) if database supports schema
user is able to upload csv to any schema
2. if schemas_allowed_for_csv_upload is not empty
a) if database does not support schema
This situation is impossible and upload will fail
b) if database supports schema
user is able to upload to schema in schemas_allowed_for_csv_upload
elif the user does not access to the database or all datasource
1. if schemas_allowed_for_csv_upload is empty
a) if database does not support schema
user is unable to upload csv
b) if database supports schema
user is unable to upload csv
2. if schemas_allowed_for_csv_upload is not empty
a) if database does not support schema
This situation is impossible and user is unable to upload csv
b) if database supports schema
user is able to upload to schema in schemas_allowed_for_csv_upload
sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/forms.py#L73-L106

21,416 | apache/incubator-superset | superset/views/sql_lab.py | QueryFilter.apply

```python
def apply(
self,
query: BaseQuery,
func: Callable) -> BaseQuery:
"""
Filter queries to only those owned by current user if
can_only_access_owned_queries permission is set.
:returns: query
"""
if security_manager.can_only_access_owned_queries():
query = (
query
.filter(Query.user_id == g.user.get_user_id())
)
return query
```

language: python
docstring: Filter queries to only those owned by current user if
can_only_access_owned_queries permission is set.
:returns: query
sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/sql_lab.py#L34-L49

21,417 | apache/incubator-superset | superset/connectors/sqla/views.py | TableModelView.edit

```python
def edit(self, pk):
"""Simple hack to redirect to explore view after saving"""
resp = super(TableModelView, self).edit(pk)
if isinstance(resp, str):
return resp
return redirect('/superset/explore/table/{}/'.format(pk))
```

language: python
docstring: Simple hack to redirect to explore view after saving
sha: ca2996c78f679260eb79c6008e276733df5fb653
url: https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/sqla/views.py#L305-L310

21,418 | apache/incubator-superset | superset/tasks/cache.py | get_form_data

```python
def get_form_data(chart_id, dashboard=None):
"""
Build `form_data` for chart GET request from dashboard's `default_filters`.
When a dashboard has `default_filters` they need to be added as extra
filters in the GET request for charts.
"""
form_data = {'slice_id': chart_id}
if dashboard is None or not dashboard.json_metadata:
return form_data
json_metadata = json.loads(dashboard.json_metadata)
# do not apply filters if chart is immune to them
if chart_id in json_metadata.get('filter_immune_slices', []):
return form_data
default_filters = json.loads(json_metadata.get('default_filters', 'null'))
if not default_filters:
return form_data
# are some of the fields in the chart immune to filters?
filter_immune_slice_fields = json_metadata.get('filter_immune_slice_fields', {})
immune_fields = filter_immune_slice_fields.get(str(chart_id), [])
extra_filters = []
for filters in default_filters.values():
for col, val in filters.items():
if col not in immune_fields:
extra_filters.append({'col': col, 'op': 'in', 'val': val})
if extra_filters:
form_data['extra_filters'] = extra_filters
return form_data
```

language: python
Build `form_data` for chart GET request from dashboard's `default_filters`.
When a dashboard has `default_filters` they need to be added as extra
filters in the GET request for charts.
|
[
"Build",
"form_data",
"for",
"chart",
"GET",
"request",
"from",
"dashboard",
"s",
"default_filters",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/tasks/cache.py#L40-L75
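A minimal standalone sketch of the transformation above, using made-up dashboard metadata (chart ids, columns, and filter values are illustrative, not taken from any real dashboard):

import json

# Hypothetical dashboard json_metadata; chart 42 is immune to the `year` filter.
json_metadata = json.dumps({
    'default_filters': json.dumps({'123': {'region': ['EMEA'], 'year': [2019]}}),
    'filter_immune_slices': [99],
    'filter_immune_slice_fields': {'42': ['year']},
})

meta = json.loads(json_metadata)
default_filters = json.loads(meta['default_filters'])
immune_fields = meta['filter_immune_slice_fields'].get('42', [])
extra_filters = [
    {'col': col, 'op': 'in', 'val': val}
    for filters in default_filters.values()
    for col, val in filters.items()
    if col not in immune_fields
]
print({'slice_id': 42, 'extra_filters': extra_filters})
# {'slice_id': 42, 'extra_filters': [{'col': 'region', 'op': 'in', 'val': ['EMEA']}]}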
|
21,419
|
apache/incubator-superset
|
superset/tasks/cache.py
|
cache_warmup
|
def cache_warmup(strategy_name, *args, **kwargs):
"""
Warm up cache.
This task periodically hits charts to warm up the cache.
"""
logger.info('Loading strategy')
class_ = None
for class_ in strategies:
if class_.name == strategy_name:
break
else:
message = f'No strategy {strategy_name} found!'
logger.error(message)
return message
logger.info(f'Loading {class_.__name__}')
try:
strategy = class_(*args, **kwargs)
logger.info('Success!')
except TypeError:
message = 'Error loading strategy!'
logger.exception(message)
return message
results = {'success': [], 'errors': []}
for url in strategy.get_urls():
try:
logger.info(f'Fetching {url}')
requests.get(url)
results['success'].append(url)
except RequestException:
logger.exception('Error warming up cache!')
results['errors'].append(url)
return results
|
python
|
def cache_warmup(strategy_name, *args, **kwargs):
"""
Warm up cache.
This task periodically hits charts to warm up the cache.
"""
logger.info('Loading strategy')
class_ = None
for class_ in strategies:
if class_.name == strategy_name:
break
else:
message = f'No strategy {strategy_name} found!'
logger.error(message)
return message
logger.info(f'Loading {class_.__name__}')
try:
strategy = class_(*args, **kwargs)
logger.info('Success!')
except TypeError:
message = 'Error loading strategy!'
logger.exception(message)
return message
results = {'success': [], 'errors': []}
for url in strategy.get_urls():
try:
logger.info(f'Fetching {url}')
requests.get(url)
results['success'].append(url)
except RequestException:
logger.exception('Error warming up cache!')
results['errors'].append(url)
return results
|
[
"def",
"cache_warmup",
"(",
"strategy_name",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"logger",
".",
"info",
"(",
"'Loading strategy'",
")",
"class_",
"=",
"None",
"for",
"class_",
"in",
"strategies",
":",
"if",
"class_",
".",
"name",
"==",
"strategy_name",
":",
"break",
"else",
":",
"message",
"=",
"f'No strategy {strategy_name} found!'",
"logger",
".",
"error",
"(",
"message",
")",
"return",
"message",
"logger",
".",
"info",
"(",
"f'Loading {class_.__name__}'",
")",
"try",
":",
"strategy",
"=",
"class_",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"logger",
".",
"info",
"(",
"'Success!'",
")",
"except",
"TypeError",
":",
"message",
"=",
"'Error loading strategy!'",
"logger",
".",
"exception",
"(",
"message",
")",
"return",
"message",
"results",
"=",
"{",
"'success'",
":",
"[",
"]",
",",
"'errors'",
":",
"[",
"]",
"}",
"for",
"url",
"in",
"strategy",
".",
"get_urls",
"(",
")",
":",
"try",
":",
"logger",
".",
"info",
"(",
"f'Fetching {url}'",
")",
"requests",
".",
"get",
"(",
"url",
")",
"results",
"[",
"'success'",
"]",
".",
"append",
"(",
"url",
")",
"except",
"RequestException",
":",
"logger",
".",
"exception",
"(",
"'Error warming up cache!'",
")",
"results",
"[",
"'errors'",
"]",
".",
"append",
"(",
"url",
")",
"return",
"results"
] |
Warm up cache.
This task periodically hits charts to warm up the cache.
|
[
"Warm",
"up",
"cache",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/tasks/cache.py#L280-L316
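The task above is normally fired on a schedule rather than called directly; a hypothetical Celery beat entry might look like the following (the task name, strategy name, and kwargs are assumptions for illustration, not verified against this revision):

CELERYBEAT_SCHEDULE = {
    'cache-warmup-hourly': {
        'task': 'cache-warmup',                   # assumed Celery task name
        'schedule': 60 * 60,                      # run once per hour
        'kwargs': {
            'strategy_name': 'top_n_dashboards',  # assumed strategy registered in this module
            'top_n': 5,
            'since': '7 days ago',
        },
    },
}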
|
21,420
|
apache/incubator-superset
|
superset/connectors/druid/models.py
|
DruidCluster.refresh_datasources
|
def refresh_datasources(
self,
datasource_name=None,
merge_flag=True,
refreshAll=True):
"""Refresh metadata of all datasources in the cluster
If ``datasource_name`` is specified, only that datasource is updated
"""
ds_list = self.get_datasources()
blacklist = conf.get('DRUID_DATA_SOURCE_BLACKLIST', [])
ds_refresh = []
if not datasource_name:
ds_refresh = list(filter(lambda ds: ds not in blacklist, ds_list))
elif datasource_name not in blacklist and datasource_name in ds_list:
ds_refresh.append(datasource_name)
else:
return
self.refresh(ds_refresh, merge_flag, refreshAll)
|
python
|
def refresh_datasources(
self,
datasource_name=None,
merge_flag=True,
refreshAll=True):
"""Refresh metadata of all datasources in the cluster
If ``datasource_name`` is specified, only that datasource is updated
"""
ds_list = self.get_datasources()
blacklist = conf.get('DRUID_DATA_SOURCE_BLACKLIST', [])
ds_refresh = []
if not datasource_name:
ds_refresh = list(filter(lambda ds: ds not in blacklist, ds_list))
elif datasource_name not in blacklist and datasource_name in ds_list:
ds_refresh.append(datasource_name)
else:
return
self.refresh(ds_refresh, merge_flag, refreshAll)
|
[
"def",
"refresh_datasources",
"(",
"self",
",",
"datasource_name",
"=",
"None",
",",
"merge_flag",
"=",
"True",
",",
"refreshAll",
"=",
"True",
")",
":",
"ds_list",
"=",
"self",
".",
"get_datasources",
"(",
")",
"blacklist",
"=",
"conf",
".",
"get",
"(",
"'DRUID_DATA_SOURCE_BLACKLIST'",
",",
"[",
"]",
")",
"ds_refresh",
"=",
"[",
"]",
"if",
"not",
"datasource_name",
":",
"ds_refresh",
"=",
"list",
"(",
"filter",
"(",
"lambda",
"ds",
":",
"ds",
"not",
"in",
"blacklist",
",",
"ds_list",
")",
")",
"elif",
"datasource_name",
"not",
"in",
"blacklist",
"and",
"datasource_name",
"in",
"ds_list",
":",
"ds_refresh",
".",
"append",
"(",
"datasource_name",
")",
"else",
":",
"return",
"self",
".",
"refresh",
"(",
"ds_refresh",
",",
"merge_flag",
",",
"refreshAll",
")"
] |
Refresh metadata of all datasources in the cluster
If ``datasource_name`` is specified, only that datasource is updated
|
[
"Refresh",
"metadata",
"of",
"all",
"datasources",
"in",
"the",
"cluster",
"If",
"datasource_name",
"is",
"specified",
"only",
"that",
"datasource",
"is",
"updated"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L165-L182
|
21,421
|
apache/incubator-superset
|
superset/connectors/druid/models.py
|
DruidCluster.refresh
|
def refresh(self, datasource_names, merge_flag, refreshAll):
"""
Fetches metadata for the specified datasources and
merges to the Superset database
"""
session = db.session
ds_list = (
session.query(DruidDatasource)
.filter(DruidDatasource.cluster_name == self.cluster_name)
.filter(DruidDatasource.datasource_name.in_(datasource_names))
)
ds_map = {ds.name: ds for ds in ds_list}
for ds_name in datasource_names:
datasource = ds_map.get(ds_name, None)
if not datasource:
datasource = DruidDatasource(datasource_name=ds_name)
with session.no_autoflush:
session.add(datasource)
flasher(
_('Adding new datasource [{}]').format(ds_name), 'success')
ds_map[ds_name] = datasource
elif refreshAll:
flasher(
_('Refreshing datasource [{}]').format(ds_name), 'info')
else:
del ds_map[ds_name]
continue
datasource.cluster = self
datasource.merge_flag = merge_flag
session.flush()
        # Prepare multithreaded execution
pool = ThreadPool()
ds_refresh = list(ds_map.values())
metadata = pool.map(_fetch_metadata_for, ds_refresh)
pool.close()
pool.join()
for i in range(0, len(ds_refresh)):
datasource = ds_refresh[i]
cols = metadata[i]
if cols:
col_objs_list = (
session.query(DruidColumn)
.filter(DruidColumn.datasource_id == datasource.id)
.filter(DruidColumn.column_name.in_(cols.keys()))
)
col_objs = {col.column_name: col for col in col_objs_list}
for col in cols:
if col == '__time': # skip the time column
continue
col_obj = col_objs.get(col)
if not col_obj:
col_obj = DruidColumn(
datasource_id=datasource.id,
column_name=col)
with session.no_autoflush:
session.add(col_obj)
col_obj.type = cols[col]['type']
col_obj.datasource = datasource
if col_obj.type == 'STRING':
col_obj.groupby = True
col_obj.filterable = True
datasource.refresh_metrics()
session.commit()
|
python
|
def refresh(self, datasource_names, merge_flag, refreshAll):
"""
Fetches metadata for the specified datasources and
merges to the Superset database
"""
session = db.session
ds_list = (
session.query(DruidDatasource)
.filter(DruidDatasource.cluster_name == self.cluster_name)
.filter(DruidDatasource.datasource_name.in_(datasource_names))
)
ds_map = {ds.name: ds for ds in ds_list}
for ds_name in datasource_names:
datasource = ds_map.get(ds_name, None)
if not datasource:
datasource = DruidDatasource(datasource_name=ds_name)
with session.no_autoflush:
session.add(datasource)
flasher(
_('Adding new datasource [{}]').format(ds_name), 'success')
ds_map[ds_name] = datasource
elif refreshAll:
flasher(
_('Refreshing datasource [{}]').format(ds_name), 'info')
else:
del ds_map[ds_name]
continue
datasource.cluster = self
datasource.merge_flag = merge_flag
session.flush()
        # Prepare multithreaded execution
pool = ThreadPool()
ds_refresh = list(ds_map.values())
metadata = pool.map(_fetch_metadata_for, ds_refresh)
pool.close()
pool.join()
for i in range(0, len(ds_refresh)):
datasource = ds_refresh[i]
cols = metadata[i]
if cols:
col_objs_list = (
session.query(DruidColumn)
.filter(DruidColumn.datasource_id == datasource.id)
.filter(DruidColumn.column_name.in_(cols.keys()))
)
col_objs = {col.column_name: col for col in col_objs_list}
for col in cols:
if col == '__time': # skip the time column
continue
col_obj = col_objs.get(col)
if not col_obj:
col_obj = DruidColumn(
datasource_id=datasource.id,
column_name=col)
with session.no_autoflush:
session.add(col_obj)
col_obj.type = cols[col]['type']
col_obj.datasource = datasource
if col_obj.type == 'STRING':
col_obj.groupby = True
col_obj.filterable = True
datasource.refresh_metrics()
session.commit()
|
[
"def",
"refresh",
"(",
"self",
",",
"datasource_names",
",",
"merge_flag",
",",
"refreshAll",
")",
":",
"session",
"=",
"db",
".",
"session",
"ds_list",
"=",
"(",
"session",
".",
"query",
"(",
"DruidDatasource",
")",
".",
"filter",
"(",
"DruidDatasource",
".",
"cluster_name",
"==",
"self",
".",
"cluster_name",
")",
".",
"filter",
"(",
"DruidDatasource",
".",
"datasource_name",
".",
"in_",
"(",
"datasource_names",
")",
")",
")",
"ds_map",
"=",
"{",
"ds",
".",
"name",
":",
"ds",
"for",
"ds",
"in",
"ds_list",
"}",
"for",
"ds_name",
"in",
"datasource_names",
":",
"datasource",
"=",
"ds_map",
".",
"get",
"(",
"ds_name",
",",
"None",
")",
"if",
"not",
"datasource",
":",
"datasource",
"=",
"DruidDatasource",
"(",
"datasource_name",
"=",
"ds_name",
")",
"with",
"session",
".",
"no_autoflush",
":",
"session",
".",
"add",
"(",
"datasource",
")",
"flasher",
"(",
"_",
"(",
"'Adding new datasource [{}]'",
")",
".",
"format",
"(",
"ds_name",
")",
",",
"'success'",
")",
"ds_map",
"[",
"ds_name",
"]",
"=",
"datasource",
"elif",
"refreshAll",
":",
"flasher",
"(",
"_",
"(",
"'Refreshing datasource [{}]'",
")",
".",
"format",
"(",
"ds_name",
")",
",",
"'info'",
")",
"else",
":",
"del",
"ds_map",
"[",
"ds_name",
"]",
"continue",
"datasource",
".",
"cluster",
"=",
"self",
"datasource",
".",
"merge_flag",
"=",
"merge_flag",
"session",
".",
"flush",
"(",
")",
"# Prepare multithreaded executation",
"pool",
"=",
"ThreadPool",
"(",
")",
"ds_refresh",
"=",
"list",
"(",
"ds_map",
".",
"values",
"(",
")",
")",
"metadata",
"=",
"pool",
".",
"map",
"(",
"_fetch_metadata_for",
",",
"ds_refresh",
")",
"pool",
".",
"close",
"(",
")",
"pool",
".",
"join",
"(",
")",
"for",
"i",
"in",
"range",
"(",
"0",
",",
"len",
"(",
"ds_refresh",
")",
")",
":",
"datasource",
"=",
"ds_refresh",
"[",
"i",
"]",
"cols",
"=",
"metadata",
"[",
"i",
"]",
"if",
"cols",
":",
"col_objs_list",
"=",
"(",
"session",
".",
"query",
"(",
"DruidColumn",
")",
".",
"filter",
"(",
"DruidColumn",
".",
"datasource_id",
"==",
"datasource",
".",
"id",
")",
".",
"filter",
"(",
"DruidColumn",
".",
"column_name",
".",
"in_",
"(",
"cols",
".",
"keys",
"(",
")",
")",
")",
")",
"col_objs",
"=",
"{",
"col",
".",
"column_name",
":",
"col",
"for",
"col",
"in",
"col_objs_list",
"}",
"for",
"col",
"in",
"cols",
":",
"if",
"col",
"==",
"'__time'",
":",
"# skip the time column",
"continue",
"col_obj",
"=",
"col_objs",
".",
"get",
"(",
"col",
")",
"if",
"not",
"col_obj",
":",
"col_obj",
"=",
"DruidColumn",
"(",
"datasource_id",
"=",
"datasource",
".",
"id",
",",
"column_name",
"=",
"col",
")",
"with",
"session",
".",
"no_autoflush",
":",
"session",
".",
"add",
"(",
"col_obj",
")",
"col_obj",
".",
"type",
"=",
"cols",
"[",
"col",
"]",
"[",
"'type'",
"]",
"col_obj",
".",
"datasource",
"=",
"datasource",
"if",
"col_obj",
".",
"type",
"==",
"'STRING'",
":",
"col_obj",
".",
"groupby",
"=",
"True",
"col_obj",
".",
"filterable",
"=",
"True",
"datasource",
".",
"refresh_metrics",
"(",
")",
"session",
".",
"commit",
"(",
")"
] |
Fetches metadata for the specified datasources and
merges to the Superset database
|
[
"Fetches",
"metadata",
"for",
"the",
"specified",
"datasources",
"and",
"merges",
"to",
"the",
"Superset",
"database"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L184-L248
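The metadata fetch above is fanned out over a thread pool before the per-column merge; a minimal standalone illustration of that fan-out pattern (fetch_metadata below is a hypothetical stand-in for _fetch_metadata_for):

from multiprocessing.pool import ThreadPool

def fetch_metadata(datasource_name):
    # Hypothetical stand-in: return {column_name: {'type': ...}} for one datasource.
    return {'country': {'type': 'STRING'}, 'views': {'type': 'LONG'}}

pool = ThreadPool()
metadata = pool.map(fetch_metadata, ['wikipedia', 'web_logs'])
pool.close()
pool.join()
print(len(metadata))  # one metadata dict per datasource, in input order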
|
21,422
|
apache/incubator-superset
|
superset/connectors/druid/models.py
|
DruidColumn.refresh_metrics
|
def refresh_metrics(self):
"""Refresh metrics based on the column metadata"""
metrics = self.get_metrics()
dbmetrics = (
db.session.query(DruidMetric)
.filter(DruidMetric.datasource_id == self.datasource_id)
.filter(DruidMetric.metric_name.in_(metrics.keys()))
)
dbmetrics = {metric.metric_name: metric for metric in dbmetrics}
for metric in metrics.values():
dbmetric = dbmetrics.get(metric.metric_name)
if dbmetric:
for attr in ['json', 'metric_type']:
setattr(dbmetric, attr, getattr(metric, attr))
else:
with db.session.no_autoflush:
metric.datasource_id = self.datasource_id
db.session.add(metric)
|
python
|
def refresh_metrics(self):
"""Refresh metrics based on the column metadata"""
metrics = self.get_metrics()
dbmetrics = (
db.session.query(DruidMetric)
.filter(DruidMetric.datasource_id == self.datasource_id)
.filter(DruidMetric.metric_name.in_(metrics.keys()))
)
dbmetrics = {metric.metric_name: metric for metric in dbmetrics}
for metric in metrics.values():
dbmetric = dbmetrics.get(metric.metric_name)
if dbmetric:
for attr in ['json', 'metric_type']:
setattr(dbmetric, attr, getattr(metric, attr))
else:
with db.session.no_autoflush:
metric.datasource_id = self.datasource_id
db.session.add(metric)
|
[
"def",
"refresh_metrics",
"(",
"self",
")",
":",
"metrics",
"=",
"self",
".",
"get_metrics",
"(",
")",
"dbmetrics",
"=",
"(",
"db",
".",
"session",
".",
"query",
"(",
"DruidMetric",
")",
".",
"filter",
"(",
"DruidMetric",
".",
"datasource_id",
"==",
"self",
".",
"datasource_id",
")",
".",
"filter",
"(",
"DruidMetric",
".",
"metric_name",
".",
"in_",
"(",
"metrics",
".",
"keys",
"(",
")",
")",
")",
")",
"dbmetrics",
"=",
"{",
"metric",
".",
"metric_name",
":",
"metric",
"for",
"metric",
"in",
"dbmetrics",
"}",
"for",
"metric",
"in",
"metrics",
".",
"values",
"(",
")",
":",
"dbmetric",
"=",
"dbmetrics",
".",
"get",
"(",
"metric",
".",
"metric_name",
")",
"if",
"dbmetric",
":",
"for",
"attr",
"in",
"[",
"'json'",
",",
"'metric_type'",
"]",
":",
"setattr",
"(",
"dbmetric",
",",
"attr",
",",
"getattr",
"(",
"metric",
",",
"attr",
")",
")",
"else",
":",
"with",
"db",
".",
"session",
".",
"no_autoflush",
":",
"metric",
".",
"datasource_id",
"=",
"self",
".",
"datasource_id",
"db",
".",
"session",
".",
"add",
"(",
"metric",
")"
] |
Refresh metrics based on the column metadata
|
[
"Refresh",
"metrics",
"based",
"on",
"the",
"column",
"metadata"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L309-L326
|
21,423
|
apache/incubator-superset
|
superset/connectors/druid/models.py
|
DruidDatasource.sync_to_db_from_config
|
def sync_to_db_from_config(
cls,
druid_config,
user,
cluster,
refresh=True):
"""Merges the ds config from druid_config into one stored in the db."""
session = db.session
datasource = (
session.query(cls)
.filter_by(datasource_name=druid_config['name'])
.first()
)
# Create a new datasource.
if not datasource:
datasource = cls(
datasource_name=druid_config['name'],
cluster=cluster,
owners=[user],
changed_by_fk=user.id,
created_by_fk=user.id,
)
session.add(datasource)
elif not refresh:
return
dimensions = druid_config['dimensions']
col_objs = (
session.query(DruidColumn)
.filter(DruidColumn.datasource_id == datasource.id)
.filter(DruidColumn.column_name.in_(dimensions))
)
col_objs = {col.column_name: col for col in col_objs}
for dim in dimensions:
col_obj = col_objs.get(dim, None)
if not col_obj:
col_obj = DruidColumn(
datasource_id=datasource.id,
column_name=dim,
groupby=True,
filterable=True,
# TODO: fetch type from Hive.
type='STRING',
datasource=datasource,
)
session.add(col_obj)
# Import Druid metrics
metric_objs = (
session.query(DruidMetric)
.filter(DruidMetric.datasource_id == datasource.id)
.filter(DruidMetric.metric_name.in_(
spec['name'] for spec in druid_config['metrics_spec']
))
)
metric_objs = {metric.metric_name: metric for metric in metric_objs}
for metric_spec in druid_config['metrics_spec']:
metric_name = metric_spec['name']
metric_type = metric_spec['type']
metric_json = json.dumps(metric_spec)
if metric_type == 'count':
metric_type = 'longSum'
metric_json = json.dumps({
'type': 'longSum',
'name': metric_name,
'fieldName': metric_name,
})
metric_obj = metric_objs.get(metric_name, None)
if not metric_obj:
metric_obj = DruidMetric(
metric_name=metric_name,
metric_type=metric_type,
verbose_name='%s(%s)' % (metric_type, metric_name),
datasource=datasource,
json=metric_json,
description=(
'Imported from the airolap config dir for %s' %
druid_config['name']),
)
session.add(metric_obj)
session.commit()
|
python
|
def sync_to_db_from_config(
cls,
druid_config,
user,
cluster,
refresh=True):
"""Merges the ds config from druid_config into one stored in the db."""
session = db.session
datasource = (
session.query(cls)
.filter_by(datasource_name=druid_config['name'])
.first()
)
# Create a new datasource.
if not datasource:
datasource = cls(
datasource_name=druid_config['name'],
cluster=cluster,
owners=[user],
changed_by_fk=user.id,
created_by_fk=user.id,
)
session.add(datasource)
elif not refresh:
return
dimensions = druid_config['dimensions']
col_objs = (
session.query(DruidColumn)
.filter(DruidColumn.datasource_id == datasource.id)
.filter(DruidColumn.column_name.in_(dimensions))
)
col_objs = {col.column_name: col for col in col_objs}
for dim in dimensions:
col_obj = col_objs.get(dim, None)
if not col_obj:
col_obj = DruidColumn(
datasource_id=datasource.id,
column_name=dim,
groupby=True,
filterable=True,
# TODO: fetch type from Hive.
type='STRING',
datasource=datasource,
)
session.add(col_obj)
# Import Druid metrics
metric_objs = (
session.query(DruidMetric)
.filter(DruidMetric.datasource_id == datasource.id)
.filter(DruidMetric.metric_name.in_(
spec['name'] for spec in druid_config['metrics_spec']
))
)
metric_objs = {metric.metric_name: metric for metric in metric_objs}
for metric_spec in druid_config['metrics_spec']:
metric_name = metric_spec['name']
metric_type = metric_spec['type']
metric_json = json.dumps(metric_spec)
if metric_type == 'count':
metric_type = 'longSum'
metric_json = json.dumps({
'type': 'longSum',
'name': metric_name,
'fieldName': metric_name,
})
metric_obj = metric_objs.get(metric_name, None)
if not metric_obj:
metric_obj = DruidMetric(
metric_name=metric_name,
metric_type=metric_type,
verbose_name='%s(%s)' % (metric_type, metric_name),
datasource=datasource,
json=metric_json,
description=(
'Imported from the airolap config dir for %s' %
druid_config['name']),
)
session.add(metric_obj)
session.commit()
|
[
"def",
"sync_to_db_from_config",
"(",
"cls",
",",
"druid_config",
",",
"user",
",",
"cluster",
",",
"refresh",
"=",
"True",
")",
":",
"session",
"=",
"db",
".",
"session",
"datasource",
"=",
"(",
"session",
".",
"query",
"(",
"cls",
")",
".",
"filter_by",
"(",
"datasource_name",
"=",
"druid_config",
"[",
"'name'",
"]",
")",
".",
"first",
"(",
")",
")",
"# Create a new datasource.",
"if",
"not",
"datasource",
":",
"datasource",
"=",
"cls",
"(",
"datasource_name",
"=",
"druid_config",
"[",
"'name'",
"]",
",",
"cluster",
"=",
"cluster",
",",
"owners",
"=",
"[",
"user",
"]",
",",
"changed_by_fk",
"=",
"user",
".",
"id",
",",
"created_by_fk",
"=",
"user",
".",
"id",
",",
")",
"session",
".",
"add",
"(",
"datasource",
")",
"elif",
"not",
"refresh",
":",
"return",
"dimensions",
"=",
"druid_config",
"[",
"'dimensions'",
"]",
"col_objs",
"=",
"(",
"session",
".",
"query",
"(",
"DruidColumn",
")",
".",
"filter",
"(",
"DruidColumn",
".",
"datasource_id",
"==",
"datasource",
".",
"id",
")",
".",
"filter",
"(",
"DruidColumn",
".",
"column_name",
".",
"in_",
"(",
"dimensions",
")",
")",
")",
"col_objs",
"=",
"{",
"col",
".",
"column_name",
":",
"col",
"for",
"col",
"in",
"col_objs",
"}",
"for",
"dim",
"in",
"dimensions",
":",
"col_obj",
"=",
"col_objs",
".",
"get",
"(",
"dim",
",",
"None",
")",
"if",
"not",
"col_obj",
":",
"col_obj",
"=",
"DruidColumn",
"(",
"datasource_id",
"=",
"datasource",
".",
"id",
",",
"column_name",
"=",
"dim",
",",
"groupby",
"=",
"True",
",",
"filterable",
"=",
"True",
",",
"# TODO: fetch type from Hive.",
"type",
"=",
"'STRING'",
",",
"datasource",
"=",
"datasource",
",",
")",
"session",
".",
"add",
"(",
"col_obj",
")",
"# Import Druid metrics",
"metric_objs",
"=",
"(",
"session",
".",
"query",
"(",
"DruidMetric",
")",
".",
"filter",
"(",
"DruidMetric",
".",
"datasource_id",
"==",
"datasource",
".",
"id",
")",
".",
"filter",
"(",
"DruidMetric",
".",
"metric_name",
".",
"in_",
"(",
"spec",
"[",
"'name'",
"]",
"for",
"spec",
"in",
"druid_config",
"[",
"'metrics_spec'",
"]",
")",
")",
")",
"metric_objs",
"=",
"{",
"metric",
".",
"metric_name",
":",
"metric",
"for",
"metric",
"in",
"metric_objs",
"}",
"for",
"metric_spec",
"in",
"druid_config",
"[",
"'metrics_spec'",
"]",
":",
"metric_name",
"=",
"metric_spec",
"[",
"'name'",
"]",
"metric_type",
"=",
"metric_spec",
"[",
"'type'",
"]",
"metric_json",
"=",
"json",
".",
"dumps",
"(",
"metric_spec",
")",
"if",
"metric_type",
"==",
"'count'",
":",
"metric_type",
"=",
"'longSum'",
"metric_json",
"=",
"json",
".",
"dumps",
"(",
"{",
"'type'",
":",
"'longSum'",
",",
"'name'",
":",
"metric_name",
",",
"'fieldName'",
":",
"metric_name",
",",
"}",
")",
"metric_obj",
"=",
"metric_objs",
".",
"get",
"(",
"metric_name",
",",
"None",
")",
"if",
"not",
"metric_obj",
":",
"metric_obj",
"=",
"DruidMetric",
"(",
"metric_name",
"=",
"metric_name",
",",
"metric_type",
"=",
"metric_type",
",",
"verbose_name",
"=",
"'%s(%s)'",
"%",
"(",
"metric_type",
",",
"metric_name",
")",
",",
"datasource",
"=",
"datasource",
",",
"json",
"=",
"metric_json",
",",
"description",
"=",
"(",
"'Imported from the airolap config dir for %s'",
"%",
"druid_config",
"[",
"'name'",
"]",
")",
",",
")",
"session",
".",
"add",
"(",
"metric_obj",
")",
"session",
".",
"commit",
"(",
")"
] |
Merges the ds config from druid_config into one stored in the db.
|
[
"Merges",
"the",
"ds",
"config",
"from",
"druid_config",
"into",
"one",
"stored",
"in",
"the",
"db",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L590-L671
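One detail worth noting in the loop above: a plain `count` metric from the ingestion spec is rewritten as a `longSum` over the pre-aggregated count column, since a rolled-up count has to be re-aggregated that way at query time. A small standalone sketch of that rewrite (spec values are illustrative):

import json

metric_spec = {'type': 'count', 'name': 'count'}   # hypothetical ingestion metric

metric_type = metric_spec['type']
metric_json = json.dumps(metric_spec)
if metric_type == 'count':
    metric_type = 'longSum'
    metric_json = json.dumps({
        'type': 'longSum',
        'name': metric_spec['name'],
        'fieldName': metric_spec['name'],
    })
print(metric_type, metric_json)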
|
21,424
|
apache/incubator-superset
|
superset/connectors/druid/models.py
|
DruidDatasource.get_post_agg
|
def get_post_agg(mconf):
"""
For a metric specified as `postagg` returns the
kind of post aggregation for pydruid.
"""
if mconf.get('type') == 'javascript':
return JavascriptPostAggregator(
name=mconf.get('name', ''),
field_names=mconf.get('fieldNames', []),
function=mconf.get('function', ''))
elif mconf.get('type') == 'quantile':
return Quantile(
mconf.get('name', ''),
mconf.get('probability', ''),
)
elif mconf.get('type') == 'quantiles':
return Quantiles(
mconf.get('name', ''),
mconf.get('probabilities', ''),
)
elif mconf.get('type') == 'fieldAccess':
return Field(mconf.get('name'))
elif mconf.get('type') == 'constant':
return Const(
mconf.get('value'),
output_name=mconf.get('name', ''),
)
elif mconf.get('type') == 'hyperUniqueCardinality':
return HyperUniqueCardinality(
mconf.get('name'),
)
elif mconf.get('type') == 'arithmetic':
return Postaggregator(
mconf.get('fn', '/'),
mconf.get('fields', []),
mconf.get('name', ''))
else:
return CustomPostAggregator(
mconf.get('name', ''),
mconf)
|
python
|
def get_post_agg(mconf):
"""
For a metric specified as `postagg` returns the
kind of post aggregation for pydruid.
"""
if mconf.get('type') == 'javascript':
return JavascriptPostAggregator(
name=mconf.get('name', ''),
field_names=mconf.get('fieldNames', []),
function=mconf.get('function', ''))
elif mconf.get('type') == 'quantile':
return Quantile(
mconf.get('name', ''),
mconf.get('probability', ''),
)
elif mconf.get('type') == 'quantiles':
return Quantiles(
mconf.get('name', ''),
mconf.get('probabilities', ''),
)
elif mconf.get('type') == 'fieldAccess':
return Field(mconf.get('name'))
elif mconf.get('type') == 'constant':
return Const(
mconf.get('value'),
output_name=mconf.get('name', ''),
)
elif mconf.get('type') == 'hyperUniqueCardinality':
return HyperUniqueCardinality(
mconf.get('name'),
)
elif mconf.get('type') == 'arithmetic':
return Postaggregator(
mconf.get('fn', '/'),
mconf.get('fields', []),
mconf.get('name', ''))
else:
return CustomPostAggregator(
mconf.get('name', ''),
mconf)
|
[
"def",
"get_post_agg",
"(",
"mconf",
")",
":",
"if",
"mconf",
".",
"get",
"(",
"'type'",
")",
"==",
"'javascript'",
":",
"return",
"JavascriptPostAggregator",
"(",
"name",
"=",
"mconf",
".",
"get",
"(",
"'name'",
",",
"''",
")",
",",
"field_names",
"=",
"mconf",
".",
"get",
"(",
"'fieldNames'",
",",
"[",
"]",
")",
",",
"function",
"=",
"mconf",
".",
"get",
"(",
"'function'",
",",
"''",
")",
")",
"elif",
"mconf",
".",
"get",
"(",
"'type'",
")",
"==",
"'quantile'",
":",
"return",
"Quantile",
"(",
"mconf",
".",
"get",
"(",
"'name'",
",",
"''",
")",
",",
"mconf",
".",
"get",
"(",
"'probability'",
",",
"''",
")",
",",
")",
"elif",
"mconf",
".",
"get",
"(",
"'type'",
")",
"==",
"'quantiles'",
":",
"return",
"Quantiles",
"(",
"mconf",
".",
"get",
"(",
"'name'",
",",
"''",
")",
",",
"mconf",
".",
"get",
"(",
"'probabilities'",
",",
"''",
")",
",",
")",
"elif",
"mconf",
".",
"get",
"(",
"'type'",
")",
"==",
"'fieldAccess'",
":",
"return",
"Field",
"(",
"mconf",
".",
"get",
"(",
"'name'",
")",
")",
"elif",
"mconf",
".",
"get",
"(",
"'type'",
")",
"==",
"'constant'",
":",
"return",
"Const",
"(",
"mconf",
".",
"get",
"(",
"'value'",
")",
",",
"output_name",
"=",
"mconf",
".",
"get",
"(",
"'name'",
",",
"''",
")",
",",
")",
"elif",
"mconf",
".",
"get",
"(",
"'type'",
")",
"==",
"'hyperUniqueCardinality'",
":",
"return",
"HyperUniqueCardinality",
"(",
"mconf",
".",
"get",
"(",
"'name'",
")",
",",
")",
"elif",
"mconf",
".",
"get",
"(",
"'type'",
")",
"==",
"'arithmetic'",
":",
"return",
"Postaggregator",
"(",
"mconf",
".",
"get",
"(",
"'fn'",
",",
"'/'",
")",
",",
"mconf",
".",
"get",
"(",
"'fields'",
",",
"[",
"]",
")",
",",
"mconf",
".",
"get",
"(",
"'name'",
",",
"''",
")",
")",
"else",
":",
"return",
"CustomPostAggregator",
"(",
"mconf",
".",
"get",
"(",
"'name'",
",",
"''",
")",
",",
"mconf",
")"
] |
For a metric specified as `postagg` returns the
kind of post aggregation for pydruid.
|
[
"For",
"a",
"metric",
"specified",
"as",
"postagg",
"returns",
"the",
"kind",
"of",
"post",
"aggregation",
"for",
"pydruid",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L731-L770
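A few illustrative `postagg` configs (hypothetical names and fields) and the branch above each one would hit:

examples = [
    {'type': 'fieldAccess', 'name': 'count'},                    # -> Field('count')
    {'type': 'constant', 'value': 100, 'name': 'hundred'},       # -> Const(100, output_name='hundred')
    {'type': 'hyperUniqueCardinality', 'name': 'unique_users'},  # -> HyperUniqueCardinality('unique_users')
    {'type': 'arithmetic', 'fn': '/', 'name': 'avg_views',       # -> Postaggregator('/', fields, 'avg_views')
     'fields': [{'type': 'fieldAccess', 'fieldName': 'sum_views'},
                {'type': 'fieldAccess', 'fieldName': 'count'}]},
]
for mconf in examples:
    print(mconf['type'], '->', mconf['name'])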
|
21,425
|
apache/incubator-superset
|
superset/connectors/druid/models.py
|
DruidDatasource.find_postaggs_for
|
def find_postaggs_for(postagg_names, metrics_dict):
"""Return a list of metrics that are post aggregations"""
postagg_metrics = [
metrics_dict[name] for name in postagg_names
if metrics_dict[name].metric_type == POST_AGG_TYPE
]
# Remove post aggregations that were found
for postagg in postagg_metrics:
postagg_names.remove(postagg.metric_name)
return postagg_metrics
|
python
|
def find_postaggs_for(postagg_names, metrics_dict):
"""Return a list of metrics that are post aggregations"""
postagg_metrics = [
metrics_dict[name] for name in postagg_names
if metrics_dict[name].metric_type == POST_AGG_TYPE
]
# Remove post aggregations that were found
for postagg in postagg_metrics:
postagg_names.remove(postagg.metric_name)
return postagg_metrics
|
[
"def",
"find_postaggs_for",
"(",
"postagg_names",
",",
"metrics_dict",
")",
":",
"postagg_metrics",
"=",
"[",
"metrics_dict",
"[",
"name",
"]",
"for",
"name",
"in",
"postagg_names",
"if",
"metrics_dict",
"[",
"name",
"]",
".",
"metric_type",
"==",
"POST_AGG_TYPE",
"]",
"# Remove post aggregations that were found",
"for",
"postagg",
"in",
"postagg_metrics",
":",
"postagg_names",
".",
"remove",
"(",
"postagg",
".",
"metric_name",
")",
"return",
"postagg_metrics"
] |
Return a list of metrics that are post aggregations
|
[
"Return",
"a",
"list",
"of",
"metrics",
"that",
"are",
"post",
"aggregations"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L773-L782
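Note that the helper also mutates the `postagg_names` list it is given. A standalone sketch with lightweight stand-ins for the metric objects (names and types are made up):

from collections import namedtuple

Metric = namedtuple('Metric', ['metric_name', 'metric_type'])
POST_AGG_TYPE = 'postagg'

metrics_dict = {
    'ratio': Metric('ratio', POST_AGG_TYPE),   # a post-aggregation
    'count': Metric('count', 'longSum'),       # a plain aggregation
}
names = ['ratio', 'count']
postaggs = [metrics_dict[n] for n in names
            if metrics_dict[n].metric_type == POST_AGG_TYPE]
for metric in postaggs:
    names.remove(metric.metric_name)
print([m.metric_name for m in postaggs])  # ['ratio']
print(names)                              # ['count'] -- the input list shrank in place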
|
21,426
|
apache/incubator-superset
|
superset/connectors/druid/models.py
|
DruidDatasource.values_for_column
|
def values_for_column(self,
column_name,
limit=10000):
"""Retrieve some values for the given column"""
logging.info(
'Getting values for columns [{}] limited to [{}]'
.format(column_name, limit))
# TODO: Use Lexicographic TopNMetricSpec once supported by PyDruid
if self.fetch_values_from:
from_dttm = utils.parse_human_datetime(self.fetch_values_from)
else:
from_dttm = datetime(1970, 1, 1)
qry = dict(
datasource=self.datasource_name,
granularity='all',
intervals=from_dttm.isoformat() + '/' + datetime.now().isoformat(),
aggregations=dict(count=count('count')),
dimension=column_name,
metric='count',
threshold=limit,
)
client = self.cluster.get_pydruid_client()
client.topn(**qry)
df = client.export_pandas()
return [row[column_name] for row in df.to_records(index=False)]
|
python
|
def values_for_column(self,
column_name,
limit=10000):
"""Retrieve some values for the given column"""
logging.info(
'Getting values for columns [{}] limited to [{}]'
.format(column_name, limit))
# TODO: Use Lexicographic TopNMetricSpec once supported by PyDruid
if self.fetch_values_from:
from_dttm = utils.parse_human_datetime(self.fetch_values_from)
else:
from_dttm = datetime(1970, 1, 1)
qry = dict(
datasource=self.datasource_name,
granularity='all',
intervals=from_dttm.isoformat() + '/' + datetime.now().isoformat(),
aggregations=dict(count=count('count')),
dimension=column_name,
metric='count',
threshold=limit,
)
client = self.cluster.get_pydruid_client()
client.topn(**qry)
df = client.export_pandas()
return [row[column_name] for row in df.to_records(index=False)]
|
[
"def",
"values_for_column",
"(",
"self",
",",
"column_name",
",",
"limit",
"=",
"10000",
")",
":",
"logging",
".",
"info",
"(",
"'Getting values for columns [{}] limited to [{}]'",
".",
"format",
"(",
"column_name",
",",
"limit",
")",
")",
"# TODO: Use Lexicographic TopNMetricSpec once supported by PyDruid",
"if",
"self",
".",
"fetch_values_from",
":",
"from_dttm",
"=",
"utils",
".",
"parse_human_datetime",
"(",
"self",
".",
"fetch_values_from",
")",
"else",
":",
"from_dttm",
"=",
"datetime",
"(",
"1970",
",",
"1",
",",
"1",
")",
"qry",
"=",
"dict",
"(",
"datasource",
"=",
"self",
".",
"datasource_name",
",",
"granularity",
"=",
"'all'",
",",
"intervals",
"=",
"from_dttm",
".",
"isoformat",
"(",
")",
"+",
"'/'",
"+",
"datetime",
".",
"now",
"(",
")",
".",
"isoformat",
"(",
")",
",",
"aggregations",
"=",
"dict",
"(",
"count",
"=",
"count",
"(",
"'count'",
")",
")",
",",
"dimension",
"=",
"column_name",
",",
"metric",
"=",
"'count'",
",",
"threshold",
"=",
"limit",
",",
")",
"client",
"=",
"self",
".",
"cluster",
".",
"get_pydruid_client",
"(",
")",
"client",
".",
"topn",
"(",
"*",
"*",
"qry",
")",
"df",
"=",
"client",
".",
"export_pandas",
"(",
")",
"return",
"[",
"row",
"[",
"column_name",
"]",
"for",
"row",
"in",
"df",
".",
"to_records",
"(",
"index",
"=",
"False",
")",
"]"
] |
Retrieve some values for the given column
|
[
"Retrieve",
"some",
"values",
"for",
"the",
"given",
"column"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L857-L883
|
21,427
|
apache/incubator-superset
|
superset/connectors/druid/models.py
|
DruidDatasource.get_aggregations
|
def get_aggregations(metrics_dict, saved_metrics, adhoc_metrics=[]):
"""
Returns a dictionary of aggregation metric names to aggregation json objects
:param metrics_dict: dictionary of all the metrics
:param saved_metrics: list of saved metric names
:param adhoc_metrics: list of adhoc metric names
:raise SupersetException: if one or more metric names are not aggregations
"""
aggregations = OrderedDict()
invalid_metric_names = []
for metric_name in saved_metrics:
if metric_name in metrics_dict:
metric = metrics_dict[metric_name]
if metric.metric_type == POST_AGG_TYPE:
invalid_metric_names.append(metric_name)
else:
aggregations[metric_name] = metric.json_obj
else:
invalid_metric_names.append(metric_name)
if len(invalid_metric_names) > 0:
raise SupersetException(
_('Metric(s) {} must be aggregations.').format(invalid_metric_names))
for adhoc_metric in adhoc_metrics:
aggregations[adhoc_metric['label']] = {
'fieldName': adhoc_metric['column']['column_name'],
'fieldNames': [adhoc_metric['column']['column_name']],
'type': DruidDatasource.druid_type_from_adhoc_metric(adhoc_metric),
'name': adhoc_metric['label'],
}
return aggregations
|
python
|
def get_aggregations(metrics_dict, saved_metrics, adhoc_metrics=[]):
"""
Returns a dictionary of aggregation metric names to aggregation json objects
:param metrics_dict: dictionary of all the metrics
:param saved_metrics: list of saved metric names
:param adhoc_metrics: list of adhoc metric names
:raise SupersetException: if one or more metric names are not aggregations
"""
aggregations = OrderedDict()
invalid_metric_names = []
for metric_name in saved_metrics:
if metric_name in metrics_dict:
metric = metrics_dict[metric_name]
if metric.metric_type == POST_AGG_TYPE:
invalid_metric_names.append(metric_name)
else:
aggregations[metric_name] = metric.json_obj
else:
invalid_metric_names.append(metric_name)
if len(invalid_metric_names) > 0:
raise SupersetException(
_('Metric(s) {} must be aggregations.').format(invalid_metric_names))
for adhoc_metric in adhoc_metrics:
aggregations[adhoc_metric['label']] = {
'fieldName': adhoc_metric['column']['column_name'],
'fieldNames': [adhoc_metric['column']['column_name']],
'type': DruidDatasource.druid_type_from_adhoc_metric(adhoc_metric),
'name': adhoc_metric['label'],
}
return aggregations
|
[
"def",
"get_aggregations",
"(",
"metrics_dict",
",",
"saved_metrics",
",",
"adhoc_metrics",
"=",
"[",
"]",
")",
":",
"aggregations",
"=",
"OrderedDict",
"(",
")",
"invalid_metric_names",
"=",
"[",
"]",
"for",
"metric_name",
"in",
"saved_metrics",
":",
"if",
"metric_name",
"in",
"metrics_dict",
":",
"metric",
"=",
"metrics_dict",
"[",
"metric_name",
"]",
"if",
"metric",
".",
"metric_type",
"==",
"POST_AGG_TYPE",
":",
"invalid_metric_names",
".",
"append",
"(",
"metric_name",
")",
"else",
":",
"aggregations",
"[",
"metric_name",
"]",
"=",
"metric",
".",
"json_obj",
"else",
":",
"invalid_metric_names",
".",
"append",
"(",
"metric_name",
")",
"if",
"len",
"(",
"invalid_metric_names",
")",
">",
"0",
":",
"raise",
"SupersetException",
"(",
"_",
"(",
"'Metric(s) {} must be aggregations.'",
")",
".",
"format",
"(",
"invalid_metric_names",
")",
")",
"for",
"adhoc_metric",
"in",
"adhoc_metrics",
":",
"aggregations",
"[",
"adhoc_metric",
"[",
"'label'",
"]",
"]",
"=",
"{",
"'fieldName'",
":",
"adhoc_metric",
"[",
"'column'",
"]",
"[",
"'column_name'",
"]",
",",
"'fieldNames'",
":",
"[",
"adhoc_metric",
"[",
"'column'",
"]",
"[",
"'column_name'",
"]",
"]",
",",
"'type'",
":",
"DruidDatasource",
".",
"druid_type_from_adhoc_metric",
"(",
"adhoc_metric",
")",
",",
"'name'",
":",
"adhoc_metric",
"[",
"'label'",
"]",
",",
"}",
"return",
"aggregations"
] |
Returns a dictionary of aggregation metric names to aggregation json objects
:param metrics_dict: dictionary of all the metrics
:param saved_metrics: list of saved metric names
:param adhoc_metrics: list of adhoc metric names
:raise SupersetException: if one or more metric names are not aggregations
|
[
"Returns",
"a",
"dictionary",
"of",
"aggregation",
"metric",
"names",
"to",
"aggregation",
"json",
"objects"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L940-L970
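An illustrative adhoc metric (made-up label and column) and the aggregation entry the loop above would produce for it; the `type` value below is an assumption about what `druid_type_from_adhoc_metric` returns for this column, not verified here:

adhoc_metric = {
    'label': 'sum__num',
    'aggregate': 'SUM',
    'column': {'column_name': 'num', 'type': 'BIGINT'},
}
aggregation_entry = {
    'fieldName': adhoc_metric['column']['column_name'],
    'fieldNames': [adhoc_metric['column']['column_name']],
    'type': 'longSum',   # assumption: result of druid_type_from_adhoc_metric for this metric
    'name': adhoc_metric['label'],
}
print({adhoc_metric['label']: aggregation_entry})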
|
21,428
|
apache/incubator-superset
|
superset/connectors/druid/models.py
|
DruidDatasource._dimensions_to_values
|
def _dimensions_to_values(dimensions):
"""
Replace dimensions specs with their `dimension`
values, and ignore those without
"""
values = []
for dimension in dimensions:
if isinstance(dimension, dict):
if 'extractionFn' in dimension:
values.append(dimension)
elif 'dimension' in dimension:
values.append(dimension['dimension'])
else:
values.append(dimension)
return values
|
python
|
def _dimensions_to_values(dimensions):
"""
Replace dimensions specs with their `dimension`
values, and ignore those without
"""
values = []
for dimension in dimensions:
if isinstance(dimension, dict):
if 'extractionFn' in dimension:
values.append(dimension)
elif 'dimension' in dimension:
values.append(dimension['dimension'])
else:
values.append(dimension)
return values
|
[
"def",
"_dimensions_to_values",
"(",
"dimensions",
")",
":",
"values",
"=",
"[",
"]",
"for",
"dimension",
"in",
"dimensions",
":",
"if",
"isinstance",
"(",
"dimension",
",",
"dict",
")",
":",
"if",
"'extractionFn'",
"in",
"dimension",
":",
"values",
".",
"append",
"(",
"dimension",
")",
"elif",
"'dimension'",
"in",
"dimension",
":",
"values",
".",
"append",
"(",
"dimension",
"[",
"'dimension'",
"]",
")",
"else",
":",
"values",
".",
"append",
"(",
"dimension",
")",
"return",
"values"
] |
Replace dimensions specs with their `dimension`
values, and ignore those without
|
[
"Replace",
"dimensions",
"specs",
"with",
"their",
"dimension",
"values",
"and",
"ignore",
"those",
"without"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L1010-L1025
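Illustrative input (hypothetical dimension specs) and the values the helper would keep:

dimensions = [
    'country',                                          # plain name: kept as-is
    {'dimension': 'city', 'outputName': 'city_name'},   # spec without extractionFn: reduced to 'city'
    {'dimension': 'ts',                                 # spec with extractionFn: kept whole
     'extractionFn': {'type': 'timeFormat', 'format': 'EEEE'}},
]
# _dimensions_to_values(dimensions) would return:
#   ['country', 'city', {'dimension': 'ts', 'extractionFn': {...}}]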
|
21,429
|
apache/incubator-superset
|
superset/connectors/druid/models.py
|
DruidDatasource.homogenize_types
|
def homogenize_types(df, groupby_cols):
"""Converting all GROUPBY columns to strings
When grouping by a numeric (say FLOAT) column, pydruid returns
strings in the dataframe. This creates issues downstream related
to having mixed types in the dataframe
Here we replace None with <NULL> and make the whole series a
str instead of an object.
"""
for col in groupby_cols:
df[col] = df[col].fillna('<NULL>').astype('unicode')
return df
|
python
|
def homogenize_types(df, groupby_cols):
"""Converting all GROUPBY columns to strings
When grouping by a numeric (say FLOAT) column, pydruid returns
strings in the dataframe. This creates issues downstream related
to having mixed types in the dataframe
Here we replace None with <NULL> and make the whole series a
str instead of an object.
"""
for col in groupby_cols:
df[col] = df[col].fillna('<NULL>').astype('unicode')
return df
|
[
"def",
"homogenize_types",
"(",
"df",
",",
"groupby_cols",
")",
":",
"for",
"col",
"in",
"groupby_cols",
":",
"df",
"[",
"col",
"]",
"=",
"df",
"[",
"col",
"]",
".",
"fillna",
"(",
"'<NULL>'",
")",
".",
"astype",
"(",
"'unicode'",
")",
"return",
"df"
] |
Converting all GROUPBY columns to strings
When grouping by a numeric (say FLOAT) column, pydruid returns
strings in the dataframe. This creates issues downstream related
to having mixed types in the dataframe
Here we replace None with <NULL> and make the whole series a
str instead of an object.
|
[
"Converting",
"all",
"GROUPBY",
"columns",
"to",
"strings"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L1271-L1283
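A minimal pandas illustration of the same coercion, using `astype(str)`, which on Python 3 behaves like the `'unicode'` alias used above (column and values are made up):

import pandas as pd

df = pd.DataFrame({'dept': [75.0, 13.0, None], 'births': [10, 20, 30]})
for col in ['dept']:
    df[col] = df[col].fillna('<NULL>').astype(str)
print(df['dept'].tolist())  # ['75.0', '13.0', '<NULL>'] -- a single string dtype, safe to group by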
|
21,430
|
apache/incubator-superset
|
contrib/docker/superset_config.py
|
get_env_variable
|
def get_env_variable(var_name, default=None):
"""Get the environment variable or raise exception."""
try:
return os.environ[var_name]
except KeyError:
if default is not None:
return default
else:
error_msg = 'The environment variable {} was missing, abort...'\
.format(var_name)
raise EnvironmentError(error_msg)
|
python
|
def get_env_variable(var_name, default=None):
"""Get the environment variable or raise exception."""
try:
return os.environ[var_name]
except KeyError:
if default is not None:
return default
else:
error_msg = 'The environment variable {} was missing, abort...'\
.format(var_name)
raise EnvironmentError(error_msg)
|
[
"def",
"get_env_variable",
"(",
"var_name",
",",
"default",
"=",
"None",
")",
":",
"try",
":",
"return",
"os",
".",
"environ",
"[",
"var_name",
"]",
"except",
"KeyError",
":",
"if",
"default",
"is",
"not",
"None",
":",
"return",
"default",
"else",
":",
"error_msg",
"=",
"'The environment variable {} was missing, abort...'",
".",
"format",
"(",
"var_name",
")",
"raise",
"EnvironmentError",
"(",
"error_msg",
")"
] |
Get the environment variable or raise exception.
|
[
"Get",
"the",
"environment",
"variable",
"or",
"raise",
"exception",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/contrib/docker/superset_config.py#L20-L30
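Typical usage inside a config module, assuming the function above is in scope (variable names and values are illustrative):

import os

os.environ.setdefault('POSTGRES_HOST', 'db')         # simulate the container environment

postgres_host = get_env_variable('POSTGRES_HOST')    # -> 'db'
redis_port = get_env_variable('REDIS_PORT', '6379')  # falls back to the default
# get_env_variable('SECRET_KEY')                     # raises EnvironmentError when unset and no default given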
|
21,431
|
apache/incubator-superset
|
superset/connectors/connector_registry.py
|
ConnectorRegistry.get_eager_datasource
|
def get_eager_datasource(cls, session, datasource_type, datasource_id):
"""Returns datasource with columns and metrics."""
datasource_class = ConnectorRegistry.sources[datasource_type]
return (
session.query(datasource_class)
.options(
subqueryload(datasource_class.columns),
subqueryload(datasource_class.metrics),
)
.filter_by(id=datasource_id)
.one()
)
|
python
|
def get_eager_datasource(cls, session, datasource_type, datasource_id):
"""Returns datasource with columns and metrics."""
datasource_class = ConnectorRegistry.sources[datasource_type]
return (
session.query(datasource_class)
.options(
subqueryload(datasource_class.columns),
subqueryload(datasource_class.metrics),
)
.filter_by(id=datasource_id)
.one()
)
|
[
"def",
"get_eager_datasource",
"(",
"cls",
",",
"session",
",",
"datasource_type",
",",
"datasource_id",
")",
":",
"datasource_class",
"=",
"ConnectorRegistry",
".",
"sources",
"[",
"datasource_type",
"]",
"return",
"(",
"session",
".",
"query",
"(",
"datasource_class",
")",
".",
"options",
"(",
"subqueryload",
"(",
"datasource_class",
".",
"columns",
")",
",",
"subqueryload",
"(",
"datasource_class",
".",
"metrics",
")",
",",
")",
".",
"filter_by",
"(",
"id",
"=",
"datasource_id",
")",
".",
"one",
"(",
")",
")"
] |
Returns datasource with columns and metrics.
|
[
"Returns",
"datasource",
"with",
"columns",
"and",
"metrics",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/connector_registry.py#L76-L87
|
21,432
|
apache/incubator-superset
|
superset/data/misc_dashboard.py
|
load_misc_dashboard
|
def load_misc_dashboard():
"""Loading a dashboard featuring misc charts"""
print('Creating the dashboard')
db.session.expunge_all()
dash = db.session.query(Dash).filter_by(slug=DASH_SLUG).first()
if not dash:
dash = Dash()
js = textwrap.dedent("""\
{
"CHART-BkeVbh8ANQ": {
"children": [],
"id": "CHART-BkeVbh8ANQ",
"meta": {
"chartId": 4004,
"height": 34,
"sliceName": "Multi Line",
"width": 8
},
"type": "CHART"
},
"CHART-H1HYNzEANX": {
"children": [],
"id": "CHART-H1HYNzEANX",
"meta": {
"chartId": 3940,
"height": 50,
"sliceName": "Energy Sankey",
"width": 6
},
"type": "CHART"
},
"CHART-HJOYVMV0E7": {
"children": [],
"id": "CHART-HJOYVMV0E7",
"meta": {
"chartId": 3969,
"height": 63,
"sliceName": "Mapbox Long/Lat",
"width": 6
},
"type": "CHART"
},
"CHART-S1WYNz4AVX": {
"children": [],
"id": "CHART-S1WYNz4AVX",
"meta": {
"chartId": 3989,
"height": 25,
"sliceName": "Parallel Coordinates",
"width": 4
},
"type": "CHART"
},
"CHART-r19KVMNCE7": {
"children": [],
"id": "CHART-r19KVMNCE7",
"meta": {
"chartId": 3971,
"height": 34,
"sliceName": "Calendar Heatmap multiformat 0",
"width": 4
},
"type": "CHART"
},
"CHART-rJ4K4GV04Q": {
"children": [],
"id": "CHART-rJ4K4GV04Q",
"meta": {
"chartId": 3941,
"height": 63,
"sliceName": "Energy Force Layout",
"width": 6
},
"type": "CHART"
},
"CHART-rkgF4G4A4X": {
"children": [],
"id": "CHART-rkgF4G4A4X",
"meta": {
"chartId": 3970,
"height": 25,
"sliceName": "Birth in France by department in 2016",
"width": 8
},
"type": "CHART"
},
"CHART-rywK4GVR4X": {
"children": [],
"id": "CHART-rywK4GVR4X",
"meta": {
"chartId": 3942,
"height": 50,
"sliceName": "Heatmap",
"width": 6
},
"type": "CHART"
},
"COLUMN-ByUFVf40EQ": {
"children": [
"CHART-rywK4GVR4X",
"CHART-HJOYVMV0E7"
],
"id": "COLUMN-ByUFVf40EQ",
"meta": {
"background": "BACKGROUND_TRANSPARENT",
"width": 6
},
"type": "COLUMN"
},
"COLUMN-rkmYVGN04Q": {
"children": [
"CHART-rJ4K4GV04Q",
"CHART-H1HYNzEANX"
],
"id": "COLUMN-rkmYVGN04Q",
"meta": {
"background": "BACKGROUND_TRANSPARENT",
"width": 6
},
"type": "COLUMN"
},
"GRID_ID": {
"children": [
"ROW-SytNzNA4X",
"ROW-S1MK4M4A4X",
"ROW-HkFFEzVRVm"
],
"id": "GRID_ID",
"type": "GRID"
},
"HEADER_ID": {
"id": "HEADER_ID",
"meta": {
"text": "Misc Charts"
},
"type": "HEADER"
},
"ROOT_ID": {
"children": [
"GRID_ID"
],
"id": "ROOT_ID",
"type": "ROOT"
},
"ROW-HkFFEzVRVm": {
"children": [
"CHART-r19KVMNCE7",
"CHART-BkeVbh8ANQ"
],
"id": "ROW-HkFFEzVRVm",
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW"
},
"ROW-S1MK4M4A4X": {
"children": [
"COLUMN-rkmYVGN04Q",
"COLUMN-ByUFVf40EQ"
],
"id": "ROW-S1MK4M4A4X",
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW"
},
"ROW-SytNzNA4X": {
"children": [
"CHART-rkgF4G4A4X",
"CHART-S1WYNz4AVX"
],
"id": "ROW-SytNzNA4X",
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW"
},
"DASHBOARD_VERSION_KEY": "v2"
}
""")
pos = json.loads(js)
slices = (
db.session
.query(Slice)
.filter(Slice.slice_name.in_(misc_dash_slices))
.all()
)
slices = sorted(slices, key=lambda x: x.id)
update_slice_ids(pos, slices)
dash.dashboard_title = 'Misc Charts'
dash.position_json = json.dumps(pos, indent=4)
dash.slug = DASH_SLUG
dash.slices = slices
db.session.merge(dash)
db.session.commit()
|
python
|
def load_misc_dashboard():
"""Loading a dashboard featuring misc charts"""
print('Creating the dashboard')
db.session.expunge_all()
dash = db.session.query(Dash).filter_by(slug=DASH_SLUG).first()
if not dash:
dash = Dash()
js = textwrap.dedent("""\
{
"CHART-BkeVbh8ANQ": {
"children": [],
"id": "CHART-BkeVbh8ANQ",
"meta": {
"chartId": 4004,
"height": 34,
"sliceName": "Multi Line",
"width": 8
},
"type": "CHART"
},
"CHART-H1HYNzEANX": {
"children": [],
"id": "CHART-H1HYNzEANX",
"meta": {
"chartId": 3940,
"height": 50,
"sliceName": "Energy Sankey",
"width": 6
},
"type": "CHART"
},
"CHART-HJOYVMV0E7": {
"children": [],
"id": "CHART-HJOYVMV0E7",
"meta": {
"chartId": 3969,
"height": 63,
"sliceName": "Mapbox Long/Lat",
"width": 6
},
"type": "CHART"
},
"CHART-S1WYNz4AVX": {
"children": [],
"id": "CHART-S1WYNz4AVX",
"meta": {
"chartId": 3989,
"height": 25,
"sliceName": "Parallel Coordinates",
"width": 4
},
"type": "CHART"
},
"CHART-r19KVMNCE7": {
"children": [],
"id": "CHART-r19KVMNCE7",
"meta": {
"chartId": 3971,
"height": 34,
"sliceName": "Calendar Heatmap multiformat 0",
"width": 4
},
"type": "CHART"
},
"CHART-rJ4K4GV04Q": {
"children": [],
"id": "CHART-rJ4K4GV04Q",
"meta": {
"chartId": 3941,
"height": 63,
"sliceName": "Energy Force Layout",
"width": 6
},
"type": "CHART"
},
"CHART-rkgF4G4A4X": {
"children": [],
"id": "CHART-rkgF4G4A4X",
"meta": {
"chartId": 3970,
"height": 25,
"sliceName": "Birth in France by department in 2016",
"width": 8
},
"type": "CHART"
},
"CHART-rywK4GVR4X": {
"children": [],
"id": "CHART-rywK4GVR4X",
"meta": {
"chartId": 3942,
"height": 50,
"sliceName": "Heatmap",
"width": 6
},
"type": "CHART"
},
"COLUMN-ByUFVf40EQ": {
"children": [
"CHART-rywK4GVR4X",
"CHART-HJOYVMV0E7"
],
"id": "COLUMN-ByUFVf40EQ",
"meta": {
"background": "BACKGROUND_TRANSPARENT",
"width": 6
},
"type": "COLUMN"
},
"COLUMN-rkmYVGN04Q": {
"children": [
"CHART-rJ4K4GV04Q",
"CHART-H1HYNzEANX"
],
"id": "COLUMN-rkmYVGN04Q",
"meta": {
"background": "BACKGROUND_TRANSPARENT",
"width": 6
},
"type": "COLUMN"
},
"GRID_ID": {
"children": [
"ROW-SytNzNA4X",
"ROW-S1MK4M4A4X",
"ROW-HkFFEzVRVm"
],
"id": "GRID_ID",
"type": "GRID"
},
"HEADER_ID": {
"id": "HEADER_ID",
"meta": {
"text": "Misc Charts"
},
"type": "HEADER"
},
"ROOT_ID": {
"children": [
"GRID_ID"
],
"id": "ROOT_ID",
"type": "ROOT"
},
"ROW-HkFFEzVRVm": {
"children": [
"CHART-r19KVMNCE7",
"CHART-BkeVbh8ANQ"
],
"id": "ROW-HkFFEzVRVm",
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW"
},
"ROW-S1MK4M4A4X": {
"children": [
"COLUMN-rkmYVGN04Q",
"COLUMN-ByUFVf40EQ"
],
"id": "ROW-S1MK4M4A4X",
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW"
},
"ROW-SytNzNA4X": {
"children": [
"CHART-rkgF4G4A4X",
"CHART-S1WYNz4AVX"
],
"id": "ROW-SytNzNA4X",
"meta": {
"background": "BACKGROUND_TRANSPARENT"
},
"type": "ROW"
},
"DASHBOARD_VERSION_KEY": "v2"
}
""")
pos = json.loads(js)
slices = (
db.session
.query(Slice)
.filter(Slice.slice_name.in_(misc_dash_slices))
.all()
)
slices = sorted(slices, key=lambda x: x.id)
update_slice_ids(pos, slices)
dash.dashboard_title = 'Misc Charts'
dash.position_json = json.dumps(pos, indent=4)
dash.slug = DASH_SLUG
dash.slices = slices
db.session.merge(dash)
db.session.commit()
|
[
"def",
"load_misc_dashboard",
"(",
")",
":",
"print",
"(",
"'Creating the dashboard'",
")",
"db",
".",
"session",
".",
"expunge_all",
"(",
")",
"dash",
"=",
"db",
".",
"session",
".",
"query",
"(",
"Dash",
")",
".",
"filter_by",
"(",
"slug",
"=",
"DASH_SLUG",
")",
".",
"first",
"(",
")",
"if",
"not",
"dash",
":",
"dash",
"=",
"Dash",
"(",
")",
"js",
"=",
"textwrap",
".",
"dedent",
"(",
"\"\"\"\\\n{\n \"CHART-BkeVbh8ANQ\": {\n \"children\": [],\n \"id\": \"CHART-BkeVbh8ANQ\",\n \"meta\": {\n \"chartId\": 4004,\n \"height\": 34,\n \"sliceName\": \"Multi Line\",\n \"width\": 8\n },\n \"type\": \"CHART\"\n },\n \"CHART-H1HYNzEANX\": {\n \"children\": [],\n \"id\": \"CHART-H1HYNzEANX\",\n \"meta\": {\n \"chartId\": 3940,\n \"height\": 50,\n \"sliceName\": \"Energy Sankey\",\n \"width\": 6\n },\n \"type\": \"CHART\"\n },\n \"CHART-HJOYVMV0E7\": {\n \"children\": [],\n \"id\": \"CHART-HJOYVMV0E7\",\n \"meta\": {\n \"chartId\": 3969,\n \"height\": 63,\n \"sliceName\": \"Mapbox Long/Lat\",\n \"width\": 6\n },\n \"type\": \"CHART\"\n },\n \"CHART-S1WYNz4AVX\": {\n \"children\": [],\n \"id\": \"CHART-S1WYNz4AVX\",\n \"meta\": {\n \"chartId\": 3989,\n \"height\": 25,\n \"sliceName\": \"Parallel Coordinates\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-r19KVMNCE7\": {\n \"children\": [],\n \"id\": \"CHART-r19KVMNCE7\",\n \"meta\": {\n \"chartId\": 3971,\n \"height\": 34,\n \"sliceName\": \"Calendar Heatmap multiformat 0\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-rJ4K4GV04Q\": {\n \"children\": [],\n \"id\": \"CHART-rJ4K4GV04Q\",\n \"meta\": {\n \"chartId\": 3941,\n \"height\": 63,\n \"sliceName\": \"Energy Force Layout\",\n \"width\": 6\n },\n \"type\": \"CHART\"\n },\n \"CHART-rkgF4G4A4X\": {\n \"children\": [],\n \"id\": \"CHART-rkgF4G4A4X\",\n \"meta\": {\n \"chartId\": 3970,\n \"height\": 25,\n \"sliceName\": \"Birth in France by department in 2016\",\n \"width\": 8\n },\n \"type\": \"CHART\"\n },\n \"CHART-rywK4GVR4X\": {\n \"children\": [],\n \"id\": \"CHART-rywK4GVR4X\",\n \"meta\": {\n \"chartId\": 3942,\n \"height\": 50,\n \"sliceName\": \"Heatmap\",\n \"width\": 6\n },\n \"type\": \"CHART\"\n },\n \"COLUMN-ByUFVf40EQ\": {\n \"children\": [\n \"CHART-rywK4GVR4X\",\n \"CHART-HJOYVMV0E7\"\n ],\n \"id\": \"COLUMN-ByUFVf40EQ\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\",\n \"width\": 6\n },\n \"type\": \"COLUMN\"\n },\n \"COLUMN-rkmYVGN04Q\": {\n \"children\": [\n \"CHART-rJ4K4GV04Q\",\n \"CHART-H1HYNzEANX\"\n ],\n \"id\": \"COLUMN-rkmYVGN04Q\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\",\n \"width\": 6\n },\n \"type\": \"COLUMN\"\n },\n \"GRID_ID\": {\n \"children\": [\n \"ROW-SytNzNA4X\",\n \"ROW-S1MK4M4A4X\",\n \"ROW-HkFFEzVRVm\"\n ],\n \"id\": \"GRID_ID\",\n \"type\": \"GRID\"\n },\n \"HEADER_ID\": {\n \"id\": \"HEADER_ID\",\n \"meta\": {\n \"text\": \"Misc Charts\"\n },\n \"type\": \"HEADER\"\n },\n \"ROOT_ID\": {\n \"children\": [\n \"GRID_ID\"\n ],\n \"id\": \"ROOT_ID\",\n \"type\": \"ROOT\"\n },\n \"ROW-HkFFEzVRVm\": {\n \"children\": [\n \"CHART-r19KVMNCE7\",\n \"CHART-BkeVbh8ANQ\"\n ],\n \"id\": \"ROW-HkFFEzVRVm\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"ROW-S1MK4M4A4X\": {\n \"children\": [\n \"COLUMN-rkmYVGN04Q\",\n \"COLUMN-ByUFVf40EQ\"\n ],\n \"id\": \"ROW-S1MK4M4A4X\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"ROW-SytNzNA4X\": {\n \"children\": [\n \"CHART-rkgF4G4A4X\",\n \"CHART-S1WYNz4AVX\"\n ],\n \"id\": \"ROW-SytNzNA4X\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"DASHBOARD_VERSION_KEY\": \"v2\"\n}\n \"\"\"",
")",
"pos",
"=",
"json",
".",
"loads",
"(",
"js",
")",
"slices",
"=",
"(",
"db",
".",
"session",
".",
"query",
"(",
"Slice",
")",
".",
"filter",
"(",
"Slice",
".",
"slice_name",
".",
"in_",
"(",
"misc_dash_slices",
")",
")",
".",
"all",
"(",
")",
")",
"slices",
"=",
"sorted",
"(",
"slices",
",",
"key",
"=",
"lambda",
"x",
":",
"x",
".",
"id",
")",
"update_slice_ids",
"(",
"pos",
",",
"slices",
")",
"dash",
".",
"dashboard_title",
"=",
"'Misc Charts'",
"dash",
".",
"position_json",
"=",
"json",
".",
"dumps",
"(",
"pos",
",",
"indent",
"=",
"4",
")",
"dash",
".",
"slug",
"=",
"DASH_SLUG",
"dash",
".",
"slices",
"=",
"slices",
"db",
".",
"session",
".",
"merge",
"(",
"dash",
")",
"db",
".",
"session",
".",
"commit",
"(",
")"
] |
Loading a dashboard featuring misc charts
|
[
"Loading",
"a",
"dashboard",
"featuring",
"misc",
"charts"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/data/misc_dashboard.py#L32-L228
|
21,433
|
apache/incubator-superset
|
superset/data/country_map.py
|
load_country_map_data
|
def load_country_map_data():
"""Loading data for map with country map"""
csv_bytes = get_example_data(
'birth_france_data_for_country_map.csv', is_gzip=False, make_bytes=True)
data = pd.read_csv(csv_bytes, encoding='utf-8')
data['dttm'] = datetime.datetime.now().date()
data.to_sql( # pylint: disable=no-member
'birth_france_by_region',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'DEPT_ID': String(10),
'2003': BigInteger,
'2004': BigInteger,
'2005': BigInteger,
'2006': BigInteger,
'2007': BigInteger,
'2008': BigInteger,
'2009': BigInteger,
'2010': BigInteger,
'2011': BigInteger,
'2012': BigInteger,
'2013': BigInteger,
'2014': BigInteger,
'dttm': Date(),
},
index=False)
print('Done loading table!')
print('-' * 80)
print('Creating table reference')
obj = db.session.query(TBL).filter_by(table_name='birth_france_by_region').first()
if not obj:
obj = TBL(table_name='birth_france_by_region')
obj.main_dttm_col = 'dttm'
obj.database = utils.get_or_create_main_db()
if not any(col.metric_name == 'avg__2004' for col in obj.metrics):
obj.metrics.append(SqlMetric(
metric_name='avg__2004',
expression='AVG(2004)',
))
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
slice_data = {
'granularity_sqla': '',
'since': '',
'until': '',
'where': '',
'viz_type': 'country_map',
'entity': 'DEPT_ID',
'metric': {
'expressionType': 'SIMPLE',
'column': {
'type': 'INT',
'column_name': '2004',
},
'aggregate': 'AVG',
'label': 'Boys',
'optionName': 'metric_112342',
},
'row_limit': 500000,
}
print('Creating a slice')
slc = Slice(
slice_name='Birth in France by department in 2016',
viz_type='country_map',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
misc_dash_slices.add(slc.slice_name)
merge_slice(slc)
|
python
|
def load_country_map_data():
"""Loading data for map with country map"""
csv_bytes = get_example_data(
'birth_france_data_for_country_map.csv', is_gzip=False, make_bytes=True)
data = pd.read_csv(csv_bytes, encoding='utf-8')
data['dttm'] = datetime.datetime.now().date()
data.to_sql( # pylint: disable=no-member
'birth_france_by_region',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'DEPT_ID': String(10),
'2003': BigInteger,
'2004': BigInteger,
'2005': BigInteger,
'2006': BigInteger,
'2007': BigInteger,
'2008': BigInteger,
'2009': BigInteger,
'2010': BigInteger,
'2011': BigInteger,
'2012': BigInteger,
'2013': BigInteger,
'2014': BigInteger,
'dttm': Date(),
},
index=False)
print('Done loading table!')
print('-' * 80)
print('Creating table reference')
obj = db.session.query(TBL).filter_by(table_name='birth_france_by_region').first()
if not obj:
obj = TBL(table_name='birth_france_by_region')
obj.main_dttm_col = 'dttm'
obj.database = utils.get_or_create_main_db()
if not any(col.metric_name == 'avg__2004' for col in obj.metrics):
obj.metrics.append(SqlMetric(
metric_name='avg__2004',
expression='AVG(2004)',
))
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
slice_data = {
'granularity_sqla': '',
'since': '',
'until': '',
'where': '',
'viz_type': 'country_map',
'entity': 'DEPT_ID',
'metric': {
'expressionType': 'SIMPLE',
'column': {
'type': 'INT',
'column_name': '2004',
},
'aggregate': 'AVG',
'label': 'Boys',
'optionName': 'metric_112342',
},
'row_limit': 500000,
}
print('Creating a slice')
slc = Slice(
slice_name='Birth in France by department in 2016',
viz_type='country_map',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
misc_dash_slices.add(slc.slice_name)
merge_slice(slc)
|
[
"def",
"load_country_map_data",
"(",
")",
":",
"csv_bytes",
"=",
"get_example_data",
"(",
"'birth_france_data_for_country_map.csv'",
",",
"is_gzip",
"=",
"False",
",",
"make_bytes",
"=",
"True",
")",
"data",
"=",
"pd",
".",
"read_csv",
"(",
"csv_bytes",
",",
"encoding",
"=",
"'utf-8'",
")",
"data",
"[",
"'dttm'",
"]",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
".",
"date",
"(",
")",
"data",
".",
"to_sql",
"(",
"# pylint: disable=no-member",
"'birth_france_by_region'",
",",
"db",
".",
"engine",
",",
"if_exists",
"=",
"'replace'",
",",
"chunksize",
"=",
"500",
",",
"dtype",
"=",
"{",
"'DEPT_ID'",
":",
"String",
"(",
"10",
")",
",",
"'2003'",
":",
"BigInteger",
",",
"'2004'",
":",
"BigInteger",
",",
"'2005'",
":",
"BigInteger",
",",
"'2006'",
":",
"BigInteger",
",",
"'2007'",
":",
"BigInteger",
",",
"'2008'",
":",
"BigInteger",
",",
"'2009'",
":",
"BigInteger",
",",
"'2010'",
":",
"BigInteger",
",",
"'2011'",
":",
"BigInteger",
",",
"'2012'",
":",
"BigInteger",
",",
"'2013'",
":",
"BigInteger",
",",
"'2014'",
":",
"BigInteger",
",",
"'dttm'",
":",
"Date",
"(",
")",
",",
"}",
",",
"index",
"=",
"False",
")",
"print",
"(",
"'Done loading table!'",
")",
"print",
"(",
"'-'",
"*",
"80",
")",
"print",
"(",
"'Creating table reference'",
")",
"obj",
"=",
"db",
".",
"session",
".",
"query",
"(",
"TBL",
")",
".",
"filter_by",
"(",
"table_name",
"=",
"'birth_france_by_region'",
")",
".",
"first",
"(",
")",
"if",
"not",
"obj",
":",
"obj",
"=",
"TBL",
"(",
"table_name",
"=",
"'birth_france_by_region'",
")",
"obj",
".",
"main_dttm_col",
"=",
"'dttm'",
"obj",
".",
"database",
"=",
"utils",
".",
"get_or_create_main_db",
"(",
")",
"if",
"not",
"any",
"(",
"col",
".",
"metric_name",
"==",
"'avg__2004'",
"for",
"col",
"in",
"obj",
".",
"metrics",
")",
":",
"obj",
".",
"metrics",
".",
"append",
"(",
"SqlMetric",
"(",
"metric_name",
"=",
"'avg__2004'",
",",
"expression",
"=",
"'AVG(2004)'",
",",
")",
")",
"db",
".",
"session",
".",
"merge",
"(",
"obj",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"obj",
".",
"fetch_metadata",
"(",
")",
"tbl",
"=",
"obj",
"slice_data",
"=",
"{",
"'granularity_sqla'",
":",
"''",
",",
"'since'",
":",
"''",
",",
"'until'",
":",
"''",
",",
"'where'",
":",
"''",
",",
"'viz_type'",
":",
"'country_map'",
",",
"'entity'",
":",
"'DEPT_ID'",
",",
"'metric'",
":",
"{",
"'expressionType'",
":",
"'SIMPLE'",
",",
"'column'",
":",
"{",
"'type'",
":",
"'INT'",
",",
"'column_name'",
":",
"'2004'",
",",
"}",
",",
"'aggregate'",
":",
"'AVG'",
",",
"'label'",
":",
"'Boys'",
",",
"'optionName'",
":",
"'metric_112342'",
",",
"}",
",",
"'row_limit'",
":",
"500000",
",",
"}",
"print",
"(",
"'Creating a slice'",
")",
"slc",
"=",
"Slice",
"(",
"slice_name",
"=",
"'Birth in France by department in 2016'",
",",
"viz_type",
"=",
"'country_map'",
",",
"datasource_type",
"=",
"'table'",
",",
"datasource_id",
"=",
"tbl",
".",
"id",
",",
"params",
"=",
"get_slice_json",
"(",
"slice_data",
")",
",",
")",
"misc_dash_slices",
".",
"add",
"(",
"slc",
".",
"slice_name",
")",
"merge_slice",
"(",
"slc",
")"
] |
Loading data for map with country map
|
[
"Loading",
"data",
"for",
"map",
"with",
"country",
"map"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/data/country_map.py#L35-L110
|
21,434
|
apache/incubator-superset
|
superset/sql_parse.py
|
ParsedQuery.get_statements
|
def get_statements(self):
"""Returns a list of SQL statements as strings, stripped"""
statements = []
for statement in self._parsed:
if statement:
sql = str(statement).strip(' \n;\t')
if sql:
statements.append(sql)
return statements
|
python
|
def get_statements(self):
"""Returns a list of SQL statements as strings, stripped"""
statements = []
for statement in self._parsed:
if statement:
sql = str(statement).strip(' \n;\t')
if sql:
statements.append(sql)
return statements
|
[
"def",
"get_statements",
"(",
"self",
")",
":",
"statements",
"=",
"[",
"]",
"for",
"statement",
"in",
"self",
".",
"_parsed",
":",
"if",
"statement",
":",
"sql",
"=",
"str",
"(",
"statement",
")",
".",
"strip",
"(",
"' \\n;\\t'",
")",
"if",
"sql",
":",
"statements",
".",
"append",
"(",
"sql",
")",
"return",
"statements"
] |
Returns a list of SQL statements as strings, stripped
|
[
"Returns",
"a",
"list",
"of",
"SQL",
"statements",
"as",
"strings",
"stripped"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/sql_parse.py#L67-L75
|
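A hedged usage sketch for ParsedQuery.get_statements above, assuming a Superset checkout at this sha is importable and that ParsedQuery takes the raw SQL string in its constructor.

from superset.sql_parse import ParsedQuery

# Two statements separated by semicolons; get_statements() strips
# surrounding whitespace and trailing semicolons from each one.
raw_sql = "SELECT 1;\nSELECT 2;\n"
for statement in ParsedQuery(raw_sql).get_statements():
    print(statement)
# Expected output, roughly:
# SELECT 1
# SELECT 2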
21,435
|
apache/incubator-superset
|
superset/sql_parse.py
|
ParsedQuery.as_create_table
|
def as_create_table(self, table_name, overwrite=False):
"""Reformats the query into the create table as query.
Works only for the single select SQL statements, in all other cases
the sql query is not modified.
:param superset_query: string, sql query that will be executed
:param table_name: string, will contain the results of the
query execution
:param overwrite, boolean, table table_name will be dropped if true
:return: string, create table as query
"""
exec_sql = ''
sql = self.stripped()
if overwrite:
exec_sql = f'DROP TABLE IF EXISTS {table_name};\n'
exec_sql += f'CREATE TABLE {table_name} AS \n{sql}'
return exec_sql
|
python
|
def as_create_table(self, table_name, overwrite=False):
"""Reformats the query into the create table as query.
Works only for the single select SQL statements, in all other cases
the sql query is not modified.
:param superset_query: string, sql query that will be executed
:param table_name: string, will contain the results of the
query execution
:param overwrite, boolean, table table_name will be dropped if true
:return: string, create table as query
"""
exec_sql = ''
sql = self.stripped()
if overwrite:
exec_sql = f'DROP TABLE IF EXISTS {table_name};\n'
exec_sql += f'CREATE TABLE {table_name} AS \n{sql}'
return exec_sql
|
[
"def",
"as_create_table",
"(",
"self",
",",
"table_name",
",",
"overwrite",
"=",
"False",
")",
":",
"exec_sql",
"=",
"''",
"sql",
"=",
"self",
".",
"stripped",
"(",
")",
"if",
"overwrite",
":",
"exec_sql",
"=",
"f'DROP TABLE IF EXISTS {table_name};\\n'",
"exec_sql",
"+=",
"f'CREATE TABLE {table_name} AS \\n{sql}'",
"return",
"exec_sql"
] |
Reformats the query into the create table as query.
Works only for single SELECT SQL statements; in all other cases
the SQL query is not modified.
:param superset_query: string, sql query that will be executed
:param table_name: string, will contain the results of the
query execution
:param overwrite: boolean, table table_name will be dropped if true
:return: string, create table as query
|
[
"Reformats",
"the",
"query",
"into",
"the",
"create",
"table",
"as",
"query",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/sql_parse.py#L105-L121
|
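A minimal sketch of as_create_table in use, under the same import assumption; the table and column names are invented for illustration.

from superset.sql_parse import ParsedQuery

query = ParsedQuery("SELECT name, revenue FROM sales LIMIT 100")

# overwrite=True prepends a DROP TABLE IF EXISTS to the CTAS statement.
print(query.as_create_table("tmp_revenue", overwrite=True))
# Roughly:
# DROP TABLE IF EXISTS tmp_revenue;
# CREATE TABLE tmp_revenue AS
# SELECT name, revenue FROM sales LIMIT 100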
21,436
|
apache/incubator-superset
|
superset/sql_parse.py
|
ParsedQuery.get_query_with_new_limit
|
def get_query_with_new_limit(self, new_limit):
"""returns the query with the specified limit"""
"""does not change the underlying query"""
if not self._limit:
return self.sql + ' LIMIT ' + str(new_limit)
limit_pos = None
tokens = self._parsed[0].tokens
# Add all items to before_str until there is a limit
for pos, item in enumerate(tokens):
if item.ttype in Keyword and item.value.lower() == 'limit':
limit_pos = pos
break
limit = tokens[limit_pos + 2]
if limit.ttype == sqlparse.tokens.Literal.Number.Integer:
tokens[limit_pos + 2].value = new_limit
elif limit.is_group:
tokens[limit_pos + 2].value = (
'{}, {}'.format(next(limit.get_identifiers()), new_limit)
)
str_res = ''
for i in tokens:
str_res += str(i.value)
return str_res
|
python
|
def get_query_with_new_limit(self, new_limit):
"""returns the query with the specified limit"""
"""does not change the underlying query"""
if not self._limit:
return self.sql + ' LIMIT ' + str(new_limit)
limit_pos = None
tokens = self._parsed[0].tokens
# Add all items to before_str until there is a limit
for pos, item in enumerate(tokens):
if item.ttype in Keyword and item.value.lower() == 'limit':
limit_pos = pos
break
limit = tokens[limit_pos + 2]
if limit.ttype == sqlparse.tokens.Literal.Number.Integer:
tokens[limit_pos + 2].value = new_limit
elif limit.is_group:
tokens[limit_pos + 2].value = (
'{}, {}'.format(next(limit.get_identifiers()), new_limit)
)
str_res = ''
for i in tokens:
str_res += str(i.value)
return str_res
|
[
"def",
"get_query_with_new_limit",
"(",
"self",
",",
"new_limit",
")",
":",
"\"\"\"does not change the underlying query\"\"\"",
"if",
"not",
"self",
".",
"_limit",
":",
"return",
"self",
".",
"sql",
"+",
"' LIMIT '",
"+",
"str",
"(",
"new_limit",
")",
"limit_pos",
"=",
"None",
"tokens",
"=",
"self",
".",
"_parsed",
"[",
"0",
"]",
".",
"tokens",
"# Add all items to before_str until there is a limit",
"for",
"pos",
",",
"item",
"in",
"enumerate",
"(",
"tokens",
")",
":",
"if",
"item",
".",
"ttype",
"in",
"Keyword",
"and",
"item",
".",
"value",
".",
"lower",
"(",
")",
"==",
"'limit'",
":",
"limit_pos",
"=",
"pos",
"break",
"limit",
"=",
"tokens",
"[",
"limit_pos",
"+",
"2",
"]",
"if",
"limit",
".",
"ttype",
"==",
"sqlparse",
".",
"tokens",
".",
"Literal",
".",
"Number",
".",
"Integer",
":",
"tokens",
"[",
"limit_pos",
"+",
"2",
"]",
".",
"value",
"=",
"new_limit",
"elif",
"limit",
".",
"is_group",
":",
"tokens",
"[",
"limit_pos",
"+",
"2",
"]",
".",
"value",
"=",
"(",
"'{}, {}'",
".",
"format",
"(",
"next",
"(",
"limit",
".",
"get_identifiers",
"(",
")",
")",
",",
"new_limit",
")",
")",
"str_res",
"=",
"''",
"for",
"i",
"in",
"tokens",
":",
"str_res",
"+=",
"str",
"(",
"i",
".",
"value",
")",
"return",
"str_res"
] |
returns the query with the specified limit
|
[
"returns",
"the",
"query",
"with",
"the",
"specified",
"limit"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/sql_parse.py#L166-L189
|
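A hedged sketch of get_query_with_new_limit covering both branches shown above: no existing LIMIT (the new one is appended) and an existing integer LIMIT (the literal is replaced in place).

from superset.sql_parse import ParsedQuery

# No LIMIT present: the new limit is appended to the raw SQL.
print(ParsedQuery("SELECT * FROM logs").get_query_with_new_limit(1000))
# SELECT * FROM logs LIMIT 1000

# Integer LIMIT present: the literal token's value is swapped out.
print(ParsedQuery("SELECT * FROM logs LIMIT 10").get_query_with_new_limit(1000))
# SELECT * FROM logs LIMIT 1000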
21,437
|
apache/incubator-superset
|
superset/jinja_context.py
|
url_param
|
def url_param(param, default=None):
"""Read a url or post parameter and use it in your SQL Lab query
When in SQL Lab, it's possible to add arbitrary URL "query string"
parameters, and use those in your SQL code. For instance you can
alter your url and add `?foo=bar`, as in
`{domain}/superset/sqllab?foo=bar`. Then if your query is something like
SELECT * FROM foo = '{{ url_param('foo') }}', it will be parsed at
runtime and replaced by the value in the URL.
As you create a visualization form this SQL Lab query, you can pass
parameters in the explore view as well as from the dashboard, and
it should carry through to your queries.
:param param: the parameter to lookup
:type param: str
:param default: the value to return in the absence of the parameter
:type default: str
"""
if request.args.get(param):
return request.args.get(param, default)
# Supporting POST as well as get
if request.form.get('form_data'):
form_data = json.loads(request.form.get('form_data'))
url_params = form_data.get('url_params') or {}
return url_params.get(param, default)
return default
|
python
|
def url_param(param, default=None):
"""Read a url or post parameter and use it in your SQL Lab query
When in SQL Lab, it's possible to add arbitrary URL "query string"
parameters, and use those in your SQL code. For instance you can
alter your url and add `?foo=bar`, as in
`{domain}/superset/sqllab?foo=bar`. Then if your query is something like
SELECT * FROM foo = '{{ url_param('foo') }}', it will be parsed at
runtime and replaced by the value in the URL.
As you create a visualization form this SQL Lab query, you can pass
parameters in the explore view as well as from the dashboard, and
it should carry through to your queries.
:param param: the parameter to lookup
:type param: str
:param default: the value to return in the absence of the parameter
:type default: str
"""
if request.args.get(param):
return request.args.get(param, default)
# Supporting POST as well as get
if request.form.get('form_data'):
form_data = json.loads(request.form.get('form_data'))
url_params = form_data.get('url_params') or {}
return url_params.get(param, default)
return default
|
[
"def",
"url_param",
"(",
"param",
",",
"default",
"=",
"None",
")",
":",
"if",
"request",
".",
"args",
".",
"get",
"(",
"param",
")",
":",
"return",
"request",
".",
"args",
".",
"get",
"(",
"param",
",",
"default",
")",
"# Supporting POST as well as get",
"if",
"request",
".",
"form",
".",
"get",
"(",
"'form_data'",
")",
":",
"form_data",
"=",
"json",
".",
"loads",
"(",
"request",
".",
"form",
".",
"get",
"(",
"'form_data'",
")",
")",
"url_params",
"=",
"form_data",
".",
"get",
"(",
"'url_params'",
")",
"or",
"{",
"}",
"return",
"url_params",
".",
"get",
"(",
"param",
",",
"default",
")",
"return",
"default"
] |
Read a url or post parameter and use it in your SQL Lab query
When in SQL Lab, it's possible to add arbitrary URL "query string"
parameters, and use those in your SQL code. For instance you can
alter your url and add `?foo=bar`, as in
`{domain}/superset/sqllab?foo=bar`. Then if your query is something like
SELECT * FROM foo = '{{ url_param('foo') }}', it will be parsed at
runtime and replaced by the value in the URL.
As you create a visualization from this SQL Lab query, you can pass
parameters in the explore view as well as from the dashboard, and
it should carry through to your queries.
:param param: the parameter to lookup
:type param: str
:param default: the value to return in the absence of the parameter
:type default: str
|
[
"Read",
"a",
"url",
"or",
"post",
"parameter",
"and",
"use",
"it",
"in",
"your",
"SQL",
"Lab",
"query"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/jinja_context.py#L44-L70
|
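A hedged sketch of how url_param is normally consumed: inside a Jinja-templated SQL Lab query rather than called directly from Python. The table and parameter names (orders, region) are invented for illustration.

# The template below would be pasted into SQL Lab, where url_param is
# available in the Jinja rendering context.
SQL_LAB_TEMPLATE = """
SELECT region, COUNT(*) AS order_count
FROM orders
WHERE region = '{{ url_param('region', 'EMEA') }}'
GROUP BY region
"""
# Visiting {domain}/superset/sqllab?region=APAC renders the predicate as
# region = 'APAC'; without the query-string parameter the default 'EMEA'
# is used instead.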
21,438
|
apache/incubator-superset
|
superset/jinja_context.py
|
filter_values
|
def filter_values(column, default=None):
""" Gets a values for a particular filter as a list
This is useful if:
- you want to use a filter box to filter a query where the name of filter box
column doesn't match the one in the select statement
- you want to have the ability for filter inside the main query for speed purposes
This searches for "filters" and "extra_filters" in form_data for a match
Usage example:
SELECT action, count(*) as times
FROM logs
WHERE action in ( {{ "'" + "','".join(filter_values('action_type')) + "'" }} )
GROUP BY 1
:param column: column/filter name to lookup
:type column: str
:param default: default value to return if there's no matching columns
:type default: str
:return: returns a list of filter values
:type: list
"""
form_data = json.loads(request.form.get('form_data', '{}'))
return_val = []
for filter_type in ['filters', 'extra_filters']:
if filter_type not in form_data:
continue
for f in form_data[filter_type]:
if f['col'] == column:
for v in f['val']:
return_val.append(v)
if return_val:
return return_val
if default:
return [default]
else:
return []
|
python
|
def filter_values(column, default=None):
""" Gets a values for a particular filter as a list
This is useful if:
- you want to use a filter box to filter a query where the name of filter box
column doesn't match the one in the select statement
- you want to have the ability for filter inside the main query for speed purposes
This searches for "filters" and "extra_filters" in form_data for a match
Usage example:
SELECT action, count(*) as times
FROM logs
WHERE action in ( {{ "'" + "','".join(filter_values('action_type')) + "'" }} )
GROUP BY 1
:param column: column/filter name to lookup
:type column: str
:param default: default value to return if there's no matching columns
:type default: str
:return: returns a list of filter values
:type: list
"""
form_data = json.loads(request.form.get('form_data', '{}'))
return_val = []
for filter_type in ['filters', 'extra_filters']:
if filter_type not in form_data:
continue
for f in form_data[filter_type]:
if f['col'] == column:
for v in f['val']:
return_val.append(v)
if return_val:
return return_val
if default:
return [default]
else:
return []
|
[
"def",
"filter_values",
"(",
"column",
",",
"default",
"=",
"None",
")",
":",
"form_data",
"=",
"json",
".",
"loads",
"(",
"request",
".",
"form",
".",
"get",
"(",
"'form_data'",
",",
"'{}'",
")",
")",
"return_val",
"=",
"[",
"]",
"for",
"filter_type",
"in",
"[",
"'filters'",
",",
"'extra_filters'",
"]",
":",
"if",
"filter_type",
"not",
"in",
"form_data",
":",
"continue",
"for",
"f",
"in",
"form_data",
"[",
"filter_type",
"]",
":",
"if",
"f",
"[",
"'col'",
"]",
"==",
"column",
":",
"for",
"v",
"in",
"f",
"[",
"'val'",
"]",
":",
"return_val",
".",
"append",
"(",
"v",
")",
"if",
"return_val",
":",
"return",
"return_val",
"if",
"default",
":",
"return",
"[",
"default",
"]",
"else",
":",
"return",
"[",
"]"
] |
Gets the values for a particular filter as a list
This is useful if:
- you want to use a filter box to filter a query where the name of filter box
column doesn't match the one in the select statement
- you want to have the ability for filter inside the main query for speed purposes
This searches for "filters" and "extra_filters" in form_data for a match
Usage example:
SELECT action, count(*) as times
FROM logs
WHERE action in ( {{ "'" + "','".join(filter_values('action_type')) + "'" }} )
GROUP BY 1
:param column: column/filter name to lookup
:type column: str
:param default: default value to return if there's no matching columns
:type default: str
:return: returns a list of filter values
:type: list
|
[
"Gets",
"a",
"values",
"for",
"a",
"particular",
"filter",
"as",
"a",
"list"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/jinja_context.py#L85-L125
|
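A hedged sketch mirroring the docstring's own usage example: filter_values is called from a Jinja-templated SQL Lab query, pulling the values a filter box pushed into form_data. The filter name action_type comes from the docstring; the default value is illustrative.

SQL_LAB_TEMPLATE = """
SELECT action, COUNT(*) AS times
FROM logs
WHERE action IN ( {{ "'" + "','".join(filter_values('action_type', default='all')) + "'" }} )
GROUP BY action
"""
# With no matching filter and no default, filter_values returns an empty
# list, which would render an empty IN (...); passing a default avoids that.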
21,439
|
apache/incubator-superset
|
superset/jinja_context.py
|
BaseTemplateProcessor.process_template
|
def process_template(self, sql, **kwargs):
"""Processes a sql template
>>> sql = "SELECT '{{ datetime(2017, 1, 1).isoformat() }}'"
>>> process_template(sql)
"SELECT '2017-01-01T00:00:00'"
"""
template = self.env.from_string(sql)
kwargs.update(self.context)
return template.render(kwargs)
|
python
|
def process_template(self, sql, **kwargs):
"""Processes a sql template
>>> sql = "SELECT '{{ datetime(2017, 1, 1).isoformat() }}'"
>>> process_template(sql)
"SELECT '2017-01-01T00:00:00'"
"""
template = self.env.from_string(sql)
kwargs.update(self.context)
return template.render(kwargs)
|
[
"def",
"process_template",
"(",
"self",
",",
"sql",
",",
"*",
"*",
"kwargs",
")",
":",
"template",
"=",
"self",
".",
"env",
".",
"from_string",
"(",
"sql",
")",
"kwargs",
".",
"update",
"(",
"self",
".",
"context",
")",
"return",
"template",
".",
"render",
"(",
"kwargs",
")"
] |
Processes a sql template
>>> sql = "SELECT '{{ datetime(2017, 1, 1).isoformat() }}'"
>>> process_template(sql)
"SELECT '2017-01-01T00:00:00'"
|
[
"Processes",
"a",
"sql",
"template"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/jinja_context.py#L165-L174
|
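A hedged sketch of process_template as SQL Lab would use it. It assumes get_template_processor is available in superset.jinja_context at this sha and that `database` is an existing Database model instance; both are assumptions of this sketch.

from superset.jinja_context import get_template_processor

tp = get_template_processor(database=database)  # `database` assumed to exist
sql = tp.process_template("SELECT '{{ datetime(2017, 1, 1).isoformat() }}' AS dt")
# sql -> "SELECT '2017-01-01T00:00:00' AS dt"
# Extra keyword arguments to process_template are merged into the render
# context, with the processor's built-in context taking precedence.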
21,440
|
apache/incubator-superset
|
superset/views/utils.py
|
get_datasource_info
|
def get_datasource_info(datasource_id, datasource_type, form_data):
"""Compatibility layer for handling of datasource info
datasource_id & datasource_type used to be passed in the URL
directory, now they should come as part of the form_data,
This function allows supporting both without duplicating code"""
datasource = form_data.get('datasource', '')
if '__' in datasource:
datasource_id, datasource_type = datasource.split('__')
# The case where the datasource has been deleted
datasource_id = None if datasource_id == 'None' else datasource_id
if not datasource_id:
raise Exception(
'The datasource associated with this chart no longer exists')
datasource_id = int(datasource_id)
return datasource_id, datasource_type
|
python
|
def get_datasource_info(datasource_id, datasource_type, form_data):
"""Compatibility layer for handling of datasource info
datasource_id & datasource_type used to be passed in the URL
directory, now they should come as part of the form_data,
This function allows supporting both without duplicating code"""
datasource = form_data.get('datasource', '')
if '__' in datasource:
datasource_id, datasource_type = datasource.split('__')
# The case where the datasource has been deleted
datasource_id = None if datasource_id == 'None' else datasource_id
if not datasource_id:
raise Exception(
'The datasource associated with this chart no longer exists')
datasource_id = int(datasource_id)
return datasource_id, datasource_type
|
[
"def",
"get_datasource_info",
"(",
"datasource_id",
",",
"datasource_type",
",",
"form_data",
")",
":",
"datasource",
"=",
"form_data",
".",
"get",
"(",
"'datasource'",
",",
"''",
")",
"if",
"'__'",
"in",
"datasource",
":",
"datasource_id",
",",
"datasource_type",
"=",
"datasource",
".",
"split",
"(",
"'__'",
")",
"# The case where the datasource has been deleted",
"datasource_id",
"=",
"None",
"if",
"datasource_id",
"==",
"'None'",
"else",
"datasource_id",
"if",
"not",
"datasource_id",
":",
"raise",
"Exception",
"(",
"'The datasource associated with this chart no longer exists'",
")",
"datasource_id",
"=",
"int",
"(",
"datasource_id",
")",
"return",
"datasource_id",
",",
"datasource_type"
] |
Compatibility layer for handling of datasource info
datasource_id & datasource_type used to be passed in the URL
directly; now they should come as part of the form_data.
This function allows supporting both without duplicating code
|
[
"Compatibility",
"layer",
"for",
"handling",
"of",
"datasource",
"info"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/utils.py#L170-L186
|
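A short sketch of both call styles the compatibility shim above supports; the ids are illustrative.

from superset.views.utils import get_datasource_info

# New style: the id/type pair rides along inside form_data.
print(get_datasource_info(None, None, {'datasource': '42__table'}))
# (42, 'table')

# Legacy style: id and type arrive separately and form_data has no
# 'datasource' key, so the passed-in values are kept.
print(get_datasource_info('42', 'table', {}))
# (42, 'table')

# A deleted datasource serializes as 'None__table', which makes the helper
# raise an Exception instead of returning an id.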
21,441
|
apache/incubator-superset
|
superset/security.py
|
SupersetSecurityManager.create_missing_perms
|
def create_missing_perms(self):
"""Creates missing perms for datasources, schemas and metrics"""
from superset import db
from superset.models import core as models
logging.info(
'Fetching a set of all perms to lookup which ones are missing')
all_pvs = set()
for pv in self.get_session.query(self.permissionview_model).all():
if pv.permission and pv.view_menu:
all_pvs.add((pv.permission.name, pv.view_menu.name))
def merge_pv(view_menu, perm):
"""Create permission view menu only if it doesn't exist"""
if view_menu and perm and (view_menu, perm) not in all_pvs:
self.merge_perm(view_menu, perm)
logging.info('Creating missing datasource permissions.')
datasources = ConnectorRegistry.get_all_datasources(db.session)
for datasource in datasources:
merge_pv('datasource_access', datasource.get_perm())
merge_pv('schema_access', datasource.schema_perm)
logging.info('Creating missing database permissions.')
databases = db.session.query(models.Database).all()
for database in databases:
merge_pv('database_access', database.perm)
logging.info('Creating missing metrics permissions')
metrics = []
for datasource_class in ConnectorRegistry.sources.values():
metrics += list(db.session.query(datasource_class.metric_class).all())
for metric in metrics:
if metric.is_restricted:
merge_pv('metric_access', metric.perm)
|
python
|
def create_missing_perms(self):
"""Creates missing perms for datasources, schemas and metrics"""
from superset import db
from superset.models import core as models
logging.info(
'Fetching a set of all perms to lookup which ones are missing')
all_pvs = set()
for pv in self.get_session.query(self.permissionview_model).all():
if pv.permission and pv.view_menu:
all_pvs.add((pv.permission.name, pv.view_menu.name))
def merge_pv(view_menu, perm):
"""Create permission view menu only if it doesn't exist"""
if view_menu and perm and (view_menu, perm) not in all_pvs:
self.merge_perm(view_menu, perm)
logging.info('Creating missing datasource permissions.')
datasources = ConnectorRegistry.get_all_datasources(db.session)
for datasource in datasources:
merge_pv('datasource_access', datasource.get_perm())
merge_pv('schema_access', datasource.schema_perm)
logging.info('Creating missing database permissions.')
databases = db.session.query(models.Database).all()
for database in databases:
merge_pv('database_access', database.perm)
logging.info('Creating missing metrics permissions')
metrics = []
for datasource_class in ConnectorRegistry.sources.values():
metrics += list(db.session.query(datasource_class.metric_class).all())
for metric in metrics:
if metric.is_restricted:
merge_pv('metric_access', metric.perm)
|
[
"def",
"create_missing_perms",
"(",
"self",
")",
":",
"from",
"superset",
"import",
"db",
"from",
"superset",
".",
"models",
"import",
"core",
"as",
"models",
"logging",
".",
"info",
"(",
"'Fetching a set of all perms to lookup which ones are missing'",
")",
"all_pvs",
"=",
"set",
"(",
")",
"for",
"pv",
"in",
"self",
".",
"get_session",
".",
"query",
"(",
"self",
".",
"permissionview_model",
")",
".",
"all",
"(",
")",
":",
"if",
"pv",
".",
"permission",
"and",
"pv",
".",
"view_menu",
":",
"all_pvs",
".",
"add",
"(",
"(",
"pv",
".",
"permission",
".",
"name",
",",
"pv",
".",
"view_menu",
".",
"name",
")",
")",
"def",
"merge_pv",
"(",
"view_menu",
",",
"perm",
")",
":",
"\"\"\"Create permission view menu only if it doesn't exist\"\"\"",
"if",
"view_menu",
"and",
"perm",
"and",
"(",
"view_menu",
",",
"perm",
")",
"not",
"in",
"all_pvs",
":",
"self",
".",
"merge_perm",
"(",
"view_menu",
",",
"perm",
")",
"logging",
".",
"info",
"(",
"'Creating missing datasource permissions.'",
")",
"datasources",
"=",
"ConnectorRegistry",
".",
"get_all_datasources",
"(",
"db",
".",
"session",
")",
"for",
"datasource",
"in",
"datasources",
":",
"merge_pv",
"(",
"'datasource_access'",
",",
"datasource",
".",
"get_perm",
"(",
")",
")",
"merge_pv",
"(",
"'schema_access'",
",",
"datasource",
".",
"schema_perm",
")",
"logging",
".",
"info",
"(",
"'Creating missing database permissions.'",
")",
"databases",
"=",
"db",
".",
"session",
".",
"query",
"(",
"models",
".",
"Database",
")",
".",
"all",
"(",
")",
"for",
"database",
"in",
"databases",
":",
"merge_pv",
"(",
"'database_access'",
",",
"database",
".",
"perm",
")",
"logging",
".",
"info",
"(",
"'Creating missing metrics permissions'",
")",
"metrics",
"=",
"[",
"]",
"for",
"datasource_class",
"in",
"ConnectorRegistry",
".",
"sources",
".",
"values",
"(",
")",
":",
"metrics",
"+=",
"list",
"(",
"db",
".",
"session",
".",
"query",
"(",
"datasource_class",
".",
"metric_class",
")",
".",
"all",
"(",
")",
")",
"for",
"metric",
"in",
"metrics",
":",
"if",
"metric",
".",
"is_restricted",
":",
"merge_pv",
"(",
"'metric_access'",
",",
"metric",
".",
"perm",
")"
] |
Creates missing perms for datasources, schemas and metrics
|
[
"Creates",
"missing",
"perms",
"for",
"datasources",
"schemas",
"and",
"metrics"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/security.py#L287-L322
|
21,442
|
apache/incubator-superset
|
superset/security.py
|
SupersetSecurityManager.clean_perms
|
def clean_perms(self):
"""FAB leaves faulty permissions that need to be cleaned up"""
logging.info('Cleaning faulty perms')
sesh = self.get_session
pvms = (
sesh.query(ab_models.PermissionView)
.filter(or_(
ab_models.PermissionView.permission == None, # NOQA
ab_models.PermissionView.view_menu == None, # NOQA
))
)
deleted_count = pvms.delete()
sesh.commit()
if deleted_count:
logging.info('Deleted {} faulty permissions'.format(deleted_count))
|
python
|
def clean_perms(self):
"""FAB leaves faulty permissions that need to be cleaned up"""
logging.info('Cleaning faulty perms')
sesh = self.get_session
pvms = (
sesh.query(ab_models.PermissionView)
.filter(or_(
ab_models.PermissionView.permission == None, # NOQA
ab_models.PermissionView.view_menu == None, # NOQA
))
)
deleted_count = pvms.delete()
sesh.commit()
if deleted_count:
logging.info('Deleted {} faulty permissions'.format(deleted_count))
|
[
"def",
"clean_perms",
"(",
"self",
")",
":",
"logging",
".",
"info",
"(",
"'Cleaning faulty perms'",
")",
"sesh",
"=",
"self",
".",
"get_session",
"pvms",
"=",
"(",
"sesh",
".",
"query",
"(",
"ab_models",
".",
"PermissionView",
")",
".",
"filter",
"(",
"or_",
"(",
"ab_models",
".",
"PermissionView",
".",
"permission",
"==",
"None",
",",
"# NOQA",
"ab_models",
".",
"PermissionView",
".",
"view_menu",
"==",
"None",
",",
"# NOQA",
")",
")",
")",
"deleted_count",
"=",
"pvms",
".",
"delete",
"(",
")",
"sesh",
".",
"commit",
"(",
")",
"if",
"deleted_count",
":",
"logging",
".",
"info",
"(",
"'Deleted {} faulty permissions'",
".",
"format",
"(",
"deleted_count",
")",
")"
] |
FAB leaves faulty permissions that need to be cleaned up
|
[
"FAB",
"leaves",
"faulty",
"permissions",
"that",
"need",
"to",
"be",
"cleaned",
"up"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/security.py#L324-L338
|
21,443
|
apache/incubator-superset
|
superset/security.py
|
SupersetSecurityManager.sync_role_definitions
|
def sync_role_definitions(self):
"""Inits the Superset application with security roles and such"""
from superset import conf
logging.info('Syncing role definition')
self.create_custom_permissions()
# Creating default roles
self.set_role('Admin', self.is_admin_pvm)
self.set_role('Alpha', self.is_alpha_pvm)
self.set_role('Gamma', self.is_gamma_pvm)
self.set_role('granter', self.is_granter_pvm)
self.set_role('sql_lab', self.is_sql_lab_pvm)
if conf.get('PUBLIC_ROLE_LIKE_GAMMA', False):
self.set_role('Public', self.is_gamma_pvm)
self.create_missing_perms()
# commit role and view menu updates
self.get_session.commit()
self.clean_perms()
|
python
|
def sync_role_definitions(self):
"""Inits the Superset application with security roles and such"""
from superset import conf
logging.info('Syncing role definition')
self.create_custom_permissions()
# Creating default roles
self.set_role('Admin', self.is_admin_pvm)
self.set_role('Alpha', self.is_alpha_pvm)
self.set_role('Gamma', self.is_gamma_pvm)
self.set_role('granter', self.is_granter_pvm)
self.set_role('sql_lab', self.is_sql_lab_pvm)
if conf.get('PUBLIC_ROLE_LIKE_GAMMA', False):
self.set_role('Public', self.is_gamma_pvm)
self.create_missing_perms()
# commit role and view menu updates
self.get_session.commit()
self.clean_perms()
|
[
"def",
"sync_role_definitions",
"(",
"self",
")",
":",
"from",
"superset",
"import",
"conf",
"logging",
".",
"info",
"(",
"'Syncing role definition'",
")",
"self",
".",
"create_custom_permissions",
"(",
")",
"# Creating default roles",
"self",
".",
"set_role",
"(",
"'Admin'",
",",
"self",
".",
"is_admin_pvm",
")",
"self",
".",
"set_role",
"(",
"'Alpha'",
",",
"self",
".",
"is_alpha_pvm",
")",
"self",
".",
"set_role",
"(",
"'Gamma'",
",",
"self",
".",
"is_gamma_pvm",
")",
"self",
".",
"set_role",
"(",
"'granter'",
",",
"self",
".",
"is_granter_pvm",
")",
"self",
".",
"set_role",
"(",
"'sql_lab'",
",",
"self",
".",
"is_sql_lab_pvm",
")",
"if",
"conf",
".",
"get",
"(",
"'PUBLIC_ROLE_LIKE_GAMMA'",
",",
"False",
")",
":",
"self",
".",
"set_role",
"(",
"'Public'",
",",
"self",
".",
"is_gamma_pvm",
")",
"self",
".",
"create_missing_perms",
"(",
")",
"# commit role and view menu updates",
"self",
".",
"get_session",
".",
"commit",
"(",
")",
"self",
".",
"clean_perms",
"(",
")"
] |
Inits the Superset application with security roles and such
|
[
"Inits",
"the",
"Superset",
"application",
"with",
"security",
"roles",
"and",
"such"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/security.py#L340-L361
|
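A hedged sketch of triggering the sync above. It assumes security_manager is the SupersetSecurityManager instance exposed on the superset package at this sha, which is roughly what the init command drives.

from superset import security_manager

security_manager.sync_role_definitions()
# Rebuilds the Admin/Alpha/Gamma/granter/sql_lab roles, creates any missing
# datasource/schema/database/metric permissions, then prunes faulty ones.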
21,444
|
apache/incubator-superset
|
superset/utils/dict_import_export.py
|
export_to_dict
|
def export_to_dict(session,
recursive,
back_references,
include_defaults):
"""Exports databases and druid clusters to a dictionary"""
logging.info('Starting export')
dbs = session.query(Database)
databases = [database.export_to_dict(recursive=recursive,
include_parent_ref=back_references,
include_defaults=include_defaults) for database in dbs]
logging.info('Exported %d %s', len(databases), DATABASES_KEY)
cls = session.query(DruidCluster)
clusters = [cluster.export_to_dict(recursive=recursive,
include_parent_ref=back_references,
include_defaults=include_defaults) for cluster in cls]
logging.info('Exported %d %s', len(clusters), DRUID_CLUSTERS_KEY)
data = dict()
if databases:
data[DATABASES_KEY] = databases
if clusters:
data[DRUID_CLUSTERS_KEY] = clusters
return data
|
python
|
def export_to_dict(session,
recursive,
back_references,
include_defaults):
"""Exports databases and druid clusters to a dictionary"""
logging.info('Starting export')
dbs = session.query(Database)
databases = [database.export_to_dict(recursive=recursive,
include_parent_ref=back_references,
include_defaults=include_defaults) for database in dbs]
logging.info('Exported %d %s', len(databases), DATABASES_KEY)
cls = session.query(DruidCluster)
clusters = [cluster.export_to_dict(recursive=recursive,
include_parent_ref=back_references,
include_defaults=include_defaults) for cluster in cls]
logging.info('Exported %d %s', len(clusters), DRUID_CLUSTERS_KEY)
data = dict()
if databases:
data[DATABASES_KEY] = databases
if clusters:
data[DRUID_CLUSTERS_KEY] = clusters
return data
|
[
"def",
"export_to_dict",
"(",
"session",
",",
"recursive",
",",
"back_references",
",",
"include_defaults",
")",
":",
"logging",
".",
"info",
"(",
"'Starting export'",
")",
"dbs",
"=",
"session",
".",
"query",
"(",
"Database",
")",
"databases",
"=",
"[",
"database",
".",
"export_to_dict",
"(",
"recursive",
"=",
"recursive",
",",
"include_parent_ref",
"=",
"back_references",
",",
"include_defaults",
"=",
"include_defaults",
")",
"for",
"database",
"in",
"dbs",
"]",
"logging",
".",
"info",
"(",
"'Exported %d %s'",
",",
"len",
"(",
"databases",
")",
",",
"DATABASES_KEY",
")",
"cls",
"=",
"session",
".",
"query",
"(",
"DruidCluster",
")",
"clusters",
"=",
"[",
"cluster",
".",
"export_to_dict",
"(",
"recursive",
"=",
"recursive",
",",
"include_parent_ref",
"=",
"back_references",
",",
"include_defaults",
"=",
"include_defaults",
")",
"for",
"cluster",
"in",
"cls",
"]",
"logging",
".",
"info",
"(",
"'Exported %d %s'",
",",
"len",
"(",
"clusters",
")",
",",
"DRUID_CLUSTERS_KEY",
")",
"data",
"=",
"dict",
"(",
")",
"if",
"databases",
":",
"data",
"[",
"DATABASES_KEY",
"]",
"=",
"databases",
"if",
"clusters",
":",
"data",
"[",
"DRUID_CLUSTERS_KEY",
"]",
"=",
"clusters",
"return",
"data"
] |
Exports databases and druid clusters to a dictionary
|
[
"Exports",
"databases",
"and",
"druid",
"clusters",
"to",
"a",
"dictionary"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/dict_import_export.py#L42-L63
|
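A hedged sketch of exporting to YAML with the helper above; the keyword names follow the signature shown, and yaml (PyYAML) is assumed to be installed.

import yaml

from superset import db
from superset.utils.dict_import_export import export_to_dict

data = export_to_dict(
    session=db.session,
    recursive=True,         # include child tables/columns/metrics
    back_references=False,  # omit parent references on the children
    include_defaults=False, # skip fields still at their column defaults
)
print(yaml.safe_dump(data, default_flow_style=False))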
21,445
|
apache/incubator-superset
|
superset/utils/dict_import_export.py
|
import_from_dict
|
def import_from_dict(session, data, sync=[]):
"""Imports databases and druid clusters from dictionary"""
if isinstance(data, dict):
logging.info('Importing %d %s',
len(data.get(DATABASES_KEY, [])),
DATABASES_KEY)
for database in data.get(DATABASES_KEY, []):
Database.import_from_dict(session, database, sync=sync)
logging.info('Importing %d %s',
len(data.get(DRUID_CLUSTERS_KEY, [])),
DRUID_CLUSTERS_KEY)
for datasource in data.get(DRUID_CLUSTERS_KEY, []):
DruidCluster.import_from_dict(session, datasource, sync=sync)
session.commit()
else:
logging.info('Supplied object is not a dictionary.')
|
python
|
def import_from_dict(session, data, sync=[]):
"""Imports databases and druid clusters from dictionary"""
if isinstance(data, dict):
logging.info('Importing %d %s',
len(data.get(DATABASES_KEY, [])),
DATABASES_KEY)
for database in data.get(DATABASES_KEY, []):
Database.import_from_dict(session, database, sync=sync)
logging.info('Importing %d %s',
len(data.get(DRUID_CLUSTERS_KEY, [])),
DRUID_CLUSTERS_KEY)
for datasource in data.get(DRUID_CLUSTERS_KEY, []):
DruidCluster.import_from_dict(session, datasource, sync=sync)
session.commit()
else:
logging.info('Supplied object is not a dictionary.')
|
[
"def",
"import_from_dict",
"(",
"session",
",",
"data",
",",
"sync",
"=",
"[",
"]",
")",
":",
"if",
"isinstance",
"(",
"data",
",",
"dict",
")",
":",
"logging",
".",
"info",
"(",
"'Importing %d %s'",
",",
"len",
"(",
"data",
".",
"get",
"(",
"DATABASES_KEY",
",",
"[",
"]",
")",
")",
",",
"DATABASES_KEY",
")",
"for",
"database",
"in",
"data",
".",
"get",
"(",
"DATABASES_KEY",
",",
"[",
"]",
")",
":",
"Database",
".",
"import_from_dict",
"(",
"session",
",",
"database",
",",
"sync",
"=",
"sync",
")",
"logging",
".",
"info",
"(",
"'Importing %d %s'",
",",
"len",
"(",
"data",
".",
"get",
"(",
"DRUID_CLUSTERS_KEY",
",",
"[",
"]",
")",
")",
",",
"DRUID_CLUSTERS_KEY",
")",
"for",
"datasource",
"in",
"data",
".",
"get",
"(",
"DRUID_CLUSTERS_KEY",
",",
"[",
"]",
")",
":",
"DruidCluster",
".",
"import_from_dict",
"(",
"session",
",",
"datasource",
",",
"sync",
"=",
"sync",
")",
"session",
".",
"commit",
"(",
")",
"else",
":",
"logging",
".",
"info",
"(",
"'Supplied object is not a dictionary.'",
")"
] |
Imports databases and druid clusters from dictionary
|
[
"Imports",
"databases",
"and",
"druid",
"clusters",
"from",
"dictionary"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/dict_import_export.py#L66-L82
|
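The inverse direction, as a hedged sketch: feed a parsed YAML dict back into the metadata database. The file name is made up; the sync list names which child collections should be pruned to match the incoming dict, and the default empty list only adds or updates.

import yaml

from superset import db
from superset.utils.dict_import_export import import_from_dict

with open('databases_export.yaml') as fh:  # hypothetical export file
    data = yaml.safe_load(fh)

import_from_dict(db.session, data, sync=[])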
21,446
|
apache/incubator-superset
|
superset/data/css_templates.py
|
load_css_templates
|
def load_css_templates():
"""Loads 2 css templates to demonstrate the feature"""
print('Creating default CSS templates')
obj = db.session.query(CssTemplate).filter_by(template_name='Flat').first()
if not obj:
obj = CssTemplate(template_name='Flat')
css = textwrap.dedent("""\
.gridster div.widget {
transition: background-color 0.5s ease;
background-color: #FAFAFA;
border: 1px solid #CCC;
box-shadow: none;
border-radius: 0px;
}
.gridster div.widget:hover {
border: 1px solid #000;
background-color: #EAEAEA;
}
.navbar {
transition: opacity 0.5s ease;
opacity: 0.05;
}
.navbar:hover {
opacity: 1;
}
.chart-header .header{
font-weight: normal;
font-size: 12px;
}
/*
var bnbColors = [
//rausch hackb kazan babu lima beach tirol
'#ff5a5f', '#7b0051', '#007A87', '#00d1c1', '#8ce071', '#ffb400', '#b4a76c',
'#ff8083', '#cc0086', '#00a1b3', '#00ffeb', '#bbedab', '#ffd266', '#cbc29a',
'#ff3339', '#ff1ab1', '#005c66', '#00b3a5', '#55d12e', '#b37e00', '#988b4e',
];
*/
""")
obj.css = css
db.session.merge(obj)
db.session.commit()
obj = (
db.session.query(CssTemplate).filter_by(template_name='Courier Black').first())
if not obj:
obj = CssTemplate(template_name='Courier Black')
css = textwrap.dedent("""\
.gridster div.widget {
transition: background-color 0.5s ease;
background-color: #EEE;
border: 2px solid #444;
border-radius: 15px;
box-shadow: none;
}
h2 {
color: white;
font-size: 52px;
}
.navbar {
box-shadow: none;
}
.gridster div.widget:hover {
border: 2px solid #000;
background-color: #EAEAEA;
}
.navbar {
transition: opacity 0.5s ease;
opacity: 0.05;
}
.navbar:hover {
opacity: 1;
}
.chart-header .header{
font-weight: normal;
font-size: 12px;
}
.nvd3 text {
font-size: 12px;
font-family: inherit;
}
body{
background: #000;
font-family: Courier, Monaco, monospace;;
}
/*
var bnbColors = [
//rausch hackb kazan babu lima beach tirol
'#ff5a5f', '#7b0051', '#007A87', '#00d1c1', '#8ce071', '#ffb400', '#b4a76c',
'#ff8083', '#cc0086', '#00a1b3', '#00ffeb', '#bbedab', '#ffd266', '#cbc29a',
'#ff3339', '#ff1ab1', '#005c66', '#00b3a5', '#55d12e', '#b37e00', '#988b4e',
];
*/
""")
obj.css = css
db.session.merge(obj)
db.session.commit()
|
python
|
def load_css_templates():
"""Loads 2 css templates to demonstrate the feature"""
print('Creating default CSS templates')
obj = db.session.query(CssTemplate).filter_by(template_name='Flat').first()
if not obj:
obj = CssTemplate(template_name='Flat')
css = textwrap.dedent("""\
.gridster div.widget {
transition: background-color 0.5s ease;
background-color: #FAFAFA;
border: 1px solid #CCC;
box-shadow: none;
border-radius: 0px;
}
.gridster div.widget:hover {
border: 1px solid #000;
background-color: #EAEAEA;
}
.navbar {
transition: opacity 0.5s ease;
opacity: 0.05;
}
.navbar:hover {
opacity: 1;
}
.chart-header .header{
font-weight: normal;
font-size: 12px;
}
/*
var bnbColors = [
//rausch hackb kazan babu lima beach tirol
'#ff5a5f', '#7b0051', '#007A87', '#00d1c1', '#8ce071', '#ffb400', '#b4a76c',
'#ff8083', '#cc0086', '#00a1b3', '#00ffeb', '#bbedab', '#ffd266', '#cbc29a',
'#ff3339', '#ff1ab1', '#005c66', '#00b3a5', '#55d12e', '#b37e00', '#988b4e',
];
*/
""")
obj.css = css
db.session.merge(obj)
db.session.commit()
obj = (
db.session.query(CssTemplate).filter_by(template_name='Courier Black').first())
if not obj:
obj = CssTemplate(template_name='Courier Black')
css = textwrap.dedent("""\
.gridster div.widget {
transition: background-color 0.5s ease;
background-color: #EEE;
border: 2px solid #444;
border-radius: 15px;
box-shadow: none;
}
h2 {
color: white;
font-size: 52px;
}
.navbar {
box-shadow: none;
}
.gridster div.widget:hover {
border: 2px solid #000;
background-color: #EAEAEA;
}
.navbar {
transition: opacity 0.5s ease;
opacity: 0.05;
}
.navbar:hover {
opacity: 1;
}
.chart-header .header{
font-weight: normal;
font-size: 12px;
}
.nvd3 text {
font-size: 12px;
font-family: inherit;
}
body{
background: #000;
font-family: Courier, Monaco, monospace;;
}
/*
var bnbColors = [
//rausch hackb kazan babu lima beach tirol
'#ff5a5f', '#7b0051', '#007A87', '#00d1c1', '#8ce071', '#ffb400', '#b4a76c',
'#ff8083', '#cc0086', '#00a1b3', '#00ffeb', '#bbedab', '#ffd266', '#cbc29a',
'#ff3339', '#ff1ab1', '#005c66', '#00b3a5', '#55d12e', '#b37e00', '#988b4e',
];
*/
""")
obj.css = css
db.session.merge(obj)
db.session.commit()
|
[
"def",
"load_css_templates",
"(",
")",
":",
"print",
"(",
"'Creating default CSS templates'",
")",
"obj",
"=",
"db",
".",
"session",
".",
"query",
"(",
"CssTemplate",
")",
".",
"filter_by",
"(",
"template_name",
"=",
"'Flat'",
")",
".",
"first",
"(",
")",
"if",
"not",
"obj",
":",
"obj",
"=",
"CssTemplate",
"(",
"template_name",
"=",
"'Flat'",
")",
"css",
"=",
"textwrap",
".",
"dedent",
"(",
"\"\"\"\\\n .gridster div.widget {\n transition: background-color 0.5s ease;\n background-color: #FAFAFA;\n border: 1px solid #CCC;\n box-shadow: none;\n border-radius: 0px;\n }\n .gridster div.widget:hover {\n border: 1px solid #000;\n background-color: #EAEAEA;\n }\n .navbar {\n transition: opacity 0.5s ease;\n opacity: 0.05;\n }\n .navbar:hover {\n opacity: 1;\n }\n .chart-header .header{\n font-weight: normal;\n font-size: 12px;\n }\n /*\n var bnbColors = [\n //rausch hackb kazan babu lima beach tirol\n '#ff5a5f', '#7b0051', '#007A87', '#00d1c1', '#8ce071', '#ffb400', '#b4a76c',\n '#ff8083', '#cc0086', '#00a1b3', '#00ffeb', '#bbedab', '#ffd266', '#cbc29a',\n '#ff3339', '#ff1ab1', '#005c66', '#00b3a5', '#55d12e', '#b37e00', '#988b4e',\n ];\n */\n \"\"\"",
")",
"obj",
".",
"css",
"=",
"css",
"db",
".",
"session",
".",
"merge",
"(",
"obj",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"obj",
"=",
"(",
"db",
".",
"session",
".",
"query",
"(",
"CssTemplate",
")",
".",
"filter_by",
"(",
"template_name",
"=",
"'Courier Black'",
")",
".",
"first",
"(",
")",
")",
"if",
"not",
"obj",
":",
"obj",
"=",
"CssTemplate",
"(",
"template_name",
"=",
"'Courier Black'",
")",
"css",
"=",
"textwrap",
".",
"dedent",
"(",
"\"\"\"\\\n .gridster div.widget {\n transition: background-color 0.5s ease;\n background-color: #EEE;\n border: 2px solid #444;\n border-radius: 15px;\n box-shadow: none;\n }\n h2 {\n color: white;\n font-size: 52px;\n }\n .navbar {\n box-shadow: none;\n }\n .gridster div.widget:hover {\n border: 2px solid #000;\n background-color: #EAEAEA;\n }\n .navbar {\n transition: opacity 0.5s ease;\n opacity: 0.05;\n }\n .navbar:hover {\n opacity: 1;\n }\n .chart-header .header{\n font-weight: normal;\n font-size: 12px;\n }\n .nvd3 text {\n font-size: 12px;\n font-family: inherit;\n }\n body{\n background: #000;\n font-family: Courier, Monaco, monospace;;\n }\n /*\n var bnbColors = [\n //rausch hackb kazan babu lima beach tirol\n '#ff5a5f', '#7b0051', '#007A87', '#00d1c1', '#8ce071', '#ffb400', '#b4a76c',\n '#ff8083', '#cc0086', '#00a1b3', '#00ffeb', '#bbedab', '#ffd266', '#cbc29a',\n '#ff3339', '#ff1ab1', '#005c66', '#00b3a5', '#55d12e', '#b37e00', '#988b4e',\n ];\n */\n \"\"\"",
")",
"obj",
".",
"css",
"=",
"css",
"db",
".",
"session",
".",
"merge",
"(",
"obj",
")",
"db",
".",
"session",
".",
"commit",
"(",
")"
] |
Loads 2 css templates to demonstrate the feature
|
[
"Loads",
"2",
"css",
"templates",
"to",
"demonstrate",
"the",
"feature"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/data/css_templates.py#L23-L119
|
21,447
|
apache/incubator-superset
|
superset/models/helpers.py
|
ImportMixin._parent_foreign_key_mappings
|
def _parent_foreign_key_mappings(cls):
"""Get a mapping of foreign name to the local name of foreign keys"""
parent_rel = cls.__mapper__.relationships.get(cls.export_parent)
if parent_rel:
return {l.name: r.name for (l, r) in parent_rel.local_remote_pairs}
return {}
|
python
|
def _parent_foreign_key_mappings(cls):
"""Get a mapping of foreign name to the local name of foreign keys"""
parent_rel = cls.__mapper__.relationships.get(cls.export_parent)
if parent_rel:
return {l.name: r.name for (l, r) in parent_rel.local_remote_pairs}
return {}
|
[
"def",
"_parent_foreign_key_mappings",
"(",
"cls",
")",
":",
"parent_rel",
"=",
"cls",
".",
"__mapper__",
".",
"relationships",
".",
"get",
"(",
"cls",
".",
"export_parent",
")",
"if",
"parent_rel",
":",
"return",
"{",
"l",
".",
"name",
":",
"r",
".",
"name",
"for",
"(",
"l",
",",
"r",
")",
"in",
"parent_rel",
".",
"local_remote_pairs",
"}",
"return",
"{",
"}"
] |
Get a mapping of foreign name to the local name of foreign keys
|
[
"Get",
"a",
"mapping",
"of",
"foreign",
"name",
"to",
"the",
"local",
"name",
"of",
"foreign",
"keys"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/models/helpers.py#L60-L65
|
21,448
|
apache/incubator-superset
|
superset/models/helpers.py
|
ImportMixin.export_schema
|
def export_schema(cls, recursive=True, include_parent_ref=False):
"""Export schema as a dictionary"""
parent_excludes = {}
if not include_parent_ref:
parent_ref = cls.__mapper__.relationships.get(cls.export_parent)
if parent_ref:
parent_excludes = {c.name for c in parent_ref.local_columns}
def formatter(c):
return ('{0} Default ({1})'.format(
str(c.type), c.default.arg) if c.default else str(c.type))
schema = {c.name: formatter(c) for c in cls.__table__.columns
if (c.name in cls.export_fields and
c.name not in parent_excludes)}
if recursive:
for c in cls.export_children:
child_class = cls.__mapper__.relationships[c].argument.class_
schema[c] = [child_class.export_schema(recursive=recursive,
include_parent_ref=include_parent_ref)]
return schema
|
python
|
def export_schema(cls, recursive=True, include_parent_ref=False):
"""Export schema as a dictionary"""
parent_excludes = {}
if not include_parent_ref:
parent_ref = cls.__mapper__.relationships.get(cls.export_parent)
if parent_ref:
parent_excludes = {c.name for c in parent_ref.local_columns}
def formatter(c):
return ('{0} Default ({1})'.format(
str(c.type), c.default.arg) if c.default else str(c.type))
schema = {c.name: formatter(c) for c in cls.__table__.columns
if (c.name in cls.export_fields and
c.name not in parent_excludes)}
if recursive:
for c in cls.export_children:
child_class = cls.__mapper__.relationships[c].argument.class_
schema[c] = [child_class.export_schema(recursive=recursive,
include_parent_ref=include_parent_ref)]
return schema
|
[
"def",
"export_schema",
"(",
"cls",
",",
"recursive",
"=",
"True",
",",
"include_parent_ref",
"=",
"False",
")",
":",
"parent_excludes",
"=",
"{",
"}",
"if",
"not",
"include_parent_ref",
":",
"parent_ref",
"=",
"cls",
".",
"__mapper__",
".",
"relationships",
".",
"get",
"(",
"cls",
".",
"export_parent",
")",
"if",
"parent_ref",
":",
"parent_excludes",
"=",
"{",
"c",
".",
"name",
"for",
"c",
"in",
"parent_ref",
".",
"local_columns",
"}",
"def",
"formatter",
"(",
"c",
")",
":",
"return",
"(",
"'{0} Default ({1})'",
".",
"format",
"(",
"str",
"(",
"c",
".",
"type",
")",
",",
"c",
".",
"default",
".",
"arg",
")",
"if",
"c",
".",
"default",
"else",
"str",
"(",
"c",
".",
"type",
")",
")",
"schema",
"=",
"{",
"c",
".",
"name",
":",
"formatter",
"(",
"c",
")",
"for",
"c",
"in",
"cls",
".",
"__table__",
".",
"columns",
"if",
"(",
"c",
".",
"name",
"in",
"cls",
".",
"export_fields",
"and",
"c",
".",
"name",
"not",
"in",
"parent_excludes",
")",
"}",
"if",
"recursive",
":",
"for",
"c",
"in",
"cls",
".",
"export_children",
":",
"child_class",
"=",
"cls",
".",
"__mapper__",
".",
"relationships",
"[",
"c",
"]",
".",
"argument",
".",
"class_",
"schema",
"[",
"c",
"]",
"=",
"[",
"child_class",
".",
"export_schema",
"(",
"recursive",
"=",
"recursive",
",",
"include_parent_ref",
"=",
"include_parent_ref",
")",
"]",
"return",
"schema"
] |
Export schema as a dictionary
|
[
"Export",
"schema",
"as",
"a",
"dictionary"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/models/helpers.py#L76-L96
|
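A hedged sketch of export_schema on a concrete model. It assumes the method is exposed as a classmethod (as its cls signature suggests) and that Database lives in superset.models.core at this sha; the sample keys are indicative only.

from superset.models.core import Database

schema = Database.export_schema(recursive=True)
# e.g. {'database_name': 'VARCHAR(250)', 'sqlalchemy_uri': 'VARCHAR(1024)',
#       ..., 'tables': [{...}]}  (exact keys depend on export_fields)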
21,449
|
apache/incubator-superset
|
superset/models/helpers.py
|
ImportMixin.export_to_dict
|
def export_to_dict(self, recursive=True, include_parent_ref=False,
include_defaults=False):
"""Export obj to dictionary"""
cls = self.__class__
parent_excludes = {}
if recursive and not include_parent_ref:
parent_ref = cls.__mapper__.relationships.get(cls.export_parent)
if parent_ref:
parent_excludes = {c.name for c in parent_ref.local_columns}
dict_rep = {c.name: getattr(self, c.name)
for c in cls.__table__.columns
if (c.name in self.export_fields and
c.name not in parent_excludes and
(include_defaults or (
getattr(self, c.name) is not None and
(not c.default or
getattr(self, c.name) != c.default.arg))))
}
if recursive:
for c in self.export_children:
# sorting to make lists of children stable
dict_rep[c] = sorted(
[
child.export_to_dict(
recursive=recursive,
include_parent_ref=include_parent_ref,
include_defaults=include_defaults,
) for child in getattr(self, c)
],
key=lambda k: sorted(k.items()))
return dict_rep
|
python
|
def export_to_dict(self, recursive=True, include_parent_ref=False,
include_defaults=False):
"""Export obj to dictionary"""
cls = self.__class__
parent_excludes = {}
if recursive and not include_parent_ref:
parent_ref = cls.__mapper__.relationships.get(cls.export_parent)
if parent_ref:
parent_excludes = {c.name for c in parent_ref.local_columns}
dict_rep = {c.name: getattr(self, c.name)
for c in cls.__table__.columns
if (c.name in self.export_fields and
c.name not in parent_excludes and
(include_defaults or (
getattr(self, c.name) is not None and
(not c.default or
getattr(self, c.name) != c.default.arg))))
}
if recursive:
for c in self.export_children:
# sorting to make lists of children stable
dict_rep[c] = sorted(
[
child.export_to_dict(
recursive=recursive,
include_parent_ref=include_parent_ref,
include_defaults=include_defaults,
) for child in getattr(self, c)
],
key=lambda k: sorted(k.items()))
return dict_rep
|
[
"def",
"export_to_dict",
"(",
"self",
",",
"recursive",
"=",
"True",
",",
"include_parent_ref",
"=",
"False",
",",
"include_defaults",
"=",
"False",
")",
":",
"cls",
"=",
"self",
".",
"__class__",
"parent_excludes",
"=",
"{",
"}",
"if",
"recursive",
"and",
"not",
"include_parent_ref",
":",
"parent_ref",
"=",
"cls",
".",
"__mapper__",
".",
"relationships",
".",
"get",
"(",
"cls",
".",
"export_parent",
")",
"if",
"parent_ref",
":",
"parent_excludes",
"=",
"{",
"c",
".",
"name",
"for",
"c",
"in",
"parent_ref",
".",
"local_columns",
"}",
"dict_rep",
"=",
"{",
"c",
".",
"name",
":",
"getattr",
"(",
"self",
",",
"c",
".",
"name",
")",
"for",
"c",
"in",
"cls",
".",
"__table__",
".",
"columns",
"if",
"(",
"c",
".",
"name",
"in",
"self",
".",
"export_fields",
"and",
"c",
".",
"name",
"not",
"in",
"parent_excludes",
"and",
"(",
"include_defaults",
"or",
"(",
"getattr",
"(",
"self",
",",
"c",
".",
"name",
")",
"is",
"not",
"None",
"and",
"(",
"not",
"c",
".",
"default",
"or",
"getattr",
"(",
"self",
",",
"c",
".",
"name",
")",
"!=",
"c",
".",
"default",
".",
"arg",
")",
")",
")",
")",
"}",
"if",
"recursive",
":",
"for",
"c",
"in",
"self",
".",
"export_children",
":",
"# sorting to make lists of children stable",
"dict_rep",
"[",
"c",
"]",
"=",
"sorted",
"(",
"[",
"child",
".",
"export_to_dict",
"(",
"recursive",
"=",
"recursive",
",",
"include_parent_ref",
"=",
"include_parent_ref",
",",
"include_defaults",
"=",
"include_defaults",
",",
")",
"for",
"child",
"in",
"getattr",
"(",
"self",
",",
"c",
")",
"]",
",",
"key",
"=",
"lambda",
"k",
":",
"sorted",
"(",
"k",
".",
"items",
"(",
")",
")",
")",
"return",
"dict_rep"
] |
Export obj to dictionary
|
[
"Export",
"obj",
"to",
"dictionary"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/models/helpers.py#L186-L217
|
21,450
|
apache/incubator-superset
|
superset/models/helpers.py
|
ImportMixin.override
|
def override(self, obj):
"""Overrides the plain fields of the dashboard."""
for field in obj.__class__.export_fields:
setattr(self, field, getattr(obj, field))
|
python
|
def override(self, obj):
"""Overrides the plain fields of the dashboard."""
for field in obj.__class__.export_fields:
setattr(self, field, getattr(obj, field))
|
[
"def",
"override",
"(",
"self",
",",
"obj",
")",
":",
"for",
"field",
"in",
"obj",
".",
"__class__",
".",
"export_fields",
":",
"setattr",
"(",
"self",
",",
"field",
",",
"getattr",
"(",
"obj",
",",
"field",
")",
")"
] |
Overrides the plain fields of the dashboard.
|
[
"Overrides",
"the",
"plain",
"fields",
"of",
"the",
"dashboard",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/models/helpers.py#L219-L222
|
21,451
|
apache/incubator-superset
|
superset/legacy.py
|
update_time_range
|
def update_time_range(form_data):
"""Move since and until to time_range."""
if 'since' in form_data or 'until' in form_data:
form_data['time_range'] = '{} : {}'.format(
form_data.pop('since', '') or '',
form_data.pop('until', '') or '',
)
|
python
|
def update_time_range(form_data):
"""Move since and until to time_range."""
if 'since' in form_data or 'until' in form_data:
form_data['time_range'] = '{} : {}'.format(
form_data.pop('since', '') or '',
form_data.pop('until', '') or '',
)
|
[
"def",
"update_time_range",
"(",
"form_data",
")",
":",
"if",
"'since'",
"in",
"form_data",
"or",
"'until'",
"in",
"form_data",
":",
"form_data",
"[",
"'time_range'",
"]",
"=",
"'{} : {}'",
".",
"format",
"(",
"form_data",
".",
"pop",
"(",
"'since'",
",",
"''",
")",
"or",
"''",
",",
"form_data",
".",
"pop",
"(",
"'until'",
",",
"''",
")",
"or",
"''",
",",
")"
] |
Move since and until to time_range.
|
[
"Move",
"since",
"and",
"until",
"to",
"time_range",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/legacy.py#L21-L27
|
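A quick standalone run of the legacy shim above (the function body is copied verbatim so the snippet executes on its own):

def update_time_range(form_data):
    """Move since and until to time_range."""
    if 'since' in form_data or 'until' in form_data:
        form_data['time_range'] = '{} : {}'.format(
            form_data.pop('since', '') or '',
            form_data.pop('until', '') or '',
        )

form_data = {'viz_type': 'table', 'since': '7 days ago', 'until': 'now'}
update_time_range(form_data)
print(form_data)
# {'viz_type': 'table', 'time_range': '7 days ago : now'}
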
21,452
|
apache/incubator-superset
|
superset/utils/cache.py
|
memoized_func
|
def memoized_func(key=view_cache_key, attribute_in_key=None):
"""Use this decorator to cache functions that have predefined first arg.
enable_cache is treated as True by default,
except enable_cache = False is passed to the decorated function.
force means whether to force refresh the cache and is treated as False by default,
except force = True is passed to the decorated function.
timeout of cache is set to 600 seconds by default,
except cache_timeout = {timeout in seconds} is passed to the decorated function.
memoized_func uses simple_cache and stored the data in memory.
Key is a callable function that takes function arguments and
returns the caching key.
"""
def wrap(f):
if tables_cache:
def wrapped_f(self, *args, **kwargs):
if not kwargs.get('cache', True):
return f(self, *args, **kwargs)
if attribute_in_key:
cache_key = key(*args, **kwargs).format(
getattr(self, attribute_in_key))
else:
cache_key = key(*args, **kwargs)
o = tables_cache.get(cache_key)
if not kwargs.get('force') and o is not None:
return o
o = f(self, *args, **kwargs)
tables_cache.set(cache_key, o,
timeout=kwargs.get('cache_timeout'))
return o
else:
# noop
def wrapped_f(self, *args, **kwargs):
return f(self, *args, **kwargs)
return wrapped_f
return wrap
|
python
|
def memoized_func(key=view_cache_key, attribute_in_key=None):
"""Use this decorator to cache functions that have predefined first arg.
enable_cache is treated as True by default,
except enable_cache = False is passed to the decorated function.
force means whether to force refresh the cache and is treated as False by default,
except force = True is passed to the decorated function.
timeout of cache is set to 600 seconds by default,
except cache_timeout = {timeout in seconds} is passed to the decorated function.
memoized_func uses simple_cache and stored the data in memory.
Key is a callable function that takes function arguments and
returns the caching key.
"""
def wrap(f):
if tables_cache:
def wrapped_f(self, *args, **kwargs):
if not kwargs.get('cache', True):
return f(self, *args, **kwargs)
if attribute_in_key:
cache_key = key(*args, **kwargs).format(
getattr(self, attribute_in_key))
else:
cache_key = key(*args, **kwargs)
o = tables_cache.get(cache_key)
if not kwargs.get('force') and o is not None:
return o
o = f(self, *args, **kwargs)
tables_cache.set(cache_key, o,
timeout=kwargs.get('cache_timeout'))
return o
else:
# noop
def wrapped_f(self, *args, **kwargs):
return f(self, *args, **kwargs)
return wrapped_f
return wrap
|
[
"def",
"memoized_func",
"(",
"key",
"=",
"view_cache_key",
",",
"attribute_in_key",
"=",
"None",
")",
":",
"def",
"wrap",
"(",
"f",
")",
":",
"if",
"tables_cache",
":",
"def",
"wrapped_f",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"if",
"not",
"kwargs",
".",
"get",
"(",
"'cache'",
",",
"True",
")",
":",
"return",
"f",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"if",
"attribute_in_key",
":",
"cache_key",
"=",
"key",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
".",
"format",
"(",
"getattr",
"(",
"self",
",",
"attribute_in_key",
")",
")",
"else",
":",
"cache_key",
"=",
"key",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"o",
"=",
"tables_cache",
".",
"get",
"(",
"cache_key",
")",
"if",
"not",
"kwargs",
".",
"get",
"(",
"'force'",
")",
"and",
"o",
"is",
"not",
"None",
":",
"return",
"o",
"o",
"=",
"f",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"tables_cache",
".",
"set",
"(",
"cache_key",
",",
"o",
",",
"timeout",
"=",
"kwargs",
".",
"get",
"(",
"'cache_timeout'",
")",
")",
"return",
"o",
"else",
":",
"# noop",
"def",
"wrapped_f",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"return",
"f",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"return",
"wrapped_f",
"return",
"wrap"
] |
Use this decorator to cache functions that have predefined first arg.
enable_cache is treated as True by default,
except enable_cache = False is passed to the decorated function.
force means whether to force refresh the cache and is treated as False by default,
except force = True is passed to the decorated function.
timeout of cache is set to 600 seconds by default,
except cache_timeout = {timeout in seconds} is passed to the decorated function.
memoized_func uses simple_cache and stores the data in memory.
Key is a callable function that takes function arguments and
returns the caching key.
|
[
"Use",
"this",
"decorator",
"to",
"cache",
"functions",
"that",
"have",
"predefined",
"first",
"arg",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/cache.py#L28-L67
|
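A simplified, self-contained sketch of the caching pattern documented in the memoized_func record above; a plain dict stands in for tables_cache, the key builder is a hypothetical placeholder, and cache timeouts are omitted:

fake_cache = {}

def view_cache_key(*args, **kwargs):
    # Hypothetical key builder, standing in for the real view_cache_key.
    return 'view:' + ':'.join(map(str, args))

def memoized_func(key=view_cache_key):
    def wrap(f):
        def wrapped_f(self, *args, **kwargs):
            if not kwargs.get('cache', True):
                return f(self, *args, **kwargs)
            cache_key = key(*args, **kwargs)
            if not kwargs.get('force') and cache_key in fake_cache:
                return fake_cache[cache_key]
            result = f(self, *args, **kwargs)
            fake_cache[cache_key] = result
            return result
        return wrapped_f
    return wrap

class Tables:
    @memoized_func()
    def list_tables(self, schema, cache=True, force=False):
        print('expensive lookup for', schema)
        return ['t1', 't2']

t = Tables()
t.list_tables('main')   # triggers the expensive lookup
t.list_tables('main')   # served from fake_cache, no second lookup
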
21,453
|
apache/incubator-superset
|
superset/views/core.py
|
check_datasource_perms
|
def check_datasource_perms(self, datasource_type=None, datasource_id=None):
"""
Check if user can access a cached response from explore_json.
This function takes `self` since it must have the same signature as the
the decorated method.
"""
form_data = get_form_data()[0]
datasource_id, datasource_type = get_datasource_info(
datasource_id, datasource_type, form_data)
viz_obj = get_viz(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
force=False,
)
security_manager.assert_datasource_permission(viz_obj.datasource)
|
python
|
def check_datasource_perms(self, datasource_type=None, datasource_id=None):
"""
Check if user can access a cached response from explore_json.
This function takes `self` since it must have the same signature as the
the decorated method.
"""
form_data = get_form_data()[0]
datasource_id, datasource_type = get_datasource_info(
datasource_id, datasource_type, form_data)
viz_obj = get_viz(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
force=False,
)
security_manager.assert_datasource_permission(viz_obj.datasource)
|
[
"def",
"check_datasource_perms",
"(",
"self",
",",
"datasource_type",
"=",
"None",
",",
"datasource_id",
"=",
"None",
")",
":",
"form_data",
"=",
"get_form_data",
"(",
")",
"[",
"0",
"]",
"datasource_id",
",",
"datasource_type",
"=",
"get_datasource_info",
"(",
"datasource_id",
",",
"datasource_type",
",",
"form_data",
")",
"viz_obj",
"=",
"get_viz",
"(",
"datasource_type",
"=",
"datasource_type",
",",
"datasource_id",
"=",
"datasource_id",
",",
"form_data",
"=",
"form_data",
",",
"force",
"=",
"False",
",",
")",
"security_manager",
".",
"assert_datasource_permission",
"(",
"viz_obj",
".",
"datasource",
")"
] |
Check if user can access a cached response from explore_json.
This function takes `self` since it must have the same signature as
the decorated method.
|
[
"Check",
"if",
"user",
"can",
"access",
"a",
"cached",
"response",
"from",
"explore_json",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L106-L123
|
21,454
|
apache/incubator-superset
|
superset/views/core.py
|
check_slice_perms
|
def check_slice_perms(self, slice_id):
"""
Check if user can access a cached response from slice_json.
This function takes `self` since it must have the same signature as the
the decorated method.
"""
form_data, slc = get_form_data(slice_id, use_slice_data=True)
datasource_type = slc.datasource.type
datasource_id = slc.datasource.id
viz_obj = get_viz(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
force=False,
)
security_manager.assert_datasource_permission(viz_obj.datasource)
|
python
|
def check_slice_perms(self, slice_id):
"""
Check if user can access a cached response from slice_json.
This function takes `self` since it must have the same signature as the
the decorated method.
"""
form_data, slc = get_form_data(slice_id, use_slice_data=True)
datasource_type = slc.datasource.type
datasource_id = slc.datasource.id
viz_obj = get_viz(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
force=False,
)
security_manager.assert_datasource_permission(viz_obj.datasource)
|
[
"def",
"check_slice_perms",
"(",
"self",
",",
"slice_id",
")",
":",
"form_data",
",",
"slc",
"=",
"get_form_data",
"(",
"slice_id",
",",
"use_slice_data",
"=",
"True",
")",
"datasource_type",
"=",
"slc",
".",
"datasource",
".",
"type",
"datasource_id",
"=",
"slc",
".",
"datasource",
".",
"id",
"viz_obj",
"=",
"get_viz",
"(",
"datasource_type",
"=",
"datasource_type",
",",
"datasource_id",
"=",
"datasource_id",
",",
"form_data",
"=",
"form_data",
",",
"force",
"=",
"False",
",",
")",
"security_manager",
".",
"assert_datasource_permission",
"(",
"viz_obj",
".",
"datasource",
")"
] |
Check if user can access a cached response from slice_json.
This function takes `self` since it must have the same signature as
the decorated method.
|
[
"Check",
"if",
"user",
"can",
"access",
"a",
"cached",
"response",
"from",
"slice_json",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L126-L143
|
21,455
|
apache/incubator-superset
|
superset/views/core.py
|
apply_caching
|
def apply_caching(response):
"""Applies the configuration's http headers to all responses"""
for k, v in config.get('HTTP_HEADERS').items():
response.headers[k] = v
return response
|
python
|
def apply_caching(response):
"""Applies the configuration's http headers to all responses"""
for k, v in config.get('HTTP_HEADERS').items():
response.headers[k] = v
return response
|
[
"def",
"apply_caching",
"(",
"response",
")",
":",
"for",
"k",
",",
"v",
"in",
"config",
".",
"get",
"(",
"'HTTP_HEADERS'",
")",
".",
"items",
"(",
")",
":",
"response",
".",
"headers",
"[",
"k",
"]",
"=",
"v",
"return",
"response"
] |
Applies the configuration's http headers to all responses
|
[
"Applies",
"the",
"configuration",
"s",
"http",
"headers",
"to",
"all",
"responses"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L3017-L3021
|
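A minimal Flask sketch of the same hook: copy a configured header map onto every outgoing response via after_request. The HTTP_HEADERS value here is illustrative, not Superset's default configuration.

from flask import Flask

app = Flask(__name__)
app.config['HTTP_HEADERS'] = {'X-Frame-Options': 'SAMEORIGIN'}

@app.after_request
def apply_caching(response):
    # Apply the configured headers to every response, as in the record above.
    for k, v in app.config['HTTP_HEADERS'].items():
        response.headers[k] = v
    return response

@app.route('/ping')
def ping():
    return 'pong'

if __name__ == '__main__':
    with app.test_client() as client:
        print(client.get('/ping').headers.get('X-Frame-Options'))  # SAMEORIGIN
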
21,456
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.override_role_permissions
|
def override_role_permissions(self):
"""Updates the role with the give datasource permissions.
Permissions not in the request will be revoked. This endpoint should
be available to admins only. Expects JSON in the format:
{
'role_name': '{role_name}',
'database': [{
'datasource_type': '{table|druid}',
'name': '{database_name}',
'schema': [{
'name': '{schema_name}',
'datasources': ['{datasource name}, {datasource name}']
}]
}]
}
"""
data = request.get_json(force=True)
role_name = data['role_name']
databases = data['database']
db_ds_names = set()
for dbs in databases:
for schema in dbs['schema']:
for ds_name in schema['datasources']:
fullname = utils.get_datasource_full_name(
dbs['name'], ds_name, schema=schema['name'])
db_ds_names.add(fullname)
existing_datasources = ConnectorRegistry.get_all_datasources(db.session)
datasources = [
d for d in existing_datasources if d.full_name in db_ds_names]
role = security_manager.find_role(role_name)
# remove all permissions
role.permissions = []
# grant permissions to the list of datasources
granted_perms = []
for datasource in datasources:
view_menu_perm = security_manager.find_permission_view_menu(
view_menu_name=datasource.perm,
permission_name='datasource_access')
# prevent creating empty permissions
if view_menu_perm and view_menu_perm.view_menu:
role.permissions.append(view_menu_perm)
granted_perms.append(view_menu_perm.view_menu.name)
db.session.commit()
return self.json_response({
'granted': granted_perms,
'requested': list(db_ds_names),
}, status=201)
|
python
|
def override_role_permissions(self):
"""Updates the role with the give datasource permissions.
Permissions not in the request will be revoked. This endpoint should
be available to admins only. Expects JSON in the format:
{
'role_name': '{role_name}',
'database': [{
'datasource_type': '{table|druid}',
'name': '{database_name}',
'schema': [{
'name': '{schema_name}',
'datasources': ['{datasource name}, {datasource name}']
}]
}]
}
"""
data = request.get_json(force=True)
role_name = data['role_name']
databases = data['database']
db_ds_names = set()
for dbs in databases:
for schema in dbs['schema']:
for ds_name in schema['datasources']:
fullname = utils.get_datasource_full_name(
dbs['name'], ds_name, schema=schema['name'])
db_ds_names.add(fullname)
existing_datasources = ConnectorRegistry.get_all_datasources(db.session)
datasources = [
d for d in existing_datasources if d.full_name in db_ds_names]
role = security_manager.find_role(role_name)
# remove all permissions
role.permissions = []
# grant permissions to the list of datasources
granted_perms = []
for datasource in datasources:
view_menu_perm = security_manager.find_permission_view_menu(
view_menu_name=datasource.perm,
permission_name='datasource_access')
# prevent creating empty permissions
if view_menu_perm and view_menu_perm.view_menu:
role.permissions.append(view_menu_perm)
granted_perms.append(view_menu_perm.view_menu.name)
db.session.commit()
return self.json_response({
'granted': granted_perms,
'requested': list(db_ds_names),
}, status=201)
|
[
"def",
"override_role_permissions",
"(",
"self",
")",
":",
"data",
"=",
"request",
".",
"get_json",
"(",
"force",
"=",
"True",
")",
"role_name",
"=",
"data",
"[",
"'role_name'",
"]",
"databases",
"=",
"data",
"[",
"'database'",
"]",
"db_ds_names",
"=",
"set",
"(",
")",
"for",
"dbs",
"in",
"databases",
":",
"for",
"schema",
"in",
"dbs",
"[",
"'schema'",
"]",
":",
"for",
"ds_name",
"in",
"schema",
"[",
"'datasources'",
"]",
":",
"fullname",
"=",
"utils",
".",
"get_datasource_full_name",
"(",
"dbs",
"[",
"'name'",
"]",
",",
"ds_name",
",",
"schema",
"=",
"schema",
"[",
"'name'",
"]",
")",
"db_ds_names",
".",
"add",
"(",
"fullname",
")",
"existing_datasources",
"=",
"ConnectorRegistry",
".",
"get_all_datasources",
"(",
"db",
".",
"session",
")",
"datasources",
"=",
"[",
"d",
"for",
"d",
"in",
"existing_datasources",
"if",
"d",
".",
"full_name",
"in",
"db_ds_names",
"]",
"role",
"=",
"security_manager",
".",
"find_role",
"(",
"role_name",
")",
"# remove all permissions",
"role",
".",
"permissions",
"=",
"[",
"]",
"# grant permissions to the list of datasources",
"granted_perms",
"=",
"[",
"]",
"for",
"datasource",
"in",
"datasources",
":",
"view_menu_perm",
"=",
"security_manager",
".",
"find_permission_view_menu",
"(",
"view_menu_name",
"=",
"datasource",
".",
"perm",
",",
"permission_name",
"=",
"'datasource_access'",
")",
"# prevent creating empty permissions",
"if",
"view_menu_perm",
"and",
"view_menu_perm",
".",
"view_menu",
":",
"role",
".",
"permissions",
".",
"append",
"(",
"view_menu_perm",
")",
"granted_perms",
".",
"append",
"(",
"view_menu_perm",
".",
"view_menu",
".",
"name",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"return",
"self",
".",
"json_response",
"(",
"{",
"'granted'",
":",
"granted_perms",
",",
"'requested'",
":",
"list",
"(",
"db_ds_names",
")",
",",
"}",
",",
"status",
"=",
"201",
")"
] |
Updates the role with the given datasource permissions.
Permissions not in the request will be revoked. This endpoint should
be available to admins only. Expects JSON in the format:
{
'role_name': '{role_name}',
'database': [{
'datasource_type': '{table|druid}',
'name': '{database_name}',
'schema': [{
'name': '{schema_name}',
'datasources': ['{datasource name}, {datasource name}']
}]
}]
}
|
[
"Updates",
"the",
"role",
"with",
"the",
"give",
"datasource",
"permissions",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L862-L911
|
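An example request body in the shape documented above, together with the name-flattening step the handler performs; get_datasource_full_name is approximated here as "database.schema.datasource":

payload = {
    'role_name': 'Gamma_examples',
    'database': [{
        'datasource_type': 'table',
        'name': 'examples',
        'schema': [{
            'name': 'public',
            'datasources': ['birth_names', 'energy_usage'],
        }],
    }],
}

db_ds_names = set()
for dbs in payload['database']:
    for schema in dbs['schema']:
        for ds_name in schema['datasources']:
            # Approximation of utils.get_datasource_full_name.
            db_ds_names.add('{}.{}.{}'.format(dbs['name'], schema['name'], ds_name))

print(sorted(db_ds_names))
# ['examples.public.birth_names', 'examples.public.energy_usage']
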
21,457
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.explore_json
|
def explore_json(self, datasource_type=None, datasource_id=None):
"""Serves all request that GET or POST form_data
This endpoint evolved to be the entry point of many different
requests that GETs or POSTs a form_data.
`self.generate_json` receives this input and returns different
payloads based on the request args in the first block
TODO: break into one endpoint for each return shape"""
csv = request.args.get('csv') == 'true'
query = request.args.get('query') == 'true'
results = request.args.get('results') == 'true'
samples = request.args.get('samples') == 'true'
force = request.args.get('force') == 'true'
form_data = get_form_data()[0]
datasource_id, datasource_type = get_datasource_info(
datasource_id, datasource_type, form_data)
viz_obj = get_viz(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
force=force,
)
return self.generate_json(
viz_obj,
csv=csv,
query=query,
results=results,
samples=samples,
)
|
python
|
def explore_json(self, datasource_type=None, datasource_id=None):
"""Serves all request that GET or POST form_data
This endpoint evolved to be the entry point of many different
requests that GETs or POSTs a form_data.
`self.generate_json` receives this input and returns different
payloads based on the request args in the first block
TODO: break into one endpoint for each return shape"""
csv = request.args.get('csv') == 'true'
query = request.args.get('query') == 'true'
results = request.args.get('results') == 'true'
samples = request.args.get('samples') == 'true'
force = request.args.get('force') == 'true'
form_data = get_form_data()[0]
datasource_id, datasource_type = get_datasource_info(
datasource_id, datasource_type, form_data)
viz_obj = get_viz(
datasource_type=datasource_type,
datasource_id=datasource_id,
form_data=form_data,
force=force,
)
return self.generate_json(
viz_obj,
csv=csv,
query=query,
results=results,
samples=samples,
)
|
[
"def",
"explore_json",
"(",
"self",
",",
"datasource_type",
"=",
"None",
",",
"datasource_id",
"=",
"None",
")",
":",
"csv",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'csv'",
")",
"==",
"'true'",
"query",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'query'",
")",
"==",
"'true'",
"results",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'results'",
")",
"==",
"'true'",
"samples",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'samples'",
")",
"==",
"'true'",
"force",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'force'",
")",
"==",
"'true'",
"form_data",
"=",
"get_form_data",
"(",
")",
"[",
"0",
"]",
"datasource_id",
",",
"datasource_type",
"=",
"get_datasource_info",
"(",
"datasource_id",
",",
"datasource_type",
",",
"form_data",
")",
"viz_obj",
"=",
"get_viz",
"(",
"datasource_type",
"=",
"datasource_type",
",",
"datasource_id",
"=",
"datasource_id",
",",
"form_data",
"=",
"form_data",
",",
"force",
"=",
"force",
",",
")",
"return",
"self",
".",
"generate_json",
"(",
"viz_obj",
",",
"csv",
"=",
"csv",
",",
"query",
"=",
"query",
",",
"results",
"=",
"results",
",",
"samples",
"=",
"samples",
",",
")"
] |
Serves all requests that GET or POST form_data.
This endpoint evolved to be the entry point of many different
requests that GET or POST a form_data.
`self.generate_json` receives this input and returns different
payloads based on the request args in the first block
TODO: break into one endpoint for each return shape
|
[
"Serves",
"all",
"request",
"that",
"GET",
"or",
"POST",
"form_data"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1233-L1265
|
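The first block of explore_json treats only the literal string 'true' as truthy; a standalone illustration with a plain dict standing in for request.args:

def parse_flags(args):
    # Mirrors the request.args.get(name) == 'true' checks in the record above.
    return {name: args.get(name) == 'true'
            for name in ('csv', 'query', 'results', 'samples', 'force')}

print(parse_flags({'csv': 'true', 'force': 'false'}))
# {'csv': True, 'query': False, 'results': False, 'samples': False, 'force': False}
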
21,458
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.import_dashboards
|
def import_dashboards(self):
"""Overrides the dashboards using json instances from the file."""
f = request.files.get('file')
if request.method == 'POST' and f:
dashboard_import_export.import_dashboards(db.session, f.stream)
return redirect('/dashboard/list/')
return self.render_template('superset/import_dashboards.html')
|
python
|
def import_dashboards(self):
"""Overrides the dashboards using json instances from the file."""
f = request.files.get('file')
if request.method == 'POST' and f:
dashboard_import_export.import_dashboards(db.session, f.stream)
return redirect('/dashboard/list/')
return self.render_template('superset/import_dashboards.html')
|
[
"def",
"import_dashboards",
"(",
"self",
")",
":",
"f",
"=",
"request",
".",
"files",
".",
"get",
"(",
"'file'",
")",
"if",
"request",
".",
"method",
"==",
"'POST'",
"and",
"f",
":",
"dashboard_import_export",
".",
"import_dashboards",
"(",
"db",
".",
"session",
",",
"f",
".",
"stream",
")",
"return",
"redirect",
"(",
"'/dashboard/list/'",
")",
"return",
"self",
".",
"render_template",
"(",
"'superset/import_dashboards.html'",
")"
] |
Overrides the dashboards using json instances from the file.
|
[
"Overrides",
"the",
"dashboards",
"using",
"json",
"instances",
"from",
"the",
"file",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1270-L1276
|
21,459
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.explorev2
|
def explorev2(self, datasource_type, datasource_id):
"""Deprecated endpoint, here for backward compatibility of urls"""
return redirect(url_for(
'Superset.explore',
datasource_type=datasource_type,
datasource_id=datasource_id,
**request.args))
|
python
|
def explorev2(self, datasource_type, datasource_id):
"""Deprecated endpoint, here for backward compatibility of urls"""
return redirect(url_for(
'Superset.explore',
datasource_type=datasource_type,
datasource_id=datasource_id,
**request.args))
|
[
"def",
"explorev2",
"(",
"self",
",",
"datasource_type",
",",
"datasource_id",
")",
":",
"return",
"redirect",
"(",
"url_for",
"(",
"'Superset.explore'",
",",
"datasource_type",
"=",
"datasource_type",
",",
"datasource_id",
"=",
"datasource_id",
",",
"*",
"*",
"request",
".",
"args",
")",
")"
] |
Deprecated endpoint, here for backward compatibility of urls
|
[
"Deprecated",
"endpoint",
"here",
"for",
"backward",
"compatibility",
"of",
"urls"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1281-L1287
|
21,460
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.filter
|
def filter(self, datasource_type, datasource_id, column):
"""
Endpoint to retrieve values for specified column.
:param datasource_type: Type of datasource e.g. table
:param datasource_id: Datasource id
:param column: Column name to retrieve values for
:return:
"""
# TODO: Cache endpoint by user, datasource and column
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session)
if not datasource:
return json_error_response(DATASOURCE_MISSING_ERR)
security_manager.assert_datasource_permission(datasource)
payload = json.dumps(
datasource.values_for_column(
column,
config.get('FILTER_SELECT_ROW_LIMIT', 10000),
),
default=utils.json_int_dttm_ser)
return json_success(payload)
|
python
|
def filter(self, datasource_type, datasource_id, column):
"""
Endpoint to retrieve values for specified column.
:param datasource_type: Type of datasource e.g. table
:param datasource_id: Datasource id
:param column: Column name to retrieve values for
:return:
"""
# TODO: Cache endpoint by user, datasource and column
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session)
if not datasource:
return json_error_response(DATASOURCE_MISSING_ERR)
security_manager.assert_datasource_permission(datasource)
payload = json.dumps(
datasource.values_for_column(
column,
config.get('FILTER_SELECT_ROW_LIMIT', 10000),
),
default=utils.json_int_dttm_ser)
return json_success(payload)
|
[
"def",
"filter",
"(",
"self",
",",
"datasource_type",
",",
"datasource_id",
",",
"column",
")",
":",
"# TODO: Cache endpoint by user, datasource and column",
"datasource",
"=",
"ConnectorRegistry",
".",
"get_datasource",
"(",
"datasource_type",
",",
"datasource_id",
",",
"db",
".",
"session",
")",
"if",
"not",
"datasource",
":",
"return",
"json_error_response",
"(",
"DATASOURCE_MISSING_ERR",
")",
"security_manager",
".",
"assert_datasource_permission",
"(",
"datasource",
")",
"payload",
"=",
"json",
".",
"dumps",
"(",
"datasource",
".",
"values_for_column",
"(",
"column",
",",
"config",
".",
"get",
"(",
"'FILTER_SELECT_ROW_LIMIT'",
",",
"10000",
")",
",",
")",
",",
"default",
"=",
"utils",
".",
"json_int_dttm_ser",
")",
"return",
"json_success",
"(",
"payload",
")"
] |
Endpoint to retrieve values for specified column.
:param datasource_type: Type of datasource e.g. table
:param datasource_id: Datasource id
:param column: Column name to retrieve values for
:return:
|
[
"Endpoint",
"to",
"retrieve",
"values",
"for",
"specified",
"column",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1394-L1415
|
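json_int_dttm_ser in the record above is Superset's own serializer; a comparable stand-in that converts datetimes to epoch milliseconds shows why a default= hook is needed for the column values to survive json.dumps:

import json
from datetime import datetime, timezone

def epoch_ms_ser(obj):
    # Hypothetical substitute for utils.json_int_dttm_ser.
    if isinstance(obj, datetime):
        return int(obj.timestamp() * 1000)
    raise TypeError('not serializable: %r' % (obj,))

values = ['SFO', 'JFK', datetime(2019, 1, 1, tzinfo=timezone.utc)]
print(json.dumps(values, default=epoch_ms_ser))
# ["SFO", "JFK", 1546300800000]
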
21,461
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.save_or_overwrite_slice
|
def save_or_overwrite_slice(
self, args, slc, slice_add_perm, slice_overwrite_perm, slice_download_perm,
datasource_id, datasource_type, datasource_name):
"""Save or overwrite a slice"""
slice_name = args.get('slice_name')
action = args.get('action')
form_data = get_form_data()[0]
if action in ('saveas'):
if 'slice_id' in form_data:
form_data.pop('slice_id') # don't save old slice_id
slc = models.Slice(owners=[g.user] if g.user else [])
slc.params = json.dumps(form_data, indent=2, sort_keys=True)
slc.datasource_name = datasource_name
slc.viz_type = form_data['viz_type']
slc.datasource_type = datasource_type
slc.datasource_id = datasource_id
slc.slice_name = slice_name
if action in ('saveas') and slice_add_perm:
self.save_slice(slc)
elif action == 'overwrite' and slice_overwrite_perm:
self.overwrite_slice(slc)
# Adding slice to a dashboard if requested
dash = None
if request.args.get('add_to_dash') == 'existing':
dash = (
db.session.query(models.Dashboard)
.filter_by(id=int(request.args.get('save_to_dashboard_id')))
.one()
)
# check edit dashboard permissions
dash_overwrite_perm = check_ownership(dash, raise_if_false=False)
if not dash_overwrite_perm:
return json_error_response(
_('You don\'t have the rights to ') + _('alter this ') +
_('dashboard'),
status=400)
flash(
_('Chart [{}] was added to dashboard [{}]').format(
slc.slice_name,
dash.dashboard_title),
'info')
elif request.args.get('add_to_dash') == 'new':
# check create dashboard permissions
dash_add_perm = security_manager.can_access('can_add', 'DashboardModelView')
if not dash_add_perm:
return json_error_response(
_('You don\'t have the rights to ') + _('create a ') + _('dashboard'),
status=400)
dash = models.Dashboard(
dashboard_title=request.args.get('new_dashboard_name'),
owners=[g.user] if g.user else [])
flash(
_('Dashboard [{}] just got created and chart [{}] was added '
'to it').format(
dash.dashboard_title,
slc.slice_name),
'info')
if dash and slc not in dash.slices:
dash.slices.append(slc)
db.session.commit()
response = {
'can_add': slice_add_perm,
'can_download': slice_download_perm,
'can_overwrite': is_owner(slc, g.user),
'form_data': slc.form_data,
'slice': slc.data,
'dashboard_id': dash.id if dash else None,
}
if request.args.get('goto_dash') == 'true':
response.update({'dashboard': dash.url})
return json_success(json.dumps(response))
|
python
|
def save_or_overwrite_slice(
self, args, slc, slice_add_perm, slice_overwrite_perm, slice_download_perm,
datasource_id, datasource_type, datasource_name):
"""Save or overwrite a slice"""
slice_name = args.get('slice_name')
action = args.get('action')
form_data = get_form_data()[0]
if action in ('saveas'):
if 'slice_id' in form_data:
form_data.pop('slice_id') # don't save old slice_id
slc = models.Slice(owners=[g.user] if g.user else [])
slc.params = json.dumps(form_data, indent=2, sort_keys=True)
slc.datasource_name = datasource_name
slc.viz_type = form_data['viz_type']
slc.datasource_type = datasource_type
slc.datasource_id = datasource_id
slc.slice_name = slice_name
if action in ('saveas') and slice_add_perm:
self.save_slice(slc)
elif action == 'overwrite' and slice_overwrite_perm:
self.overwrite_slice(slc)
# Adding slice to a dashboard if requested
dash = None
if request.args.get('add_to_dash') == 'existing':
dash = (
db.session.query(models.Dashboard)
.filter_by(id=int(request.args.get('save_to_dashboard_id')))
.one()
)
# check edit dashboard permissions
dash_overwrite_perm = check_ownership(dash, raise_if_false=False)
if not dash_overwrite_perm:
return json_error_response(
_('You don\'t have the rights to ') + _('alter this ') +
_('dashboard'),
status=400)
flash(
_('Chart [{}] was added to dashboard [{}]').format(
slc.slice_name,
dash.dashboard_title),
'info')
elif request.args.get('add_to_dash') == 'new':
# check create dashboard permissions
dash_add_perm = security_manager.can_access('can_add', 'DashboardModelView')
if not dash_add_perm:
return json_error_response(
_('You don\'t have the rights to ') + _('create a ') + _('dashboard'),
status=400)
dash = models.Dashboard(
dashboard_title=request.args.get('new_dashboard_name'),
owners=[g.user] if g.user else [])
flash(
_('Dashboard [{}] just got created and chart [{}] was added '
'to it').format(
dash.dashboard_title,
slc.slice_name),
'info')
if dash and slc not in dash.slices:
dash.slices.append(slc)
db.session.commit()
response = {
'can_add': slice_add_perm,
'can_download': slice_download_perm,
'can_overwrite': is_owner(slc, g.user),
'form_data': slc.form_data,
'slice': slc.data,
'dashboard_id': dash.id if dash else None,
}
if request.args.get('goto_dash') == 'true':
response.update({'dashboard': dash.url})
return json_success(json.dumps(response))
|
[
"def",
"save_or_overwrite_slice",
"(",
"self",
",",
"args",
",",
"slc",
",",
"slice_add_perm",
",",
"slice_overwrite_perm",
",",
"slice_download_perm",
",",
"datasource_id",
",",
"datasource_type",
",",
"datasource_name",
")",
":",
"slice_name",
"=",
"args",
".",
"get",
"(",
"'slice_name'",
")",
"action",
"=",
"args",
".",
"get",
"(",
"'action'",
")",
"form_data",
"=",
"get_form_data",
"(",
")",
"[",
"0",
"]",
"if",
"action",
"in",
"(",
"'saveas'",
")",
":",
"if",
"'slice_id'",
"in",
"form_data",
":",
"form_data",
".",
"pop",
"(",
"'slice_id'",
")",
"# don't save old slice_id",
"slc",
"=",
"models",
".",
"Slice",
"(",
"owners",
"=",
"[",
"g",
".",
"user",
"]",
"if",
"g",
".",
"user",
"else",
"[",
"]",
")",
"slc",
".",
"params",
"=",
"json",
".",
"dumps",
"(",
"form_data",
",",
"indent",
"=",
"2",
",",
"sort_keys",
"=",
"True",
")",
"slc",
".",
"datasource_name",
"=",
"datasource_name",
"slc",
".",
"viz_type",
"=",
"form_data",
"[",
"'viz_type'",
"]",
"slc",
".",
"datasource_type",
"=",
"datasource_type",
"slc",
".",
"datasource_id",
"=",
"datasource_id",
"slc",
".",
"slice_name",
"=",
"slice_name",
"if",
"action",
"in",
"(",
"'saveas'",
")",
"and",
"slice_add_perm",
":",
"self",
".",
"save_slice",
"(",
"slc",
")",
"elif",
"action",
"==",
"'overwrite'",
"and",
"slice_overwrite_perm",
":",
"self",
".",
"overwrite_slice",
"(",
"slc",
")",
"# Adding slice to a dashboard if requested",
"dash",
"=",
"None",
"if",
"request",
".",
"args",
".",
"get",
"(",
"'add_to_dash'",
")",
"==",
"'existing'",
":",
"dash",
"=",
"(",
"db",
".",
"session",
".",
"query",
"(",
"models",
".",
"Dashboard",
")",
".",
"filter_by",
"(",
"id",
"=",
"int",
"(",
"request",
".",
"args",
".",
"get",
"(",
"'save_to_dashboard_id'",
")",
")",
")",
".",
"one",
"(",
")",
")",
"# check edit dashboard permissions",
"dash_overwrite_perm",
"=",
"check_ownership",
"(",
"dash",
",",
"raise_if_false",
"=",
"False",
")",
"if",
"not",
"dash_overwrite_perm",
":",
"return",
"json_error_response",
"(",
"_",
"(",
"'You don\\'t have the rights to '",
")",
"+",
"_",
"(",
"'alter this '",
")",
"+",
"_",
"(",
"'dashboard'",
")",
",",
"status",
"=",
"400",
")",
"flash",
"(",
"_",
"(",
"'Chart [{}] was added to dashboard [{}]'",
")",
".",
"format",
"(",
"slc",
".",
"slice_name",
",",
"dash",
".",
"dashboard_title",
")",
",",
"'info'",
")",
"elif",
"request",
".",
"args",
".",
"get",
"(",
"'add_to_dash'",
")",
"==",
"'new'",
":",
"# check create dashboard permissions",
"dash_add_perm",
"=",
"security_manager",
".",
"can_access",
"(",
"'can_add'",
",",
"'DashboardModelView'",
")",
"if",
"not",
"dash_add_perm",
":",
"return",
"json_error_response",
"(",
"_",
"(",
"'You don\\'t have the rights to '",
")",
"+",
"_",
"(",
"'create a '",
")",
"+",
"_",
"(",
"'dashboard'",
")",
",",
"status",
"=",
"400",
")",
"dash",
"=",
"models",
".",
"Dashboard",
"(",
"dashboard_title",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'new_dashboard_name'",
")",
",",
"owners",
"=",
"[",
"g",
".",
"user",
"]",
"if",
"g",
".",
"user",
"else",
"[",
"]",
")",
"flash",
"(",
"_",
"(",
"'Dashboard [{}] just got created and chart [{}] was added '",
"'to it'",
")",
".",
"format",
"(",
"dash",
".",
"dashboard_title",
",",
"slc",
".",
"slice_name",
")",
",",
"'info'",
")",
"if",
"dash",
"and",
"slc",
"not",
"in",
"dash",
".",
"slices",
":",
"dash",
".",
"slices",
".",
"append",
"(",
"slc",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"response",
"=",
"{",
"'can_add'",
":",
"slice_add_perm",
",",
"'can_download'",
":",
"slice_download_perm",
",",
"'can_overwrite'",
":",
"is_owner",
"(",
"slc",
",",
"g",
".",
"user",
")",
",",
"'form_data'",
":",
"slc",
".",
"form_data",
",",
"'slice'",
":",
"slc",
".",
"data",
",",
"'dashboard_id'",
":",
"dash",
".",
"id",
"if",
"dash",
"else",
"None",
",",
"}",
"if",
"request",
".",
"args",
".",
"get",
"(",
"'goto_dash'",
")",
"==",
"'true'",
":",
"response",
".",
"update",
"(",
"{",
"'dashboard'",
":",
"dash",
".",
"url",
"}",
")",
"return",
"json_success",
"(",
"json",
".",
"dumps",
"(",
"response",
")",
")"
] |
Save or overwrite a slice
|
[
"Save",
"or",
"overwrite",
"a",
"slice"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1417-L1498
|
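One detail worth noting in the record above: `action in ('saveas')` is not tuple membership, because parentheses without a trailing comma do not create a tuple; it is substring containment on the string 'saveas'. A short demonstration:

print('saveas' in ('saveas'))      # True  -- whole-string match on the string 'saveas'
print('save' in ('saveas'))        # True  -- also matches, since this is substring containment
print('saveas' in ('saveas',))     # True  -- tuple membership, usually what is intended
print('save' in ('saveas',))       # False
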
21,462
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.tables
|
def tables(self, db_id, schema, substr, force_refresh='false'):
"""Endpoint to fetch the list of tables for given database"""
db_id = int(db_id)
force_refresh = force_refresh.lower() == 'true'
schema = utils.js_string_to_python(schema)
substr = utils.js_string_to_python(substr)
database = db.session.query(models.Database).filter_by(id=db_id).one()
if schema:
table_names = database.all_table_names_in_schema(
schema=schema, force=force_refresh,
cache=database.table_cache_enabled,
cache_timeout=database.table_cache_timeout)
view_names = database.all_view_names_in_schema(
schema=schema, force=force_refresh,
cache=database.table_cache_enabled,
cache_timeout=database.table_cache_timeout)
else:
table_names = database.all_table_names_in_database(
cache=True, force=False, cache_timeout=24 * 60 * 60)
view_names = database.all_view_names_in_database(
cache=True, force=False, cache_timeout=24 * 60 * 60)
table_names = security_manager.accessible_by_user(database, table_names, schema)
view_names = security_manager.accessible_by_user(database, view_names, schema)
if substr:
table_names = [tn for tn in table_names if substr in tn]
view_names = [vn for vn in view_names if substr in vn]
if not schema and database.default_schemas:
def get_schema(tbl_or_view_name):
return tbl_or_view_name.split('.')[0] if '.' in tbl_or_view_name else None
user_schema = g.user.email.split('@')[0]
valid_schemas = set(database.default_schemas + [user_schema])
table_names = [tn for tn in table_names if get_schema(tn) in valid_schemas]
view_names = [vn for vn in view_names if get_schema(vn) in valid_schemas]
max_items = config.get('MAX_TABLE_NAMES') or len(table_names)
total_items = len(table_names) + len(view_names)
max_tables = len(table_names)
max_views = len(view_names)
if total_items and substr:
max_tables = max_items * len(table_names) // total_items
max_views = max_items * len(view_names) // total_items
table_options = [{'value': tn, 'label': tn}
for tn in table_names[:max_tables]]
table_options.extend([{'value': vn, 'label': '[view] {}'.format(vn)}
for vn in view_names[:max_views]])
payload = {
'tableLength': len(table_names) + len(view_names),
'options': table_options,
}
return json_success(json.dumps(payload))
|
python
|
def tables(self, db_id, schema, substr, force_refresh='false'):
"""Endpoint to fetch the list of tables for given database"""
db_id = int(db_id)
force_refresh = force_refresh.lower() == 'true'
schema = utils.js_string_to_python(schema)
substr = utils.js_string_to_python(substr)
database = db.session.query(models.Database).filter_by(id=db_id).one()
if schema:
table_names = database.all_table_names_in_schema(
schema=schema, force=force_refresh,
cache=database.table_cache_enabled,
cache_timeout=database.table_cache_timeout)
view_names = database.all_view_names_in_schema(
schema=schema, force=force_refresh,
cache=database.table_cache_enabled,
cache_timeout=database.table_cache_timeout)
else:
table_names = database.all_table_names_in_database(
cache=True, force=False, cache_timeout=24 * 60 * 60)
view_names = database.all_view_names_in_database(
cache=True, force=False, cache_timeout=24 * 60 * 60)
table_names = security_manager.accessible_by_user(database, table_names, schema)
view_names = security_manager.accessible_by_user(database, view_names, schema)
if substr:
table_names = [tn for tn in table_names if substr in tn]
view_names = [vn for vn in view_names if substr in vn]
if not schema and database.default_schemas:
def get_schema(tbl_or_view_name):
return tbl_or_view_name.split('.')[0] if '.' in tbl_or_view_name else None
user_schema = g.user.email.split('@')[0]
valid_schemas = set(database.default_schemas + [user_schema])
table_names = [tn for tn in table_names if get_schema(tn) in valid_schemas]
view_names = [vn for vn in view_names if get_schema(vn) in valid_schemas]
max_items = config.get('MAX_TABLE_NAMES') or len(table_names)
total_items = len(table_names) + len(view_names)
max_tables = len(table_names)
max_views = len(view_names)
if total_items and substr:
max_tables = max_items * len(table_names) // total_items
max_views = max_items * len(view_names) // total_items
table_options = [{'value': tn, 'label': tn}
for tn in table_names[:max_tables]]
table_options.extend([{'value': vn, 'label': '[view] {}'.format(vn)}
for vn in view_names[:max_views]])
payload = {
'tableLength': len(table_names) + len(view_names),
'options': table_options,
}
return json_success(json.dumps(payload))
|
[
"def",
"tables",
"(",
"self",
",",
"db_id",
",",
"schema",
",",
"substr",
",",
"force_refresh",
"=",
"'false'",
")",
":",
"db_id",
"=",
"int",
"(",
"db_id",
")",
"force_refresh",
"=",
"force_refresh",
".",
"lower",
"(",
")",
"==",
"'true'",
"schema",
"=",
"utils",
".",
"js_string_to_python",
"(",
"schema",
")",
"substr",
"=",
"utils",
".",
"js_string_to_python",
"(",
"substr",
")",
"database",
"=",
"db",
".",
"session",
".",
"query",
"(",
"models",
".",
"Database",
")",
".",
"filter_by",
"(",
"id",
"=",
"db_id",
")",
".",
"one",
"(",
")",
"if",
"schema",
":",
"table_names",
"=",
"database",
".",
"all_table_names_in_schema",
"(",
"schema",
"=",
"schema",
",",
"force",
"=",
"force_refresh",
",",
"cache",
"=",
"database",
".",
"table_cache_enabled",
",",
"cache_timeout",
"=",
"database",
".",
"table_cache_timeout",
")",
"view_names",
"=",
"database",
".",
"all_view_names_in_schema",
"(",
"schema",
"=",
"schema",
",",
"force",
"=",
"force_refresh",
",",
"cache",
"=",
"database",
".",
"table_cache_enabled",
",",
"cache_timeout",
"=",
"database",
".",
"table_cache_timeout",
")",
"else",
":",
"table_names",
"=",
"database",
".",
"all_table_names_in_database",
"(",
"cache",
"=",
"True",
",",
"force",
"=",
"False",
",",
"cache_timeout",
"=",
"24",
"*",
"60",
"*",
"60",
")",
"view_names",
"=",
"database",
".",
"all_view_names_in_database",
"(",
"cache",
"=",
"True",
",",
"force",
"=",
"False",
",",
"cache_timeout",
"=",
"24",
"*",
"60",
"*",
"60",
")",
"table_names",
"=",
"security_manager",
".",
"accessible_by_user",
"(",
"database",
",",
"table_names",
",",
"schema",
")",
"view_names",
"=",
"security_manager",
".",
"accessible_by_user",
"(",
"database",
",",
"view_names",
",",
"schema",
")",
"if",
"substr",
":",
"table_names",
"=",
"[",
"tn",
"for",
"tn",
"in",
"table_names",
"if",
"substr",
"in",
"tn",
"]",
"view_names",
"=",
"[",
"vn",
"for",
"vn",
"in",
"view_names",
"if",
"substr",
"in",
"vn",
"]",
"if",
"not",
"schema",
"and",
"database",
".",
"default_schemas",
":",
"def",
"get_schema",
"(",
"tbl_or_view_name",
")",
":",
"return",
"tbl_or_view_name",
".",
"split",
"(",
"'.'",
")",
"[",
"0",
"]",
"if",
"'.'",
"in",
"tbl_or_view_name",
"else",
"None",
"user_schema",
"=",
"g",
".",
"user",
".",
"email",
".",
"split",
"(",
"'@'",
")",
"[",
"0",
"]",
"valid_schemas",
"=",
"set",
"(",
"database",
".",
"default_schemas",
"+",
"[",
"user_schema",
"]",
")",
"table_names",
"=",
"[",
"tn",
"for",
"tn",
"in",
"table_names",
"if",
"get_schema",
"(",
"tn",
")",
"in",
"valid_schemas",
"]",
"view_names",
"=",
"[",
"vn",
"for",
"vn",
"in",
"view_names",
"if",
"get_schema",
"(",
"vn",
")",
"in",
"valid_schemas",
"]",
"max_items",
"=",
"config",
".",
"get",
"(",
"'MAX_TABLE_NAMES'",
")",
"or",
"len",
"(",
"table_names",
")",
"total_items",
"=",
"len",
"(",
"table_names",
")",
"+",
"len",
"(",
"view_names",
")",
"max_tables",
"=",
"len",
"(",
"table_names",
")",
"max_views",
"=",
"len",
"(",
"view_names",
")",
"if",
"total_items",
"and",
"substr",
":",
"max_tables",
"=",
"max_items",
"*",
"len",
"(",
"table_names",
")",
"//",
"total_items",
"max_views",
"=",
"max_items",
"*",
"len",
"(",
"view_names",
")",
"//",
"total_items",
"table_options",
"=",
"[",
"{",
"'value'",
":",
"tn",
",",
"'label'",
":",
"tn",
"}",
"for",
"tn",
"in",
"table_names",
"[",
":",
"max_tables",
"]",
"]",
"table_options",
".",
"extend",
"(",
"[",
"{",
"'value'",
":",
"vn",
",",
"'label'",
":",
"'[view] {}'",
".",
"format",
"(",
"vn",
")",
"}",
"for",
"vn",
"in",
"view_names",
"[",
":",
"max_views",
"]",
"]",
")",
"payload",
"=",
"{",
"'tableLength'",
":",
"len",
"(",
"table_names",
")",
"+",
"len",
"(",
"view_names",
")",
",",
"'options'",
":",
"table_options",
",",
"}",
"return",
"json_success",
"(",
"json",
".",
"dumps",
"(",
"payload",
")",
")"
] |
Endpoint to fetch the list of tables for given database
|
[
"Endpoint",
"to",
"fetch",
"the",
"list",
"of",
"tables",
"for",
"given",
"database"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1564-L1619
|
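The truncation arithmetic in the tables endpoint above splits MAX_TABLE_NAMES between tables and views in proportion to how many of each matched the substring filter; extracted here as a standalone helper:

def split_limits(max_items, n_tables, n_views):
    # Same integer-division split as max_tables / max_views in the record above.
    total = n_tables + n_views
    if not total:
        return 0, 0
    return max_items * n_tables // total, max_items * n_views // total

print(split_limits(3000, 9000, 1000))   # (2700, 300)
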
21,463
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.save_dash
|
def save_dash(self, dashboard_id):
"""Save a dashboard's metadata"""
session = db.session()
dash = (session
.query(models.Dashboard)
.filter_by(id=dashboard_id).first())
check_ownership(dash, raise_if_false=True)
data = json.loads(request.form.get('data'))
self._set_dash_metadata(dash, data)
session.merge(dash)
session.commit()
session.close()
return json_success(json.dumps({'status': 'SUCCESS'}))
|
python
|
def save_dash(self, dashboard_id):
"""Save a dashboard's metadata"""
session = db.session()
dash = (session
.query(models.Dashboard)
.filter_by(id=dashboard_id).first())
check_ownership(dash, raise_if_false=True)
data = json.loads(request.form.get('data'))
self._set_dash_metadata(dash, data)
session.merge(dash)
session.commit()
session.close()
return json_success(json.dumps({'status': 'SUCCESS'}))
|
[
"def",
"save_dash",
"(",
"self",
",",
"dashboard_id",
")",
":",
"session",
"=",
"db",
".",
"session",
"(",
")",
"dash",
"=",
"(",
"session",
".",
"query",
"(",
"models",
".",
"Dashboard",
")",
".",
"filter_by",
"(",
"id",
"=",
"dashboard_id",
")",
".",
"first",
"(",
")",
")",
"check_ownership",
"(",
"dash",
",",
"raise_if_false",
"=",
"True",
")",
"data",
"=",
"json",
".",
"loads",
"(",
"request",
".",
"form",
".",
"get",
"(",
"'data'",
")",
")",
"self",
".",
"_set_dash_metadata",
"(",
"dash",
",",
"data",
")",
"session",
".",
"merge",
"(",
"dash",
")",
"session",
".",
"commit",
"(",
")",
"session",
".",
"close",
"(",
")",
"return",
"json_success",
"(",
"json",
".",
"dumps",
"(",
"{",
"'status'",
":",
"'SUCCESS'",
"}",
")",
")"
] |
Save a dashboard's metadata
|
[
"Save",
"a",
"dashboard",
"s",
"metadata"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1674-L1686
|
21,464
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.add_slices
|
def add_slices(self, dashboard_id):
"""Add and save slices to a dashboard"""
data = json.loads(request.form.get('data'))
session = db.session()
Slice = models.Slice # noqa
dash = (
session.query(models.Dashboard).filter_by(id=dashboard_id).first())
check_ownership(dash, raise_if_false=True)
new_slices = session.query(Slice).filter(
Slice.id.in_(data['slice_ids']))
dash.slices += new_slices
session.merge(dash)
session.commit()
session.close()
return 'SLICES ADDED'
|
python
|
def add_slices(self, dashboard_id):
"""Add and save slices to a dashboard"""
data = json.loads(request.form.get('data'))
session = db.session()
Slice = models.Slice # noqa
dash = (
session.query(models.Dashboard).filter_by(id=dashboard_id).first())
check_ownership(dash, raise_if_false=True)
new_slices = session.query(Slice).filter(
Slice.id.in_(data['slice_ids']))
dash.slices += new_slices
session.merge(dash)
session.commit()
session.close()
return 'SLICES ADDED'
|
[
"def",
"add_slices",
"(",
"self",
",",
"dashboard_id",
")",
":",
"data",
"=",
"json",
".",
"loads",
"(",
"request",
".",
"form",
".",
"get",
"(",
"'data'",
")",
")",
"session",
"=",
"db",
".",
"session",
"(",
")",
"Slice",
"=",
"models",
".",
"Slice",
"# noqa",
"dash",
"=",
"(",
"session",
".",
"query",
"(",
"models",
".",
"Dashboard",
")",
".",
"filter_by",
"(",
"id",
"=",
"dashboard_id",
")",
".",
"first",
"(",
")",
")",
"check_ownership",
"(",
"dash",
",",
"raise_if_false",
"=",
"True",
")",
"new_slices",
"=",
"session",
".",
"query",
"(",
"Slice",
")",
".",
"filter",
"(",
"Slice",
".",
"id",
".",
"in_",
"(",
"data",
"[",
"'slice_ids'",
"]",
")",
")",
"dash",
".",
"slices",
"+=",
"new_slices",
"session",
".",
"merge",
"(",
"dash",
")",
"session",
".",
"commit",
"(",
")",
"session",
".",
"close",
"(",
")",
"return",
"'SLICES ADDED'"
] |
Add and save slices to a dashboard
|
[
"Add",
"and",
"save",
"slices",
"to",
"a",
"dashboard"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1749-L1763
|
21,465
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.fave_dashboards_by_username
|
def fave_dashboards_by_username(self, username):
"""This lets us use a user's username to pull favourite dashboards"""
user = security_manager.find_user(username=username)
return self.fave_dashboards(user.get_id())
|
python
|
def fave_dashboards_by_username(self, username):
"""This lets us use a user's username to pull favourite dashboards"""
user = security_manager.find_user(username=username)
return self.fave_dashboards(user.get_id())
|
[
"def",
"fave_dashboards_by_username",
"(",
"self",
",",
"username",
")",
":",
"user",
"=",
"security_manager",
".",
"find_user",
"(",
"username",
"=",
"username",
")",
"return",
"self",
".",
"fave_dashboards",
"(",
"user",
".",
"get_id",
"(",
")",
")"
] |
This lets us use a user's username to pull favourite dashboards
|
[
"This",
"lets",
"us",
"use",
"a",
"user",
"s",
"username",
"to",
"pull",
"favourite",
"dashboards"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1900-L1903
|
21,466
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.user_slices
|
def user_slices(self, user_id=None):
"""List of slices a user created, or faved"""
if not user_id:
user_id = g.user.id
Slice = models.Slice # noqa
FavStar = models.FavStar # noqa
qry = (
db.session.query(Slice,
FavStar.dttm).join(
models.FavStar,
sqla.and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == 'slice',
models.Slice.id == models.FavStar.obj_id,
),
isouter=True).filter(
sqla.or_(
Slice.created_by_fk == user_id,
Slice.changed_by_fk == user_id,
FavStar.user_id == user_id,
),
)
.order_by(Slice.slice_name.asc())
)
payload = [{
'id': o.Slice.id,
'title': o.Slice.slice_name,
'url': o.Slice.slice_url,
'data': o.Slice.form_data,
'dttm': o.dttm if o.dttm else o.Slice.changed_on,
'viz_type': o.Slice.viz_type,
} for o in qry.all()]
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
|
python
|
def user_slices(self, user_id=None):
"""List of slices a user created, or faved"""
if not user_id:
user_id = g.user.id
Slice = models.Slice # noqa
FavStar = models.FavStar # noqa
qry = (
db.session.query(Slice,
FavStar.dttm).join(
models.FavStar,
sqla.and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == 'slice',
models.Slice.id == models.FavStar.obj_id,
),
isouter=True).filter(
sqla.or_(
Slice.created_by_fk == user_id,
Slice.changed_by_fk == user_id,
FavStar.user_id == user_id,
),
)
.order_by(Slice.slice_name.asc())
)
payload = [{
'id': o.Slice.id,
'title': o.Slice.slice_name,
'url': o.Slice.slice_url,
'data': o.Slice.form_data,
'dttm': o.dttm if o.dttm else o.Slice.changed_on,
'viz_type': o.Slice.viz_type,
} for o in qry.all()]
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
|
[
"def",
"user_slices",
"(",
"self",
",",
"user_id",
"=",
"None",
")",
":",
"if",
"not",
"user_id",
":",
"user_id",
"=",
"g",
".",
"user",
".",
"id",
"Slice",
"=",
"models",
".",
"Slice",
"# noqa",
"FavStar",
"=",
"models",
".",
"FavStar",
"# noqa",
"qry",
"=",
"(",
"db",
".",
"session",
".",
"query",
"(",
"Slice",
",",
"FavStar",
".",
"dttm",
")",
".",
"join",
"(",
"models",
".",
"FavStar",
",",
"sqla",
".",
"and_",
"(",
"models",
".",
"FavStar",
".",
"user_id",
"==",
"int",
"(",
"user_id",
")",
",",
"models",
".",
"FavStar",
".",
"class_name",
"==",
"'slice'",
",",
"models",
".",
"Slice",
".",
"id",
"==",
"models",
".",
"FavStar",
".",
"obj_id",
",",
")",
",",
"isouter",
"=",
"True",
")",
".",
"filter",
"(",
"sqla",
".",
"or_",
"(",
"Slice",
".",
"created_by_fk",
"==",
"user_id",
",",
"Slice",
".",
"changed_by_fk",
"==",
"user_id",
",",
"FavStar",
".",
"user_id",
"==",
"user_id",
",",
")",
",",
")",
".",
"order_by",
"(",
"Slice",
".",
"slice_name",
".",
"asc",
"(",
")",
")",
")",
"payload",
"=",
"[",
"{",
"'id'",
":",
"o",
".",
"Slice",
".",
"id",
",",
"'title'",
":",
"o",
".",
"Slice",
".",
"slice_name",
",",
"'url'",
":",
"o",
".",
"Slice",
".",
"slice_url",
",",
"'data'",
":",
"o",
".",
"Slice",
".",
"form_data",
",",
"'dttm'",
":",
"o",
".",
"dttm",
"if",
"o",
".",
"dttm",
"else",
"o",
".",
"Slice",
".",
"changed_on",
",",
"'viz_type'",
":",
"o",
".",
"Slice",
".",
"viz_type",
",",
"}",
"for",
"o",
"in",
"qry",
".",
"all",
"(",
")",
"]",
"return",
"json_success",
"(",
"json",
".",
"dumps",
"(",
"payload",
",",
"default",
"=",
"utils",
".",
"json_int_dttm_ser",
")",
")"
] |
List of slices a user created, or faved
|
[
"List",
"of",
"slices",
"a",
"user",
"created",
"or",
"faved"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1977-L2010
|
21,467
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.created_slices
|
def created_slices(self, user_id=None):
"""List of slices created by this user"""
if not user_id:
user_id = g.user.id
Slice = models.Slice # noqa
qry = (
db.session.query(Slice)
.filter(
sqla.or_(
Slice.created_by_fk == user_id,
Slice.changed_by_fk == user_id,
),
)
.order_by(Slice.changed_on.desc())
)
payload = [{
'id': o.id,
'title': o.slice_name,
'url': o.slice_url,
'dttm': o.changed_on,
'viz_type': o.viz_type,
} for o in qry.all()]
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
|
python
|
def created_slices(self, user_id=None):
"""List of slices created by this user"""
if not user_id:
user_id = g.user.id
Slice = models.Slice # noqa
qry = (
db.session.query(Slice)
.filter(
sqla.or_(
Slice.created_by_fk == user_id,
Slice.changed_by_fk == user_id,
),
)
.order_by(Slice.changed_on.desc())
)
payload = [{
'id': o.id,
'title': o.slice_name,
'url': o.slice_url,
'dttm': o.changed_on,
'viz_type': o.viz_type,
} for o in qry.all()]
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
|
[
"def",
"created_slices",
"(",
"self",
",",
"user_id",
"=",
"None",
")",
":",
"if",
"not",
"user_id",
":",
"user_id",
"=",
"g",
".",
"user",
".",
"id",
"Slice",
"=",
"models",
".",
"Slice",
"# noqa",
"qry",
"=",
"(",
"db",
".",
"session",
".",
"query",
"(",
"Slice",
")",
".",
"filter",
"(",
"sqla",
".",
"or_",
"(",
"Slice",
".",
"created_by_fk",
"==",
"user_id",
",",
"Slice",
".",
"changed_by_fk",
"==",
"user_id",
",",
")",
",",
")",
".",
"order_by",
"(",
"Slice",
".",
"changed_on",
".",
"desc",
"(",
")",
")",
")",
"payload",
"=",
"[",
"{",
"'id'",
":",
"o",
".",
"id",
",",
"'title'",
":",
"o",
".",
"slice_name",
",",
"'url'",
":",
"o",
".",
"slice_url",
",",
"'dttm'",
":",
"o",
".",
"changed_on",
",",
"'viz_type'",
":",
"o",
".",
"viz_type",
",",
"}",
"for",
"o",
"in",
"qry",
".",
"all",
"(",
")",
"]",
"return",
"json_success",
"(",
"json",
".",
"dumps",
"(",
"payload",
",",
"default",
"=",
"utils",
".",
"json_int_dttm_ser",
")",
")"
] |
List of slices created by this user
|
[
"List",
"of",
"slices",
"created",
"by",
"this",
"user"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L2016-L2039
|
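A self-contained SQLAlchemy sketch of the "created or changed by this user" filter used in created_slices above, run against an in-memory SQLite database; the model and rows are illustrative only:

import sqlalchemy as sqla
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class Slice(Base):
    __tablename__ = 'slices'
    id = sqla.Column(sqla.Integer, primary_key=True)
    slice_name = sqla.Column(sqla.String)
    created_by_fk = sqla.Column(sqla.Integer)
    changed_by_fk = sqla.Column(sqla.Integer)

engine = sqla.create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add_all([
    Slice(slice_name='mine', created_by_fk=1, changed_by_fk=1),
    Slice(slice_name='edited by me', created_by_fk=2, changed_by_fk=1),
    Slice(slice_name='not mine', created_by_fk=2, changed_by_fk=2),
])
session.commit()

user_id = 1
qry = (
    session.query(Slice)
    .filter(sqla.or_(
        Slice.created_by_fk == user_id,
        Slice.changed_by_fk == user_id,
    ))
    .order_by(Slice.id)
)
print([s.slice_name for s in qry])   # ['mine', 'edited by me']
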
21,468
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.fave_slices
|
def fave_slices(self, user_id=None):
"""Favorite slices for a user"""
if not user_id:
user_id = g.user.id
qry = (
db.session.query(
models.Slice,
models.FavStar.dttm,
)
.join(
models.FavStar,
sqla.and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == 'slice',
models.Slice.id == models.FavStar.obj_id,
),
)
.order_by(
models.FavStar.dttm.desc(),
)
)
payload = []
for o in qry.all():
d = {
'id': o.Slice.id,
'title': o.Slice.slice_name,
'url': o.Slice.slice_url,
'dttm': o.dttm,
'viz_type': o.Slice.viz_type,
}
if o.Slice.created_by:
user = o.Slice.created_by
d['creator'] = str(user)
d['creator_url'] = '/superset/profile/{}/'.format(
user.username)
payload.append(d)
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
|
python
|
def fave_slices(self, user_id=None):
"""Favorite slices for a user"""
if not user_id:
user_id = g.user.id
qry = (
db.session.query(
models.Slice,
models.FavStar.dttm,
)
.join(
models.FavStar,
sqla.and_(
models.FavStar.user_id == int(user_id),
models.FavStar.class_name == 'slice',
models.Slice.id == models.FavStar.obj_id,
),
)
.order_by(
models.FavStar.dttm.desc(),
)
)
payload = []
for o in qry.all():
d = {
'id': o.Slice.id,
'title': o.Slice.slice_name,
'url': o.Slice.slice_url,
'dttm': o.dttm,
'viz_type': o.Slice.viz_type,
}
if o.Slice.created_by:
user = o.Slice.created_by
d['creator'] = str(user)
d['creator_url'] = '/superset/profile/{}/'.format(
user.username)
payload.append(d)
return json_success(
json.dumps(payload, default=utils.json_int_dttm_ser))
|
[
"def",
"fave_slices",
"(",
"self",
",",
"user_id",
"=",
"None",
")",
":",
"if",
"not",
"user_id",
":",
"user_id",
"=",
"g",
".",
"user",
".",
"id",
"qry",
"=",
"(",
"db",
".",
"session",
".",
"query",
"(",
"models",
".",
"Slice",
",",
"models",
".",
"FavStar",
".",
"dttm",
",",
")",
".",
"join",
"(",
"models",
".",
"FavStar",
",",
"sqla",
".",
"and_",
"(",
"models",
".",
"FavStar",
".",
"user_id",
"==",
"int",
"(",
"user_id",
")",
",",
"models",
".",
"FavStar",
".",
"class_name",
"==",
"'slice'",
",",
"models",
".",
"Slice",
".",
"id",
"==",
"models",
".",
"FavStar",
".",
"obj_id",
",",
")",
",",
")",
".",
"order_by",
"(",
"models",
".",
"FavStar",
".",
"dttm",
".",
"desc",
"(",
")",
",",
")",
")",
"payload",
"=",
"[",
"]",
"for",
"o",
"in",
"qry",
".",
"all",
"(",
")",
":",
"d",
"=",
"{",
"'id'",
":",
"o",
".",
"Slice",
".",
"id",
",",
"'title'",
":",
"o",
".",
"Slice",
".",
"slice_name",
",",
"'url'",
":",
"o",
".",
"Slice",
".",
"slice_url",
",",
"'dttm'",
":",
"o",
".",
"dttm",
",",
"'viz_type'",
":",
"o",
".",
"Slice",
".",
"viz_type",
",",
"}",
"if",
"o",
".",
"Slice",
".",
"created_by",
":",
"user",
"=",
"o",
".",
"Slice",
".",
"created_by",
"d",
"[",
"'creator'",
"]",
"=",
"str",
"(",
"user",
")",
"d",
"[",
"'creator_url'",
"]",
"=",
"'/superset/profile/{}/'",
".",
"format",
"(",
"user",
".",
"username",
")",
"payload",
".",
"append",
"(",
"d",
")",
"return",
"json_success",
"(",
"json",
".",
"dumps",
"(",
"payload",
",",
"default",
"=",
"utils",
".",
"json_int_dttm_ser",
")",
")"
] |
Favorite slices for a user
|
[
"Favorite",
"slices",
"for",
"a",
"user"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L2045-L2082
|
21,469
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.warm_up_cache
|
def warm_up_cache(self):
"""Warms up the cache for the slice or table.
Note for slices a force refresh occurs.
"""
slices = None
session = db.session()
slice_id = request.args.get('slice_id')
table_name = request.args.get('table_name')
db_name = request.args.get('db_name')
if not slice_id and not (table_name and db_name):
return json_error_response(__(
'Malformed request. slice_id or table_name and db_name '
'arguments are expected'), status=400)
if slice_id:
slices = session.query(models.Slice).filter_by(id=slice_id).all()
if not slices:
return json_error_response(__(
'Chart %(id)s not found', id=slice_id), status=404)
elif table_name and db_name:
SqlaTable = ConnectorRegistry.sources['table']
table = (
session.query(SqlaTable)
.join(models.Database)
.filter(
models.Database.database_name == db_name or
SqlaTable.table_name == table_name)
).first()
if not table:
return json_error_response(__(
"Table %(t)s wasn't found in the database %(d)s",
t=table_name, s=db_name), status=404)
slices = session.query(models.Slice).filter_by(
datasource_id=table.id,
datasource_type=table.type).all()
for slc in slices:
try:
form_data = get_form_data(slc.id, use_slice_data=True)[0]
obj = get_viz(
datasource_type=slc.datasource.type,
datasource_id=slc.datasource.id,
form_data=form_data,
force=True,
)
obj.get_json()
except Exception as e:
return json_error_response(utils.error_msg_from_exception(e))
return json_success(json.dumps(
[{'slice_id': slc.id, 'slice_name': slc.slice_name}
for slc in slices]))
|
python
|
def warm_up_cache(self):
"""Warms up the cache for the slice or table.
Note for slices a force refresh occurs.
"""
slices = None
session = db.session()
slice_id = request.args.get('slice_id')
table_name = request.args.get('table_name')
db_name = request.args.get('db_name')
if not slice_id and not (table_name and db_name):
return json_error_response(__(
'Malformed request. slice_id or table_name and db_name '
'arguments are expected'), status=400)
if slice_id:
slices = session.query(models.Slice).filter_by(id=slice_id).all()
if not slices:
return json_error_response(__(
'Chart %(id)s not found', id=slice_id), status=404)
elif table_name and db_name:
SqlaTable = ConnectorRegistry.sources['table']
table = (
session.query(SqlaTable)
.join(models.Database)
.filter(
models.Database.database_name == db_name or
SqlaTable.table_name == table_name)
).first()
if not table:
return json_error_response(__(
"Table %(t)s wasn't found in the database %(d)s",
t=table_name, s=db_name), status=404)
slices = session.query(models.Slice).filter_by(
datasource_id=table.id,
datasource_type=table.type).all()
for slc in slices:
try:
form_data = get_form_data(slc.id, use_slice_data=True)[0]
obj = get_viz(
datasource_type=slc.datasource.type,
datasource_id=slc.datasource.id,
form_data=form_data,
force=True,
)
obj.get_json()
except Exception as e:
return json_error_response(utils.error_msg_from_exception(e))
return json_success(json.dumps(
[{'slice_id': slc.id, 'slice_name': slc.slice_name}
for slc in slices]))
|
[
"def",
"warm_up_cache",
"(",
"self",
")",
":",
"slices",
"=",
"None",
"session",
"=",
"db",
".",
"session",
"(",
")",
"slice_id",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'slice_id'",
")",
"table_name",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'table_name'",
")",
"db_name",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'db_name'",
")",
"if",
"not",
"slice_id",
"and",
"not",
"(",
"table_name",
"and",
"db_name",
")",
":",
"return",
"json_error_response",
"(",
"__",
"(",
"'Malformed request. slice_id or table_name and db_name '",
"'arguments are expected'",
")",
",",
"status",
"=",
"400",
")",
"if",
"slice_id",
":",
"slices",
"=",
"session",
".",
"query",
"(",
"models",
".",
"Slice",
")",
".",
"filter_by",
"(",
"id",
"=",
"slice_id",
")",
".",
"all",
"(",
")",
"if",
"not",
"slices",
":",
"return",
"json_error_response",
"(",
"__",
"(",
"'Chart %(id)s not found'",
",",
"id",
"=",
"slice_id",
")",
",",
"status",
"=",
"404",
")",
"elif",
"table_name",
"and",
"db_name",
":",
"SqlaTable",
"=",
"ConnectorRegistry",
".",
"sources",
"[",
"'table'",
"]",
"table",
"=",
"(",
"session",
".",
"query",
"(",
"SqlaTable",
")",
".",
"join",
"(",
"models",
".",
"Database",
")",
".",
"filter",
"(",
"models",
".",
"Database",
".",
"database_name",
"==",
"db_name",
"or",
"SqlaTable",
".",
"table_name",
"==",
"table_name",
")",
")",
".",
"first",
"(",
")",
"if",
"not",
"table",
":",
"return",
"json_error_response",
"(",
"__",
"(",
"\"Table %(t)s wasn't found in the database %(d)s\"",
",",
"t",
"=",
"table_name",
",",
"s",
"=",
"db_name",
")",
",",
"status",
"=",
"404",
")",
"slices",
"=",
"session",
".",
"query",
"(",
"models",
".",
"Slice",
")",
".",
"filter_by",
"(",
"datasource_id",
"=",
"table",
".",
"id",
",",
"datasource_type",
"=",
"table",
".",
"type",
")",
".",
"all",
"(",
")",
"for",
"slc",
"in",
"slices",
":",
"try",
":",
"form_data",
"=",
"get_form_data",
"(",
"slc",
".",
"id",
",",
"use_slice_data",
"=",
"True",
")",
"[",
"0",
"]",
"obj",
"=",
"get_viz",
"(",
"datasource_type",
"=",
"slc",
".",
"datasource",
".",
"type",
",",
"datasource_id",
"=",
"slc",
".",
"datasource",
".",
"id",
",",
"form_data",
"=",
"form_data",
",",
"force",
"=",
"True",
",",
")",
"obj",
".",
"get_json",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"return",
"json_error_response",
"(",
"utils",
".",
"error_msg_from_exception",
"(",
"e",
")",
")",
"return",
"json_success",
"(",
"json",
".",
"dumps",
"(",
"[",
"{",
"'slice_id'",
":",
"slc",
".",
"id",
",",
"'slice_name'",
":",
"slc",
".",
"slice_name",
"}",
"for",
"slc",
"in",
"slices",
"]",
")",
")"
] |
Warms up the cache for the slice or table.
Note for slices a force refresh occurs.
|
[
"Warms",
"up",
"the",
"cache",
"for",
"the",
"slice",
"or",
"table",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L2087-L2138
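Editor's note on the warm_up_cache record above: its docstring and code describe a GET endpoint that accepts either a slice_id or a table_name/db_name pair. The sketch below shows one way a client might call it; only the query-parameter names come from the record, while the base URL, the /superset/warm_up_cache/ path, and the pre-authenticated requests.Session are assumptions for illustration.

import requests

BASE = 'http://localhost:8088'   # assumed local Superset instance
session = requests.Session()     # assumed to be already authenticated

# Warm the cache for a single chart, identified by slice_id.
resp = session.get(f'{BASE}/superset/warm_up_cache/', params={'slice_id': 42})
print(resp.json())

# Or warm every chart built on one table of one database.
resp = session.get(
    f'{BASE}/superset/warm_up_cache/',
    params={'table_name': 'birth_names', 'db_name': 'examples'},
)
print(resp.json())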
|
21,470
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.favstar
|
def favstar(self, class_name, obj_id, action):
"""Toggle favorite stars on Slices and Dashboard"""
session = db.session()
FavStar = models.FavStar # noqa
count = 0
favs = session.query(FavStar).filter_by(
class_name=class_name, obj_id=obj_id,
user_id=g.user.get_id()).all()
if action == 'select':
if not favs:
session.add(
FavStar(
class_name=class_name,
obj_id=obj_id,
user_id=g.user.get_id(),
dttm=datetime.now(),
),
)
count = 1
elif action == 'unselect':
for fav in favs:
session.delete(fav)
else:
count = len(favs)
session.commit()
return json_success(json.dumps({'count': count}))
|
python
|
def favstar(self, class_name, obj_id, action):
"""Toggle favorite stars on Slices and Dashboard"""
session = db.session()
FavStar = models.FavStar # noqa
count = 0
favs = session.query(FavStar).filter_by(
class_name=class_name, obj_id=obj_id,
user_id=g.user.get_id()).all()
if action == 'select':
if not favs:
session.add(
FavStar(
class_name=class_name,
obj_id=obj_id,
user_id=g.user.get_id(),
dttm=datetime.now(),
),
)
count = 1
elif action == 'unselect':
for fav in favs:
session.delete(fav)
else:
count = len(favs)
session.commit()
return json_success(json.dumps({'count': count}))
|
[
"def",
"favstar",
"(",
"self",
",",
"class_name",
",",
"obj_id",
",",
"action",
")",
":",
"session",
"=",
"db",
".",
"session",
"(",
")",
"FavStar",
"=",
"models",
".",
"FavStar",
"# noqa",
"count",
"=",
"0",
"favs",
"=",
"session",
".",
"query",
"(",
"FavStar",
")",
".",
"filter_by",
"(",
"class_name",
"=",
"class_name",
",",
"obj_id",
"=",
"obj_id",
",",
"user_id",
"=",
"g",
".",
"user",
".",
"get_id",
"(",
")",
")",
".",
"all",
"(",
")",
"if",
"action",
"==",
"'select'",
":",
"if",
"not",
"favs",
":",
"session",
".",
"add",
"(",
"FavStar",
"(",
"class_name",
"=",
"class_name",
",",
"obj_id",
"=",
"obj_id",
",",
"user_id",
"=",
"g",
".",
"user",
".",
"get_id",
"(",
")",
",",
"dttm",
"=",
"datetime",
".",
"now",
"(",
")",
",",
")",
",",
")",
"count",
"=",
"1",
"elif",
"action",
"==",
"'unselect'",
":",
"for",
"fav",
"in",
"favs",
":",
"session",
".",
"delete",
"(",
"fav",
")",
"else",
":",
"count",
"=",
"len",
"(",
"favs",
")",
"session",
".",
"commit",
"(",
")",
"return",
"json_success",
"(",
"json",
".",
"dumps",
"(",
"{",
"'count'",
":",
"count",
"}",
")",
")"
] |
Toggle favorite stars on Slices and Dashboard
|
[
"Toggle",
"favorite",
"stars",
"on",
"Slices",
"and",
"Dashboard"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L2142-L2167
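Editor's note on the favstar record above: it toggles a FavStar row based on class_name, obj_id and action ('select' / 'unselect') and returns the resulting count. A hedged usage sketch follows, assuming a route of the form /superset/favstar/<class_name>/<obj_id>/<action>/ (the route shape is inferred from the method signature, not stated in the record) and an already authenticated session.

import requests

BASE = 'http://localhost:8088'  # assumed local Superset instance
session = requests.Session()    # assumed to be already authenticated

# Star chart (slice) 42, then remove the star again; each response
# carries the resulting favorite count as {"count": ...}.
print(session.get(f'{BASE}/superset/favstar/slice/42/select/').json())
print(session.get(f'{BASE}/superset/favstar/slice/42/unselect/').json())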
|
21,471
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.dashboard
|
def dashboard(self, dashboard_id):
"""Server side rendering for a dashboard"""
session = db.session()
qry = session.query(models.Dashboard)
if dashboard_id.isdigit():
qry = qry.filter_by(id=int(dashboard_id))
else:
qry = qry.filter_by(slug=dashboard_id)
dash = qry.one_or_none()
if not dash:
abort(404)
datasources = set()
for slc in dash.slices:
datasource = slc.datasource
if datasource:
datasources.add(datasource)
if config.get('ENABLE_ACCESS_REQUEST'):
for datasource in datasources:
if datasource and not security_manager.datasource_access(datasource):
flash(
__(security_manager.get_datasource_access_error_msg(datasource)),
'danger')
return redirect(
'superset/request_access/?'
f'dashboard_id={dash.id}&')
dash_edit_perm = check_ownership(dash, raise_if_false=False) and \
security_manager.can_access('can_save_dash', 'Superset')
dash_save_perm = security_manager.can_access('can_save_dash', 'Superset')
superset_can_explore = security_manager.can_access('can_explore', 'Superset')
superset_can_csv = security_manager.can_access('can_csv', 'Superset')
slice_can_edit = security_manager.can_access('can_edit', 'SliceModelView')
standalone_mode = request.args.get('standalone') == 'true'
edit_mode = request.args.get('edit') == 'true'
# Hack to log the dashboard_id properly, even when getting a slug
@log_this
def dashboard(**kwargs): # noqa
pass
dashboard(
dashboard_id=dash.id,
dashboard_version='v2',
dash_edit_perm=dash_edit_perm,
edit_mode=edit_mode)
dashboard_data = dash.data
dashboard_data.update({
'standalone_mode': standalone_mode,
'dash_save_perm': dash_save_perm,
'dash_edit_perm': dash_edit_perm,
'superset_can_explore': superset_can_explore,
'superset_can_csv': superset_can_csv,
'slice_can_edit': slice_can_edit,
})
bootstrap_data = {
'user_id': g.user.get_id(),
'dashboard_data': dashboard_data,
'datasources': {ds.uid: ds.data for ds in datasources},
'common': self.common_bootsrap_payload(),
'editMode': edit_mode,
}
if request.args.get('json') == 'true':
return json_success(json.dumps(bootstrap_data))
return self.render_template(
'superset/dashboard.html',
entry='dashboard',
standalone_mode=standalone_mode,
title=dash.dashboard_title,
bootstrap_data=json.dumps(bootstrap_data),
)
|
python
|
def dashboard(self, dashboard_id):
"""Server side rendering for a dashboard"""
session = db.session()
qry = session.query(models.Dashboard)
if dashboard_id.isdigit():
qry = qry.filter_by(id=int(dashboard_id))
else:
qry = qry.filter_by(slug=dashboard_id)
dash = qry.one_or_none()
if not dash:
abort(404)
datasources = set()
for slc in dash.slices:
datasource = slc.datasource
if datasource:
datasources.add(datasource)
if config.get('ENABLE_ACCESS_REQUEST'):
for datasource in datasources:
if datasource and not security_manager.datasource_access(datasource):
flash(
__(security_manager.get_datasource_access_error_msg(datasource)),
'danger')
return redirect(
'superset/request_access/?'
f'dashboard_id={dash.id}&')
dash_edit_perm = check_ownership(dash, raise_if_false=False) and \
security_manager.can_access('can_save_dash', 'Superset')
dash_save_perm = security_manager.can_access('can_save_dash', 'Superset')
superset_can_explore = security_manager.can_access('can_explore', 'Superset')
superset_can_csv = security_manager.can_access('can_csv', 'Superset')
slice_can_edit = security_manager.can_access('can_edit', 'SliceModelView')
standalone_mode = request.args.get('standalone') == 'true'
edit_mode = request.args.get('edit') == 'true'
# Hack to log the dashboard_id properly, even when getting a slug
@log_this
def dashboard(**kwargs): # noqa
pass
dashboard(
dashboard_id=dash.id,
dashboard_version='v2',
dash_edit_perm=dash_edit_perm,
edit_mode=edit_mode)
dashboard_data = dash.data
dashboard_data.update({
'standalone_mode': standalone_mode,
'dash_save_perm': dash_save_perm,
'dash_edit_perm': dash_edit_perm,
'superset_can_explore': superset_can_explore,
'superset_can_csv': superset_can_csv,
'slice_can_edit': slice_can_edit,
})
bootstrap_data = {
'user_id': g.user.get_id(),
'dashboard_data': dashboard_data,
'datasources': {ds.uid: ds.data for ds in datasources},
'common': self.common_bootsrap_payload(),
'editMode': edit_mode,
}
if request.args.get('json') == 'true':
return json_success(json.dumps(bootstrap_data))
return self.render_template(
'superset/dashboard.html',
entry='dashboard',
standalone_mode=standalone_mode,
title=dash.dashboard_title,
bootstrap_data=json.dumps(bootstrap_data),
)
|
[
"def",
"dashboard",
"(",
"self",
",",
"dashboard_id",
")",
":",
"session",
"=",
"db",
".",
"session",
"(",
")",
"qry",
"=",
"session",
".",
"query",
"(",
"models",
".",
"Dashboard",
")",
"if",
"dashboard_id",
".",
"isdigit",
"(",
")",
":",
"qry",
"=",
"qry",
".",
"filter_by",
"(",
"id",
"=",
"int",
"(",
"dashboard_id",
")",
")",
"else",
":",
"qry",
"=",
"qry",
".",
"filter_by",
"(",
"slug",
"=",
"dashboard_id",
")",
"dash",
"=",
"qry",
".",
"one_or_none",
"(",
")",
"if",
"not",
"dash",
":",
"abort",
"(",
"404",
")",
"datasources",
"=",
"set",
"(",
")",
"for",
"slc",
"in",
"dash",
".",
"slices",
":",
"datasource",
"=",
"slc",
".",
"datasource",
"if",
"datasource",
":",
"datasources",
".",
"add",
"(",
"datasource",
")",
"if",
"config",
".",
"get",
"(",
"'ENABLE_ACCESS_REQUEST'",
")",
":",
"for",
"datasource",
"in",
"datasources",
":",
"if",
"datasource",
"and",
"not",
"security_manager",
".",
"datasource_access",
"(",
"datasource",
")",
":",
"flash",
"(",
"__",
"(",
"security_manager",
".",
"get_datasource_access_error_msg",
"(",
"datasource",
")",
")",
",",
"'danger'",
")",
"return",
"redirect",
"(",
"'superset/request_access/?'",
"f'dashboard_id={dash.id}&'",
")",
"dash_edit_perm",
"=",
"check_ownership",
"(",
"dash",
",",
"raise_if_false",
"=",
"False",
")",
"and",
"security_manager",
".",
"can_access",
"(",
"'can_save_dash'",
",",
"'Superset'",
")",
"dash_save_perm",
"=",
"security_manager",
".",
"can_access",
"(",
"'can_save_dash'",
",",
"'Superset'",
")",
"superset_can_explore",
"=",
"security_manager",
".",
"can_access",
"(",
"'can_explore'",
",",
"'Superset'",
")",
"superset_can_csv",
"=",
"security_manager",
".",
"can_access",
"(",
"'can_csv'",
",",
"'Superset'",
")",
"slice_can_edit",
"=",
"security_manager",
".",
"can_access",
"(",
"'can_edit'",
",",
"'SliceModelView'",
")",
"standalone_mode",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'standalone'",
")",
"==",
"'true'",
"edit_mode",
"=",
"request",
".",
"args",
".",
"get",
"(",
"'edit'",
")",
"==",
"'true'",
"# Hack to log the dashboard_id properly, even when getting a slug",
"@",
"log_this",
"def",
"dashboard",
"(",
"*",
"*",
"kwargs",
")",
":",
"# noqa",
"pass",
"dashboard",
"(",
"dashboard_id",
"=",
"dash",
".",
"id",
",",
"dashboard_version",
"=",
"'v2'",
",",
"dash_edit_perm",
"=",
"dash_edit_perm",
",",
"edit_mode",
"=",
"edit_mode",
")",
"dashboard_data",
"=",
"dash",
".",
"data",
"dashboard_data",
".",
"update",
"(",
"{",
"'standalone_mode'",
":",
"standalone_mode",
",",
"'dash_save_perm'",
":",
"dash_save_perm",
",",
"'dash_edit_perm'",
":",
"dash_edit_perm",
",",
"'superset_can_explore'",
":",
"superset_can_explore",
",",
"'superset_can_csv'",
":",
"superset_can_csv",
",",
"'slice_can_edit'",
":",
"slice_can_edit",
",",
"}",
")",
"bootstrap_data",
"=",
"{",
"'user_id'",
":",
"g",
".",
"user",
".",
"get_id",
"(",
")",
",",
"'dashboard_data'",
":",
"dashboard_data",
",",
"'datasources'",
":",
"{",
"ds",
".",
"uid",
":",
"ds",
".",
"data",
"for",
"ds",
"in",
"datasources",
"}",
",",
"'common'",
":",
"self",
".",
"common_bootsrap_payload",
"(",
")",
",",
"'editMode'",
":",
"edit_mode",
",",
"}",
"if",
"request",
".",
"args",
".",
"get",
"(",
"'json'",
")",
"==",
"'true'",
":",
"return",
"json_success",
"(",
"json",
".",
"dumps",
"(",
"bootstrap_data",
")",
")",
"return",
"self",
".",
"render_template",
"(",
"'superset/dashboard.html'",
",",
"entry",
"=",
"'dashboard'",
",",
"standalone_mode",
"=",
"standalone_mode",
",",
"title",
"=",
"dash",
".",
"dashboard_title",
",",
"bootstrap_data",
"=",
"json",
".",
"dumps",
"(",
"bootstrap_data",
")",
",",
")"
] |
Server side rendering for a dashboard
|
[
"Server",
"side",
"rendering",
"for",
"a",
"dashboard"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L2171-L2246
|
21,472
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.sync_druid_source
|
def sync_druid_source(self):
"""Syncs the druid datasource in main db with the provided config.
The endpoint takes 3 arguments:
user - user name to perform the operation as
cluster - name of the druid cluster
config - configuration stored in json that contains:
name: druid datasource name
dimensions: list of the dimensions, they become druid columns
with the type STRING
metrics_spec: list of metrics (dictionary). Metric consists of
2 attributes: type and name. Type can be count,
etc. `count` type is stored internally as longSum
other fields will be ignored.
Example: {
'name': 'test_click',
'metrics_spec': [{'type': 'count', 'name': 'count'}],
'dimensions': ['affiliate_id', 'campaign', 'first_seen']
}
"""
payload = request.get_json(force=True)
druid_config = payload['config']
user_name = payload['user']
cluster_name = payload['cluster']
user = security_manager.find_user(username=user_name)
DruidDatasource = ConnectorRegistry.sources['druid']
DruidCluster = DruidDatasource.cluster_class
if not user:
err_msg = __("Can't find User '%(name)s', please ask your admin "
'to create one.', name=user_name)
logging.error(err_msg)
return json_error_response(err_msg)
cluster = db.session.query(DruidCluster).filter_by(
cluster_name=cluster_name).first()
if not cluster:
err_msg = __("Can't find DruidCluster with cluster_name = "
"'%(name)s'", name=cluster_name)
logging.error(err_msg)
return json_error_response(err_msg)
try:
DruidDatasource.sync_to_db_from_config(
druid_config, user, cluster)
except Exception as e:
logging.exception(utils.error_msg_from_exception(e))
return json_error_response(utils.error_msg_from_exception(e))
return Response(status=201)
|
python
|
def sync_druid_source(self):
"""Syncs the druid datasource in main db with the provided config.
The endpoint takes 3 arguments:
user - user name to perform the operation as
cluster - name of the druid cluster
config - configuration stored in json that contains:
name: druid datasource name
dimensions: list of the dimensions, they become druid columns
with the type STRING
metrics_spec: list of metrics (dictionary). Metric consists of
2 attributes: type and name. Type can be count,
etc. `count` type is stored internally as longSum
other fields will be ignored.
Example: {
'name': 'test_click',
'metrics_spec': [{'type': 'count', 'name': 'count'}],
'dimensions': ['affiliate_id', 'campaign', 'first_seen']
}
"""
payload = request.get_json(force=True)
druid_config = payload['config']
user_name = payload['user']
cluster_name = payload['cluster']
user = security_manager.find_user(username=user_name)
DruidDatasource = ConnectorRegistry.sources['druid']
DruidCluster = DruidDatasource.cluster_class
if not user:
err_msg = __("Can't find User '%(name)s', please ask your admin "
'to create one.', name=user_name)
logging.error(err_msg)
return json_error_response(err_msg)
cluster = db.session.query(DruidCluster).filter_by(
cluster_name=cluster_name).first()
if not cluster:
err_msg = __("Can't find DruidCluster with cluster_name = "
"'%(name)s'", name=cluster_name)
logging.error(err_msg)
return json_error_response(err_msg)
try:
DruidDatasource.sync_to_db_from_config(
druid_config, user, cluster)
except Exception as e:
logging.exception(utils.error_msg_from_exception(e))
return json_error_response(utils.error_msg_from_exception(e))
return Response(status=201)
|
[
"def",
"sync_druid_source",
"(",
"self",
")",
":",
"payload",
"=",
"request",
".",
"get_json",
"(",
"force",
"=",
"True",
")",
"druid_config",
"=",
"payload",
"[",
"'config'",
"]",
"user_name",
"=",
"payload",
"[",
"'user'",
"]",
"cluster_name",
"=",
"payload",
"[",
"'cluster'",
"]",
"user",
"=",
"security_manager",
".",
"find_user",
"(",
"username",
"=",
"user_name",
")",
"DruidDatasource",
"=",
"ConnectorRegistry",
".",
"sources",
"[",
"'druid'",
"]",
"DruidCluster",
"=",
"DruidDatasource",
".",
"cluster_class",
"if",
"not",
"user",
":",
"err_msg",
"=",
"__",
"(",
"\"Can't find User '%(name)s', please ask your admin \"",
"'to create one.'",
",",
"name",
"=",
"user_name",
")",
"logging",
".",
"error",
"(",
"err_msg",
")",
"return",
"json_error_response",
"(",
"err_msg",
")",
"cluster",
"=",
"db",
".",
"session",
".",
"query",
"(",
"DruidCluster",
")",
".",
"filter_by",
"(",
"cluster_name",
"=",
"cluster_name",
")",
".",
"first",
"(",
")",
"if",
"not",
"cluster",
":",
"err_msg",
"=",
"__",
"(",
"\"Can't find DruidCluster with cluster_name = \"",
"\"'%(name)s'\"",
",",
"name",
"=",
"cluster_name",
")",
"logging",
".",
"error",
"(",
"err_msg",
")",
"return",
"json_error_response",
"(",
"err_msg",
")",
"try",
":",
"DruidDatasource",
".",
"sync_to_db_from_config",
"(",
"druid_config",
",",
"user",
",",
"cluster",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"exception",
"(",
"utils",
".",
"error_msg_from_exception",
"(",
"e",
")",
")",
"return",
"json_error_response",
"(",
"utils",
".",
"error_msg_from_exception",
"(",
"e",
")",
")",
"return",
"Response",
"(",
"status",
"=",
"201",
")"
] |
Syncs the druid datasource in main db with the provided config.
The endpoint takes 3 arguments:
user - user name to perform the operation as
cluster - name of the druid cluster
config - configuration stored in json that contains:
name: druid datasource name
dimensions: list of the dimensions, they become druid columns
with the type STRING
metrics_spec: list of metrics (dictionary). Metric consists of
2 attributes: type and name. Type can be count,
etc. `count` type is stored internally as longSum
other fields will be ignored.
Example: {
'name': 'test_click',
'metrics_spec': [{'type': 'count', 'name': 'count'}],
'dimensions': ['affiliate_id', 'campaign', 'first_seen']
}
|
[
"Syncs",
"the",
"druid",
"datasource",
"in",
"main",
"db",
"with",
"the",
"provided",
"config",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L2257-L2304
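Editor's note on the sync_druid_source record above: its docstring spells out the expected JSON payload (user, cluster, config) and gives an example config. The client-side sketch below mirrors that description; the payload keys and the example config come from the docstring, while the /superset/sync_druid/ path, host and credentials are assumptions for illustration.

import requests

BASE = 'http://localhost:8088'   # assumed local Superset instance
session = requests.Session()     # assumed to be already authenticated

payload = {
    'user': 'admin',              # user name to perform the operation as
    'cluster': 'druid_cluster_1', # hypothetical DruidCluster name
    'config': {                   # example config taken from the docstring
        'name': 'test_click',
        'metrics_spec': [{'type': 'count', 'name': 'count'}],
        'dimensions': ['affiliate_id', 'campaign', 'first_seen'],
    },
}
resp = session.post(f'{BASE}/superset/sync_druid/', json=payload)
print(resp.status_code)  # the view above returns 201 on success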
|
21,473
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.cache_key_exist
|
def cache_key_exist(self, key):
"""Returns if a key from cache exist"""
key_exist = True if cache.get(key) else False
status = 200 if key_exist else 404
return json_success(json.dumps({'key_exist': key_exist}),
status=status)
|
python
|
def cache_key_exist(self, key):
"""Returns if a key from cache exist"""
key_exist = True if cache.get(key) else False
status = 200 if key_exist else 404
return json_success(json.dumps({'key_exist': key_exist}),
status=status)
|
[
"def",
"cache_key_exist",
"(",
"self",
",",
"key",
")",
":",
"key_exist",
"=",
"True",
"if",
"cache",
".",
"get",
"(",
"key",
")",
"else",
"False",
"status",
"=",
"200",
"if",
"key_exist",
"else",
"404",
"return",
"json_success",
"(",
"json",
".",
"dumps",
"(",
"{",
"'key_exist'",
":",
"key_exist",
"}",
")",
",",
"status",
"=",
"status",
")"
] |
Returns if a key from cache exist
|
[
"Returns",
"if",
"a",
"key",
"from",
"cache",
"exist"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L2455-L2460
|
21,474
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.results
|
def results(self, key):
"""Serves a key off of the results backend"""
if not results_backend:
return json_error_response("Results backend isn't configured")
read_from_results_backend_start = now_as_float()
blob = results_backend.get(key)
stats_logger.timing(
'sqllab.query.results_backend_read',
now_as_float() - read_from_results_backend_start,
)
if not blob:
return json_error_response(
'Data could not be retrieved. '
'You may want to re-run the query.',
status=410,
)
query = db.session.query(Query).filter_by(results_key=key).one()
rejected_tables = security_manager.rejected_datasources(
query.sql, query.database, query.schema)
if rejected_tables:
return json_error_response(security_manager.get_table_access_error_msg(
'{}'.format(rejected_tables)), status=403)
payload = utils.zlib_decompress_to_string(blob)
display_limit = app.config.get('DEFAULT_SQLLAB_LIMIT', None)
if display_limit:
payload_json = json.loads(payload)
payload_json['data'] = payload_json['data'][:display_limit]
return json_success(
json.dumps(
payload_json,
default=utils.json_iso_dttm_ser,
ignore_nan=True,
),
)
|
python
|
def results(self, key):
"""Serves a key off of the results backend"""
if not results_backend:
return json_error_response("Results backend isn't configured")
read_from_results_backend_start = now_as_float()
blob = results_backend.get(key)
stats_logger.timing(
'sqllab.query.results_backend_read',
now_as_float() - read_from_results_backend_start,
)
if not blob:
return json_error_response(
'Data could not be retrieved. '
'You may want to re-run the query.',
status=410,
)
query = db.session.query(Query).filter_by(results_key=key).one()
rejected_tables = security_manager.rejected_datasources(
query.sql, query.database, query.schema)
if rejected_tables:
return json_error_response(security_manager.get_table_access_error_msg(
'{}'.format(rejected_tables)), status=403)
payload = utils.zlib_decompress_to_string(blob)
display_limit = app.config.get('DEFAULT_SQLLAB_LIMIT', None)
if display_limit:
payload_json = json.loads(payload)
payload_json['data'] = payload_json['data'][:display_limit]
return json_success(
json.dumps(
payload_json,
default=utils.json_iso_dttm_ser,
ignore_nan=True,
),
)
|
[
"def",
"results",
"(",
"self",
",",
"key",
")",
":",
"if",
"not",
"results_backend",
":",
"return",
"json_error_response",
"(",
"\"Results backend isn't configured\"",
")",
"read_from_results_backend_start",
"=",
"now_as_float",
"(",
")",
"blob",
"=",
"results_backend",
".",
"get",
"(",
"key",
")",
"stats_logger",
".",
"timing",
"(",
"'sqllab.query.results_backend_read'",
",",
"now_as_float",
"(",
")",
"-",
"read_from_results_backend_start",
",",
")",
"if",
"not",
"blob",
":",
"return",
"json_error_response",
"(",
"'Data could not be retrieved. '",
"'You may want to re-run the query.'",
",",
"status",
"=",
"410",
",",
")",
"query",
"=",
"db",
".",
"session",
".",
"query",
"(",
"Query",
")",
".",
"filter_by",
"(",
"results_key",
"=",
"key",
")",
".",
"one",
"(",
")",
"rejected_tables",
"=",
"security_manager",
".",
"rejected_datasources",
"(",
"query",
".",
"sql",
",",
"query",
".",
"database",
",",
"query",
".",
"schema",
")",
"if",
"rejected_tables",
":",
"return",
"json_error_response",
"(",
"security_manager",
".",
"get_table_access_error_msg",
"(",
"'{}'",
".",
"format",
"(",
"rejected_tables",
")",
")",
",",
"status",
"=",
"403",
")",
"payload",
"=",
"utils",
".",
"zlib_decompress_to_string",
"(",
"blob",
")",
"display_limit",
"=",
"app",
".",
"config",
".",
"get",
"(",
"'DEFAULT_SQLLAB_LIMIT'",
",",
"None",
")",
"if",
"display_limit",
":",
"payload_json",
"=",
"json",
".",
"loads",
"(",
"payload",
")",
"payload_json",
"[",
"'data'",
"]",
"=",
"payload_json",
"[",
"'data'",
"]",
"[",
":",
"display_limit",
"]",
"return",
"json_success",
"(",
"json",
".",
"dumps",
"(",
"payload_json",
",",
"default",
"=",
"utils",
".",
"json_iso_dttm_ser",
",",
"ignore_nan",
"=",
"True",
",",
")",
",",
")"
] |
Serves a key off of the results backend
|
[
"Serves",
"a",
"key",
"off",
"of",
"the",
"results",
"backend"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L2465-L2501
|
21,475
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.csv
|
def csv(self, client_id):
"""Download the query results as csv."""
logging.info('Exporting CSV file [{}]'.format(client_id))
query = (
db.session.query(Query)
.filter_by(client_id=client_id)
.one()
)
rejected_tables = security_manager.rejected_datasources(
query.sql, query.database, query.schema)
if rejected_tables:
flash(
security_manager.get_table_access_error_msg('{}'.format(rejected_tables)))
return redirect('/')
blob = None
if results_backend and query.results_key:
logging.info(
'Fetching CSV from results backend '
'[{}]'.format(query.results_key))
blob = results_backend.get(query.results_key)
if blob:
logging.info('Decompressing')
json_payload = utils.zlib_decompress_to_string(blob)
obj = json.loads(json_payload)
columns = [c['name'] for c in obj['columns']]
df = pd.DataFrame.from_records(obj['data'], columns=columns)
logging.info('Using pandas to convert to CSV')
csv = df.to_csv(index=False, **config.get('CSV_EXPORT'))
else:
logging.info('Running a query to turn into CSV')
sql = query.select_sql or query.executed_sql
df = query.database.get_df(sql, query.schema)
# TODO(bkyryliuk): add compression=gzip for big files.
csv = df.to_csv(index=False, **config.get('CSV_EXPORT'))
response = Response(csv, mimetype='text/csv')
response.headers['Content-Disposition'] = f'attachment; filename={query.name}.csv'
logging.info('Ready to return response')
return response
|
python
|
def csv(self, client_id):
"""Download the query results as csv."""
logging.info('Exporting CSV file [{}]'.format(client_id))
query = (
db.session.query(Query)
.filter_by(client_id=client_id)
.one()
)
rejected_tables = security_manager.rejected_datasources(
query.sql, query.database, query.schema)
if rejected_tables:
flash(
security_manager.get_table_access_error_msg('{}'.format(rejected_tables)))
return redirect('/')
blob = None
if results_backend and query.results_key:
logging.info(
'Fetching CSV from results backend '
'[{}]'.format(query.results_key))
blob = results_backend.get(query.results_key)
if blob:
logging.info('Decompressing')
json_payload = utils.zlib_decompress_to_string(blob)
obj = json.loads(json_payload)
columns = [c['name'] for c in obj['columns']]
df = pd.DataFrame.from_records(obj['data'], columns=columns)
logging.info('Using pandas to convert to CSV')
csv = df.to_csv(index=False, **config.get('CSV_EXPORT'))
else:
logging.info('Running a query to turn into CSV')
sql = query.select_sql or query.executed_sql
df = query.database.get_df(sql, query.schema)
# TODO(bkyryliuk): add compression=gzip for big files.
csv = df.to_csv(index=False, **config.get('CSV_EXPORT'))
response = Response(csv, mimetype='text/csv')
response.headers['Content-Disposition'] = f'attachment; filename={query.name}.csv'
logging.info('Ready to return response')
return response
|
[
"def",
"csv",
"(",
"self",
",",
"client_id",
")",
":",
"logging",
".",
"info",
"(",
"'Exporting CSV file [{}]'",
".",
"format",
"(",
"client_id",
")",
")",
"query",
"=",
"(",
"db",
".",
"session",
".",
"query",
"(",
"Query",
")",
".",
"filter_by",
"(",
"client_id",
"=",
"client_id",
")",
".",
"one",
"(",
")",
")",
"rejected_tables",
"=",
"security_manager",
".",
"rejected_datasources",
"(",
"query",
".",
"sql",
",",
"query",
".",
"database",
",",
"query",
".",
"schema",
")",
"if",
"rejected_tables",
":",
"flash",
"(",
"security_manager",
".",
"get_table_access_error_msg",
"(",
"'{}'",
".",
"format",
"(",
"rejected_tables",
")",
")",
")",
"return",
"redirect",
"(",
"'/'",
")",
"blob",
"=",
"None",
"if",
"results_backend",
"and",
"query",
".",
"results_key",
":",
"logging",
".",
"info",
"(",
"'Fetching CSV from results backend '",
"'[{}]'",
".",
"format",
"(",
"query",
".",
"results_key",
")",
")",
"blob",
"=",
"results_backend",
".",
"get",
"(",
"query",
".",
"results_key",
")",
"if",
"blob",
":",
"logging",
".",
"info",
"(",
"'Decompressing'",
")",
"json_payload",
"=",
"utils",
".",
"zlib_decompress_to_string",
"(",
"blob",
")",
"obj",
"=",
"json",
".",
"loads",
"(",
"json_payload",
")",
"columns",
"=",
"[",
"c",
"[",
"'name'",
"]",
"for",
"c",
"in",
"obj",
"[",
"'columns'",
"]",
"]",
"df",
"=",
"pd",
".",
"DataFrame",
".",
"from_records",
"(",
"obj",
"[",
"'data'",
"]",
",",
"columns",
"=",
"columns",
")",
"logging",
".",
"info",
"(",
"'Using pandas to convert to CSV'",
")",
"csv",
"=",
"df",
".",
"to_csv",
"(",
"index",
"=",
"False",
",",
"*",
"*",
"config",
".",
"get",
"(",
"'CSV_EXPORT'",
")",
")",
"else",
":",
"logging",
".",
"info",
"(",
"'Running a query to turn into CSV'",
")",
"sql",
"=",
"query",
".",
"select_sql",
"or",
"query",
".",
"executed_sql",
"df",
"=",
"query",
".",
"database",
".",
"get_df",
"(",
"sql",
",",
"query",
".",
"schema",
")",
"# TODO(bkyryliuk): add compression=gzip for big files.",
"csv",
"=",
"df",
".",
"to_csv",
"(",
"index",
"=",
"False",
",",
"*",
"*",
"config",
".",
"get",
"(",
"'CSV_EXPORT'",
")",
")",
"response",
"=",
"Response",
"(",
"csv",
",",
"mimetype",
"=",
"'text/csv'",
")",
"response",
".",
"headers",
"[",
"'Content-Disposition'",
"]",
"=",
"f'attachment; filename={query.name}.csv'",
"logging",
".",
"info",
"(",
"'Ready to return response'",
")",
"return",
"response"
] |
Download the query results as csv.
|
[
"Download",
"the",
"query",
"results",
"as",
"csv",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L2654-L2692
|
21,476
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.queries
|
def queries(self, last_updated_ms):
"""Get the updated queries."""
stats_logger.incr('queries')
if not g.user.get_id():
return json_error_response(
'Please login to access the queries.', status=403)
# Unix time, milliseconds.
last_updated_ms_int = int(float(last_updated_ms)) if last_updated_ms else 0
# UTC date time, same that is stored in the DB.
last_updated_dt = utils.EPOCH + timedelta(seconds=last_updated_ms_int / 1000)
sql_queries = (
db.session.query(Query)
.filter(
Query.user_id == g.user.get_id(),
Query.changed_on >= last_updated_dt,
)
.all()
)
dict_queries = {q.client_id: q.to_dict() for q in sql_queries}
now = int(round(time.time() * 1000))
unfinished_states = [
QueryStatus.PENDING,
QueryStatus.RUNNING,
]
queries_to_timeout = [
client_id for client_id, query_dict in dict_queries.items()
if (
query_dict['state'] in unfinished_states and (
now - query_dict['startDttm'] >
config.get('SQLLAB_ASYNC_TIME_LIMIT_SEC') * 1000
)
)
]
if queries_to_timeout:
update(Query).where(
and_(
Query.user_id == g.user.get_id(),
Query.client_id in queries_to_timeout,
),
).values(state=QueryStatus.TIMED_OUT)
for client_id in queries_to_timeout:
dict_queries[client_id]['status'] = QueryStatus.TIMED_OUT
return json_success(
json.dumps(dict_queries, default=utils.json_int_dttm_ser))
|
python
|
def queries(self, last_updated_ms):
"""Get the updated queries."""
stats_logger.incr('queries')
if not g.user.get_id():
return json_error_response(
'Please login to access the queries.', status=403)
# Unix time, milliseconds.
last_updated_ms_int = int(float(last_updated_ms)) if last_updated_ms else 0
# UTC date time, same that is stored in the DB.
last_updated_dt = utils.EPOCH + timedelta(seconds=last_updated_ms_int / 1000)
sql_queries = (
db.session.query(Query)
.filter(
Query.user_id == g.user.get_id(),
Query.changed_on >= last_updated_dt,
)
.all()
)
dict_queries = {q.client_id: q.to_dict() for q in sql_queries}
now = int(round(time.time() * 1000))
unfinished_states = [
QueryStatus.PENDING,
QueryStatus.RUNNING,
]
queries_to_timeout = [
client_id for client_id, query_dict in dict_queries.items()
if (
query_dict['state'] in unfinished_states and (
now - query_dict['startDttm'] >
config.get('SQLLAB_ASYNC_TIME_LIMIT_SEC') * 1000
)
)
]
if queries_to_timeout:
update(Query).where(
and_(
Query.user_id == g.user.get_id(),
Query.client_id in queries_to_timeout,
),
).values(state=QueryStatus.TIMED_OUT)
for client_id in queries_to_timeout:
dict_queries[client_id]['status'] = QueryStatus.TIMED_OUT
return json_success(
json.dumps(dict_queries, default=utils.json_int_dttm_ser))
|
[
"def",
"queries",
"(",
"self",
",",
"last_updated_ms",
")",
":",
"stats_logger",
".",
"incr",
"(",
"'queries'",
")",
"if",
"not",
"g",
".",
"user",
".",
"get_id",
"(",
")",
":",
"return",
"json_error_response",
"(",
"'Please login to access the queries.'",
",",
"status",
"=",
"403",
")",
"# Unix time, milliseconds.",
"last_updated_ms_int",
"=",
"int",
"(",
"float",
"(",
"last_updated_ms",
")",
")",
"if",
"last_updated_ms",
"else",
"0",
"# UTC date time, same that is stored in the DB.",
"last_updated_dt",
"=",
"utils",
".",
"EPOCH",
"+",
"timedelta",
"(",
"seconds",
"=",
"last_updated_ms_int",
"/",
"1000",
")",
"sql_queries",
"=",
"(",
"db",
".",
"session",
".",
"query",
"(",
"Query",
")",
".",
"filter",
"(",
"Query",
".",
"user_id",
"==",
"g",
".",
"user",
".",
"get_id",
"(",
")",
",",
"Query",
".",
"changed_on",
">=",
"last_updated_dt",
",",
")",
".",
"all",
"(",
")",
")",
"dict_queries",
"=",
"{",
"q",
".",
"client_id",
":",
"q",
".",
"to_dict",
"(",
")",
"for",
"q",
"in",
"sql_queries",
"}",
"now",
"=",
"int",
"(",
"round",
"(",
"time",
".",
"time",
"(",
")",
"*",
"1000",
")",
")",
"unfinished_states",
"=",
"[",
"QueryStatus",
".",
"PENDING",
",",
"QueryStatus",
".",
"RUNNING",
",",
"]",
"queries_to_timeout",
"=",
"[",
"client_id",
"for",
"client_id",
",",
"query_dict",
"in",
"dict_queries",
".",
"items",
"(",
")",
"if",
"(",
"query_dict",
"[",
"'state'",
"]",
"in",
"unfinished_states",
"and",
"(",
"now",
"-",
"query_dict",
"[",
"'startDttm'",
"]",
">",
"config",
".",
"get",
"(",
"'SQLLAB_ASYNC_TIME_LIMIT_SEC'",
")",
"*",
"1000",
")",
")",
"]",
"if",
"queries_to_timeout",
":",
"update",
"(",
"Query",
")",
".",
"where",
"(",
"and_",
"(",
"Query",
".",
"user_id",
"==",
"g",
".",
"user",
".",
"get_id",
"(",
")",
",",
"Query",
".",
"client_id",
"in",
"queries_to_timeout",
",",
")",
",",
")",
".",
"values",
"(",
"state",
"=",
"QueryStatus",
".",
"TIMED_OUT",
")",
"for",
"client_id",
"in",
"queries_to_timeout",
":",
"dict_queries",
"[",
"client_id",
"]",
"[",
"'status'",
"]",
"=",
"QueryStatus",
".",
"TIMED_OUT",
"return",
"json_success",
"(",
"json",
".",
"dumps",
"(",
"dict_queries",
",",
"default",
"=",
"utils",
".",
"json_int_dttm_ser",
")",
")"
] |
Get the updated queries.
|
[
"Get",
"the",
"updated",
"queries",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L2714-L2766
|
21,477
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.welcome
|
def welcome(self):
"""Personalized welcome page"""
if not g.user or not g.user.get_id():
return redirect(appbuilder.get_url_for_login)
welcome_dashboard_id = (
db.session
.query(UserAttribute.welcome_dashboard_id)
.filter_by(user_id=g.user.get_id())
.scalar()
)
if welcome_dashboard_id:
return self.dashboard(str(welcome_dashboard_id))
payload = {
'user': bootstrap_user_data(),
'common': self.common_bootsrap_payload(),
}
return self.render_template(
'superset/basic.html',
entry='welcome',
title='Superset',
bootstrap_data=json.dumps(payload, default=utils.json_iso_dttm_ser),
)
|
python
|
def welcome(self):
"""Personalized welcome page"""
if not g.user or not g.user.get_id():
return redirect(appbuilder.get_url_for_login)
welcome_dashboard_id = (
db.session
.query(UserAttribute.welcome_dashboard_id)
.filter_by(user_id=g.user.get_id())
.scalar()
)
if welcome_dashboard_id:
return self.dashboard(str(welcome_dashboard_id))
payload = {
'user': bootstrap_user_data(),
'common': self.common_bootsrap_payload(),
}
return self.render_template(
'superset/basic.html',
entry='welcome',
title='Superset',
bootstrap_data=json.dumps(payload, default=utils.json_iso_dttm_ser),
)
|
[
"def",
"welcome",
"(",
"self",
")",
":",
"if",
"not",
"g",
".",
"user",
"or",
"not",
"g",
".",
"user",
".",
"get_id",
"(",
")",
":",
"return",
"redirect",
"(",
"appbuilder",
".",
"get_url_for_login",
")",
"welcome_dashboard_id",
"=",
"(",
"db",
".",
"session",
".",
"query",
"(",
"UserAttribute",
".",
"welcome_dashboard_id",
")",
".",
"filter_by",
"(",
"user_id",
"=",
"g",
".",
"user",
".",
"get_id",
"(",
")",
")",
".",
"scalar",
"(",
")",
")",
"if",
"welcome_dashboard_id",
":",
"return",
"self",
".",
"dashboard",
"(",
"str",
"(",
"welcome_dashboard_id",
")",
")",
"payload",
"=",
"{",
"'user'",
":",
"bootstrap_user_data",
"(",
")",
",",
"'common'",
":",
"self",
".",
"common_bootsrap_payload",
"(",
")",
",",
"}",
"return",
"self",
".",
"render_template",
"(",
"'superset/basic.html'",
",",
"entry",
"=",
"'welcome'",
",",
"title",
"=",
"'Superset'",
",",
"bootstrap_data",
"=",
"json",
".",
"dumps",
"(",
"payload",
",",
"default",
"=",
"utils",
".",
"json_iso_dttm_ser",
")",
",",
")"
] |
Personalized welcome page
|
[
"Personalized",
"welcome",
"page"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L2838-L2862
|
21,478
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.profile
|
def profile(self, username):
"""User profile page"""
if not username and g.user:
username = g.user.username
payload = {
'user': bootstrap_user_data(username, include_perms=True),
'common': self.common_bootsrap_payload(),
}
return self.render_template(
'superset/basic.html',
title=_("%(user)s's profile", user=username),
entry='profile',
bootstrap_data=json.dumps(payload, default=utils.json_iso_dttm_ser),
)
|
python
|
def profile(self, username):
"""User profile page"""
if not username and g.user:
username = g.user.username
payload = {
'user': bootstrap_user_data(username, include_perms=True),
'common': self.common_bootsrap_payload(),
}
return self.render_template(
'superset/basic.html',
title=_("%(user)s's profile", user=username),
entry='profile',
bootstrap_data=json.dumps(payload, default=utils.json_iso_dttm_ser),
)
|
[
"def",
"profile",
"(",
"self",
",",
"username",
")",
":",
"if",
"not",
"username",
"and",
"g",
".",
"user",
":",
"username",
"=",
"g",
".",
"user",
".",
"username",
"payload",
"=",
"{",
"'user'",
":",
"bootstrap_user_data",
"(",
"username",
",",
"include_perms",
"=",
"True",
")",
",",
"'common'",
":",
"self",
".",
"common_bootsrap_payload",
"(",
")",
",",
"}",
"return",
"self",
".",
"render_template",
"(",
"'superset/basic.html'",
",",
"title",
"=",
"_",
"(",
"\"%(user)s's profile\"",
",",
"user",
"=",
"username",
")",
",",
"entry",
"=",
"'profile'",
",",
"bootstrap_data",
"=",
"json",
".",
"dumps",
"(",
"payload",
",",
"default",
"=",
"utils",
".",
"json_iso_dttm_ser",
")",
",",
")"
] |
User profile page
|
[
"User",
"profile",
"page"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L2866-L2881
|
21,479
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.slice_query
|
def slice_query(self, slice_id):
"""
This method exposes an API endpoint to
get the database query string for this slice
"""
viz_obj = get_viz(slice_id)
security_manager.assert_datasource_permission(viz_obj.datasource)
return self.get_query_string_response(viz_obj)
|
python
|
def slice_query(self, slice_id):
"""
This method exposes an API endpoint to
get the database query string for this slice
"""
viz_obj = get_viz(slice_id)
security_manager.assert_datasource_permission(viz_obj.datasource)
return self.get_query_string_response(viz_obj)
|
[
"def",
"slice_query",
"(",
"self",
",",
"slice_id",
")",
":",
"viz_obj",
"=",
"get_viz",
"(",
"slice_id",
")",
"security_manager",
".",
"assert_datasource_permission",
"(",
"viz_obj",
".",
"datasource",
")",
"return",
"self",
".",
"get_query_string_response",
"(",
"viz_obj",
")"
] |
This method exposes an API endpoint to
get the database query string for this slice
|
[
"This",
"method",
"exposes",
"an",
"API",
"endpoint",
"to",
"get",
"the",
"database",
"query",
"string",
"for",
"this",
"slice"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L2901-L2908
|
21,480
|
apache/incubator-superset
|
superset/views/core.py
|
Superset.schemas_access_for_csv_upload
|
def schemas_access_for_csv_upload(self):
"""
This method exposes an API endpoint to
get the schema access control settings for csv upload in this database
"""
if not request.args.get('db_id'):
return json_error_response(
'No database is allowed for your csv upload')
db_id = int(request.args.get('db_id'))
database = (
db.session
.query(models.Database)
.filter_by(id=db_id)
.one()
)
try:
schemas_allowed = database.get_schema_access_for_csv_upload()
if (security_manager.database_access(database) or
security_manager.all_datasource_access()):
return self.json_response(schemas_allowed)
# the list schemas_allowed should not be empty here
# and the list schemas_allowed_processed returned from security_manager
# should not be empty either,
# otherwise the database should have been filtered out
# in CsvToDatabaseForm
schemas_allowed_processed = security_manager.schemas_accessible_by_user(
database, schemas_allowed, False)
return self.json_response(schemas_allowed_processed)
except Exception:
return json_error_response((
'Failed to fetch schemas allowed for csv upload in this database! '
'Please contact Superset Admin!\n\n'
'The error message returned was:\n{}').format(traceback.format_exc()))
|
python
|
def schemas_access_for_csv_upload(self):
"""
This method exposes an API endpoint to
get the schema access control settings for csv upload in this database
"""
if not request.args.get('db_id'):
return json_error_response(
'No database is allowed for your csv upload')
db_id = int(request.args.get('db_id'))
database = (
db.session
.query(models.Database)
.filter_by(id=db_id)
.one()
)
try:
schemas_allowed = database.get_schema_access_for_csv_upload()
if (security_manager.database_access(database) or
security_manager.all_datasource_access()):
return self.json_response(schemas_allowed)
# the list schemas_allowed should not be empty here
# and the list schemas_allowed_processed returned from security_manager
# should not be empty either,
# otherwise the database should have been filtered out
# in CsvToDatabaseForm
schemas_allowed_processed = security_manager.schemas_accessible_by_user(
database, schemas_allowed, False)
return self.json_response(schemas_allowed_processed)
except Exception:
return json_error_response((
'Failed to fetch schemas allowed for csv upload in this database! '
'Please contact Superset Admin!\n\n'
'The error message returned was:\n{}').format(traceback.format_exc()))
|
[
"def",
"schemas_access_for_csv_upload",
"(",
"self",
")",
":",
"if",
"not",
"request",
".",
"args",
".",
"get",
"(",
"'db_id'",
")",
":",
"return",
"json_error_response",
"(",
"'No database is allowed for your csv upload'",
")",
"db_id",
"=",
"int",
"(",
"request",
".",
"args",
".",
"get",
"(",
"'db_id'",
")",
")",
"database",
"=",
"(",
"db",
".",
"session",
".",
"query",
"(",
"models",
".",
"Database",
")",
".",
"filter_by",
"(",
"id",
"=",
"db_id",
")",
".",
"one",
"(",
")",
")",
"try",
":",
"schemas_allowed",
"=",
"database",
".",
"get_schema_access_for_csv_upload",
"(",
")",
"if",
"(",
"security_manager",
".",
"database_access",
"(",
"database",
")",
"or",
"security_manager",
".",
"all_datasource_access",
"(",
")",
")",
":",
"return",
"self",
".",
"json_response",
"(",
"schemas_allowed",
")",
"# the list schemas_allowed should not be empty here",
"# and the list schemas_allowed_processed returned from security_manager",
"# should not be empty either,",
"# otherwise the database should have been filtered out",
"# in CsvToDatabaseForm",
"schemas_allowed_processed",
"=",
"security_manager",
".",
"schemas_accessible_by_user",
"(",
"database",
",",
"schemas_allowed",
",",
"False",
")",
"return",
"self",
".",
"json_response",
"(",
"schemas_allowed_processed",
")",
"except",
"Exception",
":",
"return",
"json_error_response",
"(",
"(",
"'Failed to fetch schemas allowed for csv upload in this database! '",
"'Please contact Superset Admin!\\n\\n'",
"'The error message returned was:\\n{}'",
")",
".",
"format",
"(",
"traceback",
".",
"format_exc",
"(",
")",
")",
")"
] |
This method exposes an API endpoint to
get the schema access control settings for csv upload in this database
|
[
"This",
"method",
"exposes",
"an",
"API",
"endpoint",
"to",
"get",
"the",
"schema",
"access",
"control",
"settings",
"for",
"csv",
"upload",
"in",
"this",
"database"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L2913-L2946
|
21,481
|
apache/incubator-superset
|
superset/utils/decorators.py
|
etag_cache
|
def etag_cache(max_age, check_perms=bool):
"""
A decorator for caching views and handling etag conditional requests.
The decorator adds headers to GET requests that help with caching: Last-
Modified, Expires and ETag. It also handles conditional requests, when the
client send an If-Matches header.
If a cache is set, the decorator will cache GET responses, bypassing the
dataframe serialization. POST requests will still benefit from the
dataframe cache for requests that produce the same SQL.
"""
def decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
# check if the user can access the resource
check_perms(*args, **kwargs)
# for POST requests we can't set cache headers, use the response
# cache nor use conditional requests; this will still use the
# dataframe cache in `superset/viz.py`, though.
if request.method == 'POST':
return f(*args, **kwargs)
response = None
if cache:
try:
# build the cache key from the function arguments and any
# other additional GET arguments (like `form_data`, eg).
key_args = list(args)
key_kwargs = kwargs.copy()
key_kwargs.update(request.args)
cache_key = wrapper.make_cache_key(f, *key_args, **key_kwargs)
response = cache.get(cache_key)
except Exception: # pylint: disable=broad-except
if app.debug:
raise
logging.exception('Exception possibly due to cache backend.')
# if no response was cached, compute it using the wrapped function
if response is None:
response = f(*args, **kwargs)
# add headers for caching: Last Modified, Expires and ETag
response.cache_control.public = True
response.last_modified = datetime.utcnow()
expiration = max_age if max_age != 0 else FAR_FUTURE
response.expires = \
response.last_modified + timedelta(seconds=expiration)
response.add_etag()
# if we have a cache, store the response from the request
if cache:
try:
cache.set(cache_key, response, timeout=max_age)
except Exception: # pylint: disable=broad-except
if app.debug:
raise
logging.exception('Exception possibly due to cache backend.')
return response.make_conditional(request)
if cache:
wrapper.uncached = f
wrapper.cache_timeout = max_age
wrapper.make_cache_key = \
cache._memoize_make_cache_key( # pylint: disable=protected-access
make_name=None, timeout=max_age)
return wrapper
return decorator
|
python
|
def etag_cache(max_age, check_perms=bool):
"""
A decorator for caching views and handling etag conditional requests.
The decorator adds headers to GET requests that help with caching: Last-
Modified, Expires and ETag. It also handles conditional requests, when the
client send an If-Matches header.
If a cache is set, the decorator will cache GET responses, bypassing the
dataframe serialization. POST requests will still benefit from the
dataframe cache for requests that produce the same SQL.
"""
def decorator(f):
@wraps(f)
def wrapper(*args, **kwargs):
# check if the user can access the resource
check_perms(*args, **kwargs)
# for POST requests we can't set cache headers, use the response
# cache nor use conditional requests; this will still use the
# dataframe cache in `superset/viz.py`, though.
if request.method == 'POST':
return f(*args, **kwargs)
response = None
if cache:
try:
# build the cache key from the function arguments and any
# other additional GET arguments (like `form_data`, eg).
key_args = list(args)
key_kwargs = kwargs.copy()
key_kwargs.update(request.args)
cache_key = wrapper.make_cache_key(f, *key_args, **key_kwargs)
response = cache.get(cache_key)
except Exception: # pylint: disable=broad-except
if app.debug:
raise
logging.exception('Exception possibly due to cache backend.')
# if no response was cached, compute it using the wrapped function
if response is None:
response = f(*args, **kwargs)
# add headers for caching: Last Modified, Expires and ETag
response.cache_control.public = True
response.last_modified = datetime.utcnow()
expiration = max_age if max_age != 0 else FAR_FUTURE
response.expires = \
response.last_modified + timedelta(seconds=expiration)
response.add_etag()
# if we have a cache, store the response from the request
if cache:
try:
cache.set(cache_key, response, timeout=max_age)
except Exception: # pylint: disable=broad-except
if app.debug:
raise
logging.exception('Exception possibly due to cache backend.')
return response.make_conditional(request)
if cache:
wrapper.uncached = f
wrapper.cache_timeout = max_age
wrapper.make_cache_key = \
cache._memoize_make_cache_key( # pylint: disable=protected-access
make_name=None, timeout=max_age)
return wrapper
return decorator
|
[
"def",
"etag_cache",
"(",
"max_age",
",",
"check_perms",
"=",
"bool",
")",
":",
"def",
"decorator",
"(",
"f",
")",
":",
"@",
"wraps",
"(",
"f",
")",
"def",
"wrapper",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"# check if the user can access the resource",
"check_perms",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# for POST requests we can't set cache headers, use the response",
"# cache nor use conditional requests; this will still use the",
"# dataframe cache in `superset/viz.py`, though.",
"if",
"request",
".",
"method",
"==",
"'POST'",
":",
"return",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"response",
"=",
"None",
"if",
"cache",
":",
"try",
":",
"# build the cache key from the function arguments and any",
"# other additional GET arguments (like `form_data`, eg).",
"key_args",
"=",
"list",
"(",
"args",
")",
"key_kwargs",
"=",
"kwargs",
".",
"copy",
"(",
")",
"key_kwargs",
".",
"update",
"(",
"request",
".",
"args",
")",
"cache_key",
"=",
"wrapper",
".",
"make_cache_key",
"(",
"f",
",",
"*",
"key_args",
",",
"*",
"*",
"key_kwargs",
")",
"response",
"=",
"cache",
".",
"get",
"(",
"cache_key",
")",
"except",
"Exception",
":",
"# pylint: disable=broad-except",
"if",
"app",
".",
"debug",
":",
"raise",
"logging",
".",
"exception",
"(",
"'Exception possibly due to cache backend.'",
")",
"# if no response was cached, compute it using the wrapped function",
"if",
"response",
"is",
"None",
":",
"response",
"=",
"f",
"(",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"# add headers for caching: Last Modified, Expires and ETag",
"response",
".",
"cache_control",
".",
"public",
"=",
"True",
"response",
".",
"last_modified",
"=",
"datetime",
".",
"utcnow",
"(",
")",
"expiration",
"=",
"max_age",
"if",
"max_age",
"!=",
"0",
"else",
"FAR_FUTURE",
"response",
".",
"expires",
"=",
"response",
".",
"last_modified",
"+",
"timedelta",
"(",
"seconds",
"=",
"expiration",
")",
"response",
".",
"add_etag",
"(",
")",
"# if we have a cache, store the response from the request",
"if",
"cache",
":",
"try",
":",
"cache",
".",
"set",
"(",
"cache_key",
",",
"response",
",",
"timeout",
"=",
"max_age",
")",
"except",
"Exception",
":",
"# pylint: disable=broad-except",
"if",
"app",
".",
"debug",
":",
"raise",
"logging",
".",
"exception",
"(",
"'Exception possibly due to cache backend.'",
")",
"return",
"response",
".",
"make_conditional",
"(",
"request",
")",
"if",
"cache",
":",
"wrapper",
".",
"uncached",
"=",
"f",
"wrapper",
".",
"cache_timeout",
"=",
"max_age",
"wrapper",
".",
"make_cache_key",
"=",
"cache",
".",
"_memoize_make_cache_key",
"(",
"# pylint: disable=protected-access",
"make_name",
"=",
"None",
",",
"timeout",
"=",
"max_age",
")",
"return",
"wrapper",
"return",
"decorator"
] |
A decorator for caching views and handling etag conditional requests.
The decorator adds headers to GET requests that help with caching: Last-
Modified, Expires and ETag. It also handles conditional requests, when the
client send an If-Matches header.
If a cache is set, the decorator will cache GET responses, bypassing the
dataframe serialization. POST requests will still benefit from the
dataframe cache for requests that produce the same SQL.
|
[
"A",
"decorator",
"for",
"caching",
"views",
"and",
"handling",
"etag",
"conditional",
"requests",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/decorators.py#L46-L118
|
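A minimal usage sketch for the etag_cache decorator documented in the record above, assuming a Superset-style Flask app where the decorator is importable; the route, the 600-second max_age and the permission check are invented for illustration.

from flask import Flask, jsonify
from superset.utils.decorators import etag_cache

app = Flask(__name__)

def can_access(*args, **kwargs):
    # hypothetical permission check; a real one would raise on unauthorized access
    return True

@app.route('/api/report')
@etag_cache(max_age=600, check_perms=can_access)
def report():
    # GET responses from this view get Last-Modified, Expires and ETag headers,
    # and are served from the response cache when one is configured
    return jsonify({'rows': 42})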
21,482
|
apache/incubator-superset
|
superset/db_engine_specs.py
|
BaseEngineSpec.apply_limit_to_sql
|
def apply_limit_to_sql(cls, sql, limit, database):
"""Alters the SQL statement to apply a LIMIT clause"""
if cls.limit_method == LimitMethod.WRAP_SQL:
sql = sql.strip('\t\n ;')
qry = (
select('*')
.select_from(
TextAsFrom(text(sql), ['*']).alias('inner_qry'),
)
.limit(limit)
)
return database.compile_sqla_query(qry)
elif LimitMethod.FORCE_LIMIT:
parsed_query = sql_parse.ParsedQuery(sql)
sql = parsed_query.get_query_with_new_limit(limit)
return sql
|
python
|
def apply_limit_to_sql(cls, sql, limit, database):
"""Alters the SQL statement to apply a LIMIT clause"""
if cls.limit_method == LimitMethod.WRAP_SQL:
sql = sql.strip('\t\n ;')
qry = (
select('*')
.select_from(
TextAsFrom(text(sql), ['*']).alias('inner_qry'),
)
.limit(limit)
)
return database.compile_sqla_query(qry)
elif LimitMethod.FORCE_LIMIT:
parsed_query = sql_parse.ParsedQuery(sql)
sql = parsed_query.get_query_with_new_limit(limit)
return sql
|
[
"def",
"apply_limit_to_sql",
"(",
"cls",
",",
"sql",
",",
"limit",
",",
"database",
")",
":",
"if",
"cls",
".",
"limit_method",
"==",
"LimitMethod",
".",
"WRAP_SQL",
":",
"sql",
"=",
"sql",
".",
"strip",
"(",
"'\\t\\n ;'",
")",
"qry",
"=",
"(",
"select",
"(",
"'*'",
")",
".",
"select_from",
"(",
"TextAsFrom",
"(",
"text",
"(",
"sql",
")",
",",
"[",
"'*'",
"]",
")",
".",
"alias",
"(",
"'inner_qry'",
")",
",",
")",
".",
"limit",
"(",
"limit",
")",
")",
"return",
"database",
".",
"compile_sqla_query",
"(",
"qry",
")",
"elif",
"LimitMethod",
".",
"FORCE_LIMIT",
":",
"parsed_query",
"=",
"sql_parse",
".",
"ParsedQuery",
"(",
"sql",
")",
"sql",
"=",
"parsed_query",
".",
"get_query_with_new_limit",
"(",
"limit",
")",
"return",
"sql"
] |
Alters the SQL statement to apply a LIMIT clause
|
[
"Alters",
"the",
"SQL",
"statement",
"to",
"apply",
"a",
"LIMIT",
"clause"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/db_engine_specs.py#L183-L198
|
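A self-contained sketch of the WRAP_SQL strategy from the record above, using SQLAlchemy 1.x directly so it runs without a Superset database object; the in-memory SQLite engine is only there to supply a dialect.

from sqlalchemy import create_engine, select, text
from sqlalchemy.sql.expression import TextAsFrom

def wrap_with_limit(sql, limit, dialect):
    # wrap the original statement as an inner query and apply LIMIT on the outside
    inner = TextAsFrom(text(sql.strip('\t\n ;')), ['*']).alias('inner_qry')
    qry = select('*').select_from(inner).limit(limit)
    return str(qry.compile(dialect=dialect))

engine = create_engine('sqlite://')
print(wrap_with_limit('SELECT name FROM users;', 10, engine.dialect))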
21,483
|
apache/incubator-superset
|
superset/db_engine_specs.py
|
BaseEngineSpec.truncate_label
|
def truncate_label(cls, label):
"""
In the case that a label exceeds the max length supported by the engine,
this method is used to construct a deterministic and unique label based on
an md5 hash.
"""
label = hashlib.md5(label.encode('utf-8')).hexdigest()
# truncate hash if it exceeds max length
if cls.max_column_name_length and len(label) > cls.max_column_name_length:
label = label[:cls.max_column_name_length]
return label
|
python
|
def truncate_label(cls, label):
"""
In the case that a label exceeds the max length supported by the engine,
this method is used to construct a deterministic and unique label based on
an md5 hash.
"""
label = hashlib.md5(label.encode('utf-8')).hexdigest()
# truncate hash if it exceeds max length
if cls.max_column_name_length and len(label) > cls.max_column_name_length:
label = label[:cls.max_column_name_length]
return label
|
[
"def",
"truncate_label",
"(",
"cls",
",",
"label",
")",
":",
"label",
"=",
"hashlib",
".",
"md5",
"(",
"label",
".",
"encode",
"(",
"'utf-8'",
")",
")",
".",
"hexdigest",
"(",
")",
"# truncate hash if it exceeds max length",
"if",
"cls",
".",
"max_column_name_length",
"and",
"len",
"(",
"label",
")",
">",
"cls",
".",
"max_column_name_length",
":",
"label",
"=",
"label",
"[",
":",
"cls",
".",
"max_column_name_length",
"]",
"return",
"label"
] |
In the case that a label exceeds the max length supported by the engine,
this method is used to construct a deterministic and unique label based on
an md5 hash.
|
[
"In",
"the",
"case",
"that",
"a",
"label",
"exceeds",
"the",
"max",
"length",
"supported",
"by",
"the",
"engine",
"this",
"method",
"is",
"used",
"to",
"construct",
"a",
"deterministic",
"and",
"unique",
"label",
"based",
"on",
"an",
"md5",
"hash",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/db_engine_specs.py#L463-L473
|
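A standalone sketch of the hashing idea in truncate_label above; the 30-character cap is an arbitrary stand-in for an engine's max_column_name_length.

import hashlib

def truncate_label(label, max_length=30):
    # replace the label with a deterministic md5 hex digest, then cap it at the limit
    hashed = hashlib.md5(label.encode('utf-8')).hexdigest()
    if max_length and len(hashed) > max_length:
        hashed = hashed[:max_length]
    return hashed

print(truncate_label('a_very_long_metric_label_that_exceeds_the_engine_limit'))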
21,484
|
apache/incubator-superset
|
superset/db_engine_specs.py
|
PostgresEngineSpec.get_table_names
|
def get_table_names(cls, inspector, schema):
"""Need to consider foreign tables for PostgreSQL"""
tables = inspector.get_table_names(schema)
tables.extend(inspector.get_foreign_table_names(schema))
return sorted(tables)
|
python
|
def get_table_names(cls, inspector, schema):
"""Need to consider foreign tables for PostgreSQL"""
tables = inspector.get_table_names(schema)
tables.extend(inspector.get_foreign_table_names(schema))
return sorted(tables)
|
[
"def",
"get_table_names",
"(",
"cls",
",",
"inspector",
",",
"schema",
")",
":",
"tables",
"=",
"inspector",
".",
"get_table_names",
"(",
"schema",
")",
"tables",
".",
"extend",
"(",
"inspector",
".",
"get_foreign_table_names",
"(",
"schema",
")",
")",
"return",
"sorted",
"(",
"tables",
")"
] |
Need to consider foreign tables for PostgreSQL
|
[
"Need",
"to",
"consider",
"foreign",
"tables",
"for",
"PostgreSQL"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/db_engine_specs.py#L522-L526
|
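A hedged sketch of the same inspector calls outside Superset, using SQLAlchemy's PostgreSQL inspector, which exposes get_foreign_table_names; the connection URL and schema are assumptions.

from sqlalchemy import create_engine, inspect

engine = create_engine('postgresql://user:secret@localhost/analytics')  # hypothetical
inspector = inspect(engine)
# regular tables plus foreign (FDW-backed) tables, sorted as in the record
tables = sorted(
    inspector.get_table_names('public') +
    inspector.get_foreign_table_names('public')
)
print(tables)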
21,485
|
apache/incubator-superset
|
superset/db_engine_specs.py
|
PostgresEngineSpec.get_timestamp_column
|
def get_timestamp_column(expression, column_name):
"""Postgres is unable to identify mixed case column names unless they
are quoted."""
if expression:
return expression
elif column_name.lower() != column_name:
return f'"{column_name}"'
return column_name
|
python
|
def get_timestamp_column(expression, column_name):
"""Postgres is unable to identify mixed case column names unless they
are quoted."""
if expression:
return expression
elif column_name.lower() != column_name:
return f'"{column_name}"'
return column_name
|
[
"def",
"get_timestamp_column",
"(",
"expression",
",",
"column_name",
")",
":",
"if",
"expression",
":",
"return",
"expression",
"elif",
"column_name",
".",
"lower",
"(",
")",
"!=",
"column_name",
":",
"return",
"f'\"{column_name}\"'",
"return",
"column_name"
] |
Postgres is unable to identify mixed case column names unless they
are quoted.
|
[
"Postgres",
"is",
"unable",
"to",
"identify",
"mixed",
"case",
"column",
"names",
"unless",
"they",
"are",
"quoted",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/db_engine_specs.py#L529-L536
|
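A quick usage illustration of the quoting rule above, with made-up column names; the helper is restated locally so the snippet runs without Superset installed.

def get_timestamp_column(expression, column_name):
    if expression:
        return expression
    elif column_name.lower() != column_name:
        return f'"{column_name}"'
    return column_name

print(get_timestamp_column(None, 'CreatedAt'))        # -> "CreatedAt" (quoted for Postgres)
print(get_timestamp_column(None, 'created_at'))       # -> created_at
print(get_timestamp_column('ts::date', 'CreatedAt'))  # -> ts::date (expression wins)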
21,486
|
apache/incubator-superset
|
superset/db_engine_specs.py
|
MySQLEngineSpec.extract_error_message
|
def extract_error_message(cls, e):
"""Extract error message for queries"""
message = str(e)
try:
if isinstance(e.args, tuple) and len(e.args) > 1:
message = e.args[1]
except Exception:
pass
return message
|
python
|
def extract_error_message(cls, e):
"""Extract error message for queries"""
message = str(e)
try:
if isinstance(e.args, tuple) and len(e.args) > 1:
message = e.args[1]
except Exception:
pass
return message
|
[
"def",
"extract_error_message",
"(",
"cls",
",",
"e",
")",
":",
"message",
"=",
"str",
"(",
"e",
")",
"try",
":",
"if",
"isinstance",
"(",
"e",
".",
"args",
",",
"tuple",
")",
"and",
"len",
"(",
"e",
".",
"args",
")",
">",
"1",
":",
"message",
"=",
"e",
".",
"args",
"[",
"1",
"]",
"except",
"Exception",
":",
"pass",
"return",
"message"
] |
Extract error message for queries
|
[
"Extract",
"error",
"message",
"for",
"queries"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/db_engine_specs.py#L775-L783
|
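A small illustration of why the MySQL spec above prefers e.args[1]: DB-API errors from MySQL drivers usually carry an (errno, message) tuple, so the second element is the readable part. The exception below is synthetic.

def extract_error_message(e):
    # fall back to str(e); use the message half of an (errno, message) tuple when present
    message = str(e)
    if isinstance(e.args, tuple) and len(e.args) > 1:
        message = e.args[1]
    return message

err = Exception(1146, "Table 'examples.missing' doesn't exist")  # mimics a MySQLdb error
print(extract_error_message(err))  # -> Table 'examples.missing' doesn't exist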
21,487
|
apache/incubator-superset
|
superset/db_engine_specs.py
|
PrestoEngineSpec._partition_query
|
def _partition_query(
cls, table_name, limit=0, order_by=None, filters=None):
"""Returns a partition query
:param table_name: the name of the table to get partitions from
:type table_name: str
:param limit: the number of partitions to be returned
:type limit: int
:param order_by: a list of tuples of field name and a boolean
that determines if that field should be sorted in descending
order
:type order_by: list of (str, bool) tuples
:param filters: dict of field name and filter value combinations
"""
limit_clause = 'LIMIT {}'.format(limit) if limit else ''
order_by_clause = ''
if order_by:
l = [] # noqa: E741
for field, desc in order_by:
l.append(field + ' DESC' if desc else '')
order_by_clause = 'ORDER BY ' + ', '.join(l)
where_clause = ''
if filters:
l = [] # noqa: E741
for field, value in filters.items():
l.append(f"{field} = '{value}'")
where_clause = 'WHERE ' + ' AND '.join(l)
sql = textwrap.dedent(f"""\
SELECT * FROM "{table_name}$partitions"
{where_clause}
{order_by_clause}
{limit_clause}
""")
return sql
|
python
|
def _partition_query(
cls, table_name, limit=0, order_by=None, filters=None):
"""Returns a partition query
:param table_name: the name of the table to get partitions from
:type table_name: str
:param limit: the number of partitions to be returned
:type limit: int
:param order_by: a list of tuples of field name and a boolean
that determines if that field should be sorted in descending
order
:type order_by: list of (str, bool) tuples
:param filters: dict of field name and filter value combinations
"""
limit_clause = 'LIMIT {}'.format(limit) if limit else ''
order_by_clause = ''
if order_by:
l = [] # noqa: E741
for field, desc in order_by:
l.append(field + ' DESC' if desc else '')
order_by_clause = 'ORDER BY ' + ', '.join(l)
where_clause = ''
if filters:
l = [] # noqa: E741
for field, value in filters.items():
l.append(f"{field} = '{value}'")
where_clause = 'WHERE ' + ' AND '.join(l)
sql = textwrap.dedent(f"""\
SELECT * FROM "{table_name}$partitions"
{where_clause}
{order_by_clause}
{limit_clause}
""")
return sql
|
[
"def",
"_partition_query",
"(",
"cls",
",",
"table_name",
",",
"limit",
"=",
"0",
",",
"order_by",
"=",
"None",
",",
"filters",
"=",
"None",
")",
":",
"limit_clause",
"=",
"'LIMIT {}'",
".",
"format",
"(",
"limit",
")",
"if",
"limit",
"else",
"''",
"order_by_clause",
"=",
"''",
"if",
"order_by",
":",
"l",
"=",
"[",
"]",
"# noqa: E741",
"for",
"field",
",",
"desc",
"in",
"order_by",
":",
"l",
".",
"append",
"(",
"field",
"+",
"' DESC'",
"if",
"desc",
"else",
"''",
")",
"order_by_clause",
"=",
"'ORDER BY '",
"+",
"', '",
".",
"join",
"(",
"l",
")",
"where_clause",
"=",
"''",
"if",
"filters",
":",
"l",
"=",
"[",
"]",
"# noqa: E741",
"for",
"field",
",",
"value",
"in",
"filters",
".",
"items",
"(",
")",
":",
"l",
".",
"append",
"(",
"f\"{field} = '{value}'\"",
")",
"where_clause",
"=",
"'WHERE '",
"+",
"' AND '",
".",
"join",
"(",
"l",
")",
"sql",
"=",
"textwrap",
".",
"dedent",
"(",
"f\"\"\"\\\n SELECT * FROM \"{table_name}$partitions\"\n\n {where_clause}\n {order_by_clause}\n {limit_clause}\n \"\"\"",
")",
"return",
"sql"
] |
Returns a partition query
:param table_name: the name of the table to get partitions from
:type table_name: str
:param limit: the number of partitions to be returned
:type limit: int
:param order_by: a list of tuples of field name and a boolean
that determines if that field should be sorted in descending
order
:type order_by: list of (str, bool) tuples
:param filters: dict of field name and filter value combinations
|
[
"Returns",
"a",
"partition",
"query"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/db_engine_specs.py#L944-L980
|
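For orientation, a hypothetical call to the partition-query builder above and roughly the Presto SQL it renders; the table name, ordering and filter values are invented.

from textwrap import dedent

# roughly what _partition_query('logs', limit=10, order_by=[('ds', True)],
#                               filters={'ds': '2019-01-01'}) would produce
expected = dedent("""\
    SELECT * FROM "logs$partitions"
    WHERE ds = '2019-01-01'
    ORDER BY ds DESC
    LIMIT 10
""")
print(expected)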
21,488
|
apache/incubator-superset
|
superset/db_engine_specs.py
|
HiveEngineSpec.create_table_from_csv
|
def create_table_from_csv(form, table):
"""Uploads a csv file and creates a superset datasource in Hive."""
def convert_to_hive_type(col_type):
"""maps tableschema's types to hive types"""
tableschema_to_hive_types = {
'boolean': 'BOOLEAN',
'integer': 'INT',
'number': 'DOUBLE',
'string': 'STRING',
}
return tableschema_to_hive_types.get(col_type, 'STRING')
bucket_path = config['CSV_TO_HIVE_UPLOAD_S3_BUCKET']
if not bucket_path:
logging.info('No upload bucket specified')
raise Exception(
'No upload bucket specified. You can specify one in the config file.')
table_name = form.name.data
schema_name = form.schema.data
if config.get('UPLOADED_CSV_HIVE_NAMESPACE'):
if '.' in table_name or schema_name:
raise Exception(
"You can't specify a namespace. "
'All tables will be uploaded to the `{}` namespace'.format(
config.get('HIVE_NAMESPACE')))
full_table_name = '{}.{}'.format(
config.get('UPLOADED_CSV_HIVE_NAMESPACE'), table_name)
else:
if '.' in table_name and schema_name:
raise Exception(
"You can't specify a namespace both in the name of the table "
'and in the schema field. Please remove one')
full_table_name = '{}.{}'.format(
schema_name, table_name) if schema_name else table_name
filename = form.csv_file.data.filename
upload_prefix = config['CSV_TO_HIVE_UPLOAD_DIRECTORY']
upload_path = config['UPLOAD_FOLDER'] + \
secure_filename(filename)
# Optional dependency
from tableschema import Table # pylint: disable=import-error
hive_table_schema = Table(upload_path).infer()
column_name_and_type = []
for column_info in hive_table_schema['fields']:
column_name_and_type.append(
'`{}` {}'.format(
column_info['name'],
convert_to_hive_type(column_info['type'])))
schema_definition = ', '.join(column_name_and_type)
# Optional dependency
import boto3 # pylint: disable=import-error
s3 = boto3.client('s3')
location = os.path.join('s3a://', bucket_path, upload_prefix, table_name)
s3.upload_file(
upload_path, bucket_path,
os.path.join(upload_prefix, table_name, filename))
sql = f"""CREATE TABLE {full_table_name} ( {schema_definition} )
ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS
TEXTFILE LOCATION '{location}'
tblproperties ('skip.header.line.count'='1')"""
logging.info(form.con.data)
engine = create_engine(form.con.data.sqlalchemy_uri_decrypted)
engine.execute(sql)
|
python
|
def create_table_from_csv(form, table):
"""Uploads a csv file and creates a superset datasource in Hive."""
def convert_to_hive_type(col_type):
"""maps tableschema's types to hive types"""
tableschema_to_hive_types = {
'boolean': 'BOOLEAN',
'integer': 'INT',
'number': 'DOUBLE',
'string': 'STRING',
}
return tableschema_to_hive_types.get(col_type, 'STRING')
bucket_path = config['CSV_TO_HIVE_UPLOAD_S3_BUCKET']
if not bucket_path:
logging.info('No upload bucket specified')
raise Exception(
'No upload bucket specified. You can specify one in the config file.')
table_name = form.name.data
schema_name = form.schema.data
if config.get('UPLOADED_CSV_HIVE_NAMESPACE'):
if '.' in table_name or schema_name:
raise Exception(
"You can't specify a namespace. "
'All tables will be uploaded to the `{}` namespace'.format(
config.get('HIVE_NAMESPACE')))
full_table_name = '{}.{}'.format(
config.get('UPLOADED_CSV_HIVE_NAMESPACE'), table_name)
else:
if '.' in table_name and schema_name:
raise Exception(
"You can't specify a namespace both in the name of the table "
'and in the schema field. Please remove one')
full_table_name = '{}.{}'.format(
schema_name, table_name) if schema_name else table_name
filename = form.csv_file.data.filename
upload_prefix = config['CSV_TO_HIVE_UPLOAD_DIRECTORY']
upload_path = config['UPLOAD_FOLDER'] + \
secure_filename(filename)
# Optional dependency
from tableschema import Table # pylint: disable=import-error
hive_table_schema = Table(upload_path).infer()
column_name_and_type = []
for column_info in hive_table_schema['fields']:
column_name_and_type.append(
'`{}` {}'.format(
column_info['name'],
convert_to_hive_type(column_info['type'])))
schema_definition = ', '.join(column_name_and_type)
# Optional dependency
import boto3 # pylint: disable=import-error
s3 = boto3.client('s3')
location = os.path.join('s3a://', bucket_path, upload_prefix, table_name)
s3.upload_file(
upload_path, bucket_path,
os.path.join(upload_prefix, table_name, filename))
sql = f"""CREATE TABLE {full_table_name} ( {schema_definition} )
ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS
TEXTFILE LOCATION '{location}'
tblproperties ('skip.header.line.count'='1')"""
logging.info(form.con.data)
engine = create_engine(form.con.data.sqlalchemy_uri_decrypted)
engine.execute(sql)
|
[
"def",
"create_table_from_csv",
"(",
"form",
",",
"table",
")",
":",
"def",
"convert_to_hive_type",
"(",
"col_type",
")",
":",
"\"\"\"maps tableschema's types to hive types\"\"\"",
"tableschema_to_hive_types",
"=",
"{",
"'boolean'",
":",
"'BOOLEAN'",
",",
"'integer'",
":",
"'INT'",
",",
"'number'",
":",
"'DOUBLE'",
",",
"'string'",
":",
"'STRING'",
",",
"}",
"return",
"tableschema_to_hive_types",
".",
"get",
"(",
"col_type",
",",
"'STRING'",
")",
"bucket_path",
"=",
"config",
"[",
"'CSV_TO_HIVE_UPLOAD_S3_BUCKET'",
"]",
"if",
"not",
"bucket_path",
":",
"logging",
".",
"info",
"(",
"'No upload bucket specified'",
")",
"raise",
"Exception",
"(",
"'No upload bucket specified. You can specify one in the config file.'",
")",
"table_name",
"=",
"form",
".",
"name",
".",
"data",
"schema_name",
"=",
"form",
".",
"schema",
".",
"data",
"if",
"config",
".",
"get",
"(",
"'UPLOADED_CSV_HIVE_NAMESPACE'",
")",
":",
"if",
"'.'",
"in",
"table_name",
"or",
"schema_name",
":",
"raise",
"Exception",
"(",
"\"You can't specify a namespace. \"",
"'All tables will be uploaded to the `{}` namespace'",
".",
"format",
"(",
"config",
".",
"get",
"(",
"'HIVE_NAMESPACE'",
")",
")",
")",
"full_table_name",
"=",
"'{}.{}'",
".",
"format",
"(",
"config",
".",
"get",
"(",
"'UPLOADED_CSV_HIVE_NAMESPACE'",
")",
",",
"table_name",
")",
"else",
":",
"if",
"'.'",
"in",
"table_name",
"and",
"schema_name",
":",
"raise",
"Exception",
"(",
"\"You can't specify a namespace both in the name of the table \"",
"'and in the schema field. Please remove one'",
")",
"full_table_name",
"=",
"'{}.{}'",
".",
"format",
"(",
"schema_name",
",",
"table_name",
")",
"if",
"schema_name",
"else",
"table_name",
"filename",
"=",
"form",
".",
"csv_file",
".",
"data",
".",
"filename",
"upload_prefix",
"=",
"config",
"[",
"'CSV_TO_HIVE_UPLOAD_DIRECTORY'",
"]",
"upload_path",
"=",
"config",
"[",
"'UPLOAD_FOLDER'",
"]",
"+",
"secure_filename",
"(",
"filename",
")",
"# Optional dependency",
"from",
"tableschema",
"import",
"Table",
"# pylint: disable=import-error",
"hive_table_schema",
"=",
"Table",
"(",
"upload_path",
")",
".",
"infer",
"(",
")",
"column_name_and_type",
"=",
"[",
"]",
"for",
"column_info",
"in",
"hive_table_schema",
"[",
"'fields'",
"]",
":",
"column_name_and_type",
".",
"append",
"(",
"'`{}` {}'",
".",
"format",
"(",
"column_info",
"[",
"'name'",
"]",
",",
"convert_to_hive_type",
"(",
"column_info",
"[",
"'type'",
"]",
")",
")",
")",
"schema_definition",
"=",
"', '",
".",
"join",
"(",
"column_name_and_type",
")",
"# Optional dependency",
"import",
"boto3",
"# pylint: disable=import-error",
"s3",
"=",
"boto3",
".",
"client",
"(",
"'s3'",
")",
"location",
"=",
"os",
".",
"path",
".",
"join",
"(",
"'s3a://'",
",",
"bucket_path",
",",
"upload_prefix",
",",
"table_name",
")",
"s3",
".",
"upload_file",
"(",
"upload_path",
",",
"bucket_path",
",",
"os",
".",
"path",
".",
"join",
"(",
"upload_prefix",
",",
"table_name",
",",
"filename",
")",
")",
"sql",
"=",
"f\"\"\"CREATE TABLE {full_table_name} ( {schema_definition} )\n ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' STORED AS\n TEXTFILE LOCATION '{location}'\n tblproperties ('skip.header.line.count'='1')\"\"\"",
"logging",
".",
"info",
"(",
"form",
".",
"con",
".",
"data",
")",
"engine",
"=",
"create_engine",
"(",
"form",
".",
"con",
".",
"data",
".",
"sqlalchemy_uri_decrypted",
")",
"engine",
".",
"execute",
"(",
"sql",
")"
] |
Uploads a csv file and creates a superset datasource in Hive.
|
[
"Uploads",
"a",
"csv",
"file",
"and",
"creates",
"a",
"superset",
"datasource",
"in",
"Hive",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/db_engine_specs.py#L1139-L1209
|
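A narrower sketch of just the schema-inference step used above, translating tableschema-inferred types into Hive column DDL; the CSV path is hypothetical, and the S3 upload and CREATE TABLE execution are deliberately left out.

from tableschema import Table  # optional dependency, as in the record

TABLESCHEMA_TO_HIVE = {
    'boolean': 'BOOLEAN',
    'integer': 'INT',
    'number': 'DOUBLE',
    'string': 'STRING',
}

def hive_columns_for_csv(csv_path):
    # infer a schema from the CSV and render backtick-quoted Hive column definitions
    fields = Table(csv_path).infer()['fields']
    return ', '.join(
        '`{}` {}'.format(f['name'], TABLESCHEMA_TO_HIVE.get(f['type'], 'STRING'))
        for f in fields
    )

print(hive_columns_for_csv('/tmp/example.csv'))  # hypothetical path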
21,489
|
apache/incubator-superset
|
superset/data/multiformat_time_series.py
|
load_multiformat_time_series
|
def load_multiformat_time_series():
"""Loading time series data from a zip file in the repo"""
data = get_example_data('multiformat_time_series.json.gz')
pdf = pd.read_json(data)
pdf.ds = pd.to_datetime(pdf.ds, unit='s')
pdf.ds2 = pd.to_datetime(pdf.ds2, unit='s')
pdf.to_sql(
'multiformat_time_series',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'ds': Date,
'ds2': DateTime,
'epoch_s': BigInteger,
'epoch_ms': BigInteger,
'string0': String(100),
'string1': String(100),
'string2': String(100),
'string3': String(100),
},
index=False)
print('Done loading table!')
print('-' * 80)
print('Creating table [multiformat_time_series] reference')
obj = db.session.query(TBL).filter_by(table_name='multiformat_time_series').first()
if not obj:
obj = TBL(table_name='multiformat_time_series')
obj.main_dttm_col = 'ds'
obj.database = utils.get_or_create_main_db()
dttm_and_expr_dict = {
'ds': [None, None],
'ds2': [None, None],
'epoch_s': ['epoch_s', None],
'epoch_ms': ['epoch_ms', None],
'string2': ['%Y%m%d-%H%M%S', None],
'string1': ['%Y-%m-%d^%H:%M:%S', None],
'string0': ['%Y-%m-%d %H:%M:%S.%f', None],
'string3': ['%Y/%m/%d%H:%M:%S.%f', None],
}
for col in obj.columns:
dttm_and_expr = dttm_and_expr_dict[col.column_name]
col.python_date_format = dttm_and_expr[0]
col.dbatabase_expr = dttm_and_expr[1]
col.is_dttm = True
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
print('Creating Heatmap charts')
for i, col in enumerate(tbl.columns):
slice_data = {
'metrics': ['count'],
'granularity_sqla': col.column_name,
'row_limit': config.get('ROW_LIMIT'),
'since': '2015',
'until': '2016',
'where': '',
'viz_type': 'cal_heatmap',
'domain_granularity': 'month',
'subdomain_granularity': 'day',
}
slc = Slice(
slice_name=f'Calendar Heatmap multiformat {i}',
viz_type='cal_heatmap',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
merge_slice(slc)
misc_dash_slices.add('Calendar Heatmap multiformat 0')
|
python
|
def load_multiformat_time_series():
"""Loading time series data from a zip file in the repo"""
data = get_example_data('multiformat_time_series.json.gz')
pdf = pd.read_json(data)
pdf.ds = pd.to_datetime(pdf.ds, unit='s')
pdf.ds2 = pd.to_datetime(pdf.ds2, unit='s')
pdf.to_sql(
'multiformat_time_series',
db.engine,
if_exists='replace',
chunksize=500,
dtype={
'ds': Date,
'ds2': DateTime,
'epoch_s': BigInteger,
'epoch_ms': BigInteger,
'string0': String(100),
'string1': String(100),
'string2': String(100),
'string3': String(100),
},
index=False)
print('Done loading table!')
print('-' * 80)
print('Creating table [multiformat_time_series] reference')
obj = db.session.query(TBL).filter_by(table_name='multiformat_time_series').first()
if not obj:
obj = TBL(table_name='multiformat_time_series')
obj.main_dttm_col = 'ds'
obj.database = utils.get_or_create_main_db()
dttm_and_expr_dict = {
'ds': [None, None],
'ds2': [None, None],
'epoch_s': ['epoch_s', None],
'epoch_ms': ['epoch_ms', None],
'string2': ['%Y%m%d-%H%M%S', None],
'string1': ['%Y-%m-%d^%H:%M:%S', None],
'string0': ['%Y-%m-%d %H:%M:%S.%f', None],
'string3': ['%Y/%m/%d%H:%M:%S.%f', None],
}
for col in obj.columns:
dttm_and_expr = dttm_and_expr_dict[col.column_name]
col.python_date_format = dttm_and_expr[0]
col.dbatabase_expr = dttm_and_expr[1]
col.is_dttm = True
db.session.merge(obj)
db.session.commit()
obj.fetch_metadata()
tbl = obj
print('Creating Heatmap charts')
for i, col in enumerate(tbl.columns):
slice_data = {
'metrics': ['count'],
'granularity_sqla': col.column_name,
'row_limit': config.get('ROW_LIMIT'),
'since': '2015',
'until': '2016',
'where': '',
'viz_type': 'cal_heatmap',
'domain_granularity': 'month',
'subdomain_granularity': 'day',
}
slc = Slice(
slice_name=f'Calendar Heatmap multiformat {i}',
viz_type='cal_heatmap',
datasource_type='table',
datasource_id=tbl.id,
params=get_slice_json(slice_data),
)
merge_slice(slc)
misc_dash_slices.add('Calendar Heatmap multiformat 0')
|
[
"def",
"load_multiformat_time_series",
"(",
")",
":",
"data",
"=",
"get_example_data",
"(",
"'multiformat_time_series.json.gz'",
")",
"pdf",
"=",
"pd",
".",
"read_json",
"(",
"data",
")",
"pdf",
".",
"ds",
"=",
"pd",
".",
"to_datetime",
"(",
"pdf",
".",
"ds",
",",
"unit",
"=",
"'s'",
")",
"pdf",
".",
"ds2",
"=",
"pd",
".",
"to_datetime",
"(",
"pdf",
".",
"ds2",
",",
"unit",
"=",
"'s'",
")",
"pdf",
".",
"to_sql",
"(",
"'multiformat_time_series'",
",",
"db",
".",
"engine",
",",
"if_exists",
"=",
"'replace'",
",",
"chunksize",
"=",
"500",
",",
"dtype",
"=",
"{",
"'ds'",
":",
"Date",
",",
"'ds2'",
":",
"DateTime",
",",
"'epoch_s'",
":",
"BigInteger",
",",
"'epoch_ms'",
":",
"BigInteger",
",",
"'string0'",
":",
"String",
"(",
"100",
")",
",",
"'string1'",
":",
"String",
"(",
"100",
")",
",",
"'string2'",
":",
"String",
"(",
"100",
")",
",",
"'string3'",
":",
"String",
"(",
"100",
")",
",",
"}",
",",
"index",
"=",
"False",
")",
"print",
"(",
"'Done loading table!'",
")",
"print",
"(",
"'-'",
"*",
"80",
")",
"print",
"(",
"'Creating table [multiformat_time_series] reference'",
")",
"obj",
"=",
"db",
".",
"session",
".",
"query",
"(",
"TBL",
")",
".",
"filter_by",
"(",
"table_name",
"=",
"'multiformat_time_series'",
")",
".",
"first",
"(",
")",
"if",
"not",
"obj",
":",
"obj",
"=",
"TBL",
"(",
"table_name",
"=",
"'multiformat_time_series'",
")",
"obj",
".",
"main_dttm_col",
"=",
"'ds'",
"obj",
".",
"database",
"=",
"utils",
".",
"get_or_create_main_db",
"(",
")",
"dttm_and_expr_dict",
"=",
"{",
"'ds'",
":",
"[",
"None",
",",
"None",
"]",
",",
"'ds2'",
":",
"[",
"None",
",",
"None",
"]",
",",
"'epoch_s'",
":",
"[",
"'epoch_s'",
",",
"None",
"]",
",",
"'epoch_ms'",
":",
"[",
"'epoch_ms'",
",",
"None",
"]",
",",
"'string2'",
":",
"[",
"'%Y%m%d-%H%M%S'",
",",
"None",
"]",
",",
"'string1'",
":",
"[",
"'%Y-%m-%d^%H:%M:%S'",
",",
"None",
"]",
",",
"'string0'",
":",
"[",
"'%Y-%m-%d %H:%M:%S.%f'",
",",
"None",
"]",
",",
"'string3'",
":",
"[",
"'%Y/%m/%d%H:%M:%S.%f'",
",",
"None",
"]",
",",
"}",
"for",
"col",
"in",
"obj",
".",
"columns",
":",
"dttm_and_expr",
"=",
"dttm_and_expr_dict",
"[",
"col",
".",
"column_name",
"]",
"col",
".",
"python_date_format",
"=",
"dttm_and_expr",
"[",
"0",
"]",
"col",
".",
"dbatabase_expr",
"=",
"dttm_and_expr",
"[",
"1",
"]",
"col",
".",
"is_dttm",
"=",
"True",
"db",
".",
"session",
".",
"merge",
"(",
"obj",
")",
"db",
".",
"session",
".",
"commit",
"(",
")",
"obj",
".",
"fetch_metadata",
"(",
")",
"tbl",
"=",
"obj",
"print",
"(",
"'Creating Heatmap charts'",
")",
"for",
"i",
",",
"col",
"in",
"enumerate",
"(",
"tbl",
".",
"columns",
")",
":",
"slice_data",
"=",
"{",
"'metrics'",
":",
"[",
"'count'",
"]",
",",
"'granularity_sqla'",
":",
"col",
".",
"column_name",
",",
"'row_limit'",
":",
"config",
".",
"get",
"(",
"'ROW_LIMIT'",
")",
",",
"'since'",
":",
"'2015'",
",",
"'until'",
":",
"'2016'",
",",
"'where'",
":",
"''",
",",
"'viz_type'",
":",
"'cal_heatmap'",
",",
"'domain_granularity'",
":",
"'month'",
",",
"'subdomain_granularity'",
":",
"'day'",
",",
"}",
"slc",
"=",
"Slice",
"(",
"slice_name",
"=",
"f'Calendar Heatmap multiformat {i}'",
",",
"viz_type",
"=",
"'cal_heatmap'",
",",
"datasource_type",
"=",
"'table'",
",",
"datasource_id",
"=",
"tbl",
".",
"id",
",",
"params",
"=",
"get_slice_json",
"(",
"slice_data",
")",
",",
")",
"merge_slice",
"(",
"slc",
")",
"misc_dash_slices",
".",
"add",
"(",
"'Calendar Heatmap multiformat 0'",
")"
] |
Loading time series data from a zip file in the repo
|
[
"Loading",
"time",
"series",
"data",
"from",
"a",
"zip",
"file",
"in",
"the",
"repo"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/data/multiformat_time_series.py#L34-L107
|
21,490
|
apache/incubator-superset
|
superset/utils/dashboard_import_export.py
|
import_dashboards
|
def import_dashboards(session, data_stream, import_time=None):
"""Imports dashboards from a stream to databases"""
current_tt = int(time.time())
import_time = current_tt if import_time is None else import_time
data = json.loads(data_stream.read(), object_hook=decode_dashboards)
# TODO: import DRUID datasources
for table in data['datasources']:
type(table).import_obj(table, import_time=import_time)
session.commit()
for dashboard in data['dashboards']:
Dashboard.import_obj(
dashboard, import_time=import_time)
session.commit()
|
python
|
def import_dashboards(session, data_stream, import_time=None):
"""Imports dashboards from a stream to databases"""
current_tt = int(time.time())
import_time = current_tt if import_time is None else import_time
data = json.loads(data_stream.read(), object_hook=decode_dashboards)
# TODO: import DRUID datasources
for table in data['datasources']:
type(table).import_obj(table, import_time=import_time)
session.commit()
for dashboard in data['dashboards']:
Dashboard.import_obj(
dashboard, import_time=import_time)
session.commit()
|
[
"def",
"import_dashboards",
"(",
"session",
",",
"data_stream",
",",
"import_time",
"=",
"None",
")",
":",
"current_tt",
"=",
"int",
"(",
"time",
".",
"time",
"(",
")",
")",
"import_time",
"=",
"current_tt",
"if",
"import_time",
"is",
"None",
"else",
"import_time",
"data",
"=",
"json",
".",
"loads",
"(",
"data_stream",
".",
"read",
"(",
")",
",",
"object_hook",
"=",
"decode_dashboards",
")",
"# TODO: import DRUID datasources",
"for",
"table",
"in",
"data",
"[",
"'datasources'",
"]",
":",
"type",
"(",
"table",
")",
".",
"import_obj",
"(",
"table",
",",
"import_time",
"=",
"import_time",
")",
"session",
".",
"commit",
"(",
")",
"for",
"dashboard",
"in",
"data",
"[",
"'dashboards'",
"]",
":",
"Dashboard",
".",
"import_obj",
"(",
"dashboard",
",",
"import_time",
"=",
"import_time",
")",
"session",
".",
"commit",
"(",
")"
] |
Imports dashboards from a stream to databases
|
[
"Imports",
"dashboards",
"from",
"a",
"stream",
"to",
"databases"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/dashboard_import_export.py#L26-L38
|
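A hedged usage sketch for the importer above; it assumes a configured Superset application context and a JSON bundle produced by the matching export, and the file name is an assumption.

from superset import db
from superset.utils.dashboard_import_export import import_dashboards

# stream a previously exported bundle into the metadata database
with open('dashboards_export.json') as data_stream:
    import_dashboards(db.session, data_stream)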
21,491
|
apache/incubator-superset
|
superset/utils/dashboard_import_export.py
|
export_dashboards
|
def export_dashboards(session):
"""Returns all dashboards metadata as a json dump"""
logging.info('Starting export')
dashboards = session.query(Dashboard)
dashboard_ids = []
for dashboard in dashboards:
dashboard_ids.append(dashboard.id)
data = Dashboard.export_dashboards(dashboard_ids)
return data
|
python
|
def export_dashboards(session):
"""Returns all dashboards metadata as a json dump"""
logging.info('Starting export')
dashboards = session.query(Dashboard)
dashboard_ids = []
for dashboard in dashboards:
dashboard_ids.append(dashboard.id)
data = Dashboard.export_dashboards(dashboard_ids)
return data
|
[
"def",
"export_dashboards",
"(",
"session",
")",
":",
"logging",
".",
"info",
"(",
"'Starting export'",
")",
"dashboards",
"=",
"session",
".",
"query",
"(",
"Dashboard",
")",
"dashboard_ids",
"=",
"[",
"]",
"for",
"dashboard",
"in",
"dashboards",
":",
"dashboard_ids",
".",
"append",
"(",
"dashboard",
".",
"id",
")",
"data",
"=",
"Dashboard",
".",
"export_dashboards",
"(",
"dashboard_ids",
")",
"return",
"data"
] |
Returns all dashboards metadata as a json dump
|
[
"Returns",
"all",
"dashboards",
"metadata",
"as",
"a",
"json",
"dump"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/dashboard_import_export.py#L41-L49
|
21,492
|
apache/incubator-superset
|
superset/sql_lab.py
|
handle_query_error
|
def handle_query_error(msg, query, session, payload=None):
"""Local method handling error while processing the SQL"""
payload = payload or {}
troubleshooting_link = config['TROUBLESHOOTING_LINK']
query.error_message = msg
query.status = QueryStatus.FAILED
query.tmp_table_name = None
session.commit()
payload.update({
'status': query.status,
'error': msg,
})
if troubleshooting_link:
payload['link'] = troubleshooting_link
return payload
|
python
|
def handle_query_error(msg, query, session, payload=None):
"""Local method handling error while processing the SQL"""
payload = payload or {}
troubleshooting_link = config['TROUBLESHOOTING_LINK']
query.error_message = msg
query.status = QueryStatus.FAILED
query.tmp_table_name = None
session.commit()
payload.update({
'status': query.status,
'error': msg,
})
if troubleshooting_link:
payload['link'] = troubleshooting_link
return payload
|
[
"def",
"handle_query_error",
"(",
"msg",
",",
"query",
",",
"session",
",",
"payload",
"=",
"None",
")",
":",
"payload",
"=",
"payload",
"or",
"{",
"}",
"troubleshooting_link",
"=",
"config",
"[",
"'TROUBLESHOOTING_LINK'",
"]",
"query",
".",
"error_message",
"=",
"msg",
"query",
".",
"status",
"=",
"QueryStatus",
".",
"FAILED",
"query",
".",
"tmp_table_name",
"=",
"None",
"session",
".",
"commit",
"(",
")",
"payload",
".",
"update",
"(",
"{",
"'status'",
":",
"query",
".",
"status",
",",
"'error'",
":",
"msg",
",",
"}",
")",
"if",
"troubleshooting_link",
":",
"payload",
"[",
"'link'",
"]",
"=",
"troubleshooting_link",
"return",
"payload"
] |
Local method handling error while processing the SQL
|
[
"Local",
"method",
"handling",
"error",
"while",
"processing",
"the",
"SQL"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/sql_lab.py#L63-L77
|
21,493
|
apache/incubator-superset
|
superset/sql_lab.py
|
get_query
|
def get_query(query_id, session, retry_count=5):
"""attemps to get the query and retry if it cannot"""
query = None
attempt = 0
while not query and attempt < retry_count:
try:
query = session.query(Query).filter_by(id=query_id).one()
except Exception:
attempt += 1
logging.error(
'Query with id `{}` could not be retrieved'.format(query_id))
stats_logger.incr('error_attempting_orm_query_' + str(attempt))
logging.error('Sleeping for a sec before retrying...')
sleep(1)
if not query:
stats_logger.incr('error_failed_at_getting_orm_query')
raise SqlLabException('Failed at getting query')
return query
|
python
|
def get_query(query_id, session, retry_count=5):
"""attemps to get the query and retry if it cannot"""
query = None
attempt = 0
while not query and attempt < retry_count:
try:
query = session.query(Query).filter_by(id=query_id).one()
except Exception:
attempt += 1
logging.error(
'Query with id `{}` could not be retrieved'.format(query_id))
stats_logger.incr('error_attempting_orm_query_' + str(attempt))
logging.error('Sleeping for a sec before retrying...')
sleep(1)
if not query:
stats_logger.incr('error_failed_at_getting_orm_query')
raise SqlLabException('Failed at getting query')
return query
|
[
"def",
"get_query",
"(",
"query_id",
",",
"session",
",",
"retry_count",
"=",
"5",
")",
":",
"query",
"=",
"None",
"attempt",
"=",
"0",
"while",
"not",
"query",
"and",
"attempt",
"<",
"retry_count",
":",
"try",
":",
"query",
"=",
"session",
".",
"query",
"(",
"Query",
")",
".",
"filter_by",
"(",
"id",
"=",
"query_id",
")",
".",
"one",
"(",
")",
"except",
"Exception",
":",
"attempt",
"+=",
"1",
"logging",
".",
"error",
"(",
"'Query with id `{}` could not be retrieved'",
".",
"format",
"(",
"query_id",
")",
")",
"stats_logger",
".",
"incr",
"(",
"'error_attempting_orm_query_'",
"+",
"str",
"(",
"attempt",
")",
")",
"logging",
".",
"error",
"(",
"'Sleeping for a sec before retrying...'",
")",
"sleep",
"(",
"1",
")",
"if",
"not",
"query",
":",
"stats_logger",
".",
"incr",
"(",
"'error_failed_at_getting_orm_query'",
")",
"raise",
"SqlLabException",
"(",
"'Failed at getting query'",
")",
"return",
"query"
] |
attemps to get the query and retry if it cannot
|
[
"attemps",
"to",
"get",
"the",
"query",
"and",
"retry",
"if",
"it",
"cannot"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/sql_lab.py#L80-L97
|
21,494
|
apache/incubator-superset
|
superset/sql_lab.py
|
execute_sql_statement
|
def execute_sql_statement(sql_statement, query, user_name, session, cursor):
"""Executes a single SQL statement"""
database = query.database
db_engine_spec = database.db_engine_spec
parsed_query = ParsedQuery(sql_statement)
sql = parsed_query.stripped()
SQL_MAX_ROWS = app.config.get('SQL_MAX_ROW')
if not parsed_query.is_readonly() and not database.allow_dml:
raise SqlLabSecurityException(
_('Only `SELECT` statements are allowed against this database'))
if query.select_as_cta:
if not parsed_query.is_select():
raise SqlLabException(_(
'Only `SELECT` statements can be used with the CREATE TABLE '
'feature.'))
if not query.tmp_table_name:
start_dttm = datetime.fromtimestamp(query.start_time)
query.tmp_table_name = 'tmp_{}_table_{}'.format(
query.user_id, start_dttm.strftime('%Y_%m_%d_%H_%M_%S'))
sql = parsed_query.as_create_table(query.tmp_table_name)
query.select_as_cta_used = True
if parsed_query.is_select():
if SQL_MAX_ROWS and (not query.limit or query.limit > SQL_MAX_ROWS):
query.limit = SQL_MAX_ROWS
if query.limit:
sql = database.apply_limit_to_sql(sql, query.limit)
# Hook to allow environment-specific mutation (usually comments) to the SQL
SQL_QUERY_MUTATOR = config.get('SQL_QUERY_MUTATOR')
if SQL_QUERY_MUTATOR:
sql = SQL_QUERY_MUTATOR(sql, user_name, security_manager, database)
try:
if log_query:
log_query(
query.database.sqlalchemy_uri,
query.executed_sql,
query.schema,
user_name,
__name__,
security_manager,
)
query.executed_sql = sql
with stats_timing('sqllab.query.time_executing_query', stats_logger):
logging.info('Running query: \n{}'.format(sql))
db_engine_spec.execute(cursor, sql, async_=True)
logging.info('Handling cursor')
db_engine_spec.handle_cursor(cursor, query, session)
with stats_timing('sqllab.query.time_fetching_results', stats_logger):
logging.debug('Fetching data for query object: {}'.format(query.to_dict()))
data = db_engine_spec.fetch_data(cursor, query.limit)
except SoftTimeLimitExceeded as e:
logging.exception(e)
raise SqlLabTimeoutException(
"SQL Lab timeout. This environment's policy is to kill queries "
'after {} seconds.'.format(SQLLAB_TIMEOUT))
except Exception as e:
logging.exception(e)
raise SqlLabException(db_engine_spec.extract_error_message(e))
logging.debug('Fetching cursor description')
cursor_description = cursor.description
return dataframe.SupersetDataFrame(data, cursor_description, db_engine_spec)
|
python
|
def execute_sql_statement(sql_statement, query, user_name, session, cursor):
"""Executes a single SQL statement"""
database = query.database
db_engine_spec = database.db_engine_spec
parsed_query = ParsedQuery(sql_statement)
sql = parsed_query.stripped()
SQL_MAX_ROWS = app.config.get('SQL_MAX_ROW')
if not parsed_query.is_readonly() and not database.allow_dml:
raise SqlLabSecurityException(
_('Only `SELECT` statements are allowed against this database'))
if query.select_as_cta:
if not parsed_query.is_select():
raise SqlLabException(_(
'Only `SELECT` statements can be used with the CREATE TABLE '
'feature.'))
if not query.tmp_table_name:
start_dttm = datetime.fromtimestamp(query.start_time)
query.tmp_table_name = 'tmp_{}_table_{}'.format(
query.user_id, start_dttm.strftime('%Y_%m_%d_%H_%M_%S'))
sql = parsed_query.as_create_table(query.tmp_table_name)
query.select_as_cta_used = True
if parsed_query.is_select():
if SQL_MAX_ROWS and (not query.limit or query.limit > SQL_MAX_ROWS):
query.limit = SQL_MAX_ROWS
if query.limit:
sql = database.apply_limit_to_sql(sql, query.limit)
# Hook to allow environment-specific mutation (usually comments) to the SQL
SQL_QUERY_MUTATOR = config.get('SQL_QUERY_MUTATOR')
if SQL_QUERY_MUTATOR:
sql = SQL_QUERY_MUTATOR(sql, user_name, security_manager, database)
try:
if log_query:
log_query(
query.database.sqlalchemy_uri,
query.executed_sql,
query.schema,
user_name,
__name__,
security_manager,
)
query.executed_sql = sql
with stats_timing('sqllab.query.time_executing_query', stats_logger):
logging.info('Running query: \n{}'.format(sql))
db_engine_spec.execute(cursor, sql, async_=True)
logging.info('Handling cursor')
db_engine_spec.handle_cursor(cursor, query, session)
with stats_timing('sqllab.query.time_fetching_results', stats_logger):
logging.debug('Fetching data for query object: {}'.format(query.to_dict()))
data = db_engine_spec.fetch_data(cursor, query.limit)
except SoftTimeLimitExceeded as e:
logging.exception(e)
raise SqlLabTimeoutException(
"SQL Lab timeout. This environment's policy is to kill queries "
'after {} seconds.'.format(SQLLAB_TIMEOUT))
except Exception as e:
logging.exception(e)
raise SqlLabException(db_engine_spec.extract_error_message(e))
logging.debug('Fetching cursor description')
cursor_description = cursor.description
return dataframe.SupersetDataFrame(data, cursor_description, db_engine_spec)
|
[
"def",
"execute_sql_statement",
"(",
"sql_statement",
",",
"query",
",",
"user_name",
",",
"session",
",",
"cursor",
")",
":",
"database",
"=",
"query",
".",
"database",
"db_engine_spec",
"=",
"database",
".",
"db_engine_spec",
"parsed_query",
"=",
"ParsedQuery",
"(",
"sql_statement",
")",
"sql",
"=",
"parsed_query",
".",
"stripped",
"(",
")",
"SQL_MAX_ROWS",
"=",
"app",
".",
"config",
".",
"get",
"(",
"'SQL_MAX_ROW'",
")",
"if",
"not",
"parsed_query",
".",
"is_readonly",
"(",
")",
"and",
"not",
"database",
".",
"allow_dml",
":",
"raise",
"SqlLabSecurityException",
"(",
"_",
"(",
"'Only `SELECT` statements are allowed against this database'",
")",
")",
"if",
"query",
".",
"select_as_cta",
":",
"if",
"not",
"parsed_query",
".",
"is_select",
"(",
")",
":",
"raise",
"SqlLabException",
"(",
"_",
"(",
"'Only `SELECT` statements can be used with the CREATE TABLE '",
"'feature.'",
")",
")",
"if",
"not",
"query",
".",
"tmp_table_name",
":",
"start_dttm",
"=",
"datetime",
".",
"fromtimestamp",
"(",
"query",
".",
"start_time",
")",
"query",
".",
"tmp_table_name",
"=",
"'tmp_{}_table_{}'",
".",
"format",
"(",
"query",
".",
"user_id",
",",
"start_dttm",
".",
"strftime",
"(",
"'%Y_%m_%d_%H_%M_%S'",
")",
")",
"sql",
"=",
"parsed_query",
".",
"as_create_table",
"(",
"query",
".",
"tmp_table_name",
")",
"query",
".",
"select_as_cta_used",
"=",
"True",
"if",
"parsed_query",
".",
"is_select",
"(",
")",
":",
"if",
"SQL_MAX_ROWS",
"and",
"(",
"not",
"query",
".",
"limit",
"or",
"query",
".",
"limit",
">",
"SQL_MAX_ROWS",
")",
":",
"query",
".",
"limit",
"=",
"SQL_MAX_ROWS",
"if",
"query",
".",
"limit",
":",
"sql",
"=",
"database",
".",
"apply_limit_to_sql",
"(",
"sql",
",",
"query",
".",
"limit",
")",
"# Hook to allow environment-specific mutation (usually comments) to the SQL",
"SQL_QUERY_MUTATOR",
"=",
"config",
".",
"get",
"(",
"'SQL_QUERY_MUTATOR'",
")",
"if",
"SQL_QUERY_MUTATOR",
":",
"sql",
"=",
"SQL_QUERY_MUTATOR",
"(",
"sql",
",",
"user_name",
",",
"security_manager",
",",
"database",
")",
"try",
":",
"if",
"log_query",
":",
"log_query",
"(",
"query",
".",
"database",
".",
"sqlalchemy_uri",
",",
"query",
".",
"executed_sql",
",",
"query",
".",
"schema",
",",
"user_name",
",",
"__name__",
",",
"security_manager",
",",
")",
"query",
".",
"executed_sql",
"=",
"sql",
"with",
"stats_timing",
"(",
"'sqllab.query.time_executing_query'",
",",
"stats_logger",
")",
":",
"logging",
".",
"info",
"(",
"'Running query: \\n{}'",
".",
"format",
"(",
"sql",
")",
")",
"db_engine_spec",
".",
"execute",
"(",
"cursor",
",",
"sql",
",",
"async_",
"=",
"True",
")",
"logging",
".",
"info",
"(",
"'Handling cursor'",
")",
"db_engine_spec",
".",
"handle_cursor",
"(",
"cursor",
",",
"query",
",",
"session",
")",
"with",
"stats_timing",
"(",
"'sqllab.query.time_fetching_results'",
",",
"stats_logger",
")",
":",
"logging",
".",
"debug",
"(",
"'Fetching data for query object: {}'",
".",
"format",
"(",
"query",
".",
"to_dict",
"(",
")",
")",
")",
"data",
"=",
"db_engine_spec",
".",
"fetch_data",
"(",
"cursor",
",",
"query",
".",
"limit",
")",
"except",
"SoftTimeLimitExceeded",
"as",
"e",
":",
"logging",
".",
"exception",
"(",
"e",
")",
"raise",
"SqlLabTimeoutException",
"(",
"\"SQL Lab timeout. This environment's policy is to kill queries \"",
"'after {} seconds.'",
".",
"format",
"(",
"SQLLAB_TIMEOUT",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"exception",
"(",
"e",
")",
"raise",
"SqlLabException",
"(",
"db_engine_spec",
".",
"extract_error_message",
"(",
"e",
")",
")",
"logging",
".",
"debug",
"(",
"'Fetching cursor description'",
")",
"cursor_description",
"=",
"cursor",
".",
"description",
"return",
"dataframe",
".",
"SupersetDataFrame",
"(",
"data",
",",
"cursor_description",
",",
"db_engine_spec",
")"
] |
Executes a single SQL statement
|
[
"Executes",
"a",
"single",
"SQL",
"statement"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/sql_lab.py#L144-L209
|
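The executor above consults an optional SQL_QUERY_MUTATOR hook from the config; a plausible config-side sketch of such a hook follows, where the comment format is purely an assumption.

def SQL_QUERY_MUTATOR(sql, username, security_manager, database):
    # prepend an attribution comment so the warehouse can trace who issued the query
    return '-- issued by {} via Superset\n{}'.format(username, sql)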
21,495
|
apache/incubator-superset
|
superset/utils/core.py
|
flasher
|
def flasher(msg, severity=None):
"""Flask's flash if available, logging call if not"""
try:
flash(msg, severity)
except RuntimeError:
if severity == 'danger':
logging.error(msg)
else:
logging.info(msg)
|
python
|
def flasher(msg, severity=None):
"""Flask's flash if available, logging call if not"""
try:
flash(msg, severity)
except RuntimeError:
if severity == 'danger':
logging.error(msg)
else:
logging.info(msg)
|
[
"def",
"flasher",
"(",
"msg",
",",
"severity",
"=",
"None",
")",
":",
"try",
":",
"flash",
"(",
"msg",
",",
"severity",
")",
"except",
"RuntimeError",
":",
"if",
"severity",
"==",
"'danger'",
":",
"logging",
".",
"error",
"(",
"msg",
")",
"else",
":",
"logging",
".",
"info",
"(",
"msg",
")"
] |
Flask's flash if available, logging call if not
|
[
"Flask",
"s",
"flash",
"if",
"available",
"logging",
"call",
"if",
"not"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/core.py#L81-L89
|
21,496
|
apache/incubator-superset
|
superset/utils/core.py
|
list_minus
|
def list_minus(l: List, minus: List) -> List:
"""Returns l without what is in minus
>>> list_minus([1, 2, 3], [2])
[1, 3]
"""
return [o for o in l if o not in minus]
|
python
|
def list_minus(l: List, minus: List) -> List:
"""Returns l without what is in minus
>>> list_minus([1, 2, 3], [2])
[1, 3]
"""
return [o for o in l if o not in minus]
|
[
"def",
"list_minus",
"(",
"l",
":",
"List",
",",
"minus",
":",
"List",
")",
"->",
"List",
":",
"return",
"[",
"o",
"for",
"o",
"in",
"l",
"if",
"o",
"not",
"in",
"minus",
"]"
] |
Returns l without what is in minus
>>> list_minus([1, 2, 3], [2])
[1, 3]
|
[
"Returns",
"l",
"without",
"what",
"is",
"in",
"minus"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/core.py#L188-L194
|
21,497
|
apache/incubator-superset
|
superset/utils/core.py
|
parse_human_datetime
|
def parse_human_datetime(s):
"""
Returns ``datetime.datetime`` from human readable strings
>>> from datetime import date, timedelta
>>> from dateutil.relativedelta import relativedelta
>>> parse_human_datetime('2015-04-03')
datetime.datetime(2015, 4, 3, 0, 0)
>>> parse_human_datetime('2/3/1969')
datetime.datetime(1969, 2, 3, 0, 0)
>>> parse_human_datetime('now') <= datetime.now()
True
>>> parse_human_datetime('yesterday') <= datetime.now()
True
>>> date.today() - timedelta(1) == parse_human_datetime('yesterday').date()
True
>>> year_ago_1 = parse_human_datetime('one year ago').date()
>>> year_ago_2 = (datetime.now() - relativedelta(years=1) ).date()
>>> year_ago_1 == year_ago_2
True
"""
if not s:
return None
try:
dttm = parse(s)
except Exception:
try:
cal = parsedatetime.Calendar()
parsed_dttm, parsed_flags = cal.parseDT(s)
# when time is not extracted, we 'reset to midnight'
if parsed_flags & 2 == 0:
parsed_dttm = parsed_dttm.replace(hour=0, minute=0, second=0)
dttm = dttm_from_timtuple(parsed_dttm.utctimetuple())
except Exception as e:
logging.exception(e)
raise ValueError("Couldn't parse date string [{}]".format(s))
return dttm
|
python
|
def parse_human_datetime(s):
"""
Returns ``datetime.datetime`` from human readable strings
>>> from datetime import date, timedelta
>>> from dateutil.relativedelta import relativedelta
>>> parse_human_datetime('2015-04-03')
datetime.datetime(2015, 4, 3, 0, 0)
>>> parse_human_datetime('2/3/1969')
datetime.datetime(1969, 2, 3, 0, 0)
>>> parse_human_datetime('now') <= datetime.now()
True
>>> parse_human_datetime('yesterday') <= datetime.now()
True
>>> date.today() - timedelta(1) == parse_human_datetime('yesterday').date()
True
>>> year_ago_1 = parse_human_datetime('one year ago').date()
>>> year_ago_2 = (datetime.now() - relativedelta(years=1) ).date()
>>> year_ago_1 == year_ago_2
True
"""
if not s:
return None
try:
dttm = parse(s)
except Exception:
try:
cal = parsedatetime.Calendar()
parsed_dttm, parsed_flags = cal.parseDT(s)
# when time is not extracted, we 'reset to midnight'
if parsed_flags & 2 == 0:
parsed_dttm = parsed_dttm.replace(hour=0, minute=0, second=0)
dttm = dttm_from_timtuple(parsed_dttm.utctimetuple())
except Exception as e:
logging.exception(e)
raise ValueError("Couldn't parse date string [{}]".format(s))
return dttm
|
[
"def",
"parse_human_datetime",
"(",
"s",
")",
":",
"if",
"not",
"s",
":",
"return",
"None",
"try",
":",
"dttm",
"=",
"parse",
"(",
"s",
")",
"except",
"Exception",
":",
"try",
":",
"cal",
"=",
"parsedatetime",
".",
"Calendar",
"(",
")",
"parsed_dttm",
",",
"parsed_flags",
"=",
"cal",
".",
"parseDT",
"(",
"s",
")",
"# when time is not extracted, we 'reset to midnight'",
"if",
"parsed_flags",
"&",
"2",
"==",
"0",
":",
"parsed_dttm",
"=",
"parsed_dttm",
".",
"replace",
"(",
"hour",
"=",
"0",
",",
"minute",
"=",
"0",
",",
"second",
"=",
"0",
")",
"dttm",
"=",
"dttm_from_timtuple",
"(",
"parsed_dttm",
".",
"utctimetuple",
"(",
")",
")",
"except",
"Exception",
"as",
"e",
":",
"logging",
".",
"exception",
"(",
"e",
")",
"raise",
"ValueError",
"(",
"\"Couldn't parse date string [{}]\"",
".",
"format",
"(",
"s",
")",
")",
"return",
"dttm"
] |
Returns ``datetime.datetime`` from human readable strings
>>> from datetime import date, timedelta
>>> from dateutil.relativedelta import relativedelta
>>> parse_human_datetime('2015-04-03')
datetime.datetime(2015, 4, 3, 0, 0)
>>> parse_human_datetime('2/3/1969')
datetime.datetime(1969, 2, 3, 0, 0)
>>> parse_human_datetime('now') <= datetime.now()
True
>>> parse_human_datetime('yesterday') <= datetime.now()
True
>>> date.today() - timedelta(1) == parse_human_datetime('yesterday').date()
True
>>> year_ago_1 = parse_human_datetime('one year ago').date()
>>> year_ago_2 = (datetime.now() - relativedelta(years=1) ).date()
>>> year_ago_1 == year_ago_2
True
|
[
"Returns",
"datetime",
".",
"datetime",
"from",
"human",
"readable",
"strings"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/core.py#L197-L233
|
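A minimal sketch of the two-stage strategy documented above (dateutil first, parsedatetime as the natural-language fallback) without the project-specific timetuple helper; dateutil and parsedatetime are the same third-party libraries the record relies on.

from dateutil.parser import parse
import parsedatetime

def parse_datetime_loosely(s):
    # strict parsing first, then fall back to natural language
    try:
        return parse(s)
    except (ValueError, OverflowError):
        cal = parsedatetime.Calendar()
        parsed, flags = cal.parseDT(s)
        # when no time-of-day was recognised, reset to midnight as the record does
        if flags & 2 == 0:
            parsed = parsed.replace(hour=0, minute=0, second=0, microsecond=0)
        return parsed

print(parse_datetime_loosely('2015-04-03'))
print(parse_datetime_loosely('3 days ago'))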
21,498
|
apache/incubator-superset
|
superset/utils/core.py
|
decode_dashboards
|
def decode_dashboards(o):
"""
Function to be passed into json.loads obj_hook parameter
Recreates the dashboard object from a json representation.
"""
import superset.models.core as models
from superset.connectors.sqla.models import (
SqlaTable, SqlMetric, TableColumn,
)
if '__Dashboard__' in o:
d = models.Dashboard()
d.__dict__.update(o['__Dashboard__'])
return d
elif '__Slice__' in o:
d = models.Slice()
d.__dict__.update(o['__Slice__'])
return d
elif '__TableColumn__' in o:
d = TableColumn()
d.__dict__.update(o['__TableColumn__'])
return d
elif '__SqlaTable__' in o:
d = SqlaTable()
d.__dict__.update(o['__SqlaTable__'])
return d
elif '__SqlMetric__' in o:
d = SqlMetric()
d.__dict__.update(o['__SqlMetric__'])
return d
elif '__datetime__' in o:
return datetime.strptime(o['__datetime__'], '%Y-%m-%dT%H:%M:%S')
else:
return o
|
python
|
def decode_dashboards(o):
"""
Function to be passed into json.loads obj_hook parameter
Recreates the dashboard object from a json representation.
"""
import superset.models.core as models
from superset.connectors.sqla.models import (
SqlaTable, SqlMetric, TableColumn,
)
if '__Dashboard__' in o:
d = models.Dashboard()
d.__dict__.update(o['__Dashboard__'])
return d
elif '__Slice__' in o:
d = models.Slice()
d.__dict__.update(o['__Slice__'])
return d
elif '__TableColumn__' in o:
d = TableColumn()
d.__dict__.update(o['__TableColumn__'])
return d
elif '__SqlaTable__' in o:
d = SqlaTable()
d.__dict__.update(o['__SqlaTable__'])
return d
elif '__SqlMetric__' in o:
d = SqlMetric()
d.__dict__.update(o['__SqlMetric__'])
return d
elif '__datetime__' in o:
return datetime.strptime(o['__datetime__'], '%Y-%m-%dT%H:%M:%S')
else:
return o
|
[
"def",
"decode_dashboards",
"(",
"o",
")",
":",
"import",
"superset",
".",
"models",
".",
"core",
"as",
"models",
"from",
"superset",
".",
"connectors",
".",
"sqla",
".",
"models",
"import",
"(",
"SqlaTable",
",",
"SqlMetric",
",",
"TableColumn",
",",
")",
"if",
"'__Dashboard__'",
"in",
"o",
":",
"d",
"=",
"models",
".",
"Dashboard",
"(",
")",
"d",
".",
"__dict__",
".",
"update",
"(",
"o",
"[",
"'__Dashboard__'",
"]",
")",
"return",
"d",
"elif",
"'__Slice__'",
"in",
"o",
":",
"d",
"=",
"models",
".",
"Slice",
"(",
")",
"d",
".",
"__dict__",
".",
"update",
"(",
"o",
"[",
"'__Slice__'",
"]",
")",
"return",
"d",
"elif",
"'__TableColumn__'",
"in",
"o",
":",
"d",
"=",
"TableColumn",
"(",
")",
"d",
".",
"__dict__",
".",
"update",
"(",
"o",
"[",
"'__TableColumn__'",
"]",
")",
"return",
"d",
"elif",
"'__SqlaTable__'",
"in",
"o",
":",
"d",
"=",
"SqlaTable",
"(",
")",
"d",
".",
"__dict__",
".",
"update",
"(",
"o",
"[",
"'__SqlaTable__'",
"]",
")",
"return",
"d",
"elif",
"'__SqlMetric__'",
"in",
"o",
":",
"d",
"=",
"SqlMetric",
"(",
")",
"d",
".",
"__dict__",
".",
"update",
"(",
"o",
"[",
"'__SqlMetric__'",
"]",
")",
"return",
"d",
"elif",
"'__datetime__'",
"in",
"o",
":",
"return",
"datetime",
".",
"strptime",
"(",
"o",
"[",
"'__datetime__'",
"]",
",",
"'%Y-%m-%dT%H:%M:%S'",
")",
"else",
":",
"return",
"o"
] |
Function to be passed into json.loads obj_hook parameter
Recreates the dashboard object from a json representation.
|
[
"Function",
"to",
"be",
"passed",
"into",
"json",
".",
"loads",
"obj_hook",
"parameter",
"Recreates",
"the",
"dashboard",
"object",
"from",
"a",
"json",
"representation",
"."
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/core.py#L241-L274
|
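A small usage sketch of the object_hook mechanism above: json.loads passes every decoded dict through the hook, so tagged payloads come back as richer objects. To stay runnable without a Superset install, this version handles only the __datetime__ tag.

import json
from datetime import datetime

def decode_datetimes(o):
    # same idea as decode_dashboards, restricted to the __datetime__ branch
    if '__datetime__' in o:
        return datetime.strptime(o['__datetime__'], '%Y-%m-%dT%H:%M:%S')
    return o

payload = '{"changed_on": {"__datetime__": "2019-01-01T12:30:00"}}'
print(json.loads(payload, object_hook=decode_datetimes))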
21,499
|
apache/incubator-superset
|
superset/utils/core.py
|
parse_human_timedelta
|
def parse_human_timedelta(s: str):
"""
Returns ``datetime.datetime`` from natural language time deltas
>>> parse_human_datetime('now') <= datetime.now()
True
"""
cal = parsedatetime.Calendar()
dttm = dttm_from_timtuple(datetime.now().timetuple())
d = cal.parse(s or '', dttm)[0]
d = datetime(d.tm_year, d.tm_mon, d.tm_mday, d.tm_hour, d.tm_min, d.tm_sec)
return d - dttm
|
python
|
def parse_human_timedelta(s: str):
"""
Returns ``datetime.datetime`` from natural language time deltas
>>> parse_human_datetime('now') <= datetime.now()
True
"""
cal = parsedatetime.Calendar()
dttm = dttm_from_timtuple(datetime.now().timetuple())
d = cal.parse(s or '', dttm)[0]
d = datetime(d.tm_year, d.tm_mon, d.tm_mday, d.tm_hour, d.tm_min, d.tm_sec)
return d - dttm
|
[
"def",
"parse_human_timedelta",
"(",
"s",
":",
"str",
")",
":",
"cal",
"=",
"parsedatetime",
".",
"Calendar",
"(",
")",
"dttm",
"=",
"dttm_from_timtuple",
"(",
"datetime",
".",
"now",
"(",
")",
".",
"timetuple",
"(",
")",
")",
"d",
"=",
"cal",
".",
"parse",
"(",
"s",
"or",
"''",
",",
"dttm",
")",
"[",
"0",
"]",
"d",
"=",
"datetime",
"(",
"d",
".",
"tm_year",
",",
"d",
".",
"tm_mon",
",",
"d",
".",
"tm_mday",
",",
"d",
".",
"tm_hour",
",",
"d",
".",
"tm_min",
",",
"d",
".",
"tm_sec",
")",
"return",
"d",
"-",
"dttm"
] |
Returns ``datetime.datetime`` from natural language time deltas
>>> parse_human_datetime('now') <= datetime.now()
True
|
[
"Returns",
"datetime",
".",
"datetime",
"from",
"natural",
"language",
"time",
"deltas"
] |
ca2996c78f679260eb79c6008e276733df5fb653
|
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/core.py#L290-L301
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.