partition: stringclasses, 3 values
func_name: stringlengths, 1 to 134
docstring: stringlengths, 1 to 46.9k
path: stringlengths, 4 to 223
original_string: stringlengths, 75 to 104k
code: stringlengths, 75 to 104k
docstring_tokens: listlengths, 1 to 1.97k
repo: stringlengths, 7 to 55
language: stringclasses, 1 value
url: stringlengths, 87 to 315
code_tokens: listlengths, 19 to 28.4k
sha: stringlengths, 40 to 40
train
check_ownership
Meant to be used in `pre_update` hooks on models to enforce ownership. Admins have all access, and other users need to be referenced on either the created_by field that comes with the ``AuditMixin``, or in a field named ``owners``, which is expected to be a one-to-many with the User model. It is meant to be used in the ModelView's pre_update hook, in which raising will abort the update.
superset/views/base.py
def check_ownership(obj, raise_if_false=True):
    """Meant to be used in `pre_update` hooks on models to enforce ownership

    Admins have all access, and other users need to be referenced on either
    the created_by field that comes with the ``AuditMixin``, or in a field
    named ``owners`` which is expected to be a one-to-many with the User
    model. It is meant to be used in the ModelView's pre_update hook in
    which raising will abort the update.
    """
    if not obj:
        return False

    security_exception = SupersetSecurityException(
        "You don't have the rights to alter [{}]".format(obj))

    if g.user.is_anonymous:
        if raise_if_false:
            raise security_exception
        return False
    roles = [r.name for r in get_user_roles()]
    if 'Admin' in roles:
        return True
    session = db.create_scoped_session()
    orig_obj = session.query(obj.__class__).filter_by(id=obj.id).first()

    # Making a list of owners that works across ORM models
    owners = []
    if hasattr(orig_obj, 'owners'):
        owners += orig_obj.owners
    if hasattr(orig_obj, 'owner'):
        owners += [orig_obj.owner]
    if hasattr(orig_obj, 'created_by'):
        owners += [orig_obj.created_by]

    owner_names = [o.username for o in owners if o]

    if (
            g.user and hasattr(g.user, 'username') and
            g.user.username in owner_names):
        return True

    if raise_if_false:
        raise security_exception
    else:
        return False
[ "Meant", "to", "be", "used", "in", "pre_update", "hooks", "on", "models", "to", "enforce", "ownership" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/base.py#L331-L374
[ "def", "check_ownership", "(", "obj", ",", "raise_if_false", "=", "True", ")", ":", "if", "not", "obj", ":", "return", "False", "security_exception", "=", "SupersetSecurityException", "(", "\"You don't have the rights to alter [{}]\"", ".", "format", "(", "obj", ")", ")", "if", "g", ".", "user", ".", "is_anonymous", ":", "if", "raise_if_false", ":", "raise", "security_exception", "return", "False", "roles", "=", "[", "r", ".", "name", "for", "r", "in", "get_user_roles", "(", ")", "]", "if", "'Admin'", "in", "roles", ":", "return", "True", "session", "=", "db", ".", "create_scoped_session", "(", ")", "orig_obj", "=", "session", ".", "query", "(", "obj", ".", "__class__", ")", ".", "filter_by", "(", "id", "=", "obj", ".", "id", ")", ".", "first", "(", ")", "# Making a list of owners that works across ORM models", "owners", "=", "[", "]", "if", "hasattr", "(", "orig_obj", ",", "'owners'", ")", ":", "owners", "+=", "orig_obj", ".", "owners", "if", "hasattr", "(", "orig_obj", ",", "'owner'", ")", ":", "owners", "+=", "[", "orig_obj", ".", "owner", "]", "if", "hasattr", "(", "orig_obj", ",", "'created_by'", ")", ":", "owners", "+=", "[", "orig_obj", ".", "created_by", "]", "owner_names", "=", "[", "o", ".", "username", "for", "o", "in", "owners", "if", "o", "]", "if", "(", "g", ".", "user", "and", "hasattr", "(", "g", ".", "user", ",", "'username'", ")", "and", "g", ".", "user", ".", "username", "in", "owner_names", ")", ":", "return", "True", "if", "raise_if_false", ":", "raise", "security_exception", "else", ":", "return", "False" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
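A minimal sketch of how this helper is wired into a view, per its own docstring; the sketch is not part of the record, SliceModelView is a hypothetical subclass, and check_ownership / SupersetModelView are assumed importable from superset.views.base, this record's module:

from superset.views.base import SupersetModelView, check_ownership

class SliceModelView(SupersetModelView):  # hypothetical view class
    def pre_update(self, item):
        # check_ownership raises SupersetSecurityException for anonymous
        # users and non-owners, which aborts the update; admins pass through
        check_ownership(item)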
bind_field
Customize how fields are bound by stripping all whitespace. :param form: The form :param unbound_field: The unbound field :param options: The field options :returns: The bound field
superset/views/base.py
def bind_field(
    self,
    form: DynamicForm,
    unbound_field: UnboundField,
    options: Dict[Any, Any],
) -> Field:
    """
    Customize how fields are bound by stripping all whitespace.

    :param form: The form
    :param unbound_field: The unbound field
    :param options: The field options
    :returns: The bound field
    """
    filters = unbound_field.kwargs.get('filters', [])
    filters.append(lambda x: x.strip() if isinstance(x, str) else x)
    return unbound_field.bind(form=form, filters=filters, **options)
[ "Customize", "how", "fields", "are", "bound", "by", "stripping", "all", "whitespace", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/base.py#L377-L394
[ "def", "bind_field", "(", "self", ",", "form", ":", "DynamicForm", ",", "unbound_field", ":", "UnboundField", ",", "options", ":", "Dict", "[", "Any", ",", "Any", "]", ",", ")", "->", "Field", ":", "filters", "=", "unbound_field", ".", "kwargs", ".", "get", "(", "'filters'", ",", "[", "]", ")", "filters", ".", "append", "(", "lambda", "x", ":", "x", ".", "strip", "(", ")", "if", "isinstance", "(", "x", ",", "str", ")", "else", "x", ")", "return", "unbound_field", ".", "bind", "(", "form", "=", "form", ",", "filters", "=", "filters", ",", "*", "*", "options", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
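The appended filter is easy to exercise on its own; a small standalone sketch of what WTForms will run over each incoming form value:

# The exact lambda bind_field appends: strings are stripped, everything
# else passes through unchanged
strip_filter = lambda x: x.strip() if isinstance(x, str) else x

assert strip_filter('  SELECT 1  ') == 'SELECT 1'
assert strip_filter(None) is None
assert strip_filter(42) == 42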
BaseSupersetView.common_bootsrap_payload
Common data always sent to the client
superset/views/base.py
def common_bootsrap_payload(self):
    """Common data always sent to the client"""
    messages = get_flashed_messages(with_categories=True)
    locale = str(get_locale())
    return {
        'flash_messages': messages,
        'conf': {k: conf.get(k) for k in FRONTEND_CONF_KEYS},
        'locale': locale,
        'language_pack': get_language_pack(locale),
        'feature_flags': get_feature_flags(),
    }
[ "Common", "data", "always", "sent", "to", "the", "client" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/base.py#L156-L166
[ "def", "common_bootsrap_payload", "(", "self", ")", ":", "messages", "=", "get_flashed_messages", "(", "with_categories", "=", "True", ")", "locale", "=", "str", "(", "get_locale", "(", ")", ")", "return", "{", "'flash_messages'", ":", "messages", ",", "'conf'", ":", "{", "k", ":", "conf", ".", "get", "(", "k", ")", "for", "k", "in", "FRONTEND_CONF_KEYS", "}", ",", "'locale'", ":", "locale", ",", "'language_pack'", ":", "get_language_pack", "(", "locale", ")", ",", "'feature_flags'", ":", "get_feature_flags", "(", ")", ",", "}" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
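The 'conf' entry whitelists config keys rather than shipping the whole config to the browser; a toy illustration of that dict comprehension, with both key names invented for the example:

conf = {'SECRET_KEY': 'do-not-leak', 'APP_NAME': 'Superset'}
FRONTEND_CONF_KEYS = ('APP_NAME',)

payload_conf = {k: conf.get(k) for k in FRONTEND_CONF_KEYS}
assert payload_conf == {'APP_NAME': 'Superset'}  # SECRET_KEY never reaches the client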
DeleteMixin._delete
Delete function logic, override to implement different logic. Deletes the record with primary_key = pk. :param pk: record primary key to delete
superset/views/base.py
def _delete(self, pk):
    """
    Delete function logic, override to implement different logic
    deletes the record with primary_key = pk

    :param pk:
        record primary key to delete
    """
    item = self.datamodel.get(pk, self._base_filters)
    if not item:
        abort(404)
    try:
        self.pre_delete(item)
    except Exception as e:
        flash(str(e), 'danger')
    else:
        view_menu = security_manager.find_view_menu(item.get_perm())
        pvs = security_manager.get_session.query(
            security_manager.permissionview_model).filter_by(
            view_menu=view_menu).all()

        schema_view_menu = None
        if hasattr(item, 'schema_perm'):
            schema_view_menu = security_manager.find_view_menu(item.schema_perm)

            pvs.extend(security_manager.get_session.query(
                security_manager.permissionview_model).filter_by(
                view_menu=schema_view_menu).all())

        if self.datamodel.delete(item):
            self.post_delete(item)

            for pv in pvs:
                security_manager.get_session.delete(pv)

            if view_menu:
                security_manager.get_session.delete(view_menu)

            if schema_view_menu:
                security_manager.get_session.delete(schema_view_menu)

            security_manager.get_session.commit()

        flash(*self.datamodel.message)
        self.update_redirect()
[ "Delete", "function", "logic", "override", "to", "implement", "diferent", "logic", "deletes", "the", "record", "with", "primary_key", "=", "pk" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/base.py#L207-L251
[ "def", "_delete", "(", "self", ",", "pk", ")", ":", "item", "=", "self", ".", "datamodel", ".", "get", "(", "pk", ",", "self", ".", "_base_filters", ")", "if", "not", "item", ":", "abort", "(", "404", ")", "try", ":", "self", ".", "pre_delete", "(", "item", ")", "except", "Exception", "as", "e", ":", "flash", "(", "str", "(", "e", ")", ",", "'danger'", ")", "else", ":", "view_menu", "=", "security_manager", ".", "find_view_menu", "(", "item", ".", "get_perm", "(", ")", ")", "pvs", "=", "security_manager", ".", "get_session", ".", "query", "(", "security_manager", ".", "permissionview_model", ")", ".", "filter_by", "(", "view_menu", "=", "view_menu", ")", ".", "all", "(", ")", "schema_view_menu", "=", "None", "if", "hasattr", "(", "item", ",", "'schema_perm'", ")", ":", "schema_view_menu", "=", "security_manager", ".", "find_view_menu", "(", "item", ".", "schema_perm", ")", "pvs", ".", "extend", "(", "security_manager", ".", "get_session", ".", "query", "(", "security_manager", ".", "permissionview_model", ")", ".", "filter_by", "(", "view_menu", "=", "schema_view_menu", ")", ".", "all", "(", ")", ")", "if", "self", ".", "datamodel", ".", "delete", "(", "item", ")", ":", "self", ".", "post_delete", "(", "item", ")", "for", "pv", "in", "pvs", ":", "security_manager", ".", "get_session", ".", "delete", "(", "pv", ")", "if", "view_menu", ":", "security_manager", ".", "get_session", ".", "delete", "(", "view_menu", ")", "if", "schema_view_menu", ":", "security_manager", ".", "get_session", ".", "delete", "(", "schema_view_menu", ")", "security_manager", ".", "get_session", ".", "commit", "(", ")", "flash", "(", "*", "self", ".", "datamodel", ".", "message", ")", "self", ".", "update_redirect", "(", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
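Because pre_delete runs inside the try block, any hook that raises cancels the whole cascade and surfaces the error as a flash message. A hedged sketch tying this to check_ownership from the first record; DashboardModelView is illustrative, not taken from the record:

from superset.views.base import DeleteMixin, SupersetModelView, check_ownership

class DashboardModelView(DeleteMixin, SupersetModelView):  # hypothetical
    def pre_delete(self, item):
        # raising here skips the else-branch permission cleanup entirely
        check_ownership(item)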
SupersetFilter.get_all_permissions
Returns a set of tuples with the perm name and view menu name
superset/views/base.py
def get_all_permissions(self):
    """Returns a set of tuples with the perm name and view menu name"""
    perms = set()
    for role in self.get_user_roles():
        for perm_view in role.permissions:
            t = (perm_view.permission.name, perm_view.view_menu.name)
            perms.add(t)
    return perms
[ "Returns", "a", "set", "of", "tuples", "with", "the", "perm", "name", "and", "view", "menu", "name" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/base.py#L286-L293
[ "def", "get_all_permissions", "(", "self", ")", ":", "perms", "=", "set", "(", ")", "for", "role", "in", "self", ".", "get_user_roles", "(", ")", ":", "for", "perm_view", "in", "role", ".", "permissions", ":", "t", "=", "(", "perm_view", ".", "permission", ".", "name", ",", "perm_view", ".", "view_menu", ".", "name", ")", "perms", ".", "add", "(", "t", ")", "return", "perms" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
SupersetFilter.get_view_menus
Returns the details of view_menus for a perm name
superset/views/base.py
def get_view_menus(self, permission_name):
    """Returns the details of view_menus for a perm name"""
    vm = set()
    for perm_name, vm_name in self.get_all_permissions():
        if perm_name == permission_name:
            vm.add(vm_name)
    return vm
[ "Returns", "the", "details", "of", "view_menus", "for", "a", "perm", "name" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/base.py#L306-L312
[ "def", "get_view_menus", "(", "self", ",", "permission_name", ")", ":", "vm", "=", "set", "(", ")", "for", "perm_name", ",", "vm_name", "in", "self", ".", "get_all_permissions", "(", ")", ":", "if", "perm_name", "==", "permission_name", ":", "vm", ".", "add", "(", "vm_name", ")", "return", "vm" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
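A standalone sketch of the filtering get_view_menus performs over the (permission, view menu) tuples that get_all_permissions returns; the tuple values are illustrative:

perms = {
    ('database_access', '[main]'),
    ('datasource_access', '[main].[births]'),
    ('datasource_access', '[main].[wb_health]'),
}

# same selection logic as get_view_menus('datasource_access')
vm = {vm_name for perm_name, vm_name in perms if perm_name == 'datasource_access'}
assert vm == {'[main].[births]', '[main].[wb_health]'}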
destroy_webdriver
Destroy a driver
superset/tasks/schedules.py
def destroy_webdriver(driver):
    """
    Destroy a driver
    """
    # This is some very flaky code in selenium. Hence the retries
    # and catch-all exceptions
    try:
        retry_call(driver.close, tries=2)
    except Exception:
        pass
    try:
        driver.quit()
    except Exception:
        pass
[ "Destroy", "a", "driver" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/tasks/schedules.py#L190-L204
[ "def", "destroy_webdriver", "(", "driver", ")", ":", "# This is some very flaky code in selenium. Hence the retries", "# and catch-all exceptions", "try", ":", "retry_call", "(", "driver", ".", "close", ",", "tries", "=", "2", ")", "except", "Exception", ":", "pass", "try", ":", "driver", ".", "quit", "(", ")", "except", "Exception", ":", "pass" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
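A sketch of the retry-then-swallow pattern above with a dummy driver; retry_call is assumed to come from the `retry` package, matching the call shape used in the record:

from retry.api import retry_call

class FlakyDriver:  # stand-in for a selenium webdriver
    def __init__(self):
        self.calls = 0

    def close(self):
        self.calls += 1
        if self.calls < 2:
            raise RuntimeError('flaky close')  # first attempt fails

driver = FlakyDriver()
retry_call(driver.close, tries=2)  # second attempt succeeds, nothing propagates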
deliver_dashboard
Given a schedule, deliver the dashboard as an email report
superset/tasks/schedules.py
def deliver_dashboard(schedule):
    """
    Given a schedule, deliver the dashboard as an email report
    """
    dashboard = schedule.dashboard

    dashboard_url = _get_url_path(
        'Superset.dashboard',
        dashboard_id=dashboard.id,
    )

    # Create a driver, fetch the page, wait for the page to render
    driver = create_webdriver()
    window = config.get('WEBDRIVER_WINDOW')['dashboard']
    driver.set_window_size(*window)
    driver.get(dashboard_url)
    time.sleep(PAGE_RENDER_WAIT)

    # Set up a function to retry once for the element.
    # This is buggy in certain selenium versions with firefox driver
    get_element = getattr(driver, 'find_element_by_class_name')
    element = retry_call(
        get_element,
        fargs=['grid-container'],
        tries=2,
        delay=PAGE_RENDER_WAIT,
    )

    try:
        screenshot = element.screenshot_as_png
    except WebDriverException:
        # Some webdrivers do not support screenshots for elements.
        # In such cases, take a screenshot of the entire page.
        screenshot = driver.screenshot()  # pylint: disable=no-member
    finally:
        destroy_webdriver(driver)

    # Generate the email body and attachments
    email = _generate_mail_content(
        schedule,
        screenshot,
        dashboard.dashboard_title,
        dashboard_url,
    )

    subject = __(
        '%(prefix)s %(title)s',
        prefix=config.get('EMAIL_REPORTS_SUBJECT_PREFIX'),
        title=dashboard.dashboard_title,
    )

    _deliver_email(schedule, subject, email)
[ "Given", "a", "schedule", "delivery", "the", "dashboard", "as", "an", "email", "report" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/tasks/schedules.py#L207-L258
[ "def", "deliver_dashboard", "(", "schedule", ")", ":", "dashboard", "=", "schedule", ".", "dashboard", "dashboard_url", "=", "_get_url_path", "(", "'Superset.dashboard'", ",", "dashboard_id", "=", "dashboard", ".", "id", ",", ")", "# Create a driver, fetch the page, wait for the page to render", "driver", "=", "create_webdriver", "(", ")", "window", "=", "config", ".", "get", "(", "'WEBDRIVER_WINDOW'", ")", "[", "'dashboard'", "]", "driver", ".", "set_window_size", "(", "*", "window", ")", "driver", ".", "get", "(", "dashboard_url", ")", "time", ".", "sleep", "(", "PAGE_RENDER_WAIT", ")", "# Set up a function to retry once for the element.", "# This is buggy in certain selenium versions with firefox driver", "get_element", "=", "getattr", "(", "driver", ",", "'find_element_by_class_name'", ")", "element", "=", "retry_call", "(", "get_element", ",", "fargs", "=", "[", "'grid-container'", "]", ",", "tries", "=", "2", ",", "delay", "=", "PAGE_RENDER_WAIT", ",", ")", "try", ":", "screenshot", "=", "element", ".", "screenshot_as_png", "except", "WebDriverException", ":", "# Some webdrivers do not support screenshots for elements.", "# In such cases, take a screenshot of the entire page.", "screenshot", "=", "driver", ".", "screenshot", "(", ")", "# pylint: disable=no-member", "finally", ":", "destroy_webdriver", "(", "driver", ")", "# Generate the email body and attachments", "email", "=", "_generate_mail_content", "(", "schedule", ",", "screenshot", ",", "dashboard", ".", "dashboard_title", ",", "dashboard_url", ",", ")", "subject", "=", "__", "(", "'%(prefix)s %(title)s'", ",", "prefix", "=", "config", ".", "get", "(", "'EMAIL_REPORTS_SUBJECT_PREFIX'", ")", ",", "title", "=", "dashboard", ".", "dashboard_title", ",", ")", "_deliver_email", "(", "schedule", ",", "subject", ",", "email", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
deliver_slice
Given a schedule, deliver the slice as an email report
superset/tasks/schedules.py
def deliver_slice(schedule):
    """
    Given a schedule, deliver the slice as an email report
    """
    if schedule.email_format == SliceEmailReportFormat.data:
        email = _get_slice_data(schedule)
    elif schedule.email_format == SliceEmailReportFormat.visualization:
        email = _get_slice_visualization(schedule)
    else:
        raise RuntimeError('Unknown email report format')

    subject = __(
        '%(prefix)s %(title)s',
        prefix=config.get('EMAIL_REPORTS_SUBJECT_PREFIX'),
        title=schedule.slice.slice_name,
    )

    _deliver_email(schedule, subject, email)
[ "Given", "a", "schedule", "delivery", "the", "slice", "as", "an", "email", "report" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/tasks/schedules.py#L356-L373
[ "def", "deliver_slice", "(", "schedule", ")", ":", "if", "schedule", ".", "email_format", "==", "SliceEmailReportFormat", ".", "data", ":", "email", "=", "_get_slice_data", "(", "schedule", ")", "elif", "schedule", ".", "email_format", "==", "SliceEmailReportFormat", ".", "visualization", ":", "email", "=", "_get_slice_visualization", "(", "schedule", ")", "else", ":", "raise", "RuntimeError", "(", "'Unknown email report format'", ")", "subject", "=", "__", "(", "'%(prefix)s %(title)s'", ",", "prefix", "=", "config", ".", "get", "(", "'EMAIL_REPORTS_SUBJECT_PREFIX'", ")", ",", "title", "=", "schedule", ".", "slice", ".", "slice_name", ",", ")", "_deliver_email", "(", "schedule", ",", "subject", ",", "email", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
schedule_window
Find all active schedules and schedule celery tasks for each of them with a specific ETA (determined by parsing the cron schedule for the schedule)
superset/tasks/schedules.py
def schedule_window(report_type, start_at, stop_at, resolution):
    """
    Find all active schedules and schedule celery tasks for
    each of them with a specific ETA (determined by parsing
    the cron schedule for the schedule)
    """
    model_cls = get_scheduler_model(report_type)
    dbsession = db.create_scoped_session()
    schedules = dbsession.query(model_cls).filter(model_cls.active.is_(True))

    for schedule in schedules:
        args = (
            report_type,
            schedule.id,
        )

        # Schedule the job for the specified time window
        for eta in next_schedules(schedule.crontab,
                                  start_at,
                                  stop_at,
                                  resolution=resolution):
            schedule_email_report.apply_async(args, eta=eta)
[ "Find", "all", "active", "schedules", "and", "schedule", "celery", "tasks", "for", "each", "of", "them", "with", "a", "specific", "ETA", "(", "determined", "by", "parsing", "the", "cron", "schedule", "for", "the", "schedule", ")" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/tasks/schedules.py#L419-L440
[ "def", "schedule_window", "(", "report_type", ",", "start_at", ",", "stop_at", ",", "resolution", ")", ":", "model_cls", "=", "get_scheduler_model", "(", "report_type", ")", "dbsession", "=", "db", ".", "create_scoped_session", "(", ")", "schedules", "=", "dbsession", ".", "query", "(", "model_cls", ")", ".", "filter", "(", "model_cls", ".", "active", ".", "is_", "(", "True", ")", ")", "for", "schedule", "in", "schedules", ":", "args", "=", "(", "report_type", ",", "schedule", ".", "id", ",", ")", "# Schedule the job for the specified time window", "for", "eta", "in", "next_schedules", "(", "schedule", ".", "crontab", ",", "start_at", ",", "stop_at", ",", "resolution", "=", "resolution", ")", ":", "schedule_email_report", ".", "apply_async", "(", "args", ",", "eta", "=", "eta", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
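next_schedules is a Superset helper not shown in this record; a rough, assumed equivalent using croniter directly shows the ETAs produced for one schedule inside a one-hour window (the resolution handling is elided):

from datetime import datetime, timedelta
from croniter import croniter

start_at = datetime(2019, 1, 1, 0, 0)
stop_at = start_at + timedelta(hours=1)

itr = croniter('*/15 * * * *', start_at - timedelta(seconds=1))
etas = []
eta = itr.get_next(datetime)
while eta < stop_at:
    etas.append(eta)  # each ETA becomes one apply_async call above
    eta = itr.get_next(datetime)

assert [e.minute for e in etas] == [0, 15, 30, 45]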
schedule_hourly
Celery beat job meant to be invoked hourly
superset/tasks/schedules.py
def schedule_hourly():
    """ Celery beat job meant to be invoked hourly """

    if not config.get('ENABLE_SCHEDULED_EMAIL_REPORTS'):
        logging.info('Scheduled email reports not enabled in config')
        return

    resolution = config.get('EMAIL_REPORTS_CRON_RESOLUTION', 0) * 60

    # Get the top of the hour
    start_at = datetime.now(tzlocal()).replace(microsecond=0, second=0, minute=0)
    stop_at = start_at + timedelta(seconds=3600)
    schedule_window(ScheduleType.dashboard.value, start_at, stop_at, resolution)
    schedule_window(ScheduleType.slice.value, start_at, stop_at, resolution)
[ "Celery", "beat", "job", "meant", "to", "be", "invoked", "hourly" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/tasks/schedules.py#L444-L457
[ "def", "schedule_hourly", "(", ")", ":", "if", "not", "config", ".", "get", "(", "'ENABLE_SCHEDULED_EMAIL_REPORTS'", ")", ":", "logging", ".", "info", "(", "'Scheduled email reports not enabled in config'", ")", "return", "resolution", "=", "config", ".", "get", "(", "'EMAIL_REPORTS_CRON_RESOLUTION'", ",", "0", ")", "*", "60", "# Get the top of the hour", "start_at", "=", "datetime", ".", "now", "(", "tzlocal", "(", ")", ")", ".", "replace", "(", "microsecond", "=", "0", ",", "second", "=", "0", ",", "minute", "=", "0", ")", "stop_at", "=", "start_at", "+", "timedelta", "(", "seconds", "=", "3600", ")", "schedule_window", "(", "ScheduleType", ".", "dashboard", ".", "value", ",", "start_at", ",", "stop_at", ",", "resolution", ")", "schedule_window", "(", "ScheduleType", ".", "slice", ".", "value", ",", "start_at", ",", "stop_at", ",", "resolution", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
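The window arithmetic in isolation: truncate now to the top of the hour, then scan exactly one hour forward, presumably so that hourly invocations tile the timeline without overlap:

from datetime import datetime, timedelta

now = datetime(2019, 6, 1, 14, 37, 22, 123456)
start_at = now.replace(microsecond=0, second=0, minute=0)
stop_at = start_at + timedelta(seconds=3600)

assert start_at == datetime(2019, 6, 1, 14, 0, 0)
assert stop_at == datetime(2019, 6, 1, 15, 0, 0)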
dedup
De-duplicates a list of strings by suffixing a counter. Always returns the same number of entries as provided, and always returns unique values. Case-sensitive comparison by default.

>>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar'])))
foo,bar,bar__1,bar__2,Bar
>>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar'], case_sensitive=False)))
foo,bar,bar__1,bar__2,Bar__3
superset/dataframe.py
def dedup(l, suffix='__', case_sensitive=True):
    """De-duplicates a list of strings by suffixing a counter

    Always returns the same number of entries as provided, and always returns
    unique values. Case sensitive comparison by default.

    >>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar'])))
    foo,bar,bar__1,bar__2,Bar
    >>> print(','.join(dedup(['foo', 'bar', 'bar', 'bar', 'Bar'], case_sensitive=False)))
    foo,bar,bar__1,bar__2,Bar__3
    """
    new_l = []
    seen = {}
    for s in l:
        s_fixed_case = s if case_sensitive else s.lower()
        if s_fixed_case in seen:
            seen[s_fixed_case] += 1
            s += suffix + str(seen[s_fixed_case])
        else:
            seen[s_fixed_case] = 0
        new_l.append(s)
    return new_l
[ "De", "-", "duplicates", "a", "list", "of", "string", "by", "suffixing", "a", "counter" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/dataframe.py#L39-L60
[ "def", "dedup", "(", "l", ",", "suffix", "=", "'__'", ",", "case_sensitive", "=", "True", ")", ":", "new_l", "=", "[", "]", "seen", "=", "{", "}", "for", "s", "in", "l", ":", "s_fixed_case", "=", "s", "if", "case_sensitive", "else", "s", ".", "lower", "(", ")", "if", "s_fixed_case", "in", "seen", ":", "seen", "[", "s_fixed_case", "]", "+=", "1", "s", "+=", "suffix", "+", "str", "(", "seen", "[", "s_fixed_case", "]", ")", "else", ":", "seen", "[", "s_fixed_case", "]", "=", "0", "new_l", ".", "append", "(", "s", ")", "return", "new_l" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
SupersetDataFrame.db_type
Given a numpy dtype, returns a generic database type
superset/dataframe.py
def db_type(cls, dtype):
    """Given a numpy dtype, returns a generic database type"""
    if isinstance(dtype, ExtensionDtype):
        return cls.type_map.get(dtype.kind)
    elif hasattr(dtype, 'char'):
        return cls.type_map.get(dtype.char)
[ "Given", "a", "numpy", "dtype", "Returns", "a", "generic", "database", "type" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/dataframe.py#L122-L127
[ "def", "db_type", "(", "cls", ",", "dtype", ")", ":", "if", "isinstance", "(", "dtype", ",", "ExtensionDtype", ")", ":", "return", "cls", ".", "type_map", ".", "get", "(", "dtype", ".", "kind", ")", "elif", "hasattr", "(", "dtype", ",", "'char'", ")", ":", "return", "cls", ".", "type_map", ".", "get", "(", "dtype", ".", "char", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
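The dtype attributes db_type dispatches on, shown directly; what type_map maps them to is class-specific and not part of this record:

import numpy as np
import pandas as pd

print(np.dtype('float64').char)    # 'd'
print(np.dtype('int64').char)      # 'l' on most 64-bit platforms
print(pd.CategoricalDtype().kind)  # 'O', an ExtensionDtype, so .kind is used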
SupersetDataFrame.columns
Provides metadata about columns for data visualization. :return: list of dicts, each with the fields name, type, is_date, is_dim and agg.
superset/dataframe.py
def columns(self):
    """Provides metadata about columns for data visualization.

    :return: list of dicts, each with the fields name, type, is_date,
        is_dim and agg.
    """
    if self.df.empty:
        return None

    columns = []
    sample_size = min(INFER_COL_TYPES_SAMPLE_SIZE, len(self.df.index))
    sample = self.df
    if sample_size:
        sample = self.df.sample(sample_size)
    for col in self.df.dtypes.keys():
        db_type_str = (
            self._type_dict.get(col) or
            self.db_type(self.df.dtypes[col])
        )
        column = {
            'name': col,
            'agg': self.agg_func(self.df.dtypes[col], col),
            'type': db_type_str,
            'is_date': self.is_date(self.df.dtypes[col], db_type_str),
            'is_dim': self.is_dimension(self.df.dtypes[col], col),
        }
        if not db_type_str or db_type_str.upper() == 'OBJECT':
            v = sample[col].iloc[0] if not sample[col].empty else None
            if isinstance(v, str):
                column['type'] = 'STRING'
            elif isinstance(v, int):
                column['type'] = 'INT'
            elif isinstance(v, float):
                column['type'] = 'FLOAT'
            elif isinstance(v, (datetime, date)):
                column['type'] = 'DATETIME'
                column['is_date'] = True
                column['is_dim'] = False
            # check if encoded datetime
            if (
                    column['type'] == 'STRING' and
                    self.datetime_conversion_rate(sample[col]) >
                    INFER_COL_TYPES_THRESHOLD):
                column.update({
                    'is_date': True,
                    'is_dim': False,
                    'agg': None,
                })
        # 'agg' is optional attribute
        if not column['agg']:
            column.pop('agg', None)
        columns.append(column)
    return columns
[ "Provides", "metadata", "about", "columns", "for", "data", "visualization", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/dataframe.py#L177-L229
[ "def", "columns", "(", "self", ")", ":", "if", "self", ".", "df", ".", "empty", ":", "return", "None", "columns", "=", "[", "]", "sample_size", "=", "min", "(", "INFER_COL_TYPES_SAMPLE_SIZE", ",", "len", "(", "self", ".", "df", ".", "index", ")", ")", "sample", "=", "self", ".", "df", "if", "sample_size", ":", "sample", "=", "self", ".", "df", ".", "sample", "(", "sample_size", ")", "for", "col", "in", "self", ".", "df", ".", "dtypes", ".", "keys", "(", ")", ":", "db_type_str", "=", "(", "self", ".", "_type_dict", ".", "get", "(", "col", ")", "or", "self", ".", "db_type", "(", "self", ".", "df", ".", "dtypes", "[", "col", "]", ")", ")", "column", "=", "{", "'name'", ":", "col", ",", "'agg'", ":", "self", ".", "agg_func", "(", "self", ".", "df", ".", "dtypes", "[", "col", "]", ",", "col", ")", ",", "'type'", ":", "db_type_str", ",", "'is_date'", ":", "self", ".", "is_date", "(", "self", ".", "df", ".", "dtypes", "[", "col", "]", ",", "db_type_str", ")", ",", "'is_dim'", ":", "self", ".", "is_dimension", "(", "self", ".", "df", ".", "dtypes", "[", "col", "]", ",", "col", ")", ",", "}", "if", "not", "db_type_str", "or", "db_type_str", ".", "upper", "(", ")", "==", "'OBJECT'", ":", "v", "=", "sample", "[", "col", "]", ".", "iloc", "[", "0", "]", "if", "not", "sample", "[", "col", "]", ".", "empty", "else", "None", "if", "isinstance", "(", "v", ",", "str", ")", ":", "column", "[", "'type'", "]", "=", "'STRING'", "elif", "isinstance", "(", "v", ",", "int", ")", ":", "column", "[", "'type'", "]", "=", "'INT'", "elif", "isinstance", "(", "v", ",", "float", ")", ":", "column", "[", "'type'", "]", "=", "'FLOAT'", "elif", "isinstance", "(", "v", ",", "(", "datetime", ",", "date", ")", ")", ":", "column", "[", "'type'", "]", "=", "'DATETIME'", "column", "[", "'is_date'", "]", "=", "True", "column", "[", "'is_dim'", "]", "=", "False", "# check if encoded datetime", "if", "(", "column", "[", "'type'", "]", "==", "'STRING'", "and", "self", ".", "datetime_conversion_rate", "(", "sample", "[", "col", "]", ")", ">", "INFER_COL_TYPES_THRESHOLD", ")", ":", "column", ".", "update", "(", "{", "'is_date'", ":", "True", ",", "'is_dim'", ":", "False", ",", "'agg'", ":", "None", ",", "}", ")", "# 'agg' is optional attribute", "if", "not", "column", "[", "'agg'", "]", ":", "column", ".", "pop", "(", "'agg'", ",", "None", ")", "columns", ".", "append", "(", "column", ")", "return", "columns" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
TableColumn.get_timestamp_expression
Getting the time component of the query
superset/connectors/sqla/models.py
def get_timestamp_expression(self, time_grain):
    """Getting the time component of the query"""
    label = utils.DTTM_ALIAS

    db = self.table.database
    pdf = self.python_date_format
    is_epoch = pdf in ('epoch_s', 'epoch_ms')
    if not self.expression and not time_grain and not is_epoch:
        sqla_col = column(self.column_name, type_=DateTime)
        return self.table.make_sqla_column_compatible(sqla_col, label)
    grain = None
    if time_grain:
        grain = db.grains_dict().get(time_grain)
        if not grain:
            raise NotImplementedError(
                f'No grain spec for {time_grain} for database {db.database_name}')
    col = db.db_engine_spec.get_timestamp_column(self.expression, self.column_name)
    expr = db.db_engine_spec.get_time_expr(col, pdf, time_grain, grain)
    sqla_col = literal_column(expr, type_=DateTime)
    return self.table.make_sqla_column_compatible(sqla_col, label)
[ "Getting", "the", "time", "component", "of", "the", "query" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/sqla/models.py#L143-L162
[ "def", "get_timestamp_expression", "(", "self", ",", "time_grain", ")", ":", "label", "=", "utils", ".", "DTTM_ALIAS", "db", "=", "self", ".", "table", ".", "database", "pdf", "=", "self", ".", "python_date_format", "is_epoch", "=", "pdf", "in", "(", "'epoch_s'", ",", "'epoch_ms'", ")", "if", "not", "self", ".", "expression", "and", "not", "time_grain", "and", "not", "is_epoch", ":", "sqla_col", "=", "column", "(", "self", ".", "column_name", ",", "type_", "=", "DateTime", ")", "return", "self", ".", "table", ".", "make_sqla_column_compatible", "(", "sqla_col", ",", "label", ")", "grain", "=", "None", "if", "time_grain", ":", "grain", "=", "db", ".", "grains_dict", "(", ")", ".", "get", "(", "time_grain", ")", "if", "not", "grain", ":", "raise", "NotImplementedError", "(", "f'No grain spec for {time_grain} for database {db.database_name}'", ")", "col", "=", "db", ".", "db_engine_spec", ".", "get_timestamp_column", "(", "self", ".", "expression", ",", "self", ".", "column_name", ")", "expr", "=", "db", ".", "db_engine_spec", ".", "get_time_expr", "(", "col", ",", "pdf", ",", "time_grain", ",", "grain", ")", "sqla_col", "=", "literal_column", "(", "expr", ",", "type_", "=", "DateTime", ")", "return", "self", ".", "table", ".", "make_sqla_column_compatible", "(", "sqla_col", ",", "label", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
TableColumn.dttm_sql_literal
Convert a datetime object to a SQL expression string. If database_expression is empty, the internal dttm will be formatted with the pattern the user provided (python_date_format). If database_expression is not empty, the internal dttm will be formatted with that SQL expression for the database to convert.
superset/connectors/sqla/models.py
def dttm_sql_literal(self, dttm, is_epoch_in_utc):
    """Convert a datetime object to a SQL expression string

    If database_expression is empty, the internal dttm
    will be formatted with the pattern the user provided
    (python_date_format).
    If database_expression is not empty, the internal dttm
    will be formatted with that SQL expression for the
    database to convert.
    """
    tf = self.python_date_format
    if self.database_expression:
        return self.database_expression.format(dttm.strftime('%Y-%m-%d %H:%M:%S'))
    elif tf:
        if is_epoch_in_utc:
            seconds_since_epoch = dttm.timestamp()
        else:
            seconds_since_epoch = (dttm - datetime(1970, 1, 1)).total_seconds()
        seconds_since_epoch = int(seconds_since_epoch)
        if tf == 'epoch_s':
            return str(seconds_since_epoch)
        elif tf == 'epoch_ms':
            return str(seconds_since_epoch * 1000)
        return "'{}'".format(dttm.strftime(tf))
    else:
        s = self.table.database.db_engine_spec.convert_dttm(
            self.type or '', dttm)
        return s or "'{}'".format(dttm.strftime('%Y-%m-%d %H:%M:%S.%f'))
[ "Convert", "datetime", "object", "to", "a", "SQL", "expression", "string" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/sqla/models.py#L172-L198
[ "def", "dttm_sql_literal", "(", "self", ",", "dttm", ",", "is_epoch_in_utc", ")", ":", "tf", "=", "self", ".", "python_date_format", "if", "self", ".", "database_expression", ":", "return", "self", ".", "database_expression", ".", "format", "(", "dttm", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S'", ")", ")", "elif", "tf", ":", "if", "is_epoch_in_utc", ":", "seconds_since_epoch", "=", "dttm", ".", "timestamp", "(", ")", "else", ":", "seconds_since_epoch", "=", "(", "dttm", "-", "datetime", "(", "1970", ",", "1", ",", "1", ")", ")", ".", "total_seconds", "(", ")", "seconds_since_epoch", "=", "int", "(", "seconds_since_epoch", ")", "if", "tf", "==", "'epoch_s'", ":", "return", "str", "(", "seconds_since_epoch", ")", "elif", "tf", "==", "'epoch_ms'", ":", "return", "str", "(", "seconds_since_epoch", "*", "1000", ")", "return", "\"'{}'\"", ".", "format", "(", "dttm", ".", "strftime", "(", "tf", ")", ")", "else", ":", "s", "=", "self", ".", "table", ".", "database", ".", "db_engine_spec", ".", "convert_dttm", "(", "self", ".", "type", "or", "''", ",", "dttm", ")", "return", "s", "or", "\"'{}'\"", ".", "format", "(", "dttm", ".", "strftime", "(", "'%Y-%m-%d %H:%M:%S.%f'", ")", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
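A worked example of the epoch branches for a naive datetime with is_epoch_in_utc False (the explicit subtraction from 1970-01-01):

from datetime import datetime

dttm = datetime(2019, 1, 1)
seconds_since_epoch = int((dttm - datetime(1970, 1, 1)).total_seconds())

assert seconds_since_epoch == 1546300800             # the 'epoch_s' literal
assert seconds_since_epoch * 1000 == 1546300800000   # the 'epoch_ms' literal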
SqlaTable.make_sqla_column_compatible
Takes a sql alchemy column object and adds label info if supported by engine. :param sqla_col: sql alchemy column instance :param label: alias/label that column is expected to have :return: either a sql alchemy column or label instance if supported by engine
superset/connectors/sqla/models.py
def make_sqla_column_compatible(self, sqla_col, label=None):
    """Takes a sql alchemy column object and adds label info if supported by engine.

    :param sqla_col: sql alchemy column instance
    :param label: alias/label that column is expected to have
    :return: either a sql alchemy column or label instance if supported by engine
    """
    label_expected = label or sqla_col.name
    db_engine_spec = self.database.db_engine_spec
    if db_engine_spec.supports_column_aliases:
        label = db_engine_spec.make_label_compatible(label_expected)
        sqla_col = sqla_col.label(label)
    sqla_col._df_label_expected = label_expected
    return sqla_col
[ "Takes", "a", "sql", "alchemy", "column", "object", "and", "adds", "label", "info", "if", "supported", "by", "engine", ".", ":", "param", "sqla_col", ":", "sql", "alchemy", "column", "instance", ":", "param", "label", ":", "alias", "/", "label", "that", "column", "is", "expected", "to", "have", ":", "return", ":", "either", "a", "sql", "alchemy", "column", "or", "label", "instance", "if", "supported", "by", "engine" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/sqla/models.py#L302-L314
[ "def", "make_sqla_column_compatible", "(", "self", ",", "sqla_col", ",", "label", "=", "None", ")", ":", "label_expected", "=", "label", "or", "sqla_col", ".", "name", "db_engine_spec", "=", "self", ".", "database", ".", "db_engine_spec", "if", "db_engine_spec", ".", "supports_column_aliases", ":", "label", "=", "db_engine_spec", ".", "make_label_compatible", "(", "label_expected", ")", "sqla_col", "=", "sqla_col", ".", "label", "(", "label", ")", "sqla_col", ".", "_df_label_expected", "=", "label_expected", "return", "sqla_col" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
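The core SQLAlchemy move in isolation: label the column and stash the expected dataframe label on it. The engine-spec mangling step is elided and the column names are invented:

from sqlalchemy import column, select

sqla_col = column('num_girls').label('sum__num_girls')
sqla_col._df_label_expected = 'sum__num_girls'  # later collected into labels_expected

print(select([sqla_col]))  # SELECT num_girls AS sum__num_girls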
SqlaTable.values_for_column
Runs query against sqla to retrieve some sample values for the given column.
superset/connectors/sqla/models.py
def values_for_column(self, column_name, limit=10000):
    """Runs query against sqla to retrieve some
    sample values for the given column.
    """
    cols = {col.column_name: col for col in self.columns}
    target_col = cols[column_name]
    tp = self.get_template_processor()

    qry = (
        select([target_col.get_sqla_col()])
        .select_from(self.get_from_clause(tp))
        .distinct()
    )
    if limit:
        qry = qry.limit(limit)

    if self.fetch_values_predicate:
        tp = self.get_template_processor()
        qry = qry.where(tp.process_template(self.fetch_values_predicate))

    engine = self.database.get_sqla_engine()
    sql = '{}'.format(
        qry.compile(engine, compile_kwargs={'literal_binds': True}),
    )
    sql = self.mutate_query_from_config(sql)

    df = pd.read_sql_query(sql=sql, con=engine)
    return [row[0] for row in df.to_records(index=False)]
[ "Runs", "query", "against", "sqla", "to", "retrieve", "some", "sample", "values", "for", "the", "given", "column", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/sqla/models.py#L437-L464
[ "def", "values_for_column", "(", "self", ",", "column_name", ",", "limit", "=", "10000", ")", ":", "cols", "=", "{", "col", ".", "column_name", ":", "col", "for", "col", "in", "self", ".", "columns", "}", "target_col", "=", "cols", "[", "column_name", "]", "tp", "=", "self", ".", "get_template_processor", "(", ")", "qry", "=", "(", "select", "(", "[", "target_col", ".", "get_sqla_col", "(", ")", "]", ")", ".", "select_from", "(", "self", ".", "get_from_clause", "(", "tp", ")", ")", ".", "distinct", "(", ")", ")", "if", "limit", ":", "qry", "=", "qry", ".", "limit", "(", "limit", ")", "if", "self", ".", "fetch_values_predicate", ":", "tp", "=", "self", ".", "get_template_processor", "(", ")", "qry", "=", "qry", ".", "where", "(", "tp", ".", "process_template", "(", "self", ".", "fetch_values_predicate", ")", ")", "engine", "=", "self", ".", "database", ".", "get_sqla_engine", "(", ")", "sql", "=", "'{}'", ".", "format", "(", "qry", ".", "compile", "(", "engine", ",", "compile_kwargs", "=", "{", "'literal_binds'", ":", "True", "}", ")", ",", ")", "sql", "=", "self", ".", "mutate_query_from_config", "(", "sql", ")", "df", "=", "pd", ".", "read_sql_query", "(", "sql", "=", "sql", ",", "con", "=", "engine", ")", "return", "[", "row", "[", "0", "]", "for", "row", "in", "df", ".", "to_records", "(", "index", "=", "False", ")", "]" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
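The query shape this method builds, reproduced with plain SQLAlchemy constructs (table and column names invented):

from sqlalchemy import column, select, table

qry = (
    select([column('gender')])
    .select_from(table('birth_names'))
    .distinct()
    .limit(10000)
)
print(qry)  # SELECT DISTINCT gender FROM birth_names LIMIT :param_1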
SqlaTable.mutate_query_from_config
Apply config's SQL_QUERY_MUTATOR Typically adds comments to the query with context
superset/connectors/sqla/models.py
def mutate_query_from_config(self, sql):
    """Apply config's SQL_QUERY_MUTATOR

    Typically adds comments to the query with context"""
    SQL_QUERY_MUTATOR = config.get('SQL_QUERY_MUTATOR')
    if SQL_QUERY_MUTATOR:
        username = utils.get_username()
        sql = SQL_QUERY_MUTATOR(sql, username, security_manager, self.database)
    return sql
[ "Apply", "config", "s", "SQL_QUERY_MUTATOR" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/sqla/models.py#L466-L474
[ "def", "mutate_query_from_config", "(", "self", ",", "sql", ")", ":", "SQL_QUERY_MUTATOR", "=", "config", ".", "get", "(", "'SQL_QUERY_MUTATOR'", ")", "if", "SQL_QUERY_MUTATOR", ":", "username", "=", "utils", ".", "get_username", "(", ")", "sql", "=", "SQL_QUERY_MUTATOR", "(", "sql", ",", "username", ",", "security_manager", ",", "self", ".", "database", ")", "return", "sql" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
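A sketch of what a SQL_QUERY_MUTATOR hook can look like in superset_config.py; the signature mirrors the call above, while the comment format is just an example:

def SQL_QUERY_MUTATOR(sql, username, security_manager, database):
    # prepend an audit comment before the query is sent to the database
    return '-- submitted by {}\n{}'.format(username, sql)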
SqlaTable.adhoc_metric_to_sqla
Turn an adhoc metric into a sqlalchemy column. :param dict metric: Adhoc metric definition :param dict cols: Columns for the current table :returns: The metric defined as a sqlalchemy column :rtype: sqlalchemy.sql.column
superset/connectors/sqla/models.py
def adhoc_metric_to_sqla(self, metric, cols):
    """
    Turn an adhoc metric into a sqlalchemy column.

    :param dict metric: Adhoc metric definition
    :param dict cols: Columns for the current table
    :returns: The metric defined as a sqlalchemy column
    :rtype: sqlalchemy.sql.column
    """
    expression_type = metric.get('expressionType')
    label = utils.get_metric_name(metric)

    if expression_type == utils.ADHOC_METRIC_EXPRESSION_TYPES['SIMPLE']:
        column_name = metric.get('column').get('column_name')
        table_column = cols.get(column_name)
        if table_column:
            sqla_column = table_column.get_sqla_col()
        else:
            sqla_column = column(column_name)
        sqla_metric = self.sqla_aggregations[metric.get('aggregate')](sqla_column)
    elif expression_type == utils.ADHOC_METRIC_EXPRESSION_TYPES['SQL']:
        sqla_metric = literal_column(metric.get('sqlExpression'))
    else:
        return None

    return self.make_sqla_column_compatible(sqla_metric, label)
[ "Turn", "an", "adhoc", "metric", "into", "a", "sqlalchemy", "column", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/sqla/models.py#L509-L534
[ "def", "adhoc_metric_to_sqla", "(", "self", ",", "metric", ",", "cols", ")", ":", "expression_type", "=", "metric", ".", "get", "(", "'expressionType'", ")", "label", "=", "utils", ".", "get_metric_name", "(", "metric", ")", "if", "expression_type", "==", "utils", ".", "ADHOC_METRIC_EXPRESSION_TYPES", "[", "'SIMPLE'", "]", ":", "column_name", "=", "metric", ".", "get", "(", "'column'", ")", ".", "get", "(", "'column_name'", ")", "table_column", "=", "cols", ".", "get", "(", "column_name", ")", "if", "table_column", ":", "sqla_column", "=", "table_column", ".", "get_sqla_col", "(", ")", "else", ":", "sqla_column", "=", "column", "(", "column_name", ")", "sqla_metric", "=", "self", ".", "sqla_aggregations", "[", "metric", ".", "get", "(", "'aggregate'", ")", "]", "(", "sqla_column", ")", "elif", "expression_type", "==", "utils", ".", "ADHOC_METRIC_EXPRESSION_TYPES", "[", "'SQL'", "]", ":", "sqla_metric", "=", "literal_column", "(", "metric", ".", "get", "(", "'sqlExpression'", ")", ")", "else", ":", "return", "None", "return", "self", ".", "make_sqla_column_compatible", "(", "sqla_metric", ",", "label", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
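Illustrative payloads for the two expression types this method handles; the key names come from the code above, the values are invented:

simple_metric = {
    'expressionType': 'SIMPLE',
    'column': {'column_name': 'num'},
    'aggregate': 'SUM',
    'label': 'SUM(num)',
}
sql_metric = {
    'expressionType': 'SQL',
    'sqlExpression': 'SUM(num_boys) - SUM(num_girls)',
    'label': 'boy_girl_gap',
}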
SqlaTable.get_sqla_query
Querying any sqla table from this common interface
superset/connectors/sqla/models.py
def get_sqla_query( # sqla self, groupby, metrics, granularity, from_dttm, to_dttm, filter=None, # noqa is_timeseries=True, timeseries_limit=15, timeseries_limit_metric=None, row_limit=None, inner_from_dttm=None, inner_to_dttm=None, orderby=None, extras=None, columns=None, order_desc=True, prequeries=None, is_prequery=False, ): """Querying any sqla table from this common interface""" template_kwargs = { 'from_dttm': from_dttm, 'groupby': groupby, 'metrics': metrics, 'row_limit': row_limit, 'to_dttm': to_dttm, 'filter': filter, 'columns': {col.column_name: col for col in self.columns}, } template_kwargs.update(self.template_params_dict) template_processor = self.get_template_processor(**template_kwargs) db_engine_spec = self.database.db_engine_spec orderby = orderby or [] # For backward compatibility if granularity not in self.dttm_cols: granularity = self.main_dttm_col # Database spec supports join-free timeslot grouping time_groupby_inline = db_engine_spec.time_groupby_inline cols = {col.column_name: col for col in self.columns} metrics_dict = {m.metric_name: m for m in self.metrics} if not granularity and is_timeseries: raise Exception(_( 'Datetime column not provided as part table configuration ' 'and is required by this type of chart')) if not groupby and not metrics and not columns: raise Exception(_('Empty query?')) metrics_exprs = [] for m in metrics: if utils.is_adhoc_metric(m): metrics_exprs.append(self.adhoc_metric_to_sqla(m, cols)) elif m in metrics_dict: metrics_exprs.append(metrics_dict.get(m).get_sqla_col()) else: raise Exception(_("Metric '{}' is not valid".format(m))) if metrics_exprs: main_metric_expr = metrics_exprs[0] else: main_metric_expr, label = literal_column('COUNT(*)'), 'ccount' main_metric_expr = self.make_sqla_column_compatible(main_metric_expr, label) select_exprs = [] groupby_exprs_sans_timestamp = OrderedDict() if groupby: select_exprs = [] for s in groupby: if s in cols: outer = cols[s].get_sqla_col() else: outer = literal_column(f'({s})') outer = self.make_sqla_column_compatible(outer, s) groupby_exprs_sans_timestamp[outer.name] = outer select_exprs.append(outer) elif columns: for s in columns: select_exprs.append( cols[s].get_sqla_col() if s in cols else self.make_sqla_column_compatible(literal_column(s))) metrics_exprs = [] groupby_exprs_with_timestamp = OrderedDict(groupby_exprs_sans_timestamp.items()) if granularity: dttm_col = cols[granularity] time_grain = extras.get('time_grain_sqla') time_filters = [] if is_timeseries: timestamp = dttm_col.get_timestamp_expression(time_grain) select_exprs += [timestamp] groupby_exprs_with_timestamp[timestamp.name] = timestamp # Use main dttm column to support index with secondary dttm columns if db_engine_spec.time_secondary_columns and \ self.main_dttm_col in self.dttm_cols and \ self.main_dttm_col != dttm_col.column_name: time_filters.append(cols[self.main_dttm_col]. get_time_filter(from_dttm, to_dttm)) time_filters.append(dttm_col.get_time_filter(from_dttm, to_dttm)) select_exprs += metrics_exprs labels_expected = [c._df_label_expected for c in select_exprs] select_exprs = db_engine_spec.make_select_compatible( groupby_exprs_with_timestamp.values(), select_exprs) qry = sa.select(select_exprs) tbl = self.get_from_clause(template_processor) if not columns: qry = qry.group_by(*groupby_exprs_with_timestamp.values()) where_clause_and = [] having_clause_and = [] for flt in filter: if not all([flt.get(s) for s in ['col', 'op']]): continue col = flt['col'] op = flt['op'] col_obj = cols.get(col) if col_obj: is_list_target = op in ('in', 'not in') eq = self.filter_values_handler( flt.get('val'), target_column_is_numeric=col_obj.is_num, is_list_target=is_list_target) if op in ('in', 'not in'): cond = col_obj.get_sqla_col().in_(eq) if '<NULL>' in eq: cond = or_(cond, col_obj.get_sqla_col() == None) # noqa if op == 'not in': cond = ~cond where_clause_and.append(cond) else: if col_obj.is_num: eq = utils.string_to_num(flt['val']) if op == '==': where_clause_and.append(col_obj.get_sqla_col() == eq) elif op == '!=': where_clause_and.append(col_obj.get_sqla_col() != eq) elif op == '>': where_clause_and.append(col_obj.get_sqla_col() > eq) elif op == '<': where_clause_and.append(col_obj.get_sqla_col() < eq) elif op == '>=': where_clause_and.append(col_obj.get_sqla_col() >= eq) elif op == '<=': where_clause_and.append(col_obj.get_sqla_col() <= eq) elif op == 'LIKE': where_clause_and.append(col_obj.get_sqla_col().like(eq)) elif op == 'IS NULL': where_clause_and.append(col_obj.get_sqla_col() == None) # noqa elif op == 'IS NOT NULL': where_clause_and.append( col_obj.get_sqla_col() != None) # noqa if extras: where = extras.get('where') if where: where = template_processor.process_template(where) where_clause_and += [sa.text('({})'.format(where))] having = extras.get('having') if having: having = template_processor.process_template(having) having_clause_and += [sa.text('({})'.format(having))] if granularity: qry = qry.where(and_(*(time_filters + where_clause_and))) else: qry = qry.where(and_(*where_clause_and)) qry = qry.having(and_(*having_clause_and)) if not orderby and not columns: orderby = [(main_metric_expr, not order_desc)] for col, ascending in orderby: direction = asc if ascending else desc if utils.is_adhoc_metric(col): col = self.adhoc_metric_to_sqla(col, cols) qry = qry.order_by(direction(col)) if row_limit: qry = qry.limit(row_limit) if is_timeseries and \ timeseries_limit and groupby and not time_groupby_inline: if self.database.db_engine_spec.inner_joins: # some sql dialects require for order by expressions # to also be in the select clause -- others, e.g. vertica, # require a unique inner alias inner_main_metric_expr = self.make_sqla_column_compatible( main_metric_expr, 'mme_inner__') inner_groupby_exprs = [] inner_select_exprs = [] for gby_name, gby_obj in groupby_exprs_sans_timestamp.items(): inner = self.make_sqla_column_compatible(gby_obj, gby_name + '__') inner_groupby_exprs.append(inner) inner_select_exprs.append(inner) inner_select_exprs += [inner_main_metric_expr] subq = select(inner_select_exprs).select_from(tbl) inner_time_filter = dttm_col.get_time_filter( inner_from_dttm or from_dttm, inner_to_dttm or to_dttm, ) subq = subq.where(and_(*(where_clause_and + [inner_time_filter]))) subq = subq.group_by(*inner_groupby_exprs) ob = inner_main_metric_expr if timeseries_limit_metric: ob = self._get_timeseries_orderby( timeseries_limit_metric, metrics_dict, cols, ) direction = desc if order_desc else asc subq = subq.order_by(direction(ob)) subq = subq.limit(timeseries_limit) on_clause = [] for gby_name, gby_obj in groupby_exprs_sans_timestamp.items(): # in this case the column name, not the alias, needs to be # conditionally mutated, as it refers to the column alias in # the inner query col_name = db_engine_spec.make_label_compatible(gby_name + '__') on_clause.append(gby_obj == column(col_name)) tbl = tbl.join(subq.alias(), and_(*on_clause)) else: if timeseries_limit_metric: orderby = [( self._get_timeseries_orderby( timeseries_limit_metric, metrics_dict, cols, ), False, )] # run subquery to get top groups subquery_obj = { 'prequeries': prequeries, 'is_prequery': True, 'is_timeseries': False, 'row_limit': timeseries_limit, 'groupby': groupby, 'metrics': metrics, 'granularity': granularity, 'from_dttm': inner_from_dttm or from_dttm, 'to_dttm': inner_to_dttm or to_dttm, 'filter': filter, 'orderby': orderby, 'extras': extras, 'columns': columns, 'order_desc': True, } result = self.query(subquery_obj) dimensions = [ c for c in result.df.columns if c not in metrics and c in groupby_exprs_sans_timestamp ] top_groups = self._get_top_groups(result.df, dimensions, groupby_exprs_sans_timestamp) qry = qry.where(top_groups) return SqlaQuery(sqla_query=qry.select_from(tbl), labels_expected=labels_expected)
def get_sqla_query( # sqla self, groupby, metrics, granularity, from_dttm, to_dttm, filter=None, # noqa is_timeseries=True, timeseries_limit=15, timeseries_limit_metric=None, row_limit=None, inner_from_dttm=None, inner_to_dttm=None, orderby=None, extras=None, columns=None, order_desc=True, prequeries=None, is_prequery=False, ): """Querying any sqla table from this common interface""" template_kwargs = { 'from_dttm': from_dttm, 'groupby': groupby, 'metrics': metrics, 'row_limit': row_limit, 'to_dttm': to_dttm, 'filter': filter, 'columns': {col.column_name: col for col in self.columns}, } template_kwargs.update(self.template_params_dict) template_processor = self.get_template_processor(**template_kwargs) db_engine_spec = self.database.db_engine_spec orderby = orderby or [] # For backward compatibility if granularity not in self.dttm_cols: granularity = self.main_dttm_col # Database spec supports join-free timeslot grouping time_groupby_inline = db_engine_spec.time_groupby_inline cols = {col.column_name: col for col in self.columns} metrics_dict = {m.metric_name: m for m in self.metrics} if not granularity and is_timeseries: raise Exception(_( 'Datetime column not provided as part table configuration ' 'and is required by this type of chart')) if not groupby and not metrics and not columns: raise Exception(_('Empty query?')) metrics_exprs = [] for m in metrics: if utils.is_adhoc_metric(m): metrics_exprs.append(self.adhoc_metric_to_sqla(m, cols)) elif m in metrics_dict: metrics_exprs.append(metrics_dict.get(m).get_sqla_col()) else: raise Exception(_("Metric '{}' is not valid".format(m))) if metrics_exprs: main_metric_expr = metrics_exprs[0] else: main_metric_expr, label = literal_column('COUNT(*)'), 'ccount' main_metric_expr = self.make_sqla_column_compatible(main_metric_expr, label) select_exprs = [] groupby_exprs_sans_timestamp = OrderedDict() if groupby: select_exprs = [] for s in groupby: if s in cols: outer = cols[s].get_sqla_col() else: outer = literal_column(f'({s})') outer = self.make_sqla_column_compatible(outer, s) groupby_exprs_sans_timestamp[outer.name] = outer select_exprs.append(outer) elif columns: for s in columns: select_exprs.append( cols[s].get_sqla_col() if s in cols else self.make_sqla_column_compatible(literal_column(s))) metrics_exprs = [] groupby_exprs_with_timestamp = OrderedDict(groupby_exprs_sans_timestamp.items()) if granularity: dttm_col = cols[granularity] time_grain = extras.get('time_grain_sqla') time_filters = [] if is_timeseries: timestamp = dttm_col.get_timestamp_expression(time_grain) select_exprs += [timestamp] groupby_exprs_with_timestamp[timestamp.name] = timestamp # Use main dttm column to support index with secondary dttm columns if db_engine_spec.time_secondary_columns and \ self.main_dttm_col in self.dttm_cols and \ self.main_dttm_col != dttm_col.column_name: time_filters.append(cols[self.main_dttm_col]. get_time_filter(from_dttm, to_dttm)) time_filters.append(dttm_col.get_time_filter(from_dttm, to_dttm)) select_exprs += metrics_exprs labels_expected = [c._df_label_expected for c in select_exprs] select_exprs = db_engine_spec.make_select_compatible( groupby_exprs_with_timestamp.values(), select_exprs) qry = sa.select(select_exprs) tbl = self.get_from_clause(template_processor) if not columns: qry = qry.group_by(*groupby_exprs_with_timestamp.values()) where_clause_and = [] having_clause_and = [] for flt in filter: if not all([flt.get(s) for s in ['col', 'op']]): continue col = flt['col'] op = flt['op'] col_obj = cols.get(col) if col_obj: is_list_target = op in ('in', 'not in') eq = self.filter_values_handler( flt.get('val'), target_column_is_numeric=col_obj.is_num, is_list_target=is_list_target) if op in ('in', 'not in'): cond = col_obj.get_sqla_col().in_(eq) if '<NULL>' in eq: cond = or_(cond, col_obj.get_sqla_col() == None) # noqa if op == 'not in': cond = ~cond where_clause_and.append(cond) else: if col_obj.is_num: eq = utils.string_to_num(flt['val']) if op == '==': where_clause_and.append(col_obj.get_sqla_col() == eq) elif op == '!=': where_clause_and.append(col_obj.get_sqla_col() != eq) elif op == '>': where_clause_and.append(col_obj.get_sqla_col() > eq) elif op == '<': where_clause_and.append(col_obj.get_sqla_col() < eq) elif op == '>=': where_clause_and.append(col_obj.get_sqla_col() >= eq) elif op == '<=': where_clause_and.append(col_obj.get_sqla_col() <= eq) elif op == 'LIKE': where_clause_and.append(col_obj.get_sqla_col().like(eq)) elif op == 'IS NULL': where_clause_and.append(col_obj.get_sqla_col() == None) # noqa elif op == 'IS NOT NULL': where_clause_and.append( col_obj.get_sqla_col() != None) # noqa if extras: where = extras.get('where') if where: where = template_processor.process_template(where) where_clause_and += [sa.text('({})'.format(where))] having = extras.get('having') if having: having = template_processor.process_template(having) having_clause_and += [sa.text('({})'.format(having))] if granularity: qry = qry.where(and_(*(time_filters + where_clause_and))) else: qry = qry.where(and_(*where_clause_and)) qry = qry.having(and_(*having_clause_and)) if not orderby and not columns: orderby = [(main_metric_expr, not order_desc)] for col, ascending in orderby: direction = asc if ascending else desc if utils.is_adhoc_metric(col): col = self.adhoc_metric_to_sqla(col, cols) qry = qry.order_by(direction(col)) if row_limit: qry = qry.limit(row_limit) if is_timeseries and \ timeseries_limit and groupby and not time_groupby_inline: if self.database.db_engine_spec.inner_joins: # some sql dialects require for order by expressions # to also be in the select clause -- others, e.g. vertica, # require a unique inner alias inner_main_metric_expr = self.make_sqla_column_compatible( main_metric_expr, 'mme_inner__') inner_groupby_exprs = [] inner_select_exprs = [] for gby_name, gby_obj in groupby_exprs_sans_timestamp.items(): inner = self.make_sqla_column_compatible(gby_obj, gby_name + '__') inner_groupby_exprs.append(inner) inner_select_exprs.append(inner) inner_select_exprs += [inner_main_metric_expr] subq = select(inner_select_exprs).select_from(tbl) inner_time_filter = dttm_col.get_time_filter( inner_from_dttm or from_dttm, inner_to_dttm or to_dttm, ) subq = subq.where(and_(*(where_clause_and + [inner_time_filter]))) subq = subq.group_by(*inner_groupby_exprs) ob = inner_main_metric_expr if timeseries_limit_metric: ob = self._get_timeseries_orderby( timeseries_limit_metric, metrics_dict, cols, ) direction = desc if order_desc else asc subq = subq.order_by(direction(ob)) subq = subq.limit(timeseries_limit) on_clause = [] for gby_name, gby_obj in groupby_exprs_sans_timestamp.items(): # in this case the column name, not the alias, needs to be # conditionally mutated, as it refers to the column alias in # the inner query col_name = db_engine_spec.make_label_compatible(gby_name + '__') on_clause.append(gby_obj == column(col_name)) tbl = tbl.join(subq.alias(), and_(*on_clause)) else: if timeseries_limit_metric: orderby = [( self._get_timeseries_orderby( timeseries_limit_metric, metrics_dict, cols, ), False, )] # run subquery to get top groups subquery_obj = { 'prequeries': prequeries, 'is_prequery': True, 'is_timeseries': False, 'row_limit': timeseries_limit, 'groupby': groupby, 'metrics': metrics, 'granularity': granularity, 'from_dttm': inner_from_dttm or from_dttm, 'to_dttm': inner_to_dttm or to_dttm, 'filter': filter, 'orderby': orderby, 'extras': extras, 'columns': columns, 'order_desc': True, } result = self.query(subquery_obj) dimensions = [ c for c in result.df.columns if c not in metrics and c in groupby_exprs_sans_timestamp ] top_groups = self._get_top_groups(result.df, dimensions, groupby_exprs_sans_timestamp) qry = qry.where(top_groups) return SqlaQuery(sqla_query=qry.select_from(tbl), labels_expected=labels_expected)
[ "Querying", "any", "sqla", "table", "from", "this", "common", "interface" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/sqla/models.py#L536-L808
[ "def", "get_sqla_query", "(", "# sqla", "self", ",", "groupby", ",", "metrics", ",", "granularity", ",", "from_dttm", ",", "to_dttm", ",", "filter", "=", "None", ",", "# noqa", "is_timeseries", "=", "True", ",", "timeseries_limit", "=", "15", ",", "timeseries_limit_metric", "=", "None", ",", "row_limit", "=", "None", ",", "inner_from_dttm", "=", "None", ",", "inner_to_dttm", "=", "None", ",", "orderby", "=", "None", ",", "extras", "=", "None", ",", "columns", "=", "None", ",", "order_desc", "=", "True", ",", "prequeries", "=", "None", ",", "is_prequery", "=", "False", ",", ")", ":", "template_kwargs", "=", "{", "'from_dttm'", ":", "from_dttm", ",", "'groupby'", ":", "groupby", ",", "'metrics'", ":", "metrics", ",", "'row_limit'", ":", "row_limit", ",", "'to_dttm'", ":", "to_dttm", ",", "'filter'", ":", "filter", ",", "'columns'", ":", "{", "col", ".", "column_name", ":", "col", "for", "col", "in", "self", ".", "columns", "}", ",", "}", "template_kwargs", ".", "update", "(", "self", ".", "template_params_dict", ")", "template_processor", "=", "self", ".", "get_template_processor", "(", "*", "*", "template_kwargs", ")", "db_engine_spec", "=", "self", ".", "database", ".", "db_engine_spec", "orderby", "=", "orderby", "or", "[", "]", "# For backward compatibility", "if", "granularity", "not", "in", "self", ".", "dttm_cols", ":", "granularity", "=", "self", ".", "main_dttm_col", "# Database spec supports join-free timeslot grouping", "time_groupby_inline", "=", "db_engine_spec", ".", "time_groupby_inline", "cols", "=", "{", "col", ".", "column_name", ":", "col", "for", "col", "in", "self", ".", "columns", "}", "metrics_dict", "=", "{", "m", ".", "metric_name", ":", "m", "for", "m", "in", "self", ".", "metrics", "}", "if", "not", "granularity", "and", "is_timeseries", ":", "raise", "Exception", "(", "_", "(", "'Datetime column not provided as part table configuration '", "'and is required by this type of chart'", ")", ")", "if", "not", "groupby", "and", "not", "metrics", "and", "not", "columns", ":", "raise", "Exception", "(", "_", "(", "'Empty query?'", ")", ")", "metrics_exprs", "=", "[", "]", "for", "m", "in", "metrics", ":", "if", "utils", ".", "is_adhoc_metric", "(", "m", ")", ":", "metrics_exprs", ".", "append", "(", "self", ".", "adhoc_metric_to_sqla", "(", "m", ",", "cols", ")", ")", "elif", "m", "in", "metrics_dict", ":", "metrics_exprs", ".", "append", "(", "metrics_dict", ".", "get", "(", "m", ")", ".", "get_sqla_col", "(", ")", ")", "else", ":", "raise", "Exception", "(", "_", "(", "\"Metric '{}' is not valid\"", ".", "format", "(", "m", ")", ")", ")", "if", "metrics_exprs", ":", "main_metric_expr", "=", "metrics_exprs", "[", "0", "]", "else", ":", "main_metric_expr", ",", "label", "=", "literal_column", "(", "'COUNT(*)'", ")", ",", "'ccount'", "main_metric_expr", "=", "self", ".", "make_sqla_column_compatible", "(", "main_metric_expr", ",", "label", ")", "select_exprs", "=", "[", "]", "groupby_exprs_sans_timestamp", "=", "OrderedDict", "(", ")", "if", "groupby", ":", "select_exprs", "=", "[", "]", "for", "s", "in", "groupby", ":", "if", "s", "in", "cols", ":", "outer", "=", "cols", "[", "s", "]", ".", "get_sqla_col", "(", ")", "else", ":", "outer", "=", "literal_column", "(", "f'({s})'", ")", "outer", "=", "self", ".", "make_sqla_column_compatible", "(", "outer", ",", "s", ")", "groupby_exprs_sans_timestamp", "[", "outer", ".", "name", "]", "=", "outer", "select_exprs", ".", "append", "(", "outer", ")", "elif", "columns", ":", "for", "s", "in", "columns", ":", "select_exprs", ".", "append", "(", "cols", "[", "s", "]", ".", "get_sqla_col", "(", ")", "if", "s", "in", "cols", "else", "self", ".", "make_sqla_column_compatible", "(", "literal_column", "(", "s", ")", ")", ")", "metrics_exprs", "=", "[", "]", "groupby_exprs_with_timestamp", "=", "OrderedDict", "(", "groupby_exprs_sans_timestamp", ".", "items", "(", ")", ")", "if", "granularity", ":", "dttm_col", "=", "cols", "[", "granularity", "]", "time_grain", "=", "extras", ".", "get", "(", "'time_grain_sqla'", ")", "time_filters", "=", "[", "]", "if", "is_timeseries", ":", "timestamp", "=", "dttm_col", ".", "get_timestamp_expression", "(", "time_grain", ")", "select_exprs", "+=", "[", "timestamp", "]", "groupby_exprs_with_timestamp", "[", "timestamp", ".", "name", "]", "=", "timestamp", "# Use main dttm column to support index with secondary dttm columns", "if", "db_engine_spec", ".", "time_secondary_columns", "and", "self", ".", "main_dttm_col", "in", "self", ".", "dttm_cols", "and", "self", ".", "main_dttm_col", "!=", "dttm_col", ".", "column_name", ":", "time_filters", ".", "append", "(", "cols", "[", "self", ".", "main_dttm_col", "]", ".", "get_time_filter", "(", "from_dttm", ",", "to_dttm", ")", ")", "time_filters", ".", "append", "(", "dttm_col", ".", "get_time_filter", "(", "from_dttm", ",", "to_dttm", ")", ")", "select_exprs", "+=", "metrics_exprs", "labels_expected", "=", "[", "c", ".", "_df_label_expected", "for", "c", "in", "select_exprs", "]", "select_exprs", "=", "db_engine_spec", ".", "make_select_compatible", "(", "groupby_exprs_with_timestamp", ".", "values", "(", ")", ",", "select_exprs", ")", "qry", "=", "sa", ".", "select", "(", "select_exprs", ")", "tbl", "=", "self", ".", "get_from_clause", "(", "template_processor", ")", "if", "not", "columns", ":", "qry", "=", "qry", ".", "group_by", "(", "*", "groupby_exprs_with_timestamp", ".", "values", "(", ")", ")", "where_clause_and", "=", "[", "]", "having_clause_and", "=", "[", "]", "for", "flt", "in", "filter", ":", "if", "not", "all", "(", "[", "flt", ".", "get", "(", "s", ")", "for", "s", "in", "[", "'col'", ",", "'op'", "]", "]", ")", ":", "continue", "col", "=", "flt", "[", "'col'", "]", "op", "=", "flt", "[", "'op'", "]", "col_obj", "=", "cols", ".", "get", "(", "col", ")", "if", "col_obj", ":", "is_list_target", "=", "op", "in", "(", "'in'", ",", "'not in'", ")", "eq", "=", "self", ".", "filter_values_handler", "(", "flt", ".", "get", "(", "'val'", ")", ",", "target_column_is_numeric", "=", "col_obj", ".", "is_num", ",", "is_list_target", "=", "is_list_target", ")", "if", "op", "in", "(", "'in'", ",", "'not in'", ")", ":", "cond", "=", "col_obj", ".", "get_sqla_col", "(", ")", ".", "in_", "(", "eq", ")", "if", "'<NULL>'", "in", "eq", ":", "cond", "=", "or_", "(", "cond", ",", "col_obj", ".", "get_sqla_col", "(", ")", "==", "None", ")", "# noqa", "if", "op", "==", "'not in'", ":", "cond", "=", "~", "cond", "where_clause_and", ".", "append", "(", "cond", ")", "else", ":", "if", "col_obj", ".", "is_num", ":", "eq", "=", "utils", ".", "string_to_num", "(", "flt", "[", "'val'", "]", ")", "if", "op", "==", "'=='", ":", "where_clause_and", ".", "append", "(", "col_obj", ".", "get_sqla_col", "(", ")", "==", "eq", ")", "elif", "op", "==", "'!='", ":", "where_clause_and", ".", "append", "(", "col_obj", ".", "get_sqla_col", "(", ")", "!=", "eq", ")", "elif", "op", "==", "'>'", ":", "where_clause_and", ".", "append", "(", "col_obj", ".", "get_sqla_col", "(", ")", ">", "eq", ")", "elif", "op", "==", "'<'", ":", "where_clause_and", ".", "append", "(", "col_obj", ".", "get_sqla_col", "(", ")", "<", "eq", ")", "elif", "op", "==", "'>='", ":", "where_clause_and", ".", "append", "(", "col_obj", ".", "get_sqla_col", "(", ")", ">=", "eq", ")", "elif", "op", "==", "'<='", ":", "where_clause_and", ".", "append", "(", "col_obj", ".", "get_sqla_col", "(", ")", "<=", "eq", ")", "elif", "op", "==", "'LIKE'", ":", "where_clause_and", ".", "append", "(", "col_obj", ".", "get_sqla_col", "(", ")", ".", "like", "(", "eq", ")", ")", "elif", "op", "==", "'IS NULL'", ":", "where_clause_and", ".", "append", "(", "col_obj", ".", "get_sqla_col", "(", ")", "==", "None", ")", "# noqa", "elif", "op", "==", "'IS NOT NULL'", ":", "where_clause_and", ".", "append", "(", "col_obj", ".", "get_sqla_col", "(", ")", "!=", "None", ")", "# noqa", "if", "extras", ":", "where", "=", "extras", ".", "get", "(", "'where'", ")", "if", "where", ":", "where", "=", "template_processor", ".", "process_template", "(", "where", ")", "where_clause_and", "+=", "[", "sa", ".", "text", "(", "'({})'", ".", "format", "(", "where", ")", ")", "]", "having", "=", "extras", ".", "get", "(", "'having'", ")", "if", "having", ":", "having", "=", "template_processor", ".", "process_template", "(", "having", ")", "having_clause_and", "+=", "[", "sa", ".", "text", "(", "'({})'", ".", "format", "(", "having", ")", ")", "]", "if", "granularity", ":", "qry", "=", "qry", ".", "where", "(", "and_", "(", "*", "(", "time_filters", "+", "where_clause_and", ")", ")", ")", "else", ":", "qry", "=", "qry", ".", "where", "(", "and_", "(", "*", "where_clause_and", ")", ")", "qry", "=", "qry", ".", "having", "(", "and_", "(", "*", "having_clause_and", ")", ")", "if", "not", "orderby", "and", "not", "columns", ":", "orderby", "=", "[", "(", "main_metric_expr", ",", "not", "order_desc", ")", "]", "for", "col", ",", "ascending", "in", "orderby", ":", "direction", "=", "asc", "if", "ascending", "else", "desc", "if", "utils", ".", "is_adhoc_metric", "(", "col", ")", ":", "col", "=", "self", ".", "adhoc_metric_to_sqla", "(", "col", ",", "cols", ")", "qry", "=", "qry", ".", "order_by", "(", "direction", "(", "col", ")", ")", "if", "row_limit", ":", "qry", "=", "qry", ".", "limit", "(", "row_limit", ")", "if", "is_timeseries", "and", "timeseries_limit", "and", "groupby", "and", "not", "time_groupby_inline", ":", "if", "self", ".", "database", ".", "db_engine_spec", ".", "inner_joins", ":", "# some sql dialects require for order by expressions", "# to also be in the select clause -- others, e.g. vertica,", "# require a unique inner alias", "inner_main_metric_expr", "=", "self", ".", "make_sqla_column_compatible", "(", "main_metric_expr", ",", "'mme_inner__'", ")", "inner_groupby_exprs", "=", "[", "]", "inner_select_exprs", "=", "[", "]", "for", "gby_name", ",", "gby_obj", "in", "groupby_exprs_sans_timestamp", ".", "items", "(", ")", ":", "inner", "=", "self", ".", "make_sqla_column_compatible", "(", "gby_obj", ",", "gby_name", "+", "'__'", ")", "inner_groupby_exprs", ".", "append", "(", "inner", ")", "inner_select_exprs", ".", "append", "(", "inner", ")", "inner_select_exprs", "+=", "[", "inner_main_metric_expr", "]", "subq", "=", "select", "(", "inner_select_exprs", ")", ".", "select_from", "(", "tbl", ")", "inner_time_filter", "=", "dttm_col", ".", "get_time_filter", "(", "inner_from_dttm", "or", "from_dttm", ",", "inner_to_dttm", "or", "to_dttm", ",", ")", "subq", "=", "subq", ".", "where", "(", "and_", "(", "*", "(", "where_clause_and", "+", "[", "inner_time_filter", "]", ")", ")", ")", "subq", "=", "subq", ".", "group_by", "(", "*", "inner_groupby_exprs", ")", "ob", "=", "inner_main_metric_expr", "if", "timeseries_limit_metric", ":", "ob", "=", "self", ".", "_get_timeseries_orderby", "(", "timeseries_limit_metric", ",", "metrics_dict", ",", "cols", ",", ")", "direction", "=", "desc", "if", "order_desc", "else", "asc", "subq", "=", "subq", ".", "order_by", "(", "direction", "(", "ob", ")", ")", "subq", "=", "subq", ".", "limit", "(", "timeseries_limit", ")", "on_clause", "=", "[", "]", "for", "gby_name", ",", "gby_obj", "in", "groupby_exprs_sans_timestamp", ".", "items", "(", ")", ":", "# in this case the column name, not the alias, needs to be", "# conditionally mutated, as it refers to the column alias in", "# the inner query", "col_name", "=", "db_engine_spec", ".", "make_label_compatible", "(", "gby_name", "+", "'__'", ")", "on_clause", ".", "append", "(", "gby_obj", "==", "column", "(", "col_name", ")", ")", "tbl", "=", "tbl", ".", "join", "(", "subq", ".", "alias", "(", ")", ",", "and_", "(", "*", "on_clause", ")", ")", "else", ":", "if", "timeseries_limit_metric", ":", "orderby", "=", "[", "(", "self", ".", "_get_timeseries_orderby", "(", "timeseries_limit_metric", ",", "metrics_dict", ",", "cols", ",", ")", ",", "False", ",", ")", "]", "# run subquery to get top groups", "subquery_obj", "=", "{", "'prequeries'", ":", "prequeries", ",", "'is_prequery'", ":", "True", ",", "'is_timeseries'", ":", "False", ",", "'row_limit'", ":", "timeseries_limit", ",", "'groupby'", ":", "groupby", ",", "'metrics'", ":", "metrics", ",", "'granularity'", ":", "granularity", ",", "'from_dttm'", ":", "inner_from_dttm", "or", "from_dttm", ",", "'to_dttm'", ":", "inner_to_dttm", "or", "to_dttm", ",", "'filter'", ":", "filter", ",", "'orderby'", ":", "orderby", ",", "'extras'", ":", "extras", ",", "'columns'", ":", "columns", ",", "'order_desc'", ":", "True", ",", "}", "result", "=", "self", ".", "query", "(", "subquery_obj", ")", "dimensions", "=", "[", "c", "for", "c", "in", "result", ".", "df", ".", "columns", "if", "c", "not", "in", "metrics", "and", "c", "in", "groupby_exprs_sans_timestamp", "]", "top_groups", "=", "self", ".", "_get_top_groups", "(", "result", ".", "df", ",", "dimensions", ",", "groupby_exprs_sans_timestamp", ")", "qry", "=", "qry", ".", "where", "(", "top_groups", ")", "return", "SqlaQuery", "(", "sqla_query", "=", "qry", ".", "select_from", "(", "tbl", ")", ",", "labels_expected", "=", "labels_expected", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
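A minimal sketch of how the interface above might be driven; `tbl` is assumed to be an already-configured SqlaTable with a `ds` datetime column and a registered `count` metric, and all names here are illustrative rather than part of the record:

from datetime import datetime, timedelta

sqla_query = tbl.get_sqla_query(
    groupby=['gender'],
    metrics=['count'],
    granularity='ds',
    from_dttm=datetime.now() - timedelta(days=7),
    to_dttm=datetime.now(),
    # filter and extras need concrete values here: the body iterates over
    # filter and calls extras.get(...), so the None defaults would fail.
    filter=[{'col': 'state', 'op': 'in', 'val': ['CA', 'NY']}],
    extras={'time_grain_sqla': 'P1D'},
)
# The result wraps a SQLAlchemy Select in its sqla_query member, which can
# be printed or compiled for inspection.
print(sqla_query.sqla_query)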
train
SqlaTable.fetch_metadata
Fetches the metadata for the table and merges it in
superset/connectors/sqla/models.py
def fetch_metadata(self): """Fetches the metadata for the table and merges it in""" try: table = self.get_sqla_table_object() except Exception as e: logging.exception(e) raise Exception(_( "Table [{}] doesn't seem to exist in the specified database, " "couldn't fetch column information").format(self.table_name)) M = SqlMetric # noqa metrics = [] any_date_col = None db_engine_spec = self.database.db_engine_spec db_dialect = self.database.get_dialect() dbcols = ( db.session.query(TableColumn) .filter(TableColumn.table == self) .filter(or_(TableColumn.column_name == col.name for col in table.columns))) dbcols = {dbcol.column_name: dbcol for dbcol in dbcols} for col in table.columns: try: datatype = col.type.compile(dialect=db_dialect).upper() except Exception as e: datatype = 'UNKNOWN' logging.error( 'Unrecognized data type in {}.{}'.format(table, col.name)) logging.exception(e) dbcol = dbcols.get(col.name, None) if not dbcol: dbcol = TableColumn(column_name=col.name, type=datatype) dbcol.sum = dbcol.is_num dbcol.avg = dbcol.is_num dbcol.is_dttm = dbcol.is_time db_engine_spec.alter_new_orm_column(dbcol) else: dbcol.type = datatype dbcol.groupby = True dbcol.filterable = True self.columns.append(dbcol) if not any_date_col and dbcol.is_time: any_date_col = col.name metrics.append(M( metric_name='count', verbose_name='COUNT(*)', metric_type='count', expression='COUNT(*)', )) if not self.main_dttm_col: self.main_dttm_col = any_date_col self.add_missing_metrics(metrics) db.session.merge(self) db.session.commit()
def fetch_metadata(self): """Fetches the metadata for the table and merges it in""" try: table = self.get_sqla_table_object() except Exception as e: logging.exception(e) raise Exception(_( "Table [{}] doesn't seem to exist in the specified database, " "couldn't fetch column information").format(self.table_name)) M = SqlMetric # noqa metrics = [] any_date_col = None db_engine_spec = self.database.db_engine_spec db_dialect = self.database.get_dialect() dbcols = ( db.session.query(TableColumn) .filter(TableColumn.table == self) .filter(or_(TableColumn.column_name == col.name for col in table.columns))) dbcols = {dbcol.column_name: dbcol for dbcol in dbcols} for col in table.columns: try: datatype = col.type.compile(dialect=db_dialect).upper() except Exception as e: datatype = 'UNKNOWN' logging.error( 'Unrecognized data type in {}.{}'.format(table, col.name)) logging.exception(e) dbcol = dbcols.get(col.name, None) if not dbcol: dbcol = TableColumn(column_name=col.name, type=datatype) dbcol.sum = dbcol.is_num dbcol.avg = dbcol.is_num dbcol.is_dttm = dbcol.is_time db_engine_spec.alter_new_orm_column(dbcol) else: dbcol.type = datatype dbcol.groupby = True dbcol.filterable = True self.columns.append(dbcol) if not any_date_col and dbcol.is_time: any_date_col = col.name metrics.append(M( metric_name='count', verbose_name='COUNT(*)', metric_type='count', expression='COUNT(*)', )) if not self.main_dttm_col: self.main_dttm_col = any_date_col self.add_missing_metrics(metrics) db.session.merge(self) db.session.commit()
[ "Fetches", "the", "metadata", "for", "the", "table", "and", "merges", "it", "in" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/sqla/models.py#L875-L930
[ "def", "fetch_metadata", "(", "self", ")", ":", "try", ":", "table", "=", "self", ".", "get_sqla_table_object", "(", ")", "except", "Exception", "as", "e", ":", "logging", ".", "exception", "(", "e", ")", "raise", "Exception", "(", "_", "(", "\"Table [{}] doesn't seem to exist in the specified database, \"", "\"couldn't fetch column information\"", ")", ".", "format", "(", "self", ".", "table_name", ")", ")", "M", "=", "SqlMetric", "# noqa", "metrics", "=", "[", "]", "any_date_col", "=", "None", "db_engine_spec", "=", "self", ".", "database", ".", "db_engine_spec", "db_dialect", "=", "self", ".", "database", ".", "get_dialect", "(", ")", "dbcols", "=", "(", "db", ".", "session", ".", "query", "(", "TableColumn", ")", ".", "filter", "(", "TableColumn", ".", "table", "==", "self", ")", ".", "filter", "(", "or_", "(", "TableColumn", ".", "column_name", "==", "col", ".", "name", "for", "col", "in", "table", ".", "columns", ")", ")", ")", "dbcols", "=", "{", "dbcol", ".", "column_name", ":", "dbcol", "for", "dbcol", "in", "dbcols", "}", "for", "col", "in", "table", ".", "columns", ":", "try", ":", "datatype", "=", "col", ".", "type", ".", "compile", "(", "dialect", "=", "db_dialect", ")", ".", "upper", "(", ")", "except", "Exception", "as", "e", ":", "datatype", "=", "'UNKNOWN'", "logging", ".", "error", "(", "'Unrecognized data type in {}.{}'", ".", "format", "(", "table", ",", "col", ".", "name", ")", ")", "logging", ".", "exception", "(", "e", ")", "dbcol", "=", "dbcols", ".", "get", "(", "col", ".", "name", ",", "None", ")", "if", "not", "dbcol", ":", "dbcol", "=", "TableColumn", "(", "column_name", "=", "col", ".", "name", ",", "type", "=", "datatype", ")", "dbcol", ".", "sum", "=", "dbcol", ".", "is_num", "dbcol", ".", "avg", "=", "dbcol", ".", "is_num", "dbcol", ".", "is_dttm", "=", "dbcol", ".", "is_time", "db_engine_spec", ".", "alter_new_orm_column", "(", "dbcol", ")", "else", ":", "dbcol", ".", "type", "=", "datatype", "dbcol", ".", "groupby", "=", "True", "dbcol", ".", "filterable", "=", "True", "self", ".", "columns", ".", "append", "(", "dbcol", ")", "if", "not", "any_date_col", "and", "dbcol", ".", "is_time", ":", "any_date_col", "=", "col", ".", "name", "metrics", ".", "append", "(", "M", "(", "metric_name", "=", "'count'", ",", "verbose_name", "=", "'COUNT(*)'", ",", "metric_type", "=", "'count'", ",", "expression", "=", "'COUNT(*)'", ",", ")", ")", "if", "not", "self", ".", "main_dttm_col", ":", "self", ".", "main_dttm_col", "=", "any_date_col", "self", ".", "add_missing_metrics", "(", "metrics", ")", "db", ".", "session", ".", "merge", "(", "self", ")", "db", ".", "session", ".", "commit", "(", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
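The datatype string stored on each TableColumn comes from SQLAlchemy's type compiler; that one step can be reproduced outside Superset:

import sqlalchemy as sa
from sqlalchemy.dialects import postgresql

col = sa.Column('ds', sa.DateTime())
# fetch_metadata compiles each reflected column type against the database's
# dialect and upper-cases it; PostgreSQL yields 'TIMESTAMP WITHOUT TIME ZONE'.
print(col.type.compile(dialect=postgresql.dialect()).upper())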
train
SqlaTable.import_obj
Imports the datasource from the object to the database. Metrics, columns and the datasource will be overridden if they exist. This function can be used to import/export dashboards between multiple superset instances. Audit metadata isn't copied over.
superset/connectors/sqla/models.py
def import_obj(cls, i_datasource, import_time=None): """Imports the datasource from the object to the database. Metrics and columns and datasource will be overrided if exists. This function can be used to import/export dashboards between multiple superset instances. Audit metadata isn't copies over. """ def lookup_sqlatable(table): return db.session.query(SqlaTable).join(Database).filter( SqlaTable.table_name == table.table_name, SqlaTable.schema == table.schema, Database.id == table.database_id, ).first() def lookup_database(table): return db.session.query(Database).filter_by( database_name=table.params_dict['database_name']).one() return import_datasource.import_datasource( db.session, i_datasource, lookup_database, lookup_sqlatable, import_time)
def import_obj(cls, i_datasource, import_time=None): """Imports the datasource from the object to the database. Metrics and columns and datasource will be overrided if exists. This function can be used to import/export dashboards between multiple superset instances. Audit metadata isn't copies over. """ def lookup_sqlatable(table): return db.session.query(SqlaTable).join(Database).filter( SqlaTable.table_name == table.table_name, SqlaTable.schema == table.schema, Database.id == table.database_id, ).first() def lookup_database(table): return db.session.query(Database).filter_by( database_name=table.params_dict['database_name']).one() return import_datasource.import_datasource( db.session, i_datasource, lookup_database, lookup_sqlatable, import_time)
[ "Imports", "the", "datasource", "from", "the", "object", "to", "the", "database", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/sqla/models.py#L933-L952
[ "def", "import_obj", "(", "cls", ",", "i_datasource", ",", "import_time", "=", "None", ")", ":", "def", "lookup_sqlatable", "(", "table", ")", ":", "return", "db", ".", "session", ".", "query", "(", "SqlaTable", ")", ".", "join", "(", "Database", ")", ".", "filter", "(", "SqlaTable", ".", "table_name", "==", "table", ".", "table_name", ",", "SqlaTable", ".", "schema", "==", "table", ".", "schema", ",", "Database", ".", "id", "==", "table", ".", "database_id", ",", ")", ".", "first", "(", ")", "def", "lookup_database", "(", "table", ")", ":", "return", "db", ".", "session", ".", "query", "(", "Database", ")", ".", "filter_by", "(", "database_name", "=", "table", ".", "params_dict", "[", "'database_name'", "]", ")", ".", "one", "(", ")", "return", "import_datasource", ".", "import_datasource", "(", "db", ".", "session", ",", "i_datasource", ",", "lookup_database", ",", "lookup_sqlatable", ",", "import_time", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
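A hedged sketch of the classmethod in use; `exported` stands for a SqlaTable deserialized from another instance's export payload (a hypothetical variable, with session and app setup elided):

# Overwrites any table with the same name/schema/database on the target
# instance, resolving the destination Database by its database_name param.
SqlaTable.import_obj(exported, import_time=1546300800)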
train
load_long_lat_data
Loading lat/long data from a csv file in the repo
superset/data/long_lat.py
def load_long_lat_data(): """Loading lat/long data from a csv file in the repo""" data = get_example_data('san_francisco.csv.gz', make_bytes=True) pdf = pd.read_csv(data, encoding='utf-8') start = datetime.datetime.now().replace( hour=0, minute=0, second=0, microsecond=0) pdf['datetime'] = [ start + datetime.timedelta(hours=i * 24 / (len(pdf) - 1)) for i in range(len(pdf)) ] pdf['occupancy'] = [random.randint(1, 6) for _ in range(len(pdf))] pdf['radius_miles'] = [random.uniform(1, 3) for _ in range(len(pdf))] pdf['geohash'] = pdf[['LAT', 'LON']].apply( lambda x: geohash.encode(*x), axis=1) pdf['delimited'] = pdf['LAT'].map(str).str.cat(pdf['LON'].map(str), sep=',') pdf.to_sql( # pylint: disable=no-member 'long_lat', db.engine, if_exists='replace', chunksize=500, dtype={ 'longitude': Float(), 'latitude': Float(), 'number': Float(), 'street': String(100), 'unit': String(10), 'city': String(50), 'district': String(50), 'region': String(50), 'postcode': Float(), 'id': String(100), 'datetime': DateTime(), 'occupancy': Float(), 'radius_miles': Float(), 'geohash': String(12), 'delimited': String(60), }, index=False) print('Done loading table!') print('-' * 80) print('Creating table reference') obj = db.session.query(TBL).filter_by(table_name='long_lat').first() if not obj: obj = TBL(table_name='long_lat') obj.main_dttm_col = 'datetime' obj.database = utils.get_or_create_main_db() db.session.merge(obj) db.session.commit() obj.fetch_metadata() tbl = obj slice_data = { 'granularity_sqla': 'day', 'since': '2014-01-01', 'until': 'now', 'where': '', 'viz_type': 'mapbox', 'all_columns_x': 'LON', 'all_columns_y': 'LAT', 'mapbox_style': 'mapbox://styles/mapbox/light-v9', 'all_columns': ['occupancy'], 'row_limit': 500000, } print('Creating a slice') slc = Slice( slice_name='Mapbox Long/Lat', viz_type='mapbox', datasource_type='table', datasource_id=tbl.id, params=get_slice_json(slice_data), ) misc_dash_slices.add(slc.slice_name) merge_slice(slc)
def load_long_lat_data(): """Loading lat/long data from a csv file in the repo""" data = get_example_data('san_francisco.csv.gz', make_bytes=True) pdf = pd.read_csv(data, encoding='utf-8') start = datetime.datetime.now().replace( hour=0, minute=0, second=0, microsecond=0) pdf['datetime'] = [ start + datetime.timedelta(hours=i * 24 / (len(pdf) - 1)) for i in range(len(pdf)) ] pdf['occupancy'] = [random.randint(1, 6) for _ in range(len(pdf))] pdf['radius_miles'] = [random.uniform(1, 3) for _ in range(len(pdf))] pdf['geohash'] = pdf[['LAT', 'LON']].apply( lambda x: geohash.encode(*x), axis=1) pdf['delimited'] = pdf['LAT'].map(str).str.cat(pdf['LON'].map(str), sep=',') pdf.to_sql( # pylint: disable=no-member 'long_lat', db.engine, if_exists='replace', chunksize=500, dtype={ 'longitude': Float(), 'latitude': Float(), 'number': Float(), 'street': String(100), 'unit': String(10), 'city': String(50), 'district': String(50), 'region': String(50), 'postcode': Float(), 'id': String(100), 'datetime': DateTime(), 'occupancy': Float(), 'radius_miles': Float(), 'geohash': String(12), 'delimited': String(60), }, index=False) print('Done loading table!') print('-' * 80) print('Creating table reference') obj = db.session.query(TBL).filter_by(table_name='long_lat').first() if not obj: obj = TBL(table_name='long_lat') obj.main_dttm_col = 'datetime' obj.database = utils.get_or_create_main_db() db.session.merge(obj) db.session.commit() obj.fetch_metadata() tbl = obj slice_data = { 'granularity_sqla': 'day', 'since': '2014-01-01', 'until': 'now', 'where': '', 'viz_type': 'mapbox', 'all_columns_x': 'LON', 'all_columns_y': 'LAT', 'mapbox_style': 'mapbox://styles/mapbox/light-v9', 'all_columns': ['occupancy'], 'row_limit': 500000, } print('Creating a slice') slc = Slice( slice_name='Mapbox Long/Lat', viz_type='mapbox', datasource_type='table', datasource_id=tbl.id, params=get_slice_json(slice_data), ) misc_dash_slices.add(slc.slice_name) merge_slice(slc)
[ "Loading", "lat", "/", "long", "data", "from", "a", "csv", "file", "in", "the", "repo" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/data/long_lat.py#L36-L110
[ "def", "load_long_lat_data", "(", ")", ":", "data", "=", "get_example_data", "(", "'san_francisco.csv.gz'", ",", "make_bytes", "=", "True", ")", "pdf", "=", "pd", ".", "read_csv", "(", "data", ",", "encoding", "=", "'utf-8'", ")", "start", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "replace", "(", "hour", "=", "0", ",", "minute", "=", "0", ",", "second", "=", "0", ",", "microsecond", "=", "0", ")", "pdf", "[", "'datetime'", "]", "=", "[", "start", "+", "datetime", ".", "timedelta", "(", "hours", "=", "i", "*", "24", "/", "(", "len", "(", "pdf", ")", "-", "1", ")", ")", "for", "i", "in", "range", "(", "len", "(", "pdf", ")", ")", "]", "pdf", "[", "'occupancy'", "]", "=", "[", "random", ".", "randint", "(", "1", ",", "6", ")", "for", "_", "in", "range", "(", "len", "(", "pdf", ")", ")", "]", "pdf", "[", "'radius_miles'", "]", "=", "[", "random", ".", "uniform", "(", "1", ",", "3", ")", "for", "_", "in", "range", "(", "len", "(", "pdf", ")", ")", "]", "pdf", "[", "'geohash'", "]", "=", "pdf", "[", "[", "'LAT'", ",", "'LON'", "]", "]", ".", "apply", "(", "lambda", "x", ":", "geohash", ".", "encode", "(", "*", "x", ")", ",", "axis", "=", "1", ")", "pdf", "[", "'delimited'", "]", "=", "pdf", "[", "'LAT'", "]", ".", "map", "(", "str", ")", ".", "str", ".", "cat", "(", "pdf", "[", "'LON'", "]", ".", "map", "(", "str", ")", ",", "sep", "=", "','", ")", "pdf", ".", "to_sql", "(", "# pylint: disable=no-member", "'long_lat'", ",", "db", ".", "engine", ",", "if_exists", "=", "'replace'", ",", "chunksize", "=", "500", ",", "dtype", "=", "{", "'longitude'", ":", "Float", "(", ")", ",", "'latitude'", ":", "Float", "(", ")", ",", "'number'", ":", "Float", "(", ")", ",", "'street'", ":", "String", "(", "100", ")", ",", "'unit'", ":", "String", "(", "10", ")", ",", "'city'", ":", "String", "(", "50", ")", ",", "'district'", ":", "String", "(", "50", ")", ",", "'region'", ":", "String", "(", "50", ")", ",", "'postcode'", ":", "Float", "(", ")", ",", "'id'", ":", "String", "(", "100", ")", ",", "'datetime'", ":", "DateTime", "(", ")", ",", "'occupancy'", ":", "Float", "(", ")", ",", "'radius_miles'", ":", "Float", "(", ")", ",", "'geohash'", ":", "String", "(", "12", ")", ",", "'delimited'", ":", "String", "(", "60", ")", ",", "}", ",", "index", "=", "False", ")", "print", "(", "'Done loading table!'", ")", "print", "(", "'-'", "*", "80", ")", "print", "(", "'Creating table reference'", ")", "obj", "=", "db", ".", "session", ".", "query", "(", "TBL", ")", ".", "filter_by", "(", "table_name", "=", "'long_lat'", ")", ".", "first", "(", ")", "if", "not", "obj", ":", "obj", "=", "TBL", "(", "table_name", "=", "'long_lat'", ")", "obj", ".", "main_dttm_col", "=", "'datetime'", "obj", ".", "database", "=", "utils", ".", "get_or_create_main_db", "(", ")", "db", ".", "session", ".", "merge", "(", "obj", ")", "db", ".", "session", ".", "commit", "(", ")", "obj", ".", "fetch_metadata", "(", ")", "tbl", "=", "obj", "slice_data", "=", "{", "'granularity_sqla'", ":", "'day'", ",", "'since'", ":", "'2014-01-01'", ",", "'until'", ":", "'now'", ",", "'where'", ":", "''", ",", "'viz_type'", ":", "'mapbox'", ",", "'all_columns_x'", ":", "'LON'", ",", "'all_columns_y'", ":", "'LAT'", ",", "'mapbox_style'", ":", "'mapbox://styles/mapbox/light-v9'", ",", "'all_columns'", ":", "[", "'occupancy'", "]", ",", "'row_limit'", ":", "500000", ",", "}", "print", "(", "'Creating a slice'", ")", "slc", "=", "Slice", "(", "slice_name", "=", "'Mapbox Long/Lat'", ",", "viz_type", "=", "'mapbox'", ",", "datasource_type", "=", "'table'", ",", "datasource_id", "=", "tbl", ".", "id", ",", "params", "=", "get_slice_json", "(", "slice_data", ")", ",", ")", "misc_dash_slices", ".", "add", "(", "slc", ".", "slice_name", ")", "merge_slice", "(", "slc", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
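The two derived columns can be reproduced on a toy frame; this assumes the same `geohash` module the loader imports:

import geohash
import pandas as pd

pdf = pd.DataFrame({'LAT': [37.77, 37.80], 'LON': [-122.42, -122.27]})
# Geohash-encode each (LAT, LON) pair, and keep a 'lat,lon' string form too.
pdf['geohash'] = pdf[['LAT', 'LON']].apply(lambda x: geohash.encode(*x), axis=1)
pdf['delimited'] = pdf['LAT'].map(str).str.cat(pdf['LON'].map(str), sep=',')
print(pdf)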
train
Datasource.external_metadata
Gets column info from the source system
superset/views/datasource.py
def external_metadata(self, datasource_type=None, datasource_id=None): """Gets column info from the source system""" if datasource_type == 'druid': datasource = ConnectorRegistry.get_datasource( datasource_type, datasource_id, db.session) elif datasource_type == 'table': database = ( db.session .query(Database) .filter_by(id=request.args.get('db_id')) .one() ) Table = ConnectorRegistry.sources['table'] datasource = Table( database=database, table_name=request.args.get('table_name'), schema=request.args.get('schema') or None, ) external_metadata = datasource.external_metadata() return self.json_response(external_metadata)
def external_metadata(self, datasource_type=None, datasource_id=None): """Gets column info from the source system""" if datasource_type == 'druid': datasource = ConnectorRegistry.get_datasource( datasource_type, datasource_id, db.session) elif datasource_type == 'table': database = ( db.session .query(Database) .filter_by(id=request.args.get('db_id')) .one() ) Table = ConnectorRegistry.sources['table'] datasource = Table( database=database, table_name=request.args.get('table_name'), schema=request.args.get('schema') or None, ) external_metadata = datasource.external_metadata() return self.json_response(external_metadata)
[ "Gets", "column", "info", "from", "the", "source", "system" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/datasource.py#L70-L89
[ "def", "external_metadata", "(", "self", ",", "datasource_type", "=", "None", ",", "datasource_id", "=", "None", ")", ":", "if", "datasource_type", "==", "'druid'", ":", "datasource", "=", "ConnectorRegistry", ".", "get_datasource", "(", "datasource_type", ",", "datasource_id", ",", "db", ".", "session", ")", "elif", "datasource_type", "==", "'table'", ":", "database", "=", "(", "db", ".", "session", ".", "query", "(", "Database", ")", ".", "filter_by", "(", "id", "=", "request", ".", "args", ".", "get", "(", "'db_id'", ")", ")", ".", "one", "(", ")", ")", "Table", "=", "ConnectorRegistry", ".", "sources", "[", "'table'", "]", "datasource", "=", "Table", "(", "database", "=", "database", ",", "table_name", "=", "request", ".", "args", ".", "get", "(", "'table_name'", ")", ",", "schema", "=", "request", ".", "args", ".", "get", "(", "'schema'", ")", "or", "None", ",", ")", "external_metadata", "=", "datasource", ".", "external_metadata", "(", ")", "return", "self", ".", "json_response", "(", "external_metadata", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
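From the client side, the table branch is driven purely by query-string arguments; a sketch with the requests library, where the URL layout, host, and the omitted authentication are assumptions rather than details confirmed by the record (the datasource_id path segment is unused on this branch):

import requests

resp = requests.get(
    'http://localhost:8088/datasource/external_metadata/table/0/',
    params={'db_id': 1, 'table_name': 'long_lat', 'schema': 'public'},
)
print(resp.json())  # column names and types as reported by the source system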
train
filter_not_empty_values
Returns a list of non-empty values or None
superset/forms.py
def filter_not_empty_values(value): """Returns a list of non empty values or None""" if not value: return None data = [x for x in value if x] if not data: return None return data
def filter_not_empty_values(value): """Returns a list of non empty values or None""" if not value: return None data = [x for x in value if x] if not data: return None return data
[ "Returns", "a", "list", "of", "non", "empty", "values", "or", "None" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/forms.py#L50-L57
[ "def", "filter_not_empty_values", "(", "value", ")", ":", "if", "not", "value", ":", "return", "None", "data", "=", "[", "x", "for", "x", "in", "value", "if", "x", "]", "if", "not", "data", ":", "return", "None", "return", "data" ]
ca2996c78f679260eb79c6008e276733df5fb653
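The contract is small enough to pin down with a few assertions:

assert filter_not_empty_values(None) is None
assert filter_not_empty_values([]) is None
assert filter_not_empty_values(['', None, 0]) is None  # all members falsy
assert filter_not_empty_values(['a', '', 'b']) == ['a', 'b']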
train
CsvToDatabaseForm.at_least_one_schema_is_allowed
If the user has access to the database or all datasources:
1. if schemas_allowed_for_csv_upload is empty
   a) if database does not support schema, user is able to upload csv without specifying schema name
   b) if database supports schema, user is able to upload csv to any schema
2. if schemas_allowed_for_csv_upload is not empty
   a) if database does not support schema, this situation is impossible and upload will fail
   b) if database supports schema, user is able to upload to a schema in schemas_allowed_for_csv_upload
If the user does not have access to the database or all datasources:
1. if schemas_allowed_for_csv_upload is empty
   a) if database does not support schema, user is unable to upload csv
   b) if database supports schema, user is unable to upload csv
2. if schemas_allowed_for_csv_upload is not empty
   a) if database does not support schema, this situation is impossible and user is unable to upload csv
   b) if database supports schema, user is able to upload to a schema in schemas_allowed_for_csv_upload
superset/forms.py
def at_least_one_schema_is_allowed(database): """ If the user has access to the database or all datasource 1. if schemas_allowed_for_csv_upload is empty a) if database does not support schema user is able to upload csv without specifying schema name b) if database supports schema user is able to upload csv to any schema 2. if schemas_allowed_for_csv_upload is not empty a) if database does not support schema This situation is impossible and upload will fail b) if database supports schema user is able to upload to schema in schemas_allowed_for_csv_upload elif the user does not access to the database or all datasource 1. if schemas_allowed_for_csv_upload is empty a) if database does not support schema user is unable to upload csv b) if database supports schema user is unable to upload csv 2. if schemas_allowed_for_csv_upload is not empty a) if database does not support schema This situation is impossible and user is unable to upload csv b) if database supports schema user is able to upload to schema in schemas_allowed_for_csv_upload """ if (security_manager.database_access(database) or security_manager.all_datasource_access()): return True schemas = database.get_schema_access_for_csv_upload() if (schemas and security_manager.schemas_accessible_by_user( database, schemas, False)): return True return False
def at_least_one_schema_is_allowed(database): """ If the user has access to the database or all datasource 1. if schemas_allowed_for_csv_upload is empty a) if database does not support schema user is able to upload csv without specifying schema name b) if database supports schema user is able to upload csv to any schema 2. if schemas_allowed_for_csv_upload is not empty a) if database does not support schema This situation is impossible and upload will fail b) if database supports schema user is able to upload to schema in schemas_allowed_for_csv_upload elif the user does not access to the database or all datasource 1. if schemas_allowed_for_csv_upload is empty a) if database does not support schema user is unable to upload csv b) if database supports schema user is unable to upload csv 2. if schemas_allowed_for_csv_upload is not empty a) if database does not support schema This situation is impossible and user is unable to upload csv b) if database supports schema user is able to upload to schema in schemas_allowed_for_csv_upload """ if (security_manager.database_access(database) or security_manager.all_datasource_access()): return True schemas = database.get_schema_access_for_csv_upload() if (schemas and security_manager.schemas_accessible_by_user( database, schemas, False)): return True return False
[ "If", "the", "user", "has", "access", "to", "the", "database", "or", "all", "datasource", "1", ".", "if", "schemas_allowed_for_csv_upload", "is", "empty", "a", ")", "if", "database", "does", "not", "support", "schema", "user", "is", "able", "to", "upload", "csv", "without", "specifying", "schema", "name", "b", ")", "if", "database", "supports", "schema", "user", "is", "able", "to", "upload", "csv", "to", "any", "schema", "2", ".", "if", "schemas_allowed_for_csv_upload", "is", "not", "empty", "a", ")", "if", "database", "does", "not", "support", "schema", "This", "situation", "is", "impossible", "and", "upload", "will", "fail", "b", ")", "if", "database", "supports", "schema", "user", "is", "able", "to", "upload", "to", "schema", "in", "schemas_allowed_for_csv_upload", "elif", "the", "user", "does", "not", "access", "to", "the", "database", "or", "all", "datasource", "1", ".", "if", "schemas_allowed_for_csv_upload", "is", "empty", "a", ")", "if", "database", "does", "not", "support", "schema", "user", "is", "unable", "to", "upload", "csv", "b", ")", "if", "database", "supports", "schema", "user", "is", "unable", "to", "upload", "csv", "2", ".", "if", "schemas_allowed_for_csv_upload", "is", "not", "empty", "a", ")", "if", "database", "does", "not", "support", "schema", "This", "situation", "is", "impossible", "and", "user", "is", "unable", "to", "upload", "csv", "b", ")", "if", "database", "supports", "schema", "user", "is", "able", "to", "upload", "to", "schema", "in", "schemas_allowed_for_csv_upload" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/forms.py#L73-L106
[ "def", "at_least_one_schema_is_allowed", "(", "database", ")", ":", "if", "(", "security_manager", ".", "database_access", "(", "database", ")", "or", "security_manager", ".", "all_datasource_access", "(", ")", ")", ":", "return", "True", "schemas", "=", "database", ".", "get_schema_access_for_csv_upload", "(", ")", "if", "(", "schemas", "and", "security_manager", ".", "schemas_accessible_by_user", "(", "database", ",", "schemas", ",", "False", ")", ")", ":", "return", "True", "return", "False" ]
ca2996c78f679260eb79c6008e276733df5fb653
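Despite the long case analysis, the body collapses to one boolean: broad access, or at least one allowed schema that is visible to the user. A sketch of that reading, using the same security_manager calls as the record (the wrapper name is illustrative):

def csv_upload_possible(database):
    broad = (security_manager.database_access(database) or
             security_manager.all_datasource_access())
    schemas = database.get_schema_access_for_csv_upload()
    visible = schemas and security_manager.schemas_accessible_by_user(
        database, schemas, False)
    return bool(broad or visible)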
train
QueryFilter.apply
Filter queries to only those owned by current user if can_only_access_owned_queries permission is set. :returns: query
superset/views/sql_lab.py
def apply( self, query: BaseQuery, func: Callable) -> BaseQuery: """ Filter queries to only those owned by current user if can_only_access_owned_queries permission is set. :returns: query """ if security_manager.can_only_access_owned_queries(): query = ( query .filter(Query.user_id == g.user.get_user_id()) ) return query
def apply( self, query: BaseQuery, func: Callable) -> BaseQuery: """ Filter queries to only those owned by current user if can_only_access_owned_queries permission is set. :returns: query """ if security_manager.can_only_access_owned_queries(): query = ( query .filter(Query.user_id == g.user.get_user_id()) ) return query
[ "Filter", "queries", "to", "only", "those", "owned", "by", "current", "user", "if", "can_only_access_owned_queries", "permission", "is", "set", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/sql_lab.py#L34-L49
[ "def", "apply", "(", "self", ",", "query", ":", "BaseQuery", ",", "func", ":", "Callable", ")", "->", "BaseQuery", ":", "if", "security_manager", ".", "can_only_access_owned_queries", "(", ")", ":", "query", "=", "(", "query", ".", "filter", "(", "Query", ".", "user_id", "==", "g", ".", "user", ".", "get_user_id", "(", ")", ")", ")", "return", "query" ]
ca2996c78f679260eb79c6008e276733df5fb653
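For a user holding the permission, the filter is equivalent to narrowing the base query by ownership; given a BaseQuery named query, the restricted branch reduces to:

# Hand-rolled equivalent of the restricted branch above.
owned_only = query.filter(Query.user_id == g.user.get_user_id())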
train
TableModelView.edit
Simple hack to redirect to explore view after saving
superset/connectors/sqla/views.py
def edit(self, pk): """Simple hack to redirect to explore view after saving""" resp = super(TableModelView, self).edit(pk) if isinstance(resp, str): return resp return redirect('/superset/explore/table/{}/'.format(pk))
def edit(self, pk): """Simple hack to redirect to explore view after saving""" resp = super(TableModelView, self).edit(pk) if isinstance(resp, str): return resp return redirect('/superset/explore/table/{}/'.format(pk))
[ "Simple", "hack", "to", "redirect", "to", "explore", "view", "after", "saving" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/sqla/views.py#L305-L310
[ "def", "edit", "(", "self", ",", "pk", ")", ":", "resp", "=", "super", "(", "TableModelView", ",", "self", ")", ".", "edit", "(", "pk", ")", "if", "isinstance", "(", "resp", ",", "str", ")", ":", "return", "resp", "return", "redirect", "(", "'/superset/explore/table/{}/'", ".", "format", "(", "pk", ")", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
get_language_pack
Get/cache a language pack

Returns the language pack from cache if it exists, caches otherwise

>>> get_language_pack('fr')['Dashboards']
"Tableaux de bords"
superset/translations/utils.py
def get_language_pack(locale): """Get/cache a language pack Returns the langugage pack from cache if it exists, caches otherwise >>> get_language_pack('fr')['Dashboards'] "Tableaux de bords" """ pack = ALL_LANGUAGE_PACKS.get(locale) if not pack: filename = DIR + '/{}/LC_MESSAGES/messages.json'.format(locale) try: with open(filename) as f: pack = json.load(f) ALL_LANGUAGE_PACKS[locale] = pack except Exception: # Assuming english, client side falls back on english pass return pack
def get_language_pack(locale): """Get/cache a language pack Returns the langugage pack from cache if it exists, caches otherwise >>> get_language_pack('fr')['Dashboards'] "Tableaux de bords" """ pack = ALL_LANGUAGE_PACKS.get(locale) if not pack: filename = DIR + '/{}/LC_MESSAGES/messages.json'.format(locale) try: with open(filename) as f: pack = json.load(f) ALL_LANGUAGE_PACKS[locale] = pack except Exception: # Assuming english, client side falls back on english pass return pack
[ "Get", "/", "cache", "a", "language", "pack" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/translations/utils.py#L27-L45
[ "def", "get_language_pack", "(", "locale", ")", ":", "pack", "=", "ALL_LANGUAGE_PACKS", ".", "get", "(", "locale", ")", "if", "not", "pack", ":", "filename", "=", "DIR", "+", "'/{}/LC_MESSAGES/messages.json'", ".", "format", "(", "locale", ")", "try", ":", "with", "open", "(", "filename", ")", "as", "f", ":", "pack", "=", "json", ".", "load", "(", "f", ")", "ALL_LANGUAGE_PACKS", "[", "locale", "]", "=", "pack", "except", "Exception", ":", "# Assuming english, client side falls back on english", "pass", "return", "pack" ]
ca2996c78f679260eb79c6008e276733df5fb653
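A minimal runnable sketch of the same read-through cache, pointed at a temporary directory instead of Superset's translations tree; the file layout mirrors the '/{locale}/LC_MESSAGES/messages.json' convention above.

import json, os, tempfile

DIR = tempfile.mkdtemp()
os.makedirs(os.path.join(DIR, 'fr', 'LC_MESSAGES'))
with open(os.path.join(DIR, 'fr', 'LC_MESSAGES', 'messages.json'), 'w') as f:
    json.dump({'Dashboards': 'Tableaux de bords'}, f)

ALL_LANGUAGE_PACKS = {}  # module-level dict: survives across calls

def get_language_pack(locale):
    pack = ALL_LANGUAGE_PACKS.get(locale)
    if not pack:
        try:
            with open(DIR + '/{}/LC_MESSAGES/messages.json'.format(locale)) as f:
                pack = json.load(f)
            ALL_LANGUAGE_PACKS[locale] = pack
        except Exception:
            pass  # unknown locale: return None, client falls back to English
    return pack

print(get_language_pack('fr')['Dashboards'])  # first call reads the file, then caches
print(get_language_pack('de'))                # None: no pack, English fallback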
train
get_form_data
Build `form_data` for chart GET request from dashboard's `default_filters`. When a dashboard has `default_filters` they need to be added as extra filters in the GET request for charts.
superset/tasks/cache.py
def get_form_data(chart_id, dashboard=None): """ Build `form_data` for chart GET request from dashboard's `default_filters`. When a dashboard has `default_filters` they need to be added as extra filters in the GET request for charts. """ form_data = {'slice_id': chart_id} if dashboard is None or not dashboard.json_metadata: return form_data json_metadata = json.loads(dashboard.json_metadata) # do not apply filters if chart is immune to them if chart_id in json_metadata.get('filter_immune_slices', []): return form_data default_filters = json.loads(json_metadata.get('default_filters', 'null')) if not default_filters: return form_data # are some of the fields in the chart immune to filters? filter_immune_slice_fields = json_metadata.get('filter_immune_slice_fields', {}) immune_fields = filter_immune_slice_fields.get(str(chart_id), []) extra_filters = [] for filters in default_filters.values(): for col, val in filters.items(): if col not in immune_fields: extra_filters.append({'col': col, 'op': 'in', 'val': val}) if extra_filters: form_data['extra_filters'] = extra_filters return form_data
def get_form_data(chart_id, dashboard=None): """ Build `form_data` for chart GET request from dashboard's `default_filters`. When a dashboard has `default_filters` they need to be added as extra filters in the GET request for charts. """ form_data = {'slice_id': chart_id} if dashboard is None or not dashboard.json_metadata: return form_data json_metadata = json.loads(dashboard.json_metadata) # do not apply filters if chart is immune to them if chart_id in json_metadata.get('filter_immune_slices', []): return form_data default_filters = json.loads(json_metadata.get('default_filters', 'null')) if not default_filters: return form_data # are some of the fields in the chart immune to filters? filter_immune_slice_fields = json_metadata.get('filter_immune_slice_fields', {}) immune_fields = filter_immune_slice_fields.get(str(chart_id), []) extra_filters = [] for filters in default_filters.values(): for col, val in filters.items(): if col not in immune_fields: extra_filters.append({'col': col, 'op': 'in', 'val': val}) if extra_filters: form_data['extra_filters'] = extra_filters return form_data
[ "Build", "form_data", "for", "chart", "GET", "request", "from", "dashboard", "s", "default_filters", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/tasks/cache.py#L40-L75
[ "def", "get_form_data", "(", "chart_id", ",", "dashboard", "=", "None", ")", ":", "form_data", "=", "{", "'slice_id'", ":", "chart_id", "}", "if", "dashboard", "is", "None", "or", "not", "dashboard", ".", "json_metadata", ":", "return", "form_data", "json_metadata", "=", "json", ".", "loads", "(", "dashboard", ".", "json_metadata", ")", "# do not apply filters if chart is immune to them", "if", "chart_id", "in", "json_metadata", ".", "get", "(", "'filter_immune_slices'", ",", "[", "]", ")", ":", "return", "form_data", "default_filters", "=", "json", ".", "loads", "(", "json_metadata", ".", "get", "(", "'default_filters'", ",", "'null'", ")", ")", "if", "not", "default_filters", ":", "return", "form_data", "# are some of the fields in the chart immune to filters?", "filter_immune_slice_fields", "=", "json_metadata", ".", "get", "(", "'filter_immune_slice_fields'", ",", "{", "}", ")", "immune_fields", "=", "filter_immune_slice_fields", ".", "get", "(", "str", "(", "chart_id", ")", ",", "[", "]", ")", "extra_filters", "=", "[", "]", "for", "filters", "in", "default_filters", ".", "values", "(", ")", ":", "for", "col", ",", "val", "in", "filters", ".", "items", "(", ")", ":", "if", "col", "not", "in", "immune_fields", ":", "extra_filters", ".", "append", "(", "{", "'col'", ":", "col", ",", "'op'", ":", "'in'", ",", "'val'", ":", "val", "}", ")", "if", "extra_filters", ":", "form_data", "[", "'extra_filters'", "]", "=", "extra_filters", "return", "form_data" ]
ca2996c78f679260eb79c6008e276733df5fb653
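The metadata-to-form_data transformation is easiest to see standalone. The helper below reimplements the logic over an invented json_metadata blob; note that default_filters is itself a JSON string nested inside the metadata JSON.

# Standalone walk-through with made-up dashboard metadata; field names
# follow the code above, values are invented.
import json

json_metadata = json.dumps({
    'filter_immune_slices': [72],
    'filter_immune_slice_fields': {'45': ['region']},
    'default_filters': json.dumps({
        '33': {'country': ['FR', 'DE'], 'region': ['EU']},
    }),
})

def form_data_for(chart_id, json_metadata):
    metadata = json.loads(json_metadata)
    form_data = {'slice_id': chart_id}
    if chart_id in metadata.get('filter_immune_slices', []):
        return form_data
    default_filters = json.loads(metadata.get('default_filters', 'null')) or {}
    immune = metadata.get('filter_immune_slice_fields', {}).get(str(chart_id), [])
    extra = [{'col': c, 'op': 'in', 'val': v}
             for filters in default_filters.values()
             for c, v in filters.items() if c not in immune]
    if extra:
        form_data['extra_filters'] = extra
    return form_data

print(form_data_for(72, json_metadata))  # immune chart: {'slice_id': 72}
print(form_data_for(45, json_metadata))  # 'region' dropped, 'country' kept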
train
get_url
Return external URL for warming up a given chart/table cache.
superset/tasks/cache.py
def get_url(params): """Return external URL for warming up a given chart/table cache.""" baseurl = 'http://{SUPERSET_WEBSERVER_ADDRESS}:{SUPERSET_WEBSERVER_PORT}/'.format( **app.config) with app.test_request_context(): return urllib.parse.urljoin( baseurl, url_for('Superset.explore_json', **params), )
def get_url(params): """Return external URL for warming up a given chart/table cache.""" baseurl = 'http://{SUPERSET_WEBSERVER_ADDRESS}:{SUPERSET_WEBSERVER_PORT}/'.format( **app.config) with app.test_request_context(): return urllib.parse.urljoin( baseurl, url_for('Superset.explore_json', **params), )
[ "Return", "external", "URL", "for", "warming", "up", "a", "given", "chart", "/", "table", "cache", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/tasks/cache.py#L78-L86
[ "def", "get_url", "(", "params", ")", ":", "baseurl", "=", "'http://{SUPERSET_WEBSERVER_ADDRESS}:{SUPERSET_WEBSERVER_PORT}/'", ".", "format", "(", "*", "*", "app", ".", "config", ")", "with", "app", ".", "test_request_context", "(", ")", ":", "return", "urllib", ".", "parse", ".", "urljoin", "(", "baseurl", ",", "url_for", "(", "'Superset.explore_json'", ",", "*", "*", "params", ")", ",", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
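What the join yields for invented config values; the real code computes the path with Flask's url_for inside a test request context rather than hard-coding it.

import urllib.parse

baseurl = 'http://{SUPERSET_WEBSERVER_ADDRESS}:{SUPERSET_WEBSERVER_PORT}/'.format(
    SUPERSET_WEBSERVER_ADDRESS='0.0.0.0', SUPERSET_WEBSERVER_PORT=8088)
path = '/superset/explore_json/?form_data=%7B%22slice_id%22%3A42%7D'  # illustrative
print(urllib.parse.urljoin(baseurl, path))
# -> http://0.0.0.0:8088/superset/explore_json/?form_data=%7B%22slice_id%22%3A42%7D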
train
cache_warmup
Warm up cache. This task periodically hits charts to warm up the cache.
superset/tasks/cache.py
def cache_warmup(strategy_name, *args, **kwargs): """ Warm up cache. This task periodically hits charts to warm up the cache. """ logger.info('Loading strategy') class_ = None for class_ in strategies: if class_.name == strategy_name: break else: message = f'No strategy {strategy_name} found!' logger.error(message) return message logger.info(f'Loading {class_.__name__}') try: strategy = class_(*args, **kwargs) logger.info('Success!') except TypeError: message = 'Error loading strategy!' logger.exception(message) return message results = {'success': [], 'errors': []} for url in strategy.get_urls(): try: logger.info(f'Fetching {url}') requests.get(url) results['success'].append(url) except RequestException: logger.exception('Error warming up cache!') results['errors'].append(url) return results
def cache_warmup(strategy_name, *args, **kwargs): """ Warm up cache. This task periodically hits charts to warm up the cache. """ logger.info('Loading strategy') class_ = None for class_ in strategies: if class_.name == strategy_name: break else: message = f'No strategy {strategy_name} found!' logger.error(message) return message logger.info(f'Loading {class_.__name__}') try: strategy = class_(*args, **kwargs) logger.info('Success!') except TypeError: message = 'Error loading strategy!' logger.exception(message) return message results = {'success': [], 'errors': []} for url in strategy.get_urls(): try: logger.info(f'Fetching {url}') requests.get(url) results['success'].append(url) except RequestException: logger.exception('Error warming up cache!') results['errors'].append(url) return results
[ "Warm", "up", "cache", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/tasks/cache.py#L280-L316
[ "def", "cache_warmup", "(", "strategy_name", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "logger", ".", "info", "(", "'Loading strategy'", ")", "class_", "=", "None", "for", "class_", "in", "strategies", ":", "if", "class_", ".", "name", "==", "strategy_name", ":", "break", "else", ":", "message", "=", "f'No strategy {strategy_name} found!'", "logger", ".", "error", "(", "message", ")", "return", "message", "logger", ".", "info", "(", "f'Loading {class_.__name__}'", ")", "try", ":", "strategy", "=", "class_", "(", "*", "args", ",", "*", "*", "kwargs", ")", "logger", ".", "info", "(", "'Success!'", ")", "except", "TypeError", ":", "message", "=", "'Error loading strategy!'", "logger", ".", "exception", "(", "message", ")", "return", "message", "results", "=", "{", "'success'", ":", "[", "]", ",", "'errors'", ":", "[", "]", "}", "for", "url", "in", "strategy", ".", "get_urls", "(", ")", ":", "try", ":", "logger", ".", "info", "(", "f'Fetching {url}'", ")", "requests", ".", "get", "(", "url", ")", "results", "[", "'success'", "]", ".", "append", "(", "url", ")", "except", "RequestException", ":", "logger", ".", "exception", "(", "'Error warming up cache!'", ")", "results", "[", "'errors'", "]", ".", "append", "(", "url", ")", "return", "results" ]
ca2996c78f679260eb79c6008e276733df5fb653
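This task is designed to be driven by Celery beat. One possible schedule entry is sketched below; the task name and strategy kwargs follow Superset's documented examples but should be treated as illustrative, and must match a strategy class actually registered in `strategies`.

# Possible beat schedule for the task above (illustrative).
from celery.schedules import crontab

CELERYBEAT_SCHEDULE = {
    'cache-warmup-hourly': {
        'task': 'cache-warmup',                   # assumed Celery task name
        'schedule': crontab(minute=0, hour='*'),  # top of every hour
        'kwargs': {
            'strategy_name': 'top_n_dashboards',  # must match a strategy's .name
            'top_n': 5,
            'since': '7 days ago',
        },
    },
}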
train
fetch_logs
Mocked. Retrieve the logs produced by the execution of the query. Can be called multiple times to fetch the logs produced after the previous call. :returns: list<str> :raises: ``ProgrammingError`` when no query has been started .. note:: This is not a part of DB-API.
superset/db_engines/hive.py
def fetch_logs(self, max_rows=1024, orientation=None): """Mocked. Retrieve the logs produced by the execution of the query. Can be called multiple times to fetch the logs produced after the previous call. :returns: list<str> :raises: ``ProgrammingError`` when no query has been started .. note:: This is not a part of DB-API. """ from pyhive import hive from TCLIService import ttypes from thrift import Thrift orientation = orientation or ttypes.TFetchOrientation.FETCH_NEXT try: req = ttypes.TGetLogReq(operationHandle=self._operationHandle) logs = self._connection.client.GetLog(req).log return logs # raised if Hive is used except (ttypes.TApplicationException, Thrift.TApplicationException): if self._state == self._STATE_NONE: raise hive.ProgrammingError('No query yet') logs = [] while True: req = ttypes.TFetchResultsReq( operationHandle=self._operationHandle, orientation=ttypes.TFetchOrientation.FETCH_NEXT, maxRows=self.arraysize, fetchType=1, # 0: results, 1: logs ) response = self._connection.client.FetchResults(req) hive._check_status(response) assert not response.results.rows, \ 'expected data in columnar format' assert len(response.results.columns) == 1, response.results.columns new_logs = hive._unwrap_column(response.results.columns[0]) logs += new_logs if not new_logs: break return '\n'.join(logs)
def fetch_logs(self, max_rows=1024, orientation=None): """Mocked. Retrieve the logs produced by the execution of the query. Can be called multiple times to fetch the logs produced after the previous call. :returns: list<str> :raises: ``ProgrammingError`` when no query has been started .. note:: This is not a part of DB-API. """ from pyhive import hive from TCLIService import ttypes from thrift import Thrift orientation = orientation or ttypes.TFetchOrientation.FETCH_NEXT try: req = ttypes.TGetLogReq(operationHandle=self._operationHandle) logs = self._connection.client.GetLog(req).log return logs # raised if Hive is used except (ttypes.TApplicationException, Thrift.TApplicationException): if self._state == self._STATE_NONE: raise hive.ProgrammingError('No query yet') logs = [] while True: req = ttypes.TFetchResultsReq( operationHandle=self._operationHandle, orientation=ttypes.TFetchOrientation.FETCH_NEXT, maxRows=self.arraysize, fetchType=1, # 0: results, 1: logs ) response = self._connection.client.FetchResults(req) hive._check_status(response) assert not response.results.rows, \ 'expected data in columnar format' assert len(response.results.columns) == 1, response.results.columns new_logs = hive._unwrap_column(response.results.columns[0]) logs += new_logs if not new_logs: break return '\n'.join(logs)
[ "Mocked", ".", "Retrieve", "the", "logs", "produced", "by", "the", "execution", "of", "the", "query", ".", "Can", "be", "called", "multiple", "times", "to", "fetch", "the", "logs", "produced", "after", "the", "previous", "call", ".", ":", "returns", ":", "list<str", ">", ":", "raises", ":", "ProgrammingError", "when", "no", "query", "has", "been", "started", "..", "note", "::", "This", "is", "not", "a", "part", "of", "DB", "-", "API", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/db_engines/hive.py#L21-L61
[ "def", "fetch_logs", "(", "self", ",", "max_rows", "=", "1024", ",", "orientation", "=", "None", ")", ":", "from", "pyhive", "import", "hive", "from", "TCLIService", "import", "ttypes", "from", "thrift", "import", "Thrift", "orientation", "=", "orientation", "or", "ttypes", ".", "TFetchOrientation", ".", "FETCH_NEXT", "try", ":", "req", "=", "ttypes", ".", "TGetLogReq", "(", "operationHandle", "=", "self", ".", "_operationHandle", ")", "logs", "=", "self", ".", "_connection", ".", "client", ".", "GetLog", "(", "req", ")", ".", "log", "return", "logs", "# raised if Hive is used", "except", "(", "ttypes", ".", "TApplicationException", ",", "Thrift", ".", "TApplicationException", ")", ":", "if", "self", ".", "_state", "==", "self", ".", "_STATE_NONE", ":", "raise", "hive", ".", "ProgrammingError", "(", "'No query yet'", ")", "logs", "=", "[", "]", "while", "True", ":", "req", "=", "ttypes", ".", "TFetchResultsReq", "(", "operationHandle", "=", "self", ".", "_operationHandle", ",", "orientation", "=", "ttypes", ".", "TFetchOrientation", ".", "FETCH_NEXT", ",", "maxRows", "=", "self", ".", "arraysize", ",", "fetchType", "=", "1", ",", "# 0: results, 1: logs", ")", "response", "=", "self", ".", "_connection", ".", "client", ".", "FetchResults", "(", "req", ")", "hive", ".", "_check_status", "(", "response", ")", "assert", "not", "response", ".", "results", ".", "rows", ",", "'expected data in columnar format'", "assert", "len", "(", "response", ".", "results", ".", "columns", ")", "==", "1", ",", "response", ".", "results", ".", "columns", "new_logs", "=", "hive", ".", "_unwrap_column", "(", "response", ".", "results", ".", "columns", "[", "0", "]", ")", "logs", "+=", "new_logs", "if", "not", "new_logs", ":", "break", "return", "'\\n'", ".", "join", "(", "logs", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
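A hedged usage sketch: with this fetch_logs patched onto pyhive's Cursor (as Superset does), logs can be streamed while polling an async query. Host, port and SQL are invented, and the async_ keyword spelling varies across pyhive versions.

# Sketch only: assumes a reachable HiveServer2 and the patched cursor.
from pyhive import hive

cursor = hive.connect(host='localhost', port=10000).cursor()
cursor.execute('SELECT COUNT(*) FROM logs', async_=True)  # 'async' in older pyhive
unfinished = (
    hive.ttypes.TOperationState.INITIALIZED_STATE,
    hive.ttypes.TOperationState.RUNNING_STATE,
)
while cursor.poll().operationState in unfinished:
    print(cursor.fetch_logs())  # incremental progress, e.g. map/reduce %
print(cursor.fetchall())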
train
DruidCluster.refresh_datasources
Refresh metadata of all datasources in the cluster. If ``datasource_name`` is specified, only that datasource is updated.
superset/connectors/druid/models.py
def refresh_datasources( self, datasource_name=None, merge_flag=True, refreshAll=True): """Refresh metadata of all datasources in the cluster If ``datasource_name`` is specified, only that datasource is updated """ ds_list = self.get_datasources() blacklist = conf.get('DRUID_DATA_SOURCE_BLACKLIST', []) ds_refresh = [] if not datasource_name: ds_refresh = list(filter(lambda ds: ds not in blacklist, ds_list)) elif datasource_name not in blacklist and datasource_name in ds_list: ds_refresh.append(datasource_name) else: return self.refresh(ds_refresh, merge_flag, refreshAll)
def refresh_datasources( self, datasource_name=None, merge_flag=True, refreshAll=True): """Refresh metadata of all datasources in the cluster If ``datasource_name`` is specified, only that datasource is updated """ ds_list = self.get_datasources() blacklist = conf.get('DRUID_DATA_SOURCE_BLACKLIST', []) ds_refresh = [] if not datasource_name: ds_refresh = list(filter(lambda ds: ds not in blacklist, ds_list)) elif datasource_name not in blacklist and datasource_name in ds_list: ds_refresh.append(datasource_name) else: return self.refresh(ds_refresh, merge_flag, refreshAll)
[ "Refresh", "metadata", "of", "all", "datasources", "in", "the", "cluster", "If", "datasource_name", "is", "specified", "only", "that", "datasource", "is", "updated" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L165-L182
[ "def", "refresh_datasources", "(", "self", ",", "datasource_name", "=", "None", ",", "merge_flag", "=", "True", ",", "refreshAll", "=", "True", ")", ":", "ds_list", "=", "self", ".", "get_datasources", "(", ")", "blacklist", "=", "conf", ".", "get", "(", "'DRUID_DATA_SOURCE_BLACKLIST'", ",", "[", "]", ")", "ds_refresh", "=", "[", "]", "if", "not", "datasource_name", ":", "ds_refresh", "=", "list", "(", "filter", "(", "lambda", "ds", ":", "ds", "not", "in", "blacklist", ",", "ds_list", ")", ")", "elif", "datasource_name", "not", "in", "blacklist", "and", "datasource_name", "in", "ds_list", ":", "ds_refresh", ".", "append", "(", "datasource_name", ")", "else", ":", "return", "self", ".", "refresh", "(", "ds_refresh", ",", "merge_flag", ",", "refreshAll", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
DruidCluster.refresh
Fetches metadata for the specified datasources and merges to the Superset database
superset/connectors/druid/models.py
def refresh(self, datasource_names, merge_flag, refreshAll): """ Fetches metadata for the specified datasources and merges to the Superset database """ session = db.session ds_list = ( session.query(DruidDatasource) .filter(DruidDatasource.cluster_name == self.cluster_name) .filter(DruidDatasource.datasource_name.in_(datasource_names)) ) ds_map = {ds.name: ds for ds in ds_list} for ds_name in datasource_names: datasource = ds_map.get(ds_name, None) if not datasource: datasource = DruidDatasource(datasource_name=ds_name) with session.no_autoflush: session.add(datasource) flasher( _('Adding new datasource [{}]').format(ds_name), 'success') ds_map[ds_name] = datasource elif refreshAll: flasher( _('Refreshing datasource [{}]').format(ds_name), 'info') else: del ds_map[ds_name] continue datasource.cluster = self datasource.merge_flag = merge_flag session.flush() # Prepare multithreaded execution pool = ThreadPool() ds_refresh = list(ds_map.values()) metadata = pool.map(_fetch_metadata_for, ds_refresh) pool.close() pool.join() for i in range(0, len(ds_refresh)): datasource = ds_refresh[i] cols = metadata[i] if cols: col_objs_list = ( session.query(DruidColumn) .filter(DruidColumn.datasource_id == datasource.id) .filter(DruidColumn.column_name.in_(cols.keys())) ) col_objs = {col.column_name: col for col in col_objs_list} for col in cols: if col == '__time': # skip the time column continue col_obj = col_objs.get(col) if not col_obj: col_obj = DruidColumn( datasource_id=datasource.id, column_name=col) with session.no_autoflush: session.add(col_obj) col_obj.type = cols[col]['type'] col_obj.datasource = datasource if col_obj.type == 'STRING': col_obj.groupby = True col_obj.filterable = True datasource.refresh_metrics() session.commit()
def refresh(self, datasource_names, merge_flag, refreshAll): """ Fetches metadata for the specified datasources and merges to the Superset database """ session = db.session ds_list = ( session.query(DruidDatasource) .filter(DruidDatasource.cluster_name == self.cluster_name) .filter(DruidDatasource.datasource_name.in_(datasource_names)) ) ds_map = {ds.name: ds for ds in ds_list} for ds_name in datasource_names: datasource = ds_map.get(ds_name, None) if not datasource: datasource = DruidDatasource(datasource_name=ds_name) with session.no_autoflush: session.add(datasource) flasher( _('Adding new datasource [{}]').format(ds_name), 'success') ds_map[ds_name] = datasource elif refreshAll: flasher( _('Refreshing datasource [{}]').format(ds_name), 'info') else: del ds_map[ds_name] continue datasource.cluster = self datasource.merge_flag = merge_flag session.flush() # Prepare multithreaded execution pool = ThreadPool() ds_refresh = list(ds_map.values()) metadata = pool.map(_fetch_metadata_for, ds_refresh) pool.close() pool.join() for i in range(0, len(ds_refresh)): datasource = ds_refresh[i] cols = metadata[i] if cols: col_objs_list = ( session.query(DruidColumn) .filter(DruidColumn.datasource_id == datasource.id) .filter(DruidColumn.column_name.in_(cols.keys())) ) col_objs = {col.column_name: col for col in col_objs_list} for col in cols: if col == '__time': # skip the time column continue col_obj = col_objs.get(col) if not col_obj: col_obj = DruidColumn( datasource_id=datasource.id, column_name=col) with session.no_autoflush: session.add(col_obj) col_obj.type = cols[col]['type'] col_obj.datasource = datasource if col_obj.type == 'STRING': col_obj.groupby = True col_obj.filterable = True datasource.refresh_metrics() session.commit()
[ "Fetches", "metadata", "for", "the", "specified", "datasources", "and", "merges", "to", "the", "Superset", "database" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L184-L248
[ "def", "refresh", "(", "self", ",", "datasource_names", ",", "merge_flag", ",", "refreshAll", ")", ":", "session", "=", "db", ".", "session", "ds_list", "=", "(", "session", ".", "query", "(", "DruidDatasource", ")", ".", "filter", "(", "DruidDatasource", ".", "cluster_name", "==", "self", ".", "cluster_name", ")", ".", "filter", "(", "DruidDatasource", ".", "datasource_name", ".", "in_", "(", "datasource_names", ")", ")", ")", "ds_map", "=", "{", "ds", ".", "name", ":", "ds", "for", "ds", "in", "ds_list", "}", "for", "ds_name", "in", "datasource_names", ":", "datasource", "=", "ds_map", ".", "get", "(", "ds_name", ",", "None", ")", "if", "not", "datasource", ":", "datasource", "=", "DruidDatasource", "(", "datasource_name", "=", "ds_name", ")", "with", "session", ".", "no_autoflush", ":", "session", ".", "add", "(", "datasource", ")", "flasher", "(", "_", "(", "'Adding new datasource [{}]'", ")", ".", "format", "(", "ds_name", ")", ",", "'success'", ")", "ds_map", "[", "ds_name", "]", "=", "datasource", "elif", "refreshAll", ":", "flasher", "(", "_", "(", "'Refreshing datasource [{}]'", ")", ".", "format", "(", "ds_name", ")", ",", "'info'", ")", "else", ":", "del", "ds_map", "[", "ds_name", "]", "continue", "datasource", ".", "cluster", "=", "self", "datasource", ".", "merge_flag", "=", "merge_flag", "session", ".", "flush", "(", ")", "# Prepare multithreaded executation", "pool", "=", "ThreadPool", "(", ")", "ds_refresh", "=", "list", "(", "ds_map", ".", "values", "(", ")", ")", "metadata", "=", "pool", ".", "map", "(", "_fetch_metadata_for", ",", "ds_refresh", ")", "pool", ".", "close", "(", ")", "pool", ".", "join", "(", ")", "for", "i", "in", "range", "(", "0", ",", "len", "(", "ds_refresh", ")", ")", ":", "datasource", "=", "ds_refresh", "[", "i", "]", "cols", "=", "metadata", "[", "i", "]", "if", "cols", ":", "col_objs_list", "=", "(", "session", ".", "query", "(", "DruidColumn", ")", ".", "filter", "(", "DruidColumn", ".", "datasource_id", "==", "datasource", ".", "id", ")", ".", "filter", "(", "DruidColumn", ".", "column_name", ".", "in_", "(", "cols", ".", "keys", "(", ")", ")", ")", ")", "col_objs", "=", "{", "col", ".", "column_name", ":", "col", "for", "col", "in", "col_objs_list", "}", "for", "col", "in", "cols", ":", "if", "col", "==", "'__time'", ":", "# skip the time column", "continue", "col_obj", "=", "col_objs", ".", "get", "(", "col", ")", "if", "not", "col_obj", ":", "col_obj", "=", "DruidColumn", "(", "datasource_id", "=", "datasource", ".", "id", ",", "column_name", "=", "col", ")", "with", "session", ".", "no_autoflush", ":", "session", ".", "add", "(", "col_obj", ")", "col_obj", ".", "type", "=", "cols", "[", "col", "]", "[", "'type'", "]", "col_obj", ".", "datasource", "=", "datasource", "if", "col_obj", ".", "type", "==", "'STRING'", ":", "col_obj", ".", "groupby", "=", "True", "col_obj", ".", "filterable", "=", "True", "datasource", ".", "refresh_metrics", "(", ")", "session", ".", "commit", "(", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
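The fan-out in the middle of this method is just ThreadPool.map, which preserves input order, so metadata[i] lines up with ds_refresh[i]. A toy version with a stub fetch function:

from multiprocessing.pool import ThreadPool

def _fetch_metadata_for(name):  # stand-in for the real metadata fetch
    return {'dim_' + name: {'type': 'STRING'}}

ds_refresh = ['clicks', 'impressions']
pool = ThreadPool()
metadata = pool.map(_fetch_metadata_for, ds_refresh)
pool.close()
pool.join()
for name, cols in zip(ds_refresh, metadata):  # order is preserved
    print(name, cols)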
train
DruidColumn.refresh_metrics
Refresh metrics based on the column metadata
superset/connectors/druid/models.py
def refresh_metrics(self): """Refresh metrics based on the column metadata""" metrics = self.get_metrics() dbmetrics = ( db.session.query(DruidMetric) .filter(DruidMetric.datasource_id == self.datasource_id) .filter(DruidMetric.metric_name.in_(metrics.keys())) ) dbmetrics = {metric.metric_name: metric for metric in dbmetrics} for metric in metrics.values(): dbmetric = dbmetrics.get(metric.metric_name) if dbmetric: for attr in ['json', 'metric_type']: setattr(dbmetric, attr, getattr(metric, attr)) else: with db.session.no_autoflush: metric.datasource_id = self.datasource_id db.session.add(metric)
def refresh_metrics(self): """Refresh metrics based on the column metadata""" metrics = self.get_metrics() dbmetrics = ( db.session.query(DruidMetric) .filter(DruidMetric.datasource_id == self.datasource_id) .filter(DruidMetric.metric_name.in_(metrics.keys())) ) dbmetrics = {metric.metric_name: metric for metric in dbmetrics} for metric in metrics.values(): dbmetric = dbmetrics.get(metric.metric_name) if dbmetric: for attr in ['json', 'metric_type']: setattr(dbmetric, attr, getattr(metric, attr)) else: with db.session.no_autoflush: metric.datasource_id = self.datasource_id db.session.add(metric)
[ "Refresh", "metrics", "based", "on", "the", "column", "metadata" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L309-L326
[ "def", "refresh_metrics", "(", "self", ")", ":", "metrics", "=", "self", ".", "get_metrics", "(", ")", "dbmetrics", "=", "(", "db", ".", "session", ".", "query", "(", "DruidMetric", ")", ".", "filter", "(", "DruidMetric", ".", "datasource_id", "==", "self", ".", "datasource_id", ")", ".", "filter", "(", "DruidMetric", ".", "metric_name", ".", "in_", "(", "metrics", ".", "keys", "(", ")", ")", ")", ")", "dbmetrics", "=", "{", "metric", ".", "metric_name", ":", "metric", "for", "metric", "in", "dbmetrics", "}", "for", "metric", "in", "metrics", ".", "values", "(", ")", ":", "dbmetric", "=", "dbmetrics", ".", "get", "(", "metric", ".", "metric_name", ")", "if", "dbmetric", ":", "for", "attr", "in", "[", "'json'", ",", "'metric_type'", "]", ":", "setattr", "(", "dbmetric", ",", "attr", ",", "getattr", "(", "metric", ",", "attr", ")", ")", "else", ":", "with", "db", ".", "session", ".", "no_autoflush", ":", "metric", ".", "datasource_id", "=", "self", ".", "datasource_id", "db", ".", "session", ".", "add", "(", "metric", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
DruidDatasource.import_obj
Imports the datasource from the object to the database. Metrics, columns and the datasource will be overridden if they exist. This function can be used to import/export dashboards between multiple superset instances. Audit metadata isn't copied over.
superset/connectors/druid/models.py
def import_obj(cls, i_datasource, import_time=None): """Imports the datasource from the object to the database. Metrics, columns and the datasource will be overridden if they exist. This function can be used to import/export dashboards between multiple superset instances. Audit metadata isn't copied over. """ def lookup_datasource(d): return db.session.query(DruidDatasource).filter( DruidDatasource.datasource_name == d.datasource_name, DruidCluster.cluster_name == d.cluster_name, ).first() def lookup_cluster(d): return db.session.query(DruidCluster).filter_by( cluster_name=d.cluster_name).one() return import_datasource.import_datasource( db.session, i_datasource, lookup_cluster, lookup_datasource, import_time)
def import_obj(cls, i_datasource, import_time=None): """Imports the datasource from the object to the database. Metrics, columns and the datasource will be overridden if they exist. This function can be used to import/export dashboards between multiple superset instances. Audit metadata isn't copied over. """ def lookup_datasource(d): return db.session.query(DruidDatasource).filter( DruidDatasource.datasource_name == d.datasource_name, DruidCluster.cluster_name == d.cluster_name, ).first() def lookup_cluster(d): return db.session.query(DruidCluster).filter_by( cluster_name=d.cluster_name).one() return import_datasource.import_datasource( db.session, i_datasource, lookup_cluster, lookup_datasource, import_time)
[ "Imports", "the", "datasource", "from", "the", "object", "to", "the", "database", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L514-L532
[ "def", "import_obj", "(", "cls", ",", "i_datasource", ",", "import_time", "=", "None", ")", ":", "def", "lookup_datasource", "(", "d", ")", ":", "return", "db", ".", "session", ".", "query", "(", "DruidDatasource", ")", ".", "filter", "(", "DruidDatasource", ".", "datasource_name", "==", "d", ".", "datasource_name", ",", "DruidCluster", ".", "cluster_name", "==", "d", ".", "cluster_name", ",", ")", ".", "first", "(", ")", "def", "lookup_cluster", "(", "d", ")", ":", "return", "db", ".", "session", ".", "query", "(", "DruidCluster", ")", ".", "filter_by", "(", "cluster_name", "=", "d", ".", "cluster_name", ")", ".", "one", "(", ")", "return", "import_datasource", ".", "import_datasource", "(", "db", ".", "session", ",", "i_datasource", ",", "lookup_cluster", ",", "lookup_datasource", ",", "import_time", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
DruidDatasource.sync_to_db_from_config
Merges the ds config from druid_config into one stored in the db.
superset/connectors/druid/models.py
def sync_to_db_from_config( cls, druid_config, user, cluster, refresh=True): """Merges the ds config from druid_config into one stored in the db.""" session = db.session datasource = ( session.query(cls) .filter_by(datasource_name=druid_config['name']) .first() ) # Create a new datasource. if not datasource: datasource = cls( datasource_name=druid_config['name'], cluster=cluster, owners=[user], changed_by_fk=user.id, created_by_fk=user.id, ) session.add(datasource) elif not refresh: return dimensions = druid_config['dimensions'] col_objs = ( session.query(DruidColumn) .filter(DruidColumn.datasource_id == datasource.id) .filter(DruidColumn.column_name.in_(dimensions)) ) col_objs = {col.column_name: col for col in col_objs} for dim in dimensions: col_obj = col_objs.get(dim, None) if not col_obj: col_obj = DruidColumn( datasource_id=datasource.id, column_name=dim, groupby=True, filterable=True, # TODO: fetch type from Hive. type='STRING', datasource=datasource, ) session.add(col_obj) # Import Druid metrics metric_objs = ( session.query(DruidMetric) .filter(DruidMetric.datasource_id == datasource.id) .filter(DruidMetric.metric_name.in_( spec['name'] for spec in druid_config['metrics_spec'] )) ) metric_objs = {metric.metric_name: metric for metric in metric_objs} for metric_spec in druid_config['metrics_spec']: metric_name = metric_spec['name'] metric_type = metric_spec['type'] metric_json = json.dumps(metric_spec) if metric_type == 'count': metric_type = 'longSum' metric_json = json.dumps({ 'type': 'longSum', 'name': metric_name, 'fieldName': metric_name, }) metric_obj = metric_objs.get(metric_name, None) if not metric_obj: metric_obj = DruidMetric( metric_name=metric_name, metric_type=metric_type, verbose_name='%s(%s)' % (metric_type, metric_name), datasource=datasource, json=metric_json, description=( 'Imported from the airolap config dir for %s' % druid_config['name']), ) session.add(metric_obj) session.commit()
def sync_to_db_from_config( cls, druid_config, user, cluster, refresh=True): """Merges the ds config from druid_config into one stored in the db.""" session = db.session datasource = ( session.query(cls) .filter_by(datasource_name=druid_config['name']) .first() ) # Create a new datasource. if not datasource: datasource = cls( datasource_name=druid_config['name'], cluster=cluster, owners=[user], changed_by_fk=user.id, created_by_fk=user.id, ) session.add(datasource) elif not refresh: return dimensions = druid_config['dimensions'] col_objs = ( session.query(DruidColumn) .filter(DruidColumn.datasource_id == datasource.id) .filter(DruidColumn.column_name.in_(dimensions)) ) col_objs = {col.column_name: col for col in col_objs} for dim in dimensions: col_obj = col_objs.get(dim, None) if not col_obj: col_obj = DruidColumn( datasource_id=datasource.id, column_name=dim, groupby=True, filterable=True, # TODO: fetch type from Hive. type='STRING', datasource=datasource, ) session.add(col_obj) # Import Druid metrics metric_objs = ( session.query(DruidMetric) .filter(DruidMetric.datasource_id == datasource.id) .filter(DruidMetric.metric_name.in_( spec['name'] for spec in druid_config['metrics_spec'] )) ) metric_objs = {metric.metric_name: metric for metric in metric_objs} for metric_spec in druid_config['metrics_spec']: metric_name = metric_spec['name'] metric_type = metric_spec['type'] metric_json = json.dumps(metric_spec) if metric_type == 'count': metric_type = 'longSum' metric_json = json.dumps({ 'type': 'longSum', 'name': metric_name, 'fieldName': metric_name, }) metric_obj = metric_objs.get(metric_name, None) if not metric_obj: metric_obj = DruidMetric( metric_name=metric_name, metric_type=metric_type, verbose_name='%s(%s)' % (metric_type, metric_name), datasource=datasource, json=metric_json, description=( 'Imported from the airolap config dir for %s' % druid_config['name']), ) session.add(metric_obj) session.commit()
[ "Merges", "the", "ds", "config", "from", "druid_config", "into", "one", "stored", "in", "the", "db", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L590-L671
[ "def", "sync_to_db_from_config", "(", "cls", ",", "druid_config", ",", "user", ",", "cluster", ",", "refresh", "=", "True", ")", ":", "session", "=", "db", ".", "session", "datasource", "=", "(", "session", ".", "query", "(", "cls", ")", ".", "filter_by", "(", "datasource_name", "=", "druid_config", "[", "'name'", "]", ")", ".", "first", "(", ")", ")", "# Create a new datasource.", "if", "not", "datasource", ":", "datasource", "=", "cls", "(", "datasource_name", "=", "druid_config", "[", "'name'", "]", ",", "cluster", "=", "cluster", ",", "owners", "=", "[", "user", "]", ",", "changed_by_fk", "=", "user", ".", "id", ",", "created_by_fk", "=", "user", ".", "id", ",", ")", "session", ".", "add", "(", "datasource", ")", "elif", "not", "refresh", ":", "return", "dimensions", "=", "druid_config", "[", "'dimensions'", "]", "col_objs", "=", "(", "session", ".", "query", "(", "DruidColumn", ")", ".", "filter", "(", "DruidColumn", ".", "datasource_id", "==", "datasource", ".", "id", ")", ".", "filter", "(", "DruidColumn", ".", "column_name", ".", "in_", "(", "dimensions", ")", ")", ")", "col_objs", "=", "{", "col", ".", "column_name", ":", "col", "for", "col", "in", "col_objs", "}", "for", "dim", "in", "dimensions", ":", "col_obj", "=", "col_objs", ".", "get", "(", "dim", ",", "None", ")", "if", "not", "col_obj", ":", "col_obj", "=", "DruidColumn", "(", "datasource_id", "=", "datasource", ".", "id", ",", "column_name", "=", "dim", ",", "groupby", "=", "True", ",", "filterable", "=", "True", ",", "# TODO: fetch type from Hive.", "type", "=", "'STRING'", ",", "datasource", "=", "datasource", ",", ")", "session", ".", "add", "(", "col_obj", ")", "# Import Druid metrics", "metric_objs", "=", "(", "session", ".", "query", "(", "DruidMetric", ")", ".", "filter", "(", "DruidMetric", ".", "datasource_id", "==", "datasource", ".", "id", ")", ".", "filter", "(", "DruidMetric", ".", "metric_name", ".", "in_", "(", "spec", "[", "'name'", "]", "for", "spec", "in", "druid_config", "[", "'metrics_spec'", "]", ")", ")", ")", "metric_objs", "=", "{", "metric", ".", "metric_name", ":", "metric", "for", "metric", "in", "metric_objs", "}", "for", "metric_spec", "in", "druid_config", "[", "'metrics_spec'", "]", ":", "metric_name", "=", "metric_spec", "[", "'name'", "]", "metric_type", "=", "metric_spec", "[", "'type'", "]", "metric_json", "=", "json", ".", "dumps", "(", "metric_spec", ")", "if", "metric_type", "==", "'count'", ":", "metric_type", "=", "'longSum'", "metric_json", "=", "json", ".", "dumps", "(", "{", "'type'", ":", "'longSum'", ",", "'name'", ":", "metric_name", ",", "'fieldName'", ":", "metric_name", ",", "}", ")", "metric_obj", "=", "metric_objs", ".", "get", "(", "metric_name", ",", "None", ")", "if", "not", "metric_obj", ":", "metric_obj", "=", "DruidMetric", "(", "metric_name", "=", "metric_name", ",", "metric_type", "=", "metric_type", ",", "verbose_name", "=", "'%s(%s)'", "%", "(", "metric_type", ",", "metric_name", ")", ",", "datasource", "=", "datasource", ",", "json", "=", "metric_json", ",", "description", "=", "(", "'Imported from the airolap config dir for %s'", "%", "druid_config", "[", "'name'", "]", ")", ",", ")", "session", ".", "add", "(", "metric_obj", ")", "session", ".", "commit", "(", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
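For orientation, an invented druid_config dict of the shape this method consumes, with the count -> longSum rewrite applied to the metric specs:

import json

druid_config = {
    'name': 'clicks',
    'dimensions': ['country', 'device'],
    'metrics_spec': [
        {'type': 'count', 'name': 'count'},
        {'type': 'doubleSum', 'name': 'revenue', 'fieldName': 'revenue'},
    ],
}

for spec in druid_config['metrics_spec']:
    if spec['type'] == 'count':  # stored as a longSum over the ingested count column
        spec = {'type': 'longSum', 'name': spec['name'], 'fieldName': spec['name']}
    print(json.dumps(spec))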
train
DruidDatasource.get_post_agg
For a metric specified as `postagg`, returns the kind of post aggregation for pydruid.
superset/connectors/druid/models.py
def get_post_agg(mconf): """ For a metric specified as `postagg` returns the kind of post aggregation for pydruid. """ if mconf.get('type') == 'javascript': return JavascriptPostAggregator( name=mconf.get('name', ''), field_names=mconf.get('fieldNames', []), function=mconf.get('function', '')) elif mconf.get('type') == 'quantile': return Quantile( mconf.get('name', ''), mconf.get('probability', ''), ) elif mconf.get('type') == 'quantiles': return Quantiles( mconf.get('name', ''), mconf.get('probabilities', ''), ) elif mconf.get('type') == 'fieldAccess': return Field(mconf.get('name')) elif mconf.get('type') == 'constant': return Const( mconf.get('value'), output_name=mconf.get('name', ''), ) elif mconf.get('type') == 'hyperUniqueCardinality': return HyperUniqueCardinality( mconf.get('name'), ) elif mconf.get('type') == 'arithmetic': return Postaggregator( mconf.get('fn', '/'), mconf.get('fields', []), mconf.get('name', '')) else: return CustomPostAggregator( mconf.get('name', ''), mconf)
def get_post_agg(mconf): """ For a metric specified as `postagg` returns the kind of post aggregation for pydruid. """ if mconf.get('type') == 'javascript': return JavascriptPostAggregator( name=mconf.get('name', ''), field_names=mconf.get('fieldNames', []), function=mconf.get('function', '')) elif mconf.get('type') == 'quantile': return Quantile( mconf.get('name', ''), mconf.get('probability', ''), ) elif mconf.get('type') == 'quantiles': return Quantiles( mconf.get('name', ''), mconf.get('probabilities', ''), ) elif mconf.get('type') == 'fieldAccess': return Field(mconf.get('name')) elif mconf.get('type') == 'constant': return Const( mconf.get('value'), output_name=mconf.get('name', ''), ) elif mconf.get('type') == 'hyperUniqueCardinality': return HyperUniqueCardinality( mconf.get('name'), ) elif mconf.get('type') == 'arithmetic': return Postaggregator( mconf.get('fn', '/'), mconf.get('fields', []), mconf.get('name', '')) else: return CustomPostAggregator( mconf.get('name', ''), mconf)
[ "For", "a", "metric", "specified", "as", "postagg", "returns", "the", "kind", "of", "post", "aggregation", "for", "pydruid", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L731-L770
[ "def", "get_post_agg", "(", "mconf", ")", ":", "if", "mconf", ".", "get", "(", "'type'", ")", "==", "'javascript'", ":", "return", "JavascriptPostAggregator", "(", "name", "=", "mconf", ".", "get", "(", "'name'", ",", "''", ")", ",", "field_names", "=", "mconf", ".", "get", "(", "'fieldNames'", ",", "[", "]", ")", ",", "function", "=", "mconf", ".", "get", "(", "'function'", ",", "''", ")", ")", "elif", "mconf", ".", "get", "(", "'type'", ")", "==", "'quantile'", ":", "return", "Quantile", "(", "mconf", ".", "get", "(", "'name'", ",", "''", ")", ",", "mconf", ".", "get", "(", "'probability'", ",", "''", ")", ",", ")", "elif", "mconf", ".", "get", "(", "'type'", ")", "==", "'quantiles'", ":", "return", "Quantiles", "(", "mconf", ".", "get", "(", "'name'", ",", "''", ")", ",", "mconf", ".", "get", "(", "'probabilities'", ",", "''", ")", ",", ")", "elif", "mconf", ".", "get", "(", "'type'", ")", "==", "'fieldAccess'", ":", "return", "Field", "(", "mconf", ".", "get", "(", "'name'", ")", ")", "elif", "mconf", ".", "get", "(", "'type'", ")", "==", "'constant'", ":", "return", "Const", "(", "mconf", ".", "get", "(", "'value'", ")", ",", "output_name", "=", "mconf", ".", "get", "(", "'name'", ",", "''", ")", ",", ")", "elif", "mconf", ".", "get", "(", "'type'", ")", "==", "'hyperUniqueCardinality'", ":", "return", "HyperUniqueCardinality", "(", "mconf", ".", "get", "(", "'name'", ")", ",", ")", "elif", "mconf", ".", "get", "(", "'type'", ")", "==", "'arithmetic'", ":", "return", "Postaggregator", "(", "mconf", ".", "get", "(", "'fn'", ",", "'/'", ")", ",", "mconf", ".", "get", "(", "'fields'", ",", "[", "]", ")", ",", "mconf", ".", "get", "(", "'name'", ",", "''", ")", ")", "else", ":", "return", "CustomPostAggregator", "(", "mconf", ".", "get", "(", "'name'", ",", "''", ")", ",", "mconf", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
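For reference, a few invented `mconf` payloads with, in comments, the pydruid object each branch above would build for them; names and values are illustrative only.

arithmetic = {          # -> Postaggregator('/', fields, 'avg_price')
    'type': 'arithmetic',
    'name': 'avg_price',
    'fn': '/',
    'fields': [
        {'type': 'fieldAccess', 'fieldName': 'total_price'},
        {'type': 'fieldAccess', 'fieldName': 'count'},
    ],
}
quantile = {            # -> Quantile('p95_latency', 0.95)
    'type': 'quantile',
    'name': 'p95_latency',
    'probability': 0.95,
}
theta_sketch = {        # unknown type -> CustomPostAggregator('uniques', mconf)
    'type': 'thetaSketchEstimate',
    'name': 'uniques',
}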
train
DruidDatasource.find_postaggs_for
Return a list of metrics that are post aggregations
superset/connectors/druid/models.py
def find_postaggs_for(postagg_names, metrics_dict): """Return a list of metrics that are post aggregations""" postagg_metrics = [ metrics_dict[name] for name in postagg_names if metrics_dict[name].metric_type == POST_AGG_TYPE ] # Remove post aggregations that were found for postagg in postagg_metrics: postagg_names.remove(postagg.metric_name) return postagg_metrics
def find_postaggs_for(postagg_names, metrics_dict): """Return a list of metrics that are post aggregations""" postagg_metrics = [ metrics_dict[name] for name in postagg_names if metrics_dict[name].metric_type == POST_AGG_TYPE ] # Remove post aggregations that were found for postagg in postagg_metrics: postagg_names.remove(postagg.metric_name) return postagg_metrics
[ "Return", "a", "list", "of", "metrics", "that", "are", "post", "aggregations" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L773-L782
[ "def", "find_postaggs_for", "(", "postagg_names", ",", "metrics_dict", ")", ":", "postagg_metrics", "=", "[", "metrics_dict", "[", "name", "]", "for", "name", "in", "postagg_names", "if", "metrics_dict", "[", "name", "]", ".", "metric_type", "==", "POST_AGG_TYPE", "]", "# Remove post aggregations that were found", "for", "postagg", "in", "postagg_metrics", ":", "postagg_names", ".", "remove", "(", "postagg", ".", "metric_name", ")", "return", "postagg_metrics" ]
ca2996c78f679260eb79c6008e276733df5fb653
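A toy run with stub metric objects shows the side effect worth noting: postagg_names is mutated in place, leaving only the plain aggregation names behind.

from collections import namedtuple

POST_AGG_TYPE = 'postagg'
Metric = namedtuple('Metric', ['metric_name', 'metric_type'])
metrics_dict = {
    'avg_price': Metric('avg_price', POST_AGG_TYPE),
    'count': Metric('count', 'longSum'),
}

names = ['avg_price', 'count']
postaggs = [metrics_dict[n] for n in names
            if metrics_dict[n].metric_type == POST_AGG_TYPE]
for p in postaggs:
    names.remove(p.metric_name)
print([p.metric_name for p in postaggs])  # ['avg_price']
print(names)                              # ['count'] -- plain aggregations left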
train
DruidDatasource.values_for_column
Retrieve some values for the given column
superset/connectors/druid/models.py
def values_for_column(self, column_name, limit=10000): """Retrieve some values for the given column""" logging.info( 'Getting values for columns [{}] limited to [{}]' .format(column_name, limit)) # TODO: Use Lexicographic TopNMetricSpec once supported by PyDruid if self.fetch_values_from: from_dttm = utils.parse_human_datetime(self.fetch_values_from) else: from_dttm = datetime(1970, 1, 1) qry = dict( datasource=self.datasource_name, granularity='all', intervals=from_dttm.isoformat() + '/' + datetime.now().isoformat(), aggregations=dict(count=count('count')), dimension=column_name, metric='count', threshold=limit, ) client = self.cluster.get_pydruid_client() client.topn(**qry) df = client.export_pandas() return [row[column_name] for row in df.to_records(index=False)]
def values_for_column(self, column_name, limit=10000): """Retrieve some values for the given column""" logging.info( 'Getting values for columns [{}] limited to [{}]' .format(column_name, limit)) # TODO: Use Lexicographic TopNMetricSpec once supported by PyDruid if self.fetch_values_from: from_dttm = utils.parse_human_datetime(self.fetch_values_from) else: from_dttm = datetime(1970, 1, 1) qry = dict( datasource=self.datasource_name, granularity='all', intervals=from_dttm.isoformat() + '/' + datetime.now().isoformat(), aggregations=dict(count=count('count')), dimension=column_name, metric='count', threshold=limit, ) client = self.cluster.get_pydruid_client() client.topn(**qry) df = client.export_pandas() return [row[column_name] for row in df.to_records(index=False)]
[ "Retrieve", "some", "values", "for", "the", "given", "column" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L857-L883
[ "def", "values_for_column", "(", "self", ",", "column_name", ",", "limit", "=", "10000", ")", ":", "logging", ".", "info", "(", "'Getting values for columns [{}] limited to [{}]'", ".", "format", "(", "column_name", ",", "limit", ")", ")", "# TODO: Use Lexicographic TopNMetricSpec once supported by PyDruid", "if", "self", ".", "fetch_values_from", ":", "from_dttm", "=", "utils", ".", "parse_human_datetime", "(", "self", ".", "fetch_values_from", ")", "else", ":", "from_dttm", "=", "datetime", "(", "1970", ",", "1", ",", "1", ")", "qry", "=", "dict", "(", "datasource", "=", "self", ".", "datasource_name", ",", "granularity", "=", "'all'", ",", "intervals", "=", "from_dttm", ".", "isoformat", "(", ")", "+", "'/'", "+", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", ",", "aggregations", "=", "dict", "(", "count", "=", "count", "(", "'count'", ")", ")", ",", "dimension", "=", "column_name", ",", "metric", "=", "'count'", ",", "threshold", "=", "limit", ",", ")", "client", "=", "self", ".", "cluster", ".", "get_pydruid_client", "(", ")", "client", ".", "topn", "(", "*", "*", "qry", ")", "df", "=", "client", ".", "export_pandas", "(", ")", "return", "[", "row", "[", "column_name", "]", "for", "row", "in", "df", ".", "to_records", "(", "index", "=", "False", ")", "]" ]
ca2996c78f679260eb79c6008e276733df5fb653
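The method boils down to a Druid topn query. Below is the query dict it builds, for an invented datasource and column; the aggregations entry is a plain-dict stand-in for pydruid's count('count') helper.

from datetime import datetime

from_dttm = datetime(1970, 1, 1)  # default when fetch_values_from is unset
qry = dict(
    datasource='clicks',
    granularity='all',
    intervals=from_dttm.isoformat() + '/' + datetime.now().isoformat(),
    aggregations=dict(count={'type': 'count', 'name': 'count'}),
    dimension='country',
    metric='count',
    threshold=10000,  # the `limit` argument
)
print(qry['intervals'])  # e.g. '1970-01-01T00:00:00/2019-06-01T12:00:00.000000'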
train
DruidDatasource.get_aggregations
Returns a dictionary of aggregation metric names to aggregation json objects :param metrics_dict: dictionary of all the metrics :param saved_metrics: list of saved metric names :param adhoc_metrics: list of adhoc metric names :raise SupersetException: if one or more metric names are not aggregations
superset/connectors/druid/models.py
def get_aggregations(metrics_dict, saved_metrics, adhoc_metrics=[]): """ Returns a dictionary of aggregation metric names to aggregation json objects :param metrics_dict: dictionary of all the metrics :param saved_metrics: list of saved metric names :param adhoc_metrics: list of adhoc metric names :raise SupersetException: if one or more metric names are not aggregations """ aggregations = OrderedDict() invalid_metric_names = [] for metric_name in saved_metrics: if metric_name in metrics_dict: metric = metrics_dict[metric_name] if metric.metric_type == POST_AGG_TYPE: invalid_metric_names.append(metric_name) else: aggregations[metric_name] = metric.json_obj else: invalid_metric_names.append(metric_name) if len(invalid_metric_names) > 0: raise SupersetException( _('Metric(s) {} must be aggregations.').format(invalid_metric_names)) for adhoc_metric in adhoc_metrics: aggregations[adhoc_metric['label']] = { 'fieldName': adhoc_metric['column']['column_name'], 'fieldNames': [adhoc_metric['column']['column_name']], 'type': DruidDatasource.druid_type_from_adhoc_metric(adhoc_metric), 'name': adhoc_metric['label'], } return aggregations
def get_aggregations(metrics_dict, saved_metrics, adhoc_metrics=[]): """ Returns a dictionary of aggregation metric names to aggregation json objects :param metrics_dict: dictionary of all the metrics :param saved_metrics: list of saved metric names :param adhoc_metrics: list of adhoc metric names :raise SupersetException: if one or more metric names are not aggregations """ aggregations = OrderedDict() invalid_metric_names = [] for metric_name in saved_metrics: if metric_name in metrics_dict: metric = metrics_dict[metric_name] if metric.metric_type == POST_AGG_TYPE: invalid_metric_names.append(metric_name) else: aggregations[metric_name] = metric.json_obj else: invalid_metric_names.append(metric_name) if len(invalid_metric_names) > 0: raise SupersetException( _('Metric(s) {} must be aggregations.').format(invalid_metric_names)) for adhoc_metric in adhoc_metrics: aggregations[adhoc_metric['label']] = { 'fieldName': adhoc_metric['column']['column_name'], 'fieldNames': [adhoc_metric['column']['column_name']], 'type': DruidDatasource.druid_type_from_adhoc_metric(adhoc_metric), 'name': adhoc_metric['label'], } return aggregations
[ "Returns", "a", "dictionary", "of", "aggregation", "metric", "names", "to", "aggregation", "json", "objects" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L940-L970
[ "def", "get_aggregations", "(", "metrics_dict", ",", "saved_metrics", ",", "adhoc_metrics", "=", "[", "]", ")", ":", "aggregations", "=", "OrderedDict", "(", ")", "invalid_metric_names", "=", "[", "]", "for", "metric_name", "in", "saved_metrics", ":", "if", "metric_name", "in", "metrics_dict", ":", "metric", "=", "metrics_dict", "[", "metric_name", "]", "if", "metric", ".", "metric_type", "==", "POST_AGG_TYPE", ":", "invalid_metric_names", ".", "append", "(", "metric_name", ")", "else", ":", "aggregations", "[", "metric_name", "]", "=", "metric", ".", "json_obj", "else", ":", "invalid_metric_names", ".", "append", "(", "metric_name", ")", "if", "len", "(", "invalid_metric_names", ")", ">", "0", ":", "raise", "SupersetException", "(", "_", "(", "'Metric(s) {} must be aggregations.'", ")", ".", "format", "(", "invalid_metric_names", ")", ")", "for", "adhoc_metric", "in", "adhoc_metrics", ":", "aggregations", "[", "adhoc_metric", "[", "'label'", "]", "]", "=", "{", "'fieldName'", ":", "adhoc_metric", "[", "'column'", "]", "[", "'column_name'", "]", ",", "'fieldNames'", ":", "[", "adhoc_metric", "[", "'column'", "]", "[", "'column_name'", "]", "]", ",", "'type'", ":", "DruidDatasource", ".", "druid_type_from_adhoc_metric", "(", "adhoc_metric", ")", ",", "'name'", ":", "adhoc_metric", "[", "'label'", "]", ",", "}", "return", "aggregations" ]
ca2996c78f679260eb79c6008e276733df5fb653
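A standalone illustration of the adhoc branch, with an invented adhoc metric; the 'type' value shown is assumed to be what druid_type_from_adhoc_metric would derive for a SUM over a DOUBLE column. Note the mutable default adhoc_metrics=[] in the signature is only safe because the argument is never mutated.

from collections import OrderedDict

adhoc_metric = {
    'label': 'SUM(revenue)',
    'aggregate': 'SUM',
    'column': {'column_name': 'revenue', 'type': 'DOUBLE'},
}

aggregations = OrderedDict()
aggregations[adhoc_metric['label']] = {
    'fieldName': adhoc_metric['column']['column_name'],
    'fieldNames': [adhoc_metric['column']['column_name']],
    'type': 'doubleSum',  # assumed result of druid_type_from_adhoc_metric
    'name': adhoc_metric['label'],
}
print(aggregations['SUM(revenue)'])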
train
DruidDatasource._dimensions_to_values
Replace dimensions specs with their `dimension` values, and ignore those without
superset/connectors/druid/models.py
def _dimensions_to_values(dimensions): """ Replace dimensions specs with their `dimension` values, and ignore those without """ values = [] for dimension in dimensions: if isinstance(dimension, dict): if 'extractionFn' in dimension: values.append(dimension) elif 'dimension' in dimension: values.append(dimension['dimension']) else: values.append(dimension) return values
def _dimensions_to_values(dimensions): """ Replace dimensions specs with their `dimension` values, and ignore those without """ values = [] for dimension in dimensions: if isinstance(dimension, dict): if 'extractionFn' in dimension: values.append(dimension) elif 'dimension' in dimension: values.append(dimension['dimension']) else: values.append(dimension) return values
[ "Replace", "dimensions", "specs", "with", "their", "dimension", "values", "and", "ignore", "those", "without" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L1010-L1025
[ "def", "_dimensions_to_values", "(", "dimensions", ")", ":", "values", "=", "[", "]", "for", "dimension", "in", "dimensions", ":", "if", "isinstance", "(", "dimension", ",", "dict", ")", ":", "if", "'extractionFn'", "in", "dimension", ":", "values", ".", "append", "(", "dimension", ")", "elif", "'dimension'", "in", "dimension", ":", "values", ".", "append", "(", "dimension", "[", "'dimension'", "]", ")", "else", ":", "values", ".", "append", "(", "dimension", ")", "return", "values" ]
ca2996c78f679260eb79c6008e276733df5fb653
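The helper is pure, so a quick example pins down all three branches (this assumes the staticmethod above is importable as DruidDatasource._dimensions_to_values):

dimensions = [
    'country',                                   # plain string: kept as-is
    {'type': 'default', 'dimension': 'device',
     'outputName': 'device'},                    # spec without extractionFn: unwrapped
    {'type': 'extraction', 'dimension': 'ts',
     'extractionFn': {'type': 'timeFormat'}},    # spec with extractionFn: kept whole
]
print(DruidDatasource._dimensions_to_values(dimensions))
# ['country', 'device', {'type': 'extraction', 'dimension': 'ts',
#                        'extractionFn': {'type': 'timeFormat'}}]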
train
DruidDatasource.run_query
Runs a query against Druid and returns the query string that was issued; result rows are read back from the pydruid client.
superset/connectors/druid/models.py
def run_query( # noqa / druid self, groupby, metrics, granularity, from_dttm, to_dttm, filter=None, # noqa is_timeseries=True, timeseries_limit=None, timeseries_limit_metric=None, row_limit=None, inner_from_dttm=None, inner_to_dttm=None, orderby=None, extras=None, # noqa columns=None, phase=2, client=None, order_desc=True, prequeries=None, is_prequery=False, ): """Runs a query against Druid and returns a dataframe. """ # TODO refactor into using a TBD Query object client = client or self.cluster.get_pydruid_client() row_limit = row_limit or conf.get('ROW_LIMIT') if not is_timeseries: granularity = 'all' if granularity == 'all': phase = 1 inner_from_dttm = inner_from_dttm or from_dttm inner_to_dttm = inner_to_dttm or to_dttm timezone = from_dttm.replace(tzinfo=DRUID_TZ).tzname() if from_dttm else None query_str = '' metrics_dict = {m.metric_name: m for m in self.metrics} columns_dict = {c.column_name: c for c in self.columns} if ( self.cluster and LooseVersion(self.cluster.get_druid_version()) < LooseVersion('0.11.0') ): for metric in metrics: self.sanitize_metric_object(metric) self.sanitize_metric_object(timeseries_limit_metric) aggregations, post_aggs = DruidDatasource.metrics_and_post_aggs( metrics, metrics_dict) self.check_restricted_metrics(aggregations) # the dimensions list with dimensionSpecs expanded dimensions = self.get_dimensions(groupby, columns_dict) extras = extras or {} qry = dict( datasource=self.datasource_name, dimensions=dimensions, aggregations=aggregations, granularity=DruidDatasource.granularity( granularity, timezone=timezone, origin=extras.get('druid_time_origin'), ), post_aggregations=post_aggs, intervals=self.intervals_from_dttms(from_dttm, to_dttm), ) filters = DruidDatasource.get_filters(filter, self.num_cols, columns_dict) if filters: qry['filter'] = filters having_filters = self.get_having_filters(extras.get('having_druid')) if having_filters: qry['having'] = having_filters order_direction = 'descending' if order_desc else 'ascending' if columns: columns.append('__time') del qry['post_aggregations'] del qry['aggregations'] qry['dimensions'] = columns qry['metrics'] = [] qry['granularity'] = 'all' qry['limit'] = row_limit client.scan(**qry) elif len(groupby) == 0 and not having_filters: logging.info('Running timeseries query for no groupby values') del qry['dimensions'] client.timeseries(**qry) elif ( not having_filters and len(groupby) == 1 and order_desc ): dim = list(qry.get('dimensions'))[0] logging.info('Running two-phase topn query for dimension [{}]'.format(dim)) pre_qry = deepcopy(qry) if timeseries_limit_metric: order_by = utils.get_metric_name(timeseries_limit_metric) aggs_dict, post_aggs_dict = DruidDatasource.metrics_and_post_aggs( [timeseries_limit_metric], metrics_dict) if phase == 1: pre_qry['aggregations'].update(aggs_dict) pre_qry['post_aggregations'].update(post_aggs_dict) else: pre_qry['aggregations'] = aggs_dict pre_qry['post_aggregations'] = post_aggs_dict else: agg_keys = qry['aggregations'].keys() order_by = list(agg_keys)[0] if agg_keys else None # Limit on the number of timeseries, doing a two-phases query pre_qry['granularity'] = 'all' pre_qry['threshold'] = min(row_limit, timeseries_limit or row_limit) pre_qry['metric'] = order_by pre_qry['dimension'] = self._dimensions_to_values(qry.get('dimensions'))[0] del pre_qry['dimensions'] client.topn(**pre_qry) logging.info('Phase 1 Complete') if phase == 2: query_str += '// Two phase query\n// Phase 1\n' query_str += json.dumps( client.query_builder.last_query.query_dict, indent=2) 
query_str += '\n' if phase == 1: return query_str query_str += ( "// Phase 2 (built based on phase one's results)\n") df = client.export_pandas() qry['filter'] = self._add_filter_from_pre_query_data( df, [pre_qry['dimension']], filters) qry['threshold'] = timeseries_limit or 1000 if row_limit and granularity == 'all': qry['threshold'] = row_limit qry['dimension'] = dim del qry['dimensions'] qry['metric'] = list(qry['aggregations'].keys())[0] client.topn(**qry) logging.info('Phase 2 Complete') elif len(groupby) > 0 or having_filters: # If grouping on multiple fields or using a having filter # we have to force a groupby query logging.info('Running groupby query for dimensions [{}]'.format(dimensions)) if timeseries_limit and is_timeseries: logging.info('Running two-phase query for timeseries') pre_qry = deepcopy(qry) pre_qry_dims = self._dimensions_to_values(qry['dimensions']) # Can't use set on an array with dicts # Use set with non-dict items only non_dict_dims = list( set([x for x in pre_qry_dims if not isinstance(x, dict)]), ) dict_dims = [x for x in pre_qry_dims if isinstance(x, dict)] pre_qry['dimensions'] = non_dict_dims + dict_dims order_by = None if metrics: order_by = utils.get_metric_name(metrics[0]) else: order_by = pre_qry_dims[0] if timeseries_limit_metric: order_by = utils.get_metric_name(timeseries_limit_metric) aggs_dict, post_aggs_dict = DruidDatasource.metrics_and_post_aggs( [timeseries_limit_metric], metrics_dict) if phase == 1: pre_qry['aggregations'].update(aggs_dict) pre_qry['post_aggregations'].update(post_aggs_dict) else: pre_qry['aggregations'] = aggs_dict pre_qry['post_aggregations'] = post_aggs_dict # Limit on the number of timeseries, doing a two-phases query pre_qry['granularity'] = 'all' pre_qry['limit_spec'] = { 'type': 'default', 'limit': min(timeseries_limit, row_limit), 'intervals': self.intervals_from_dttms( inner_from_dttm, inner_to_dttm), 'columns': [{ 'dimension': order_by, 'direction': order_direction, }], } client.groupby(**pre_qry) logging.info('Phase 1 Complete') query_str += '// Two phase query\n// Phase 1\n' query_str += json.dumps( client.query_builder.last_query.query_dict, indent=2) query_str += '\n' if phase == 1: return query_str query_str += ( "// Phase 2 (built based on phase one's results)\n") df = client.export_pandas() qry['filter'] = self._add_filter_from_pre_query_data( df, pre_qry['dimensions'], filters, ) qry['limit_spec'] = None if row_limit: dimension_values = self._dimensions_to_values(dimensions) qry['limit_spec'] = { 'type': 'default', 'limit': row_limit, 'columns': [{ 'dimension': ( utils.get_metric_name( metrics[0], ) if metrics else dimension_values[0] ), 'direction': order_direction, }], } client.groupby(**qry) logging.info('Query Complete') query_str += json.dumps( client.query_builder.last_query.query_dict, indent=2) return query_str
def run_query( # noqa / druid self, groupby, metrics, granularity, from_dttm, to_dttm, filter=None, # noqa is_timeseries=True, timeseries_limit=None, timeseries_limit_metric=None, row_limit=None, inner_from_dttm=None, inner_to_dttm=None, orderby=None, extras=None, # noqa columns=None, phase=2, client=None, order_desc=True, prequeries=None, is_prequery=False, ): """Runs a query against Druid and returns a dataframe. """ # TODO refactor into using a TBD Query object client = client or self.cluster.get_pydruid_client() row_limit = row_limit or conf.get('ROW_LIMIT') if not is_timeseries: granularity = 'all' if granularity == 'all': phase = 1 inner_from_dttm = inner_from_dttm or from_dttm inner_to_dttm = inner_to_dttm or to_dttm timezone = from_dttm.replace(tzinfo=DRUID_TZ).tzname() if from_dttm else None query_str = '' metrics_dict = {m.metric_name: m for m in self.metrics} columns_dict = {c.column_name: c for c in self.columns} if ( self.cluster and LooseVersion(self.cluster.get_druid_version()) < LooseVersion('0.11.0') ): for metric in metrics: self.sanitize_metric_object(metric) self.sanitize_metric_object(timeseries_limit_metric) aggregations, post_aggs = DruidDatasource.metrics_and_post_aggs( metrics, metrics_dict) self.check_restricted_metrics(aggregations) # the dimensions list with dimensionSpecs expanded dimensions = self.get_dimensions(groupby, columns_dict) extras = extras or {} qry = dict( datasource=self.datasource_name, dimensions=dimensions, aggregations=aggregations, granularity=DruidDatasource.granularity( granularity, timezone=timezone, origin=extras.get('druid_time_origin'), ), post_aggregations=post_aggs, intervals=self.intervals_from_dttms(from_dttm, to_dttm), ) filters = DruidDatasource.get_filters(filter, self.num_cols, columns_dict) if filters: qry['filter'] = filters having_filters = self.get_having_filters(extras.get('having_druid')) if having_filters: qry['having'] = having_filters order_direction = 'descending' if order_desc else 'ascending' if columns: columns.append('__time') del qry['post_aggregations'] del qry['aggregations'] qry['dimensions'] = columns qry['metrics'] = [] qry['granularity'] = 'all' qry['limit'] = row_limit client.scan(**qry) elif len(groupby) == 0 and not having_filters: logging.info('Running timeseries query for no groupby values') del qry['dimensions'] client.timeseries(**qry) elif ( not having_filters and len(groupby) == 1 and order_desc ): dim = list(qry.get('dimensions'))[0] logging.info('Running two-phase topn query for dimension [{}]'.format(dim)) pre_qry = deepcopy(qry) if timeseries_limit_metric: order_by = utils.get_metric_name(timeseries_limit_metric) aggs_dict, post_aggs_dict = DruidDatasource.metrics_and_post_aggs( [timeseries_limit_metric], metrics_dict) if phase == 1: pre_qry['aggregations'].update(aggs_dict) pre_qry['post_aggregations'].update(post_aggs_dict) else: pre_qry['aggregations'] = aggs_dict pre_qry['post_aggregations'] = post_aggs_dict else: agg_keys = qry['aggregations'].keys() order_by = list(agg_keys)[0] if agg_keys else None # Limit on the number of timeseries, doing a two-phases query pre_qry['granularity'] = 'all' pre_qry['threshold'] = min(row_limit, timeseries_limit or row_limit) pre_qry['metric'] = order_by pre_qry['dimension'] = self._dimensions_to_values(qry.get('dimensions'))[0] del pre_qry['dimensions'] client.topn(**pre_qry) logging.info('Phase 1 Complete') if phase == 2: query_str += '// Two phase query\n// Phase 1\n' query_str += json.dumps( client.query_builder.last_query.query_dict, indent=2) 
query_str += '\n' if phase == 1: return query_str query_str += ( "// Phase 2 (built based on phase one's results)\n") df = client.export_pandas() qry['filter'] = self._add_filter_from_pre_query_data( df, [pre_qry['dimension']], filters) qry['threshold'] = timeseries_limit or 1000 if row_limit and granularity == 'all': qry['threshold'] = row_limit qry['dimension'] = dim del qry['dimensions'] qry['metric'] = list(qry['aggregations'].keys())[0] client.topn(**qry) logging.info('Phase 2 Complete') elif len(groupby) > 0 or having_filters: # If grouping on multiple fields or using a having filter # we have to force a groupby query logging.info('Running groupby query for dimensions [{}]'.format(dimensions)) if timeseries_limit and is_timeseries: logging.info('Running two-phase query for timeseries') pre_qry = deepcopy(qry) pre_qry_dims = self._dimensions_to_values(qry['dimensions']) # Can't use set on an array with dicts # Use set with non-dict items only non_dict_dims = list( set([x for x in pre_qry_dims if not isinstance(x, dict)]), ) dict_dims = [x for x in pre_qry_dims if isinstance(x, dict)] pre_qry['dimensions'] = non_dict_dims + dict_dims order_by = None if metrics: order_by = utils.get_metric_name(metrics[0]) else: order_by = pre_qry_dims[0] if timeseries_limit_metric: order_by = utils.get_metric_name(timeseries_limit_metric) aggs_dict, post_aggs_dict = DruidDatasource.metrics_and_post_aggs( [timeseries_limit_metric], metrics_dict) if phase == 1: pre_qry['aggregations'].update(aggs_dict) pre_qry['post_aggregations'].update(post_aggs_dict) else: pre_qry['aggregations'] = aggs_dict pre_qry['post_aggregations'] = post_aggs_dict # Limit on the number of timeseries, doing a two-phases query pre_qry['granularity'] = 'all' pre_qry['limit_spec'] = { 'type': 'default', 'limit': min(timeseries_limit, row_limit), 'intervals': self.intervals_from_dttms( inner_from_dttm, inner_to_dttm), 'columns': [{ 'dimension': order_by, 'direction': order_direction, }], } client.groupby(**pre_qry) logging.info('Phase 1 Complete') query_str += '// Two phase query\n// Phase 1\n' query_str += json.dumps( client.query_builder.last_query.query_dict, indent=2) query_str += '\n' if phase == 1: return query_str query_str += ( "// Phase 2 (built based on phase one's results)\n") df = client.export_pandas() qry['filter'] = self._add_filter_from_pre_query_data( df, pre_qry['dimensions'], filters, ) qry['limit_spec'] = None if row_limit: dimension_values = self._dimensions_to_values(dimensions) qry['limit_spec'] = { 'type': 'default', 'limit': row_limit, 'columns': [{ 'dimension': ( utils.get_metric_name( metrics[0], ) if metrics else dimension_values[0] ), 'direction': order_direction, }], } client.groupby(**qry) logging.info('Query Complete') query_str += json.dumps( client.query_builder.last_query.query_dict, indent=2) return query_str
[ "Runs", "a", "query", "against", "Druid", "and", "returns", "a", "dataframe", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L1039-L1268
[ "def", "run_query", "(", "# noqa / druid", "self", ",", "groupby", ",", "metrics", ",", "granularity", ",", "from_dttm", ",", "to_dttm", ",", "filter", "=", "None", ",", "# noqa", "is_timeseries", "=", "True", ",", "timeseries_limit", "=", "None", ",", "timeseries_limit_metric", "=", "None", ",", "row_limit", "=", "None", ",", "inner_from_dttm", "=", "None", ",", "inner_to_dttm", "=", "None", ",", "orderby", "=", "None", ",", "extras", "=", "None", ",", "# noqa", "columns", "=", "None", ",", "phase", "=", "2", ",", "client", "=", "None", ",", "order_desc", "=", "True", ",", "prequeries", "=", "None", ",", "is_prequery", "=", "False", ",", ")", ":", "# TODO refactor into using a TBD Query object", "client", "=", "client", "or", "self", ".", "cluster", ".", "get_pydruid_client", "(", ")", "row_limit", "=", "row_limit", "or", "conf", ".", "get", "(", "'ROW_LIMIT'", ")", "if", "not", "is_timeseries", ":", "granularity", "=", "'all'", "if", "granularity", "==", "'all'", ":", "phase", "=", "1", "inner_from_dttm", "=", "inner_from_dttm", "or", "from_dttm", "inner_to_dttm", "=", "inner_to_dttm", "or", "to_dttm", "timezone", "=", "from_dttm", ".", "replace", "(", "tzinfo", "=", "DRUID_TZ", ")", ".", "tzname", "(", ")", "if", "from_dttm", "else", "None", "query_str", "=", "''", "metrics_dict", "=", "{", "m", ".", "metric_name", ":", "m", "for", "m", "in", "self", ".", "metrics", "}", "columns_dict", "=", "{", "c", ".", "column_name", ":", "c", "for", "c", "in", "self", ".", "columns", "}", "if", "(", "self", ".", "cluster", "and", "LooseVersion", "(", "self", ".", "cluster", ".", "get_druid_version", "(", ")", ")", "<", "LooseVersion", "(", "'0.11.0'", ")", ")", ":", "for", "metric", "in", "metrics", ":", "self", ".", "sanitize_metric_object", "(", "metric", ")", "self", ".", "sanitize_metric_object", "(", "timeseries_limit_metric", ")", "aggregations", ",", "post_aggs", "=", "DruidDatasource", ".", "metrics_and_post_aggs", "(", "metrics", ",", "metrics_dict", ")", "self", ".", "check_restricted_metrics", "(", "aggregations", ")", "# the dimensions list with dimensionSpecs expanded", "dimensions", "=", "self", ".", "get_dimensions", "(", "groupby", ",", "columns_dict", ")", "extras", "=", "extras", "or", "{", "}", "qry", "=", "dict", "(", "datasource", "=", "self", ".", "datasource_name", ",", "dimensions", "=", "dimensions", ",", "aggregations", "=", "aggregations", ",", "granularity", "=", "DruidDatasource", ".", "granularity", "(", "granularity", ",", "timezone", "=", "timezone", ",", "origin", "=", "extras", ".", "get", "(", "'druid_time_origin'", ")", ",", ")", ",", "post_aggregations", "=", "post_aggs", ",", "intervals", "=", "self", ".", "intervals_from_dttms", "(", "from_dttm", ",", "to_dttm", ")", ",", ")", "filters", "=", "DruidDatasource", ".", "get_filters", "(", "filter", ",", "self", ".", "num_cols", ",", "columns_dict", ")", "if", "filters", ":", "qry", "[", "'filter'", "]", "=", "filters", "having_filters", "=", "self", ".", "get_having_filters", "(", "extras", ".", "get", "(", "'having_druid'", ")", ")", "if", "having_filters", ":", "qry", "[", "'having'", "]", "=", "having_filters", "order_direction", "=", "'descending'", "if", "order_desc", "else", "'ascending'", "if", "columns", ":", "columns", ".", "append", "(", "'__time'", ")", "del", "qry", "[", "'post_aggregations'", "]", "del", "qry", "[", "'aggregations'", "]", "qry", "[", "'dimensions'", "]", "=", "columns", "qry", "[", "'metrics'", "]", "=", "[", "]", "qry", "[", "'granularity'", "]", "=", "'all'", "qry", "[", 
"'limit'", "]", "=", "row_limit", "client", ".", "scan", "(", "*", "*", "qry", ")", "elif", "len", "(", "groupby", ")", "==", "0", "and", "not", "having_filters", ":", "logging", ".", "info", "(", "'Running timeseries query for no groupby values'", ")", "del", "qry", "[", "'dimensions'", "]", "client", ".", "timeseries", "(", "*", "*", "qry", ")", "elif", "(", "not", "having_filters", "and", "len", "(", "groupby", ")", "==", "1", "and", "order_desc", ")", ":", "dim", "=", "list", "(", "qry", ".", "get", "(", "'dimensions'", ")", ")", "[", "0", "]", "logging", ".", "info", "(", "'Running two-phase topn query for dimension [{}]'", ".", "format", "(", "dim", ")", ")", "pre_qry", "=", "deepcopy", "(", "qry", ")", "if", "timeseries_limit_metric", ":", "order_by", "=", "utils", ".", "get_metric_name", "(", "timeseries_limit_metric", ")", "aggs_dict", ",", "post_aggs_dict", "=", "DruidDatasource", ".", "metrics_and_post_aggs", "(", "[", "timeseries_limit_metric", "]", ",", "metrics_dict", ")", "if", "phase", "==", "1", ":", "pre_qry", "[", "'aggregations'", "]", ".", "update", "(", "aggs_dict", ")", "pre_qry", "[", "'post_aggregations'", "]", ".", "update", "(", "post_aggs_dict", ")", "else", ":", "pre_qry", "[", "'aggregations'", "]", "=", "aggs_dict", "pre_qry", "[", "'post_aggregations'", "]", "=", "post_aggs_dict", "else", ":", "agg_keys", "=", "qry", "[", "'aggregations'", "]", ".", "keys", "(", ")", "order_by", "=", "list", "(", "agg_keys", ")", "[", "0", "]", "if", "agg_keys", "else", "None", "# Limit on the number of timeseries, doing a two-phases query", "pre_qry", "[", "'granularity'", "]", "=", "'all'", "pre_qry", "[", "'threshold'", "]", "=", "min", "(", "row_limit", ",", "timeseries_limit", "or", "row_limit", ")", "pre_qry", "[", "'metric'", "]", "=", "order_by", "pre_qry", "[", "'dimension'", "]", "=", "self", ".", "_dimensions_to_values", "(", "qry", ".", "get", "(", "'dimensions'", ")", ")", "[", "0", "]", "del", "pre_qry", "[", "'dimensions'", "]", "client", ".", "topn", "(", "*", "*", "pre_qry", ")", "logging", ".", "info", "(", "'Phase 1 Complete'", ")", "if", "phase", "==", "2", ":", "query_str", "+=", "'// Two phase query\\n// Phase 1\\n'", "query_str", "+=", "json", ".", "dumps", "(", "client", ".", "query_builder", ".", "last_query", ".", "query_dict", ",", "indent", "=", "2", ")", "query_str", "+=", "'\\n'", "if", "phase", "==", "1", ":", "return", "query_str", "query_str", "+=", "(", "\"// Phase 2 (built based on phase one's results)\\n\"", ")", "df", "=", "client", ".", "export_pandas", "(", ")", "qry", "[", "'filter'", "]", "=", "self", ".", "_add_filter_from_pre_query_data", "(", "df", ",", "[", "pre_qry", "[", "'dimension'", "]", "]", ",", "filters", ")", "qry", "[", "'threshold'", "]", "=", "timeseries_limit", "or", "1000", "if", "row_limit", "and", "granularity", "==", "'all'", ":", "qry", "[", "'threshold'", "]", "=", "row_limit", "qry", "[", "'dimension'", "]", "=", "dim", "del", "qry", "[", "'dimensions'", "]", "qry", "[", "'metric'", "]", "=", "list", "(", "qry", "[", "'aggregations'", "]", ".", "keys", "(", ")", ")", "[", "0", "]", "client", ".", "topn", "(", "*", "*", "qry", ")", "logging", ".", "info", "(", "'Phase 2 Complete'", ")", "elif", "len", "(", "groupby", ")", ">", "0", "or", "having_filters", ":", "# If grouping on multiple fields or using a having filter", "# we have to force a groupby query", "logging", ".", "info", "(", "'Running groupby query for dimensions [{}]'", ".", "format", "(", "dimensions", ")", ")", "if", "timeseries_limit", "and", 
"is_timeseries", ":", "logging", ".", "info", "(", "'Running two-phase query for timeseries'", ")", "pre_qry", "=", "deepcopy", "(", "qry", ")", "pre_qry_dims", "=", "self", ".", "_dimensions_to_values", "(", "qry", "[", "'dimensions'", "]", ")", "# Can't use set on an array with dicts", "# Use set with non-dict items only", "non_dict_dims", "=", "list", "(", "set", "(", "[", "x", "for", "x", "in", "pre_qry_dims", "if", "not", "isinstance", "(", "x", ",", "dict", ")", "]", ")", ",", ")", "dict_dims", "=", "[", "x", "for", "x", "in", "pre_qry_dims", "if", "isinstance", "(", "x", ",", "dict", ")", "]", "pre_qry", "[", "'dimensions'", "]", "=", "non_dict_dims", "+", "dict_dims", "order_by", "=", "None", "if", "metrics", ":", "order_by", "=", "utils", ".", "get_metric_name", "(", "metrics", "[", "0", "]", ")", "else", ":", "order_by", "=", "pre_qry_dims", "[", "0", "]", "if", "timeseries_limit_metric", ":", "order_by", "=", "utils", ".", "get_metric_name", "(", "timeseries_limit_metric", ")", "aggs_dict", ",", "post_aggs_dict", "=", "DruidDatasource", ".", "metrics_and_post_aggs", "(", "[", "timeseries_limit_metric", "]", ",", "metrics_dict", ")", "if", "phase", "==", "1", ":", "pre_qry", "[", "'aggregations'", "]", ".", "update", "(", "aggs_dict", ")", "pre_qry", "[", "'post_aggregations'", "]", ".", "update", "(", "post_aggs_dict", ")", "else", ":", "pre_qry", "[", "'aggregations'", "]", "=", "aggs_dict", "pre_qry", "[", "'post_aggregations'", "]", "=", "post_aggs_dict", "# Limit on the number of timeseries, doing a two-phases query", "pre_qry", "[", "'granularity'", "]", "=", "'all'", "pre_qry", "[", "'limit_spec'", "]", "=", "{", "'type'", ":", "'default'", ",", "'limit'", ":", "min", "(", "timeseries_limit", ",", "row_limit", ")", ",", "'intervals'", ":", "self", ".", "intervals_from_dttms", "(", "inner_from_dttm", ",", "inner_to_dttm", ")", ",", "'columns'", ":", "[", "{", "'dimension'", ":", "order_by", ",", "'direction'", ":", "order_direction", ",", "}", "]", ",", "}", "client", ".", "groupby", "(", "*", "*", "pre_qry", ")", "logging", ".", "info", "(", "'Phase 1 Complete'", ")", "query_str", "+=", "'// Two phase query\\n// Phase 1\\n'", "query_str", "+=", "json", ".", "dumps", "(", "client", ".", "query_builder", ".", "last_query", ".", "query_dict", ",", "indent", "=", "2", ")", "query_str", "+=", "'\\n'", "if", "phase", "==", "1", ":", "return", "query_str", "query_str", "+=", "(", "\"// Phase 2 (built based on phase one's results)\\n\"", ")", "df", "=", "client", ".", "export_pandas", "(", ")", "qry", "[", "'filter'", "]", "=", "self", ".", "_add_filter_from_pre_query_data", "(", "df", ",", "pre_qry", "[", "'dimensions'", "]", ",", "filters", ",", ")", "qry", "[", "'limit_spec'", "]", "=", "None", "if", "row_limit", ":", "dimension_values", "=", "self", ".", "_dimensions_to_values", "(", "dimensions", ")", "qry", "[", "'limit_spec'", "]", "=", "{", "'type'", ":", "'default'", ",", "'limit'", ":", "row_limit", ",", "'columns'", ":", "[", "{", "'dimension'", ":", "(", "utils", ".", "get_metric_name", "(", "metrics", "[", "0", "]", ",", ")", "if", "metrics", "else", "dimension_values", "[", "0", "]", ")", ",", "'direction'", ":", "order_direction", ",", "}", "]", ",", "}", "client", ".", "groupby", "(", "*", "*", "qry", ")", "logging", ".", "info", "(", "'Query Complete'", ")", "query_str", "+=", "json", ".", "dumps", "(", "client", ".", "query_builder", ".", "last_query", ".", "query_dict", ",", "indent", "=", "2", ")", "return", "query_str" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
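The run_query record above dispatches to one of four pydruid query types (scan, timeseries, topn, groupby) and, whenever a series limit applies, first runs a cheap phase-1 query to find the top series before issuing the real query. Below is a minimal, self-contained sketch of the threshold arithmetic the topn pre-query uses; the limit values are invented and no Druid cluster or pydruid install is needed to run it.

def phase1_threshold(row_limit, timeseries_limit=None):
    # Mirror of the topn pre-query cap in run_query: never request more
    # series in phase 1 than the row limit allows, and fall back to
    # row_limit when no timeseries limit was given.
    return min(row_limit, timeseries_limit or row_limit)

assert phase1_threshold(row_limit=50000, timeseries_limit=10) == 10
assert phase1_threshold(row_limit=50000) == 50000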
DruidDatasource.homogenize_types
Converting all GROUPBY columns to strings. When grouping by a numeric (say FLOAT) column, pydruid returns strings in the dataframe. This creates issues downstream related to having mixed types in the dataframe. Here we replace None with <NULL> and make the whole series a str instead of an object.
superset/connectors/druid/models.py
def homogenize_types(df, groupby_cols):
    """Converting all GROUPBY columns to strings

    When grouping by a numeric (say FLOAT) column, pydruid returns
    strings in the dataframe. This creates issues downstream related
    to having mixed types in the dataframe. Here we replace None with
    <NULL> and make the whole series a str instead of an object.
    """
    for col in groupby_cols:
        df[col] = df[col].fillna('<NULL>').astype('unicode')
    return df
def homogenize_types(df, groupby_cols):
    """Converting all GROUPBY columns to strings

    When grouping by a numeric (say FLOAT) column, pydruid returns
    strings in the dataframe. This creates issues downstream related
    to having mixed types in the dataframe. Here we replace None with
    <NULL> and make the whole series a str instead of an object.
    """
    for col in groupby_cols:
        df[col] = df[col].fillna('<NULL>').astype('unicode')
    return df
[ "Converting", "all", "GROUPBY", "columns", "to", "strings" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L1271-L1283
[ "def", "homogenize_types", "(", "df", ",", "groupby_cols", ")", ":", "for", "col", "in", "groupby_cols", ":", "df", "[", "col", "]", "=", "df", "[", "col", "]", ".", "fillna", "(", "'<NULL>'", ")", ".", "astype", "(", "'unicode'", ")", "return", "df" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
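A short usage sketch for the coercion homogenize_types performs, with a made-up dataframe standing in for a pydruid result. The record's code calls .astype('unicode'); the sketch uses .astype(str), which yields the same string coercion on Python 3.

import pandas as pd

df = pd.DataFrame({
    'float_dim': [1.0, None, 2.5],   # numeric groupby column with a NULL
    'metric': [10, 20, 30],
})
for col in ['float_dim']:
    # Same two steps as homogenize_types: fill NULLs, then force str dtype
    df[col] = df[col].fillna('<NULL>').astype(str)
print(df['float_dim'].tolist())  # ['1.0', '<NULL>', '2.5']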
DruidDatasource.get_filters
Given Superset filter data structure, returns pydruid Filter(s)
superset/connectors/druid/models.py
def get_filters(cls, raw_filters, num_cols, columns_dict):  # noqa
    """Given Superset filter data structure, returns pydruid Filter(s)"""
    filters = None
    for flt in raw_filters:
        col = flt.get('col')
        op = flt.get('op')
        eq = flt.get('val')
        if (
                not col or
                not op or
                (eq is None and op not in ('IS NULL', 'IS NOT NULL'))):
            continue

        # Check if this dimension uses an extraction function
        # If so, create the appropriate pydruid extraction object
        column_def = columns_dict.get(col)
        dim_spec = column_def.dimension_spec if column_def else None
        extraction_fn = None
        if dim_spec and 'extractionFn' in dim_spec:
            (col, extraction_fn) = DruidDatasource._create_extraction_fn(dim_spec)

        cond = None
        is_numeric_col = col in num_cols
        is_list_target = op in ('in', 'not in')
        eq = cls.filter_values_handler(
            eq, is_list_target=is_list_target,
            target_column_is_numeric=is_numeric_col)

        # For these two ops, could have used Dimension,
        # but it doesn't support extraction functions
        if op == '==':
            cond = Filter(dimension=col, value=eq,
                          extraction_function=extraction_fn)
        elif op == '!=':
            cond = ~Filter(dimension=col, value=eq,
                           extraction_function=extraction_fn)
        elif op in ('in', 'not in'):
            fields = []
            # ignore the filter if it has no value
            if not len(eq):
                continue
            # if it uses an extraction fn, use the "in" operator
            # as Dimension isn't supported
            elif extraction_fn is not None:
                cond = Filter(
                    dimension=col,
                    values=eq,
                    type='in',
                    extraction_function=extraction_fn,
                )
            elif len(eq) == 1:
                cond = Dimension(col) == eq[0]
            else:
                for s in eq:
                    fields.append(Dimension(col) == s)
                cond = Filter(type='or', fields=fields)
            if op == 'not in':
                cond = ~cond
        elif op == 'regex':
            cond = Filter(
                extraction_function=extraction_fn,
                type='regex',
                pattern=eq,
                dimension=col,
            )
        # For the ops below, could have used pydruid's Bound,
        # but it doesn't support extraction functions
        elif op == '>=':
            cond = Filter(
                type='bound',
                extraction_function=extraction_fn,
                dimension=col,
                lowerStrict=False,
                upperStrict=False,
                lower=eq,
                upper=None,
                alphaNumeric=is_numeric_col,
            )
        elif op == '<=':
            cond = Filter(
                type='bound',
                extraction_function=extraction_fn,
                dimension=col,
                lowerStrict=False,
                upperStrict=False,
                lower=None,
                upper=eq,
                alphaNumeric=is_numeric_col,
            )
        elif op == '>':
            cond = Filter(
                type='bound',
                extraction_function=extraction_fn,
                lowerStrict=True,
                upperStrict=False,
                dimension=col,
                lower=eq,
                upper=None,
                alphaNumeric=is_numeric_col,
            )
        elif op == '<':
            cond = Filter(
                type='bound',
                extraction_function=extraction_fn,
                upperStrict=True,
                lowerStrict=False,
                dimension=col,
                lower=None,
                upper=eq,
                alphaNumeric=is_numeric_col,
            )
        elif op == 'IS NULL':
            cond = Dimension(col) == None  # NOQA
        elif op == 'IS NOT NULL':
            cond = Dimension(col) != None  # NOQA

        if filters:
            filters = Filter(type='and', fields=[
                cond,
                filters,
            ])
        else:
            filters = cond
    return filters
def get_filters(cls, raw_filters, num_cols, columns_dict):  # noqa
    """Given Superset filter data structure, returns pydruid Filter(s)"""
    filters = None
    for flt in raw_filters:
        col = flt.get('col')
        op = flt.get('op')
        eq = flt.get('val')
        if (
                not col or
                not op or
                (eq is None and op not in ('IS NULL', 'IS NOT NULL'))):
            continue

        # Check if this dimension uses an extraction function
        # If so, create the appropriate pydruid extraction object
        column_def = columns_dict.get(col)
        dim_spec = column_def.dimension_spec if column_def else None
        extraction_fn = None
        if dim_spec and 'extractionFn' in dim_spec:
            (col, extraction_fn) = DruidDatasource._create_extraction_fn(dim_spec)

        cond = None
        is_numeric_col = col in num_cols
        is_list_target = op in ('in', 'not in')
        eq = cls.filter_values_handler(
            eq, is_list_target=is_list_target,
            target_column_is_numeric=is_numeric_col)

        # For these two ops, could have used Dimension,
        # but it doesn't support extraction functions
        if op == '==':
            cond = Filter(dimension=col, value=eq,
                          extraction_function=extraction_fn)
        elif op == '!=':
            cond = ~Filter(dimension=col, value=eq,
                           extraction_function=extraction_fn)
        elif op in ('in', 'not in'):
            fields = []
            # ignore the filter if it has no value
            if not len(eq):
                continue
            # if it uses an extraction fn, use the "in" operator
            # as Dimension isn't supported
            elif extraction_fn is not None:
                cond = Filter(
                    dimension=col,
                    values=eq,
                    type='in',
                    extraction_function=extraction_fn,
                )
            elif len(eq) == 1:
                cond = Dimension(col) == eq[0]
            else:
                for s in eq:
                    fields.append(Dimension(col) == s)
                cond = Filter(type='or', fields=fields)
            if op == 'not in':
                cond = ~cond
        elif op == 'regex':
            cond = Filter(
                extraction_function=extraction_fn,
                type='regex',
                pattern=eq,
                dimension=col,
            )
        # For the ops below, could have used pydruid's Bound,
        # but it doesn't support extraction functions
        elif op == '>=':
            cond = Filter(
                type='bound',
                extraction_function=extraction_fn,
                dimension=col,
                lowerStrict=False,
                upperStrict=False,
                lower=eq,
                upper=None,
                alphaNumeric=is_numeric_col,
            )
        elif op == '<=':
            cond = Filter(
                type='bound',
                extraction_function=extraction_fn,
                dimension=col,
                lowerStrict=False,
                upperStrict=False,
                lower=None,
                upper=eq,
                alphaNumeric=is_numeric_col,
            )
        elif op == '>':
            cond = Filter(
                type='bound',
                extraction_function=extraction_fn,
                lowerStrict=True,
                upperStrict=False,
                dimension=col,
                lower=eq,
                upper=None,
                alphaNumeric=is_numeric_col,
            )
        elif op == '<':
            cond = Filter(
                type='bound',
                extraction_function=extraction_fn,
                upperStrict=True,
                lowerStrict=False,
                dimension=col,
                lower=None,
                upper=eq,
                alphaNumeric=is_numeric_col,
            )
        elif op == 'IS NULL':
            cond = Dimension(col) == None  # NOQA
        elif op == 'IS NOT NULL':
            cond = Dimension(col) != None  # NOQA

        if filters:
            filters = Filter(type='and', fields=[
                cond,
                filters,
            ])
        else:
            filters = cond
    return filters
[ "Given", "Superset", "filter", "data", "structure", "returns", "pydruid", "Filter", "(", "s", ")" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/druid/models.py#L1361-L1484
[ "def", "get_filters", "(", "cls", ",", "raw_filters", ",", "num_cols", ",", "columns_dict", ")", ":", "# noqa", "filters", "=", "None", "for", "flt", "in", "raw_filters", ":", "col", "=", "flt", ".", "get", "(", "'col'", ")", "op", "=", "flt", ".", "get", "(", "'op'", ")", "eq", "=", "flt", ".", "get", "(", "'val'", ")", "if", "(", "not", "col", "or", "not", "op", "or", "(", "eq", "is", "None", "and", "op", "not", "in", "(", "'IS NULL'", ",", "'IS NOT NULL'", ")", ")", ")", ":", "continue", "# Check if this dimension uses an extraction function", "# If so, create the appropriate pydruid extraction object", "column_def", "=", "columns_dict", ".", "get", "(", "col", ")", "dim_spec", "=", "column_def", ".", "dimension_spec", "if", "column_def", "else", "None", "extraction_fn", "=", "None", "if", "dim_spec", "and", "'extractionFn'", "in", "dim_spec", ":", "(", "col", ",", "extraction_fn", ")", "=", "DruidDatasource", ".", "_create_extraction_fn", "(", "dim_spec", ")", "cond", "=", "None", "is_numeric_col", "=", "col", "in", "num_cols", "is_list_target", "=", "op", "in", "(", "'in'", ",", "'not in'", ")", "eq", "=", "cls", ".", "filter_values_handler", "(", "eq", ",", "is_list_target", "=", "is_list_target", ",", "target_column_is_numeric", "=", "is_numeric_col", ")", "# For these two ops, could have used Dimension,", "# but it doesn't support extraction functions", "if", "op", "==", "'=='", ":", "cond", "=", "Filter", "(", "dimension", "=", "col", ",", "value", "=", "eq", ",", "extraction_function", "=", "extraction_fn", ")", "elif", "op", "==", "'!='", ":", "cond", "=", "~", "Filter", "(", "dimension", "=", "col", ",", "value", "=", "eq", ",", "extraction_function", "=", "extraction_fn", ")", "elif", "op", "in", "(", "'in'", ",", "'not in'", ")", ":", "fields", "=", "[", "]", "# ignore the filter if it has no value", "if", "not", "len", "(", "eq", ")", ":", "continue", "# if it uses an extraction fn, use the \"in\" operator", "# as Dimension isn't supported", "elif", "extraction_fn", "is", "not", "None", ":", "cond", "=", "Filter", "(", "dimension", "=", "col", ",", "values", "=", "eq", ",", "type", "=", "'in'", ",", "extraction_function", "=", "extraction_fn", ",", ")", "elif", "len", "(", "eq", ")", "==", "1", ":", "cond", "=", "Dimension", "(", "col", ")", "==", "eq", "[", "0", "]", "else", ":", "for", "s", "in", "eq", ":", "fields", ".", "append", "(", "Dimension", "(", "col", ")", "==", "s", ")", "cond", "=", "Filter", "(", "type", "=", "'or'", ",", "fields", "=", "fields", ")", "if", "op", "==", "'not in'", ":", "cond", "=", "~", "cond", "elif", "op", "==", "'regex'", ":", "cond", "=", "Filter", "(", "extraction_function", "=", "extraction_fn", ",", "type", "=", "'regex'", ",", "pattern", "=", "eq", ",", "dimension", "=", "col", ",", ")", "# For the ops below, could have used pydruid's Bound,", "# but it doesn't support extraction functions", "elif", "op", "==", "'>='", ":", "cond", "=", "Filter", "(", "type", "=", "'bound'", ",", "extraction_function", "=", "extraction_fn", ",", "dimension", "=", "col", ",", "lowerStrict", "=", "False", ",", "upperStrict", "=", "False", ",", "lower", "=", "eq", ",", "upper", "=", "None", ",", "alphaNumeric", "=", "is_numeric_col", ",", ")", "elif", "op", "==", "'<='", ":", "cond", "=", "Filter", "(", "type", "=", "'bound'", ",", "extraction_function", "=", "extraction_fn", ",", "dimension", "=", "col", ",", "lowerStrict", "=", "False", ",", "upperStrict", "=", "False", ",", "lower", "=", "None", ",", "upper", "=", "eq", ",", "alphaNumeric", "=", 
"is_numeric_col", ",", ")", "elif", "op", "==", "'>'", ":", "cond", "=", "Filter", "(", "type", "=", "'bound'", ",", "extraction_function", "=", "extraction_fn", ",", "lowerStrict", "=", "True", ",", "upperStrict", "=", "False", ",", "dimension", "=", "col", ",", "lower", "=", "eq", ",", "upper", "=", "None", ",", "alphaNumeric", "=", "is_numeric_col", ",", ")", "elif", "op", "==", "'<'", ":", "cond", "=", "Filter", "(", "type", "=", "'bound'", ",", "extraction_function", "=", "extraction_fn", ",", "upperStrict", "=", "True", ",", "lowerStrict", "=", "False", ",", "dimension", "=", "col", ",", "lower", "=", "None", ",", "upper", "=", "eq", ",", "alphaNumeric", "=", "is_numeric_col", ",", ")", "elif", "op", "==", "'IS NULL'", ":", "cond", "=", "Dimension", "(", "col", ")", "==", "None", "# NOQA", "elif", "op", "==", "'IS NOT NULL'", ":", "cond", "=", "Dimension", "(", "col", ")", "!=", "None", "# NOQA", "if", "filters", ":", "filters", "=", "Filter", "(", "type", "=", "'and'", ",", "fields", "=", "[", "cond", ",", "filters", ",", "]", ")", "else", ":", "filters", "=", "cond", "return", "filters" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
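A hedged sketch of the raw filter payload shape get_filters consumes; the column names and values below are invented. It reproduces only the record's guard that drops malformed entries, so no pydruid install is needed to run it.

raw_filters = [
    {'col': 'country_code', 'op': 'in', 'val': ['US', 'FR']},  # -> OR of == filters
    {'col': 'revenue', 'op': '>=', 'val': 100},                # -> 'bound' filter
    {'col': 'comment', 'op': 'IS NULL'},                       # 'val' not required
    {'col': 'broken', 'op': '=='},                             # dropped: no 'val'
]

def is_usable(flt):
    # Same precondition get_filters applies before building a pydruid Filter
    col, op, eq = flt.get('col'), flt.get('op'), flt.get('val')
    return bool(col and op) and (
        eq is not None or op in ('IS NULL', 'IS NOT NULL'))

print([f['col'] for f in raw_filters if is_usable(f)])
# ['country_code', 'revenue', 'comment']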
get_env_variable
Get the environment variable or raise exception.
contrib/docker/superset_config.py
def get_env_variable(var_name, default=None):
    """Get the environment variable or raise exception."""
    try:
        return os.environ[var_name]
    except KeyError:
        if default is not None:
            return default
        else:
            error_msg = 'The environment variable {} was missing, abort...'\
                .format(var_name)
            raise EnvironmentError(error_msg)
def get_env_variable(var_name, default=None):
    """Get the environment variable or raise exception."""
    try:
        return os.environ[var_name]
    except KeyError:
        if default is not None:
            return default
        else:
            error_msg = 'The environment variable {} was missing, abort...'\
                .format(var_name)
            raise EnvironmentError(error_msg)
[ "Get", "the", "environment", "variable", "or", "raise", "exception", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/contrib/docker/superset_config.py#L20-L30
[ "def", "get_env_variable", "(", "var_name", ",", "default", "=", "None", ")", ":", "try", ":", "return", "os", ".", "environ", "[", "var_name", "]", "except", "KeyError", ":", "if", "default", "is", "not", "None", ":", "return", "default", "else", ":", "error_msg", "=", "'The environment variable {} was missing, abort...'", ".", "format", "(", "var_name", ")", "raise", "EnvironmentError", "(", "error_msg", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
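A runnable usage sketch for the record above; the function is redefined inline so the snippet stands alone, and SOME_HOST/SOME_PORT are invented variable names.

import os

def get_env_variable(var_name, default=None):
    # Same behavior as the record: return the value, fall back to a
    # provided default, or raise when the variable is truly missing.
    try:
        return os.environ[var_name]
    except KeyError:
        if default is not None:
            return default
        raise EnvironmentError(
            'The environment variable {} was missing, abort...'.format(var_name))

os.environ['SOME_HOST'] = 'localhost'
print(get_env_variable('SOME_HOST'))          # 'localhost'
print(get_env_variable('SOME_PORT', '5432'))  # default kicks in
# get_env_variable('SOME_MISSING') would raise EnvironmentError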
ConnectorRegistry.get_eager_datasource
Returns datasource with columns and metrics.
superset/connectors/connector_registry.py
def get_eager_datasource(cls, session, datasource_type, datasource_id):
    """Returns datasource with columns and metrics."""
    datasource_class = ConnectorRegistry.sources[datasource_type]
    return (
        session.query(datasource_class)
        .options(
            subqueryload(datasource_class.columns),
            subqueryload(datasource_class.metrics),
        )
        .filter_by(id=datasource_id)
        .one()
    )
def get_eager_datasource(cls, session, datasource_type, datasource_id):
    """Returns datasource with columns and metrics."""
    datasource_class = ConnectorRegistry.sources[datasource_type]
    return (
        session.query(datasource_class)
        .options(
            subqueryload(datasource_class.columns),
            subqueryload(datasource_class.metrics),
        )
        .filter_by(id=datasource_id)
        .one()
    )
[ "Returns", "datasource", "with", "columns", "and", "metrics", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/connectors/connector_registry.py#L76-L87
[ "def", "get_eager_datasource", "(", "cls", ",", "session", ",", "datasource_type", ",", "datasource_id", ")", ":", "datasource_class", "=", "ConnectorRegistry", ".", "sources", "[", "datasource_type", "]", "return", "(", "session", ".", "query", "(", "datasource_class", ")", ".", "options", "(", "subqueryload", "(", "datasource_class", ".", "columns", ")", ",", "subqueryload", "(", "datasource_class", ".", "metrics", ")", ",", ")", ".", "filter_by", "(", "id", "=", "datasource_id", ")", ".", "one", "(", ")", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
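The point of the subqueryload options above is to fetch the datasource's columns and metrics in a couple of extra queries up front instead of one lazy query per relationship access. Below is a self-contained sketch of that pattern with throwaway models (not Superset's) and an in-memory SQLite database; it assumes SQLAlchemy 1.4+ for the declarative_base import location.

from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
from sqlalchemy.orm import declarative_base, relationship, sessionmaker, subqueryload

Base = declarative_base()

class Datasource(Base):
    __tablename__ = 'datasource'
    id = Column(Integer, primary_key=True)
    name = Column(String(64))
    columns = relationship('Col')

class Col(Base):
    __tablename__ = 'col'
    id = Column(Integer, primary_key=True)
    datasource_id = Column(Integer, ForeignKey('datasource.id'))

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()
session.add(Datasource(id=1, name='demo', columns=[Col(), Col()]))
session.commit()

# The related rows arrive eagerly, so accessing .columns afterwards
# does not trigger another round trip to the database.
ds = (
    session.query(Datasource)
    .options(subqueryload(Datasource.columns))
    .filter_by(id=1)
    .one()
)
assert len(ds.columns) == 2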
load_misc_dashboard
Loading a dashboard featuring misc charts
superset/data/misc_dashboard.py
def load_misc_dashboard(): """Loading a dashboard featuring misc charts""" print('Creating the dashboard') db.session.expunge_all() dash = db.session.query(Dash).filter_by(slug=DASH_SLUG).first() if not dash: dash = Dash() js = textwrap.dedent("""\ { "CHART-BkeVbh8ANQ": { "children": [], "id": "CHART-BkeVbh8ANQ", "meta": { "chartId": 4004, "height": 34, "sliceName": "Multi Line", "width": 8 }, "type": "CHART" }, "CHART-H1HYNzEANX": { "children": [], "id": "CHART-H1HYNzEANX", "meta": { "chartId": 3940, "height": 50, "sliceName": "Energy Sankey", "width": 6 }, "type": "CHART" }, "CHART-HJOYVMV0E7": { "children": [], "id": "CHART-HJOYVMV0E7", "meta": { "chartId": 3969, "height": 63, "sliceName": "Mapbox Long/Lat", "width": 6 }, "type": "CHART" }, "CHART-S1WYNz4AVX": { "children": [], "id": "CHART-S1WYNz4AVX", "meta": { "chartId": 3989, "height": 25, "sliceName": "Parallel Coordinates", "width": 4 }, "type": "CHART" }, "CHART-r19KVMNCE7": { "children": [], "id": "CHART-r19KVMNCE7", "meta": { "chartId": 3971, "height": 34, "sliceName": "Calendar Heatmap multiformat 0", "width": 4 }, "type": "CHART" }, "CHART-rJ4K4GV04Q": { "children": [], "id": "CHART-rJ4K4GV04Q", "meta": { "chartId": 3941, "height": 63, "sliceName": "Energy Force Layout", "width": 6 }, "type": "CHART" }, "CHART-rkgF4G4A4X": { "children": [], "id": "CHART-rkgF4G4A4X", "meta": { "chartId": 3970, "height": 25, "sliceName": "Birth in France by department in 2016", "width": 8 }, "type": "CHART" }, "CHART-rywK4GVR4X": { "children": [], "id": "CHART-rywK4GVR4X", "meta": { "chartId": 3942, "height": 50, "sliceName": "Heatmap", "width": 6 }, "type": "CHART" }, "COLUMN-ByUFVf40EQ": { "children": [ "CHART-rywK4GVR4X", "CHART-HJOYVMV0E7" ], "id": "COLUMN-ByUFVf40EQ", "meta": { "background": "BACKGROUND_TRANSPARENT", "width": 6 }, "type": "COLUMN" }, "COLUMN-rkmYVGN04Q": { "children": [ "CHART-rJ4K4GV04Q", "CHART-H1HYNzEANX" ], "id": "COLUMN-rkmYVGN04Q", "meta": { "background": "BACKGROUND_TRANSPARENT", "width": 6 }, "type": "COLUMN" }, "GRID_ID": { "children": [ "ROW-SytNzNA4X", "ROW-S1MK4M4A4X", "ROW-HkFFEzVRVm" ], "id": "GRID_ID", "type": "GRID" }, "HEADER_ID": { "id": "HEADER_ID", "meta": { "text": "Misc Charts" }, "type": "HEADER" }, "ROOT_ID": { "children": [ "GRID_ID" ], "id": "ROOT_ID", "type": "ROOT" }, "ROW-HkFFEzVRVm": { "children": [ "CHART-r19KVMNCE7", "CHART-BkeVbh8ANQ" ], "id": "ROW-HkFFEzVRVm", "meta": { "background": "BACKGROUND_TRANSPARENT" }, "type": "ROW" }, "ROW-S1MK4M4A4X": { "children": [ "COLUMN-rkmYVGN04Q", "COLUMN-ByUFVf40EQ" ], "id": "ROW-S1MK4M4A4X", "meta": { "background": "BACKGROUND_TRANSPARENT" }, "type": "ROW" }, "ROW-SytNzNA4X": { "children": [ "CHART-rkgF4G4A4X", "CHART-S1WYNz4AVX" ], "id": "ROW-SytNzNA4X", "meta": { "background": "BACKGROUND_TRANSPARENT" }, "type": "ROW" }, "DASHBOARD_VERSION_KEY": "v2" } """) pos = json.loads(js) slices = ( db.session .query(Slice) .filter(Slice.slice_name.in_(misc_dash_slices)) .all() ) slices = sorted(slices, key=lambda x: x.id) update_slice_ids(pos, slices) dash.dashboard_title = 'Misc Charts' dash.position_json = json.dumps(pos, indent=4) dash.slug = DASH_SLUG dash.slices = slices db.session.merge(dash) db.session.commit()
def load_misc_dashboard(): """Loading a dashboard featuring misc charts""" print('Creating the dashboard') db.session.expunge_all() dash = db.session.query(Dash).filter_by(slug=DASH_SLUG).first() if not dash: dash = Dash() js = textwrap.dedent("""\ { "CHART-BkeVbh8ANQ": { "children": [], "id": "CHART-BkeVbh8ANQ", "meta": { "chartId": 4004, "height": 34, "sliceName": "Multi Line", "width": 8 }, "type": "CHART" }, "CHART-H1HYNzEANX": { "children": [], "id": "CHART-H1HYNzEANX", "meta": { "chartId": 3940, "height": 50, "sliceName": "Energy Sankey", "width": 6 }, "type": "CHART" }, "CHART-HJOYVMV0E7": { "children": [], "id": "CHART-HJOYVMV0E7", "meta": { "chartId": 3969, "height": 63, "sliceName": "Mapbox Long/Lat", "width": 6 }, "type": "CHART" }, "CHART-S1WYNz4AVX": { "children": [], "id": "CHART-S1WYNz4AVX", "meta": { "chartId": 3989, "height": 25, "sliceName": "Parallel Coordinates", "width": 4 }, "type": "CHART" }, "CHART-r19KVMNCE7": { "children": [], "id": "CHART-r19KVMNCE7", "meta": { "chartId": 3971, "height": 34, "sliceName": "Calendar Heatmap multiformat 0", "width": 4 }, "type": "CHART" }, "CHART-rJ4K4GV04Q": { "children": [], "id": "CHART-rJ4K4GV04Q", "meta": { "chartId": 3941, "height": 63, "sliceName": "Energy Force Layout", "width": 6 }, "type": "CHART" }, "CHART-rkgF4G4A4X": { "children": [], "id": "CHART-rkgF4G4A4X", "meta": { "chartId": 3970, "height": 25, "sliceName": "Birth in France by department in 2016", "width": 8 }, "type": "CHART" }, "CHART-rywK4GVR4X": { "children": [], "id": "CHART-rywK4GVR4X", "meta": { "chartId": 3942, "height": 50, "sliceName": "Heatmap", "width": 6 }, "type": "CHART" }, "COLUMN-ByUFVf40EQ": { "children": [ "CHART-rywK4GVR4X", "CHART-HJOYVMV0E7" ], "id": "COLUMN-ByUFVf40EQ", "meta": { "background": "BACKGROUND_TRANSPARENT", "width": 6 }, "type": "COLUMN" }, "COLUMN-rkmYVGN04Q": { "children": [ "CHART-rJ4K4GV04Q", "CHART-H1HYNzEANX" ], "id": "COLUMN-rkmYVGN04Q", "meta": { "background": "BACKGROUND_TRANSPARENT", "width": 6 }, "type": "COLUMN" }, "GRID_ID": { "children": [ "ROW-SytNzNA4X", "ROW-S1MK4M4A4X", "ROW-HkFFEzVRVm" ], "id": "GRID_ID", "type": "GRID" }, "HEADER_ID": { "id": "HEADER_ID", "meta": { "text": "Misc Charts" }, "type": "HEADER" }, "ROOT_ID": { "children": [ "GRID_ID" ], "id": "ROOT_ID", "type": "ROOT" }, "ROW-HkFFEzVRVm": { "children": [ "CHART-r19KVMNCE7", "CHART-BkeVbh8ANQ" ], "id": "ROW-HkFFEzVRVm", "meta": { "background": "BACKGROUND_TRANSPARENT" }, "type": "ROW" }, "ROW-S1MK4M4A4X": { "children": [ "COLUMN-rkmYVGN04Q", "COLUMN-ByUFVf40EQ" ], "id": "ROW-S1MK4M4A4X", "meta": { "background": "BACKGROUND_TRANSPARENT" }, "type": "ROW" }, "ROW-SytNzNA4X": { "children": [ "CHART-rkgF4G4A4X", "CHART-S1WYNz4AVX" ], "id": "ROW-SytNzNA4X", "meta": { "background": "BACKGROUND_TRANSPARENT" }, "type": "ROW" }, "DASHBOARD_VERSION_KEY": "v2" } """) pos = json.loads(js) slices = ( db.session .query(Slice) .filter(Slice.slice_name.in_(misc_dash_slices)) .all() ) slices = sorted(slices, key=lambda x: x.id) update_slice_ids(pos, slices) dash.dashboard_title = 'Misc Charts' dash.position_json = json.dumps(pos, indent=4) dash.slug = DASH_SLUG dash.slices = slices db.session.merge(dash) db.session.commit()
[ "Loading", "a", "dashboard", "featuring", "misc", "charts" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/data/misc_dashboard.py#L32-L228
[ "def", "load_misc_dashboard", "(", ")", ":", "print", "(", "'Creating the dashboard'", ")", "db", ".", "session", ".", "expunge_all", "(", ")", "dash", "=", "db", ".", "session", ".", "query", "(", "Dash", ")", ".", "filter_by", "(", "slug", "=", "DASH_SLUG", ")", ".", "first", "(", ")", "if", "not", "dash", ":", "dash", "=", "Dash", "(", ")", "js", "=", "textwrap", ".", "dedent", "(", "\"\"\"\\\n{\n \"CHART-BkeVbh8ANQ\": {\n \"children\": [],\n \"id\": \"CHART-BkeVbh8ANQ\",\n \"meta\": {\n \"chartId\": 4004,\n \"height\": 34,\n \"sliceName\": \"Multi Line\",\n \"width\": 8\n },\n \"type\": \"CHART\"\n },\n \"CHART-H1HYNzEANX\": {\n \"children\": [],\n \"id\": \"CHART-H1HYNzEANX\",\n \"meta\": {\n \"chartId\": 3940,\n \"height\": 50,\n \"sliceName\": \"Energy Sankey\",\n \"width\": 6\n },\n \"type\": \"CHART\"\n },\n \"CHART-HJOYVMV0E7\": {\n \"children\": [],\n \"id\": \"CHART-HJOYVMV0E7\",\n \"meta\": {\n \"chartId\": 3969,\n \"height\": 63,\n \"sliceName\": \"Mapbox Long/Lat\",\n \"width\": 6\n },\n \"type\": \"CHART\"\n },\n \"CHART-S1WYNz4AVX\": {\n \"children\": [],\n \"id\": \"CHART-S1WYNz4AVX\",\n \"meta\": {\n \"chartId\": 3989,\n \"height\": 25,\n \"sliceName\": \"Parallel Coordinates\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-r19KVMNCE7\": {\n \"children\": [],\n \"id\": \"CHART-r19KVMNCE7\",\n \"meta\": {\n \"chartId\": 3971,\n \"height\": 34,\n \"sliceName\": \"Calendar Heatmap multiformat 0\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-rJ4K4GV04Q\": {\n \"children\": [],\n \"id\": \"CHART-rJ4K4GV04Q\",\n \"meta\": {\n \"chartId\": 3941,\n \"height\": 63,\n \"sliceName\": \"Energy Force Layout\",\n \"width\": 6\n },\n \"type\": \"CHART\"\n },\n \"CHART-rkgF4G4A4X\": {\n \"children\": [],\n \"id\": \"CHART-rkgF4G4A4X\",\n \"meta\": {\n \"chartId\": 3970,\n \"height\": 25,\n \"sliceName\": \"Birth in France by department in 2016\",\n \"width\": 8\n },\n \"type\": \"CHART\"\n },\n \"CHART-rywK4GVR4X\": {\n \"children\": [],\n \"id\": \"CHART-rywK4GVR4X\",\n \"meta\": {\n \"chartId\": 3942,\n \"height\": 50,\n \"sliceName\": \"Heatmap\",\n \"width\": 6\n },\n \"type\": \"CHART\"\n },\n \"COLUMN-ByUFVf40EQ\": {\n \"children\": [\n \"CHART-rywK4GVR4X\",\n \"CHART-HJOYVMV0E7\"\n ],\n \"id\": \"COLUMN-ByUFVf40EQ\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\",\n \"width\": 6\n },\n \"type\": \"COLUMN\"\n },\n \"COLUMN-rkmYVGN04Q\": {\n \"children\": [\n \"CHART-rJ4K4GV04Q\",\n \"CHART-H1HYNzEANX\"\n ],\n \"id\": \"COLUMN-rkmYVGN04Q\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\",\n \"width\": 6\n },\n \"type\": \"COLUMN\"\n },\n \"GRID_ID\": {\n \"children\": [\n \"ROW-SytNzNA4X\",\n \"ROW-S1MK4M4A4X\",\n \"ROW-HkFFEzVRVm\"\n ],\n \"id\": \"GRID_ID\",\n \"type\": \"GRID\"\n },\n \"HEADER_ID\": {\n \"id\": \"HEADER_ID\",\n \"meta\": {\n \"text\": \"Misc Charts\"\n },\n \"type\": \"HEADER\"\n },\n \"ROOT_ID\": {\n \"children\": [\n \"GRID_ID\"\n ],\n \"id\": \"ROOT_ID\",\n \"type\": \"ROOT\"\n },\n \"ROW-HkFFEzVRVm\": {\n \"children\": [\n \"CHART-r19KVMNCE7\",\n \"CHART-BkeVbh8ANQ\"\n ],\n \"id\": \"ROW-HkFFEzVRVm\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"ROW-S1MK4M4A4X\": {\n \"children\": [\n \"COLUMN-rkmYVGN04Q\",\n \"COLUMN-ByUFVf40EQ\"\n ],\n \"id\": \"ROW-S1MK4M4A4X\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"ROW-SytNzNA4X\": {\n \"children\": [\n \"CHART-rkgF4G4A4X\",\n \"CHART-S1WYNz4AVX\"\n ],\n \"id\": 
\"ROW-SytNzNA4X\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"DASHBOARD_VERSION_KEY\": \"v2\"\n}\n \"\"\"", ")", "pos", "=", "json", ".", "loads", "(", "js", ")", "slices", "=", "(", "db", ".", "session", ".", "query", "(", "Slice", ")", ".", "filter", "(", "Slice", ".", "slice_name", ".", "in_", "(", "misc_dash_slices", ")", ")", ".", "all", "(", ")", ")", "slices", "=", "sorted", "(", "slices", ",", "key", "=", "lambda", "x", ":", "x", ".", "id", ")", "update_slice_ids", "(", "pos", ",", "slices", ")", "dash", ".", "dashboard_title", "=", "'Misc Charts'", "dash", ".", "position_json", "=", "json", ".", "dumps", "(", "pos", ",", "indent", "=", "4", ")", "dash", ".", "slug", "=", "DASH_SLUG", "dash", ".", "slices", "=", "slices", "db", ".", "session", ".", "merge", "(", "dash", ")", "db", ".", "session", ".", "commit", "(", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
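The position_json blob in the record above is a flat id-to-node map describing a v2 layout tree rooted at ROOT_ID; each node carries an id, a type, a children list, and (for charts) a meta block. A minimal sketch that walks such a tree follows; the single row/chart nodes and their ids are invented, not taken from the dashboard.

import json

layout = json.loads("""
{
  "ROOT_ID": {"id": "ROOT_ID", "type": "ROOT", "children": ["GRID_ID"]},
  "GRID_ID": {"id": "GRID_ID", "type": "GRID", "children": ["ROW-demo1"]},
  "ROW-demo1": {"id": "ROW-demo1", "type": "ROW", "children": ["CHART-demo1"]},
  "CHART-demo1": {"id": "CHART-demo1", "type": "CHART", "children": [],
                  "meta": {"chartId": 1, "sliceName": "Demo", "width": 4, "height": 25}},
  "DASHBOARD_VERSION_KEY": "v2"
}
""")

def walk(node_id, depth=0):
    # Depth-first walk from the root; DASHBOARD_VERSION_KEY is a plain
    # string entry, not a node, so it is never reached from ROOT_ID.
    node = layout[node_id]
    label = node['meta'].get('sliceName', '') if 'meta' in node else ''
    print('  ' * depth + node['type'], label)
    for child in node.get('children', []):
        walk(child, depth + 1)

walk('ROOT_ID')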
load_world_bank_health_n_pop
Loads the world bank health dataset, slices and a dashboard
superset/data/world_bank.py
def load_world_bank_health_n_pop(): """Loads the world bank health dataset, slices and a dashboard""" tbl_name = 'wb_health_population' data = get_example_data('countries.json.gz') pdf = pd.read_json(data) pdf.columns = [col.replace('.', '_') for col in pdf.columns] pdf.year = pd.to_datetime(pdf.year) pdf.to_sql( tbl_name, db.engine, if_exists='replace', chunksize=50, dtype={ 'year': DateTime(), 'country_code': String(3), 'country_name': String(255), 'region': String(255), }, index=False) print('Creating table [wb_health_population] reference') tbl = db.session.query(TBL).filter_by(table_name=tbl_name).first() if not tbl: tbl = TBL(table_name=tbl_name) tbl.description = utils.readfile(os.path.join(DATA_FOLDER, 'countries.md')) tbl.main_dttm_col = 'year' tbl.database = utils.get_or_create_main_db() tbl.filter_select_enabled = True metrics = [ 'sum__SP_POP_TOTL', 'sum__SH_DYN_AIDS', 'sum__SH_DYN_AIDS', 'sum__SP_RUR_TOTL_ZS', 'sum__SP_DYN_LE00_IN', ] for m in metrics: if not any(col.metric_name == m for col in tbl.metrics): tbl.metrics.append(SqlMetric( metric_name=m, expression=f'{m[:3]}({m[5:]})', )) db.session.merge(tbl) db.session.commit() tbl.fetch_metadata() defaults = { 'compare_lag': '10', 'compare_suffix': 'o10Y', 'limit': '25', 'granularity_sqla': 'year', 'groupby': [], 'metric': 'sum__SP_POP_TOTL', 'metrics': ['sum__SP_POP_TOTL'], 'row_limit': config.get('ROW_LIMIT'), 'since': '2014-01-01', 'until': '2014-01-02', 'time_range': '2014-01-01 : 2014-01-02', 'where': '', 'markup_type': 'markdown', 'country_fieldtype': 'cca3', 'secondary_metric': 'sum__SP_POP_TOTL', 'entity': 'country_code', 'show_bubbles': True, } print('Creating slices') slices = [ Slice( slice_name='Region Filter', viz_type='filter_box', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, viz_type='filter_box', date_filter=False, filter_configs=[ { 'asc': False, 'clearable': True, 'column': 'region', 'key': '2s98dfu', 'metric': 'sum__SP_POP_TOTL', 'multiple': True, }, { 'asc': False, 'clearable': True, 'key': 'li3j2lk', 'column': 'country_name', 'metric': 'sum__SP_POP_TOTL', 'multiple': True, }, ])), Slice( slice_name="World's Population", viz_type='big_number', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, since='2000', viz_type='big_number', compare_lag='10', metric='sum__SP_POP_TOTL', compare_suffix='over 10Y')), Slice( slice_name='Most Populated Countries', viz_type='table', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, viz_type='table', metrics=['sum__SP_POP_TOTL'], groupby=['country_name'])), Slice( slice_name='Growth Rate', viz_type='line', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, viz_type='line', since='1960-01-01', metrics=['sum__SP_POP_TOTL'], num_period_compare='10', groupby=['country_name'])), Slice( slice_name='% Rural', viz_type='world_map', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, viz_type='world_map', metric='sum__SP_RUR_TOTL_ZS', num_period_compare='10')), Slice( slice_name='Life Expectancy VS Rural %', viz_type='bubble', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, viz_type='bubble', since='2011-01-01', until='2011-01-02', series='region', limit=0, entity='country_name', x='sum__SP_RUR_TOTL_ZS', y='sum__SP_DYN_LE00_IN', size='sum__SP_POP_TOTL', max_bubble_size='50', filters=[{ 'col': 'country_code', 'val': [ 'TCA', 'MNP', 'DMA', 'MHL', 'MCO', 'SXM', 'CYM', 'TUV', 'IMY', 'KNA', 'ASM', 'ADO', 
'AMA', 'PLW', ], 'op': 'not in'}], )), Slice( slice_name='Rural Breakdown', viz_type='sunburst', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, viz_type='sunburst', groupby=['region', 'country_name'], secondary_metric='sum__SP_RUR_TOTL', since='2011-01-01', until='2011-01-01')), Slice( slice_name="World's Pop Growth", viz_type='area', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, since='1960-01-01', until='now', viz_type='area', groupby=['region'])), Slice( slice_name='Box plot', viz_type='box_plot', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, since='1960-01-01', until='now', whisker_options='Min/max (no outliers)', x_ticks_layout='staggered', viz_type='box_plot', groupby=['region'])), Slice( slice_name='Treemap', viz_type='treemap', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, since='1960-01-01', until='now', viz_type='treemap', metrics=['sum__SP_POP_TOTL'], groupby=['region', 'country_code'])), Slice( slice_name='Parallel Coordinates', viz_type='para', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, since='2011-01-01', until='2011-01-01', viz_type='para', limit=100, metrics=[ 'sum__SP_POP_TOTL', 'sum__SP_RUR_TOTL_ZS', 'sum__SH_DYN_AIDS'], secondary_metric='sum__SP_POP_TOTL', series='country_name')), ] misc_dash_slices.add(slices[-1].slice_name) for slc in slices: merge_slice(slc) print("Creating a World's Health Bank dashboard") dash_name = "World's Bank Data" slug = 'world_health' dash = db.session.query(Dash).filter_by(slug=slug).first() if not dash: dash = Dash() js = textwrap.dedent("""\ { "CHART-36bfc934": { "children": [], "id": "CHART-36bfc934", "meta": { "chartId": 40, "height": 25, "sliceName": "Region Filter", "width": 2 }, "type": "CHART" }, "CHART-37982887": { "children": [], "id": "CHART-37982887", "meta": { "chartId": 41, "height": 25, "sliceName": "World's Population", "width": 2 }, "type": "CHART" }, "CHART-17e0f8d8": { "children": [], "id": "CHART-17e0f8d8", "meta": { "chartId": 42, "height": 92, "sliceName": "Most Populated Countries", "width": 3 }, "type": "CHART" }, "CHART-2ee52f30": { "children": [], "id": "CHART-2ee52f30", "meta": { "chartId": 43, "height": 38, "sliceName": "Growth Rate", "width": 6 }, "type": "CHART" }, "CHART-2d5b6871": { "children": [], "id": "CHART-2d5b6871", "meta": { "chartId": 44, "height": 52, "sliceName": "% Rural", "width": 7 }, "type": "CHART" }, "CHART-0fd0d252": { "children": [], "id": "CHART-0fd0d252", "meta": { "chartId": 45, "height": 50, "sliceName": "Life Expectancy VS Rural %", "width": 8 }, "type": "CHART" }, "CHART-97f4cb48": { "children": [], "id": "CHART-97f4cb48", "meta": { "chartId": 46, "height": 38, "sliceName": "Rural Breakdown", "width": 3 }, "type": "CHART" }, "CHART-b5e05d6f": { "children": [], "id": "CHART-b5e05d6f", "meta": { "chartId": 47, "height": 50, "sliceName": "World's Pop Growth", "width": 4 }, "type": "CHART" }, "CHART-e76e9f5f": { "children": [], "id": "CHART-e76e9f5f", "meta": { "chartId": 48, "height": 50, "sliceName": "Box plot", "width": 4 }, "type": "CHART" }, "CHART-a4808bba": { "children": [], "id": "CHART-a4808bba", "meta": { "chartId": 49, "height": 50, "sliceName": "Treemap", "width": 8 }, "type": "CHART" }, "COLUMN-071bbbad": { "children": [ "ROW-1e064e3c", "ROW-afdefba9" ], "id": "COLUMN-071bbbad", "meta": { "background": "BACKGROUND_TRANSPARENT", "width": 9 }, "type": "COLUMN" }, "COLUMN-fe3914b8": { "children": [ 
"CHART-36bfc934", "CHART-37982887" ], "id": "COLUMN-fe3914b8", "meta": { "background": "BACKGROUND_TRANSPARENT", "width": 2 }, "type": "COLUMN" }, "GRID_ID": { "children": [ "ROW-46632bc2", "ROW-3fa26c5d", "ROW-812b3f13" ], "id": "GRID_ID", "type": "GRID" }, "HEADER_ID": { "id": "HEADER_ID", "meta": { "text": "World's Bank Data" }, "type": "HEADER" }, "ROOT_ID": { "children": [ "GRID_ID" ], "id": "ROOT_ID", "type": "ROOT" }, "ROW-1e064e3c": { "children": [ "COLUMN-fe3914b8", "CHART-2d5b6871" ], "id": "ROW-1e064e3c", "meta": { "background": "BACKGROUND_TRANSPARENT" }, "type": "ROW" }, "ROW-3fa26c5d": { "children": [ "CHART-b5e05d6f", "CHART-0fd0d252" ], "id": "ROW-3fa26c5d", "meta": { "background": "BACKGROUND_TRANSPARENT" }, "type": "ROW" }, "ROW-46632bc2": { "children": [ "COLUMN-071bbbad", "CHART-17e0f8d8" ], "id": "ROW-46632bc2", "meta": { "background": "BACKGROUND_TRANSPARENT" }, "type": "ROW" }, "ROW-812b3f13": { "children": [ "CHART-a4808bba", "CHART-e76e9f5f" ], "id": "ROW-812b3f13", "meta": { "background": "BACKGROUND_TRANSPARENT" }, "type": "ROW" }, "ROW-afdefba9": { "children": [ "CHART-2ee52f30", "CHART-97f4cb48" ], "id": "ROW-afdefba9", "meta": { "background": "BACKGROUND_TRANSPARENT" }, "type": "ROW" }, "DASHBOARD_VERSION_KEY": "v2" } """) pos = json.loads(js) update_slice_ids(pos, slices) dash.dashboard_title = dash_name dash.position_json = json.dumps(pos, indent=4) dash.slug = slug dash.slices = slices[:-1] db.session.merge(dash) db.session.commit()
def load_world_bank_health_n_pop(): """Loads the world bank health dataset, slices and a dashboard""" tbl_name = 'wb_health_population' data = get_example_data('countries.json.gz') pdf = pd.read_json(data) pdf.columns = [col.replace('.', '_') for col in pdf.columns] pdf.year = pd.to_datetime(pdf.year) pdf.to_sql( tbl_name, db.engine, if_exists='replace', chunksize=50, dtype={ 'year': DateTime(), 'country_code': String(3), 'country_name': String(255), 'region': String(255), }, index=False) print('Creating table [wb_health_population] reference') tbl = db.session.query(TBL).filter_by(table_name=tbl_name).first() if not tbl: tbl = TBL(table_name=tbl_name) tbl.description = utils.readfile(os.path.join(DATA_FOLDER, 'countries.md')) tbl.main_dttm_col = 'year' tbl.database = utils.get_or_create_main_db() tbl.filter_select_enabled = True metrics = [ 'sum__SP_POP_TOTL', 'sum__SH_DYN_AIDS', 'sum__SH_DYN_AIDS', 'sum__SP_RUR_TOTL_ZS', 'sum__SP_DYN_LE00_IN', ] for m in metrics: if not any(col.metric_name == m for col in tbl.metrics): tbl.metrics.append(SqlMetric( metric_name=m, expression=f'{m[:3]}({m[5:]})', )) db.session.merge(tbl) db.session.commit() tbl.fetch_metadata() defaults = { 'compare_lag': '10', 'compare_suffix': 'o10Y', 'limit': '25', 'granularity_sqla': 'year', 'groupby': [], 'metric': 'sum__SP_POP_TOTL', 'metrics': ['sum__SP_POP_TOTL'], 'row_limit': config.get('ROW_LIMIT'), 'since': '2014-01-01', 'until': '2014-01-02', 'time_range': '2014-01-01 : 2014-01-02', 'where': '', 'markup_type': 'markdown', 'country_fieldtype': 'cca3', 'secondary_metric': 'sum__SP_POP_TOTL', 'entity': 'country_code', 'show_bubbles': True, } print('Creating slices') slices = [ Slice( slice_name='Region Filter', viz_type='filter_box', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, viz_type='filter_box', date_filter=False, filter_configs=[ { 'asc': False, 'clearable': True, 'column': 'region', 'key': '2s98dfu', 'metric': 'sum__SP_POP_TOTL', 'multiple': True, }, { 'asc': False, 'clearable': True, 'key': 'li3j2lk', 'column': 'country_name', 'metric': 'sum__SP_POP_TOTL', 'multiple': True, }, ])), Slice( slice_name="World's Population", viz_type='big_number', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, since='2000', viz_type='big_number', compare_lag='10', metric='sum__SP_POP_TOTL', compare_suffix='over 10Y')), Slice( slice_name='Most Populated Countries', viz_type='table', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, viz_type='table', metrics=['sum__SP_POP_TOTL'], groupby=['country_name'])), Slice( slice_name='Growth Rate', viz_type='line', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, viz_type='line', since='1960-01-01', metrics=['sum__SP_POP_TOTL'], num_period_compare='10', groupby=['country_name'])), Slice( slice_name='% Rural', viz_type='world_map', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, viz_type='world_map', metric='sum__SP_RUR_TOTL_ZS', num_period_compare='10')), Slice( slice_name='Life Expectancy VS Rural %', viz_type='bubble', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, viz_type='bubble', since='2011-01-01', until='2011-01-02', series='region', limit=0, entity='country_name', x='sum__SP_RUR_TOTL_ZS', y='sum__SP_DYN_LE00_IN', size='sum__SP_POP_TOTL', max_bubble_size='50', filters=[{ 'col': 'country_code', 'val': [ 'TCA', 'MNP', 'DMA', 'MHL', 'MCO', 'SXM', 'CYM', 'TUV', 'IMY', 'KNA', 'ASM', 'ADO', 
'AMA', 'PLW', ], 'op': 'not in'}], )), Slice( slice_name='Rural Breakdown', viz_type='sunburst', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, viz_type='sunburst', groupby=['region', 'country_name'], secondary_metric='sum__SP_RUR_TOTL', since='2011-01-01', until='2011-01-01')), Slice( slice_name="World's Pop Growth", viz_type='area', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, since='1960-01-01', until='now', viz_type='area', groupby=['region'])), Slice( slice_name='Box plot', viz_type='box_plot', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, since='1960-01-01', until='now', whisker_options='Min/max (no outliers)', x_ticks_layout='staggered', viz_type='box_plot', groupby=['region'])), Slice( slice_name='Treemap', viz_type='treemap', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, since='1960-01-01', until='now', viz_type='treemap', metrics=['sum__SP_POP_TOTL'], groupby=['region', 'country_code'])), Slice( slice_name='Parallel Coordinates', viz_type='para', datasource_type='table', datasource_id=tbl.id, params=get_slice_json( defaults, since='2011-01-01', until='2011-01-01', viz_type='para', limit=100, metrics=[ 'sum__SP_POP_TOTL', 'sum__SP_RUR_TOTL_ZS', 'sum__SH_DYN_AIDS'], secondary_metric='sum__SP_POP_TOTL', series='country_name')), ] misc_dash_slices.add(slices[-1].slice_name) for slc in slices: merge_slice(slc) print("Creating a World's Health Bank dashboard") dash_name = "World's Bank Data" slug = 'world_health' dash = db.session.query(Dash).filter_by(slug=slug).first() if not dash: dash = Dash() js = textwrap.dedent("""\ { "CHART-36bfc934": { "children": [], "id": "CHART-36bfc934", "meta": { "chartId": 40, "height": 25, "sliceName": "Region Filter", "width": 2 }, "type": "CHART" }, "CHART-37982887": { "children": [], "id": "CHART-37982887", "meta": { "chartId": 41, "height": 25, "sliceName": "World's Population", "width": 2 }, "type": "CHART" }, "CHART-17e0f8d8": { "children": [], "id": "CHART-17e0f8d8", "meta": { "chartId": 42, "height": 92, "sliceName": "Most Populated Countries", "width": 3 }, "type": "CHART" }, "CHART-2ee52f30": { "children": [], "id": "CHART-2ee52f30", "meta": { "chartId": 43, "height": 38, "sliceName": "Growth Rate", "width": 6 }, "type": "CHART" }, "CHART-2d5b6871": { "children": [], "id": "CHART-2d5b6871", "meta": { "chartId": 44, "height": 52, "sliceName": "% Rural", "width": 7 }, "type": "CHART" }, "CHART-0fd0d252": { "children": [], "id": "CHART-0fd0d252", "meta": { "chartId": 45, "height": 50, "sliceName": "Life Expectancy VS Rural %", "width": 8 }, "type": "CHART" }, "CHART-97f4cb48": { "children": [], "id": "CHART-97f4cb48", "meta": { "chartId": 46, "height": 38, "sliceName": "Rural Breakdown", "width": 3 }, "type": "CHART" }, "CHART-b5e05d6f": { "children": [], "id": "CHART-b5e05d6f", "meta": { "chartId": 47, "height": 50, "sliceName": "World's Pop Growth", "width": 4 }, "type": "CHART" }, "CHART-e76e9f5f": { "children": [], "id": "CHART-e76e9f5f", "meta": { "chartId": 48, "height": 50, "sliceName": "Box plot", "width": 4 }, "type": "CHART" }, "CHART-a4808bba": { "children": [], "id": "CHART-a4808bba", "meta": { "chartId": 49, "height": 50, "sliceName": "Treemap", "width": 8 }, "type": "CHART" }, "COLUMN-071bbbad": { "children": [ "ROW-1e064e3c", "ROW-afdefba9" ], "id": "COLUMN-071bbbad", "meta": { "background": "BACKGROUND_TRANSPARENT", "width": 9 }, "type": "COLUMN" }, "COLUMN-fe3914b8": { "children": [ 
"CHART-36bfc934", "CHART-37982887" ], "id": "COLUMN-fe3914b8", "meta": { "background": "BACKGROUND_TRANSPARENT", "width": 2 }, "type": "COLUMN" }, "GRID_ID": { "children": [ "ROW-46632bc2", "ROW-3fa26c5d", "ROW-812b3f13" ], "id": "GRID_ID", "type": "GRID" }, "HEADER_ID": { "id": "HEADER_ID", "meta": { "text": "World's Bank Data" }, "type": "HEADER" }, "ROOT_ID": { "children": [ "GRID_ID" ], "id": "ROOT_ID", "type": "ROOT" }, "ROW-1e064e3c": { "children": [ "COLUMN-fe3914b8", "CHART-2d5b6871" ], "id": "ROW-1e064e3c", "meta": { "background": "BACKGROUND_TRANSPARENT" }, "type": "ROW" }, "ROW-3fa26c5d": { "children": [ "CHART-b5e05d6f", "CHART-0fd0d252" ], "id": "ROW-3fa26c5d", "meta": { "background": "BACKGROUND_TRANSPARENT" }, "type": "ROW" }, "ROW-46632bc2": { "children": [ "COLUMN-071bbbad", "CHART-17e0f8d8" ], "id": "ROW-46632bc2", "meta": { "background": "BACKGROUND_TRANSPARENT" }, "type": "ROW" }, "ROW-812b3f13": { "children": [ "CHART-a4808bba", "CHART-e76e9f5f" ], "id": "ROW-812b3f13", "meta": { "background": "BACKGROUND_TRANSPARENT" }, "type": "ROW" }, "ROW-afdefba9": { "children": [ "CHART-2ee52f30", "CHART-97f4cb48" ], "id": "ROW-afdefba9", "meta": { "background": "BACKGROUND_TRANSPARENT" }, "type": "ROW" }, "DASHBOARD_VERSION_KEY": "v2" } """) pos = json.loads(js) update_slice_ids(pos, slices) dash.dashboard_title = dash_name dash.position_json = json.dumps(pos, indent=4) dash.slug = slug dash.slices = slices[:-1] db.session.merge(dash) db.session.commit()
[ "Loads", "the", "world", "bank", "health", "dataset", "slices", "and", "a", "dashboard" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/data/world_bank.py#L43-L507
[ "def", "load_world_bank_health_n_pop", "(", ")", ":", "tbl_name", "=", "'wb_health_population'", "data", "=", "get_example_data", "(", "'countries.json.gz'", ")", "pdf", "=", "pd", ".", "read_json", "(", "data", ")", "pdf", ".", "columns", "=", "[", "col", ".", "replace", "(", "'.'", ",", "'_'", ")", "for", "col", "in", "pdf", ".", "columns", "]", "pdf", ".", "year", "=", "pd", ".", "to_datetime", "(", "pdf", ".", "year", ")", "pdf", ".", "to_sql", "(", "tbl_name", ",", "db", ".", "engine", ",", "if_exists", "=", "'replace'", ",", "chunksize", "=", "50", ",", "dtype", "=", "{", "'year'", ":", "DateTime", "(", ")", ",", "'country_code'", ":", "String", "(", "3", ")", ",", "'country_name'", ":", "String", "(", "255", ")", ",", "'region'", ":", "String", "(", "255", ")", ",", "}", ",", "index", "=", "False", ")", "print", "(", "'Creating table [wb_health_population] reference'", ")", "tbl", "=", "db", ".", "session", ".", "query", "(", "TBL", ")", ".", "filter_by", "(", "table_name", "=", "tbl_name", ")", ".", "first", "(", ")", "if", "not", "tbl", ":", "tbl", "=", "TBL", "(", "table_name", "=", "tbl_name", ")", "tbl", ".", "description", "=", "utils", ".", "readfile", "(", "os", ".", "path", ".", "join", "(", "DATA_FOLDER", ",", "'countries.md'", ")", ")", "tbl", ".", "main_dttm_col", "=", "'year'", "tbl", ".", "database", "=", "utils", ".", "get_or_create_main_db", "(", ")", "tbl", ".", "filter_select_enabled", "=", "True", "metrics", "=", "[", "'sum__SP_POP_TOTL'", ",", "'sum__SH_DYN_AIDS'", ",", "'sum__SH_DYN_AIDS'", ",", "'sum__SP_RUR_TOTL_ZS'", ",", "'sum__SP_DYN_LE00_IN'", ",", "]", "for", "m", "in", "metrics", ":", "if", "not", "any", "(", "col", ".", "metric_name", "==", "m", "for", "col", "in", "tbl", ".", "metrics", ")", ":", "tbl", ".", "metrics", ".", "append", "(", "SqlMetric", "(", "metric_name", "=", "m", ",", "expression", "=", "f'{m[:3]}({m[5:]})'", ",", ")", ")", "db", ".", "session", ".", "merge", "(", "tbl", ")", "db", ".", "session", ".", "commit", "(", ")", "tbl", ".", "fetch_metadata", "(", ")", "defaults", "=", "{", "'compare_lag'", ":", "'10'", ",", "'compare_suffix'", ":", "'o10Y'", ",", "'limit'", ":", "'25'", ",", "'granularity_sqla'", ":", "'year'", ",", "'groupby'", ":", "[", "]", ",", "'metric'", ":", "'sum__SP_POP_TOTL'", ",", "'metrics'", ":", "[", "'sum__SP_POP_TOTL'", "]", ",", "'row_limit'", ":", "config", ".", "get", "(", "'ROW_LIMIT'", ")", ",", "'since'", ":", "'2014-01-01'", ",", "'until'", ":", "'2014-01-02'", ",", "'time_range'", ":", "'2014-01-01 : 2014-01-02'", ",", "'where'", ":", "''", ",", "'markup_type'", ":", "'markdown'", ",", "'country_fieldtype'", ":", "'cca3'", ",", "'secondary_metric'", ":", "'sum__SP_POP_TOTL'", ",", "'entity'", ":", "'country_code'", ",", "'show_bubbles'", ":", "True", ",", "}", "print", "(", "'Creating slices'", ")", "slices", "=", "[", "Slice", "(", "slice_name", "=", "'Region Filter'", ",", "viz_type", "=", "'filter_box'", ",", "datasource_type", "=", "'table'", ",", "datasource_id", "=", "tbl", ".", "id", ",", "params", "=", "get_slice_json", "(", "defaults", ",", "viz_type", "=", "'filter_box'", ",", "date_filter", "=", "False", ",", "filter_configs", "=", "[", "{", "'asc'", ":", "False", ",", "'clearable'", ":", "True", ",", "'column'", ":", "'region'", ",", "'key'", ":", "'2s98dfu'", ",", "'metric'", ":", "'sum__SP_POP_TOTL'", ",", "'multiple'", ":", "True", ",", "}", ",", "{", "'asc'", ":", "False", ",", "'clearable'", ":", "True", ",", "'key'", ":", "'li3j2lk'", ",", "'column'", ":", "'country_name'", ",", 
"'metric'", ":", "'sum__SP_POP_TOTL'", ",", "'multiple'", ":", "True", ",", "}", ",", "]", ")", ")", ",", "Slice", "(", "slice_name", "=", "\"World's Population\"", ",", "viz_type", "=", "'big_number'", ",", "datasource_type", "=", "'table'", ",", "datasource_id", "=", "tbl", ".", "id", ",", "params", "=", "get_slice_json", "(", "defaults", ",", "since", "=", "'2000'", ",", "viz_type", "=", "'big_number'", ",", "compare_lag", "=", "'10'", ",", "metric", "=", "'sum__SP_POP_TOTL'", ",", "compare_suffix", "=", "'over 10Y'", ")", ")", ",", "Slice", "(", "slice_name", "=", "'Most Populated Countries'", ",", "viz_type", "=", "'table'", ",", "datasource_type", "=", "'table'", ",", "datasource_id", "=", "tbl", ".", "id", ",", "params", "=", "get_slice_json", "(", "defaults", ",", "viz_type", "=", "'table'", ",", "metrics", "=", "[", "'sum__SP_POP_TOTL'", "]", ",", "groupby", "=", "[", "'country_name'", "]", ")", ")", ",", "Slice", "(", "slice_name", "=", "'Growth Rate'", ",", "viz_type", "=", "'line'", ",", "datasource_type", "=", "'table'", ",", "datasource_id", "=", "tbl", ".", "id", ",", "params", "=", "get_slice_json", "(", "defaults", ",", "viz_type", "=", "'line'", ",", "since", "=", "'1960-01-01'", ",", "metrics", "=", "[", "'sum__SP_POP_TOTL'", "]", ",", "num_period_compare", "=", "'10'", ",", "groupby", "=", "[", "'country_name'", "]", ")", ")", ",", "Slice", "(", "slice_name", "=", "'% Rural'", ",", "viz_type", "=", "'world_map'", ",", "datasource_type", "=", "'table'", ",", "datasource_id", "=", "tbl", ".", "id", ",", "params", "=", "get_slice_json", "(", "defaults", ",", "viz_type", "=", "'world_map'", ",", "metric", "=", "'sum__SP_RUR_TOTL_ZS'", ",", "num_period_compare", "=", "'10'", ")", ")", ",", "Slice", "(", "slice_name", "=", "'Life Expectancy VS Rural %'", ",", "viz_type", "=", "'bubble'", ",", "datasource_type", "=", "'table'", ",", "datasource_id", "=", "tbl", ".", "id", ",", "params", "=", "get_slice_json", "(", "defaults", ",", "viz_type", "=", "'bubble'", ",", "since", "=", "'2011-01-01'", ",", "until", "=", "'2011-01-02'", ",", "series", "=", "'region'", ",", "limit", "=", "0", ",", "entity", "=", "'country_name'", ",", "x", "=", "'sum__SP_RUR_TOTL_ZS'", ",", "y", "=", "'sum__SP_DYN_LE00_IN'", ",", "size", "=", "'sum__SP_POP_TOTL'", ",", "max_bubble_size", "=", "'50'", ",", "filters", "=", "[", "{", "'col'", ":", "'country_code'", ",", "'val'", ":", "[", "'TCA'", ",", "'MNP'", ",", "'DMA'", ",", "'MHL'", ",", "'MCO'", ",", "'SXM'", ",", "'CYM'", ",", "'TUV'", ",", "'IMY'", ",", "'KNA'", ",", "'ASM'", ",", "'ADO'", ",", "'AMA'", ",", "'PLW'", ",", "]", ",", "'op'", ":", "'not in'", "}", "]", ",", ")", ")", ",", "Slice", "(", "slice_name", "=", "'Rural Breakdown'", ",", "viz_type", "=", "'sunburst'", ",", "datasource_type", "=", "'table'", ",", "datasource_id", "=", "tbl", ".", "id", ",", "params", "=", "get_slice_json", "(", "defaults", ",", "viz_type", "=", "'sunburst'", ",", "groupby", "=", "[", "'region'", ",", "'country_name'", "]", ",", "secondary_metric", "=", "'sum__SP_RUR_TOTL'", ",", "since", "=", "'2011-01-01'", ",", "until", "=", "'2011-01-01'", ")", ")", ",", "Slice", "(", "slice_name", "=", "\"World's Pop Growth\"", ",", "viz_type", "=", "'area'", ",", "datasource_type", "=", "'table'", ",", "datasource_id", "=", "tbl", ".", "id", ",", "params", "=", "get_slice_json", "(", "defaults", ",", "since", "=", "'1960-01-01'", ",", "until", "=", "'now'", ",", "viz_type", "=", "'area'", ",", "groupby", "=", "[", "'region'", "]", ")", ")", ",", "Slice", "(", 
"slice_name", "=", "'Box plot'", ",", "viz_type", "=", "'box_plot'", ",", "datasource_type", "=", "'table'", ",", "datasource_id", "=", "tbl", ".", "id", ",", "params", "=", "get_slice_json", "(", "defaults", ",", "since", "=", "'1960-01-01'", ",", "until", "=", "'now'", ",", "whisker_options", "=", "'Min/max (no outliers)'", ",", "x_ticks_layout", "=", "'staggered'", ",", "viz_type", "=", "'box_plot'", ",", "groupby", "=", "[", "'region'", "]", ")", ")", ",", "Slice", "(", "slice_name", "=", "'Treemap'", ",", "viz_type", "=", "'treemap'", ",", "datasource_type", "=", "'table'", ",", "datasource_id", "=", "tbl", ".", "id", ",", "params", "=", "get_slice_json", "(", "defaults", ",", "since", "=", "'1960-01-01'", ",", "until", "=", "'now'", ",", "viz_type", "=", "'treemap'", ",", "metrics", "=", "[", "'sum__SP_POP_TOTL'", "]", ",", "groupby", "=", "[", "'region'", ",", "'country_code'", "]", ")", ")", ",", "Slice", "(", "slice_name", "=", "'Parallel Coordinates'", ",", "viz_type", "=", "'para'", ",", "datasource_type", "=", "'table'", ",", "datasource_id", "=", "tbl", ".", "id", ",", "params", "=", "get_slice_json", "(", "defaults", ",", "since", "=", "'2011-01-01'", ",", "until", "=", "'2011-01-01'", ",", "viz_type", "=", "'para'", ",", "limit", "=", "100", ",", "metrics", "=", "[", "'sum__SP_POP_TOTL'", ",", "'sum__SP_RUR_TOTL_ZS'", ",", "'sum__SH_DYN_AIDS'", "]", ",", "secondary_metric", "=", "'sum__SP_POP_TOTL'", ",", "series", "=", "'country_name'", ")", ")", ",", "]", "misc_dash_slices", ".", "add", "(", "slices", "[", "-", "1", "]", ".", "slice_name", ")", "for", "slc", "in", "slices", ":", "merge_slice", "(", "slc", ")", "print", "(", "\"Creating a World's Health Bank dashboard\"", ")", "dash_name", "=", "\"World's Bank Data\"", "slug", "=", "'world_health'", "dash", "=", "db", ".", "session", ".", "query", "(", "Dash", ")", ".", "filter_by", "(", "slug", "=", "slug", ")", ".", "first", "(", ")", "if", "not", "dash", ":", "dash", "=", "Dash", "(", ")", "js", "=", "textwrap", ".", "dedent", "(", "\"\"\"\\\n{\n \"CHART-36bfc934\": {\n \"children\": [],\n \"id\": \"CHART-36bfc934\",\n \"meta\": {\n \"chartId\": 40,\n \"height\": 25,\n \"sliceName\": \"Region Filter\",\n \"width\": 2\n },\n \"type\": \"CHART\"\n },\n \"CHART-37982887\": {\n \"children\": [],\n \"id\": \"CHART-37982887\",\n \"meta\": {\n \"chartId\": 41,\n \"height\": 25,\n \"sliceName\": \"World's Population\",\n \"width\": 2\n },\n \"type\": \"CHART\"\n },\n \"CHART-17e0f8d8\": {\n \"children\": [],\n \"id\": \"CHART-17e0f8d8\",\n \"meta\": {\n \"chartId\": 42,\n \"height\": 92,\n \"sliceName\": \"Most Populated Countries\",\n \"width\": 3\n },\n \"type\": \"CHART\"\n },\n \"CHART-2ee52f30\": {\n \"children\": [],\n \"id\": \"CHART-2ee52f30\",\n \"meta\": {\n \"chartId\": 43,\n \"height\": 38,\n \"sliceName\": \"Growth Rate\",\n \"width\": 6\n },\n \"type\": \"CHART\"\n },\n \"CHART-2d5b6871\": {\n \"children\": [],\n \"id\": \"CHART-2d5b6871\",\n \"meta\": {\n \"chartId\": 44,\n \"height\": 52,\n \"sliceName\": \"% Rural\",\n \"width\": 7\n },\n \"type\": \"CHART\"\n },\n \"CHART-0fd0d252\": {\n \"children\": [],\n \"id\": \"CHART-0fd0d252\",\n \"meta\": {\n \"chartId\": 45,\n \"height\": 50,\n \"sliceName\": \"Life Expectancy VS Rural %\",\n \"width\": 8\n },\n \"type\": \"CHART\"\n },\n \"CHART-97f4cb48\": {\n \"children\": [],\n \"id\": \"CHART-97f4cb48\",\n \"meta\": {\n \"chartId\": 46,\n \"height\": 38,\n \"sliceName\": \"Rural Breakdown\",\n \"width\": 3\n },\n \"type\": \"CHART\"\n },\n \"CHART-b5e05d6f\": {\n 
\"children\": [],\n \"id\": \"CHART-b5e05d6f\",\n \"meta\": {\n \"chartId\": 47,\n \"height\": 50,\n \"sliceName\": \"World's Pop Growth\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-e76e9f5f\": {\n \"children\": [],\n \"id\": \"CHART-e76e9f5f\",\n \"meta\": {\n \"chartId\": 48,\n \"height\": 50,\n \"sliceName\": \"Box plot\",\n \"width\": 4\n },\n \"type\": \"CHART\"\n },\n \"CHART-a4808bba\": {\n \"children\": [],\n \"id\": \"CHART-a4808bba\",\n \"meta\": {\n \"chartId\": 49,\n \"height\": 50,\n \"sliceName\": \"Treemap\",\n \"width\": 8\n },\n \"type\": \"CHART\"\n },\n \"COLUMN-071bbbad\": {\n \"children\": [\n \"ROW-1e064e3c\",\n \"ROW-afdefba9\"\n ],\n \"id\": \"COLUMN-071bbbad\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\",\n \"width\": 9\n },\n \"type\": \"COLUMN\"\n },\n \"COLUMN-fe3914b8\": {\n \"children\": [\n \"CHART-36bfc934\",\n \"CHART-37982887\"\n ],\n \"id\": \"COLUMN-fe3914b8\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\",\n \"width\": 2\n },\n \"type\": \"COLUMN\"\n },\n \"GRID_ID\": {\n \"children\": [\n \"ROW-46632bc2\",\n \"ROW-3fa26c5d\",\n \"ROW-812b3f13\"\n ],\n \"id\": \"GRID_ID\",\n \"type\": \"GRID\"\n },\n \"HEADER_ID\": {\n \"id\": \"HEADER_ID\",\n \"meta\": {\n \"text\": \"World's Bank Data\"\n },\n \"type\": \"HEADER\"\n },\n \"ROOT_ID\": {\n \"children\": [\n \"GRID_ID\"\n ],\n \"id\": \"ROOT_ID\",\n \"type\": \"ROOT\"\n },\n \"ROW-1e064e3c\": {\n \"children\": [\n \"COLUMN-fe3914b8\",\n \"CHART-2d5b6871\"\n ],\n \"id\": \"ROW-1e064e3c\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"ROW-3fa26c5d\": {\n \"children\": [\n \"CHART-b5e05d6f\",\n \"CHART-0fd0d252\"\n ],\n \"id\": \"ROW-3fa26c5d\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"ROW-46632bc2\": {\n \"children\": [\n \"COLUMN-071bbbad\",\n \"CHART-17e0f8d8\"\n ],\n \"id\": \"ROW-46632bc2\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"ROW-812b3f13\": {\n \"children\": [\n \"CHART-a4808bba\",\n \"CHART-e76e9f5f\"\n ],\n \"id\": \"ROW-812b3f13\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"ROW-afdefba9\": {\n \"children\": [\n \"CHART-2ee52f30\",\n \"CHART-97f4cb48\"\n ],\n \"id\": \"ROW-afdefba9\",\n \"meta\": {\n \"background\": \"BACKGROUND_TRANSPARENT\"\n },\n \"type\": \"ROW\"\n },\n \"DASHBOARD_VERSION_KEY\": \"v2\"\n}\n \"\"\"", ")", "pos", "=", "json", ".", "loads", "(", "js", ")", "update_slice_ids", "(", "pos", ",", "slices", ")", "dash", ".", "dashboard_title", "=", "dash_name", "dash", ".", "position_json", "=", "json", ".", "dumps", "(", "pos", ",", "indent", "=", "4", ")", "dash", ".", "slug", "=", "slug", "dash", ".", "slices", "=", "slices", "[", ":", "-", "1", "]", "db", ".", "session", ".", "merge", "(", "dash", ")", "db", ".", "session", ".", "commit", "(", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
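The loader above builds every slice's params by merging shared defaults with per-slice overrides. A minimal sketch of that pattern follows; this get_slice_json is a hypothetical stand-in written for illustration, not necessarily identical to the helper in superset.data.helpers.

import json

def get_slice_json(defaults, **kwargs):
    # Copy the shared defaults, then layer the per-slice overrides on top.
    merged = defaults.copy()
    merged.update(kwargs)
    return json.dumps(merged, indent=4, sort_keys=True)

defaults = {'metric': 'sum__SP_POP_TOTL', 'row_limit': 50}
print(get_slice_json(defaults, viz_type='table', groupby=['country_name']))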
train
load_country_map_data
Loads data for the country map visualization
superset/data/country_map.py
def load_country_map_data():
    """Loads data for the country map visualization"""
    csv_bytes = get_example_data(
        'birth_france_data_for_country_map.csv', is_gzip=False, make_bytes=True)
    data = pd.read_csv(csv_bytes, encoding='utf-8')
    data['dttm'] = datetime.datetime.now().date()
    data.to_sql(  # pylint: disable=no-member
        'birth_france_by_region',
        db.engine,
        if_exists='replace',
        chunksize=500,
        dtype={
            'DEPT_ID': String(10),
            '2003': BigInteger,
            '2004': BigInteger,
            '2005': BigInteger,
            '2006': BigInteger,
            '2007': BigInteger,
            '2008': BigInteger,
            '2009': BigInteger,
            '2010': BigInteger,
            '2011': BigInteger,
            '2012': BigInteger,
            '2013': BigInteger,
            '2014': BigInteger,
            'dttm': Date(),
        },
        index=False)
    print('Done loading table!')
    print('-' * 80)

    print('Creating table reference')
    obj = db.session.query(TBL).filter_by(table_name='birth_france_by_region').first()
    if not obj:
        obj = TBL(table_name='birth_france_by_region')
    obj.main_dttm_col = 'dttm'
    obj.database = utils.get_or_create_main_db()
    if not any(col.metric_name == 'avg__2004' for col in obj.metrics):
        obj.metrics.append(SqlMetric(
            metric_name='avg__2004',
            expression='AVG(2004)',
        ))
    db.session.merge(obj)
    db.session.commit()
    obj.fetch_metadata()
    tbl = obj

    slice_data = {
        'granularity_sqla': '',
        'since': '',
        'until': '',
        'where': '',
        'viz_type': 'country_map',
        'entity': 'DEPT_ID',
        'metric': {
            'expressionType': 'SIMPLE',
            'column': {
                'type': 'INT',
                'column_name': '2004',
            },
            'aggregate': 'AVG',
            'label': 'Boys',
            'optionName': 'metric_112342',
        },
        'row_limit': 500000,
    }

    print('Creating a slice')
    slc = Slice(
        slice_name='Birth in France by department in 2016',
        viz_type='country_map',
        datasource_type='table',
        datasource_id=tbl.id,
        params=get_slice_json(slice_data),
    )
    misc_dash_slices.add(slc.slice_name)
    merge_slice(slc)
[ "Loading", "data", "for", "map", "with", "country", "map" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/data/country_map.py#L35-L110
[ "def", "load_country_map_data", "(", ")", ":", "csv_bytes", "=", "get_example_data", "(", "'birth_france_data_for_country_map.csv'", ",", "is_gzip", "=", "False", ",", "make_bytes", "=", "True", ")", "data", "=", "pd", ".", "read_csv", "(", "csv_bytes", ",", "encoding", "=", "'utf-8'", ")", "data", "[", "'dttm'", "]", "=", "datetime", ".", "datetime", ".", "now", "(", ")", ".", "date", "(", ")", "data", ".", "to_sql", "(", "# pylint: disable=no-member", "'birth_france_by_region'", ",", "db", ".", "engine", ",", "if_exists", "=", "'replace'", ",", "chunksize", "=", "500", ",", "dtype", "=", "{", "'DEPT_ID'", ":", "String", "(", "10", ")", ",", "'2003'", ":", "BigInteger", ",", "'2004'", ":", "BigInteger", ",", "'2005'", ":", "BigInteger", ",", "'2006'", ":", "BigInteger", ",", "'2007'", ":", "BigInteger", ",", "'2008'", ":", "BigInteger", ",", "'2009'", ":", "BigInteger", ",", "'2010'", ":", "BigInteger", ",", "'2011'", ":", "BigInteger", ",", "'2012'", ":", "BigInteger", ",", "'2013'", ":", "BigInteger", ",", "'2014'", ":", "BigInteger", ",", "'dttm'", ":", "Date", "(", ")", ",", "}", ",", "index", "=", "False", ")", "print", "(", "'Done loading table!'", ")", "print", "(", "'-'", "*", "80", ")", "print", "(", "'Creating table reference'", ")", "obj", "=", "db", ".", "session", ".", "query", "(", "TBL", ")", ".", "filter_by", "(", "table_name", "=", "'birth_france_by_region'", ")", ".", "first", "(", ")", "if", "not", "obj", ":", "obj", "=", "TBL", "(", "table_name", "=", "'birth_france_by_region'", ")", "obj", ".", "main_dttm_col", "=", "'dttm'", "obj", ".", "database", "=", "utils", ".", "get_or_create_main_db", "(", ")", "if", "not", "any", "(", "col", ".", "metric_name", "==", "'avg__2004'", "for", "col", "in", "obj", ".", "metrics", ")", ":", "obj", ".", "metrics", ".", "append", "(", "SqlMetric", "(", "metric_name", "=", "'avg__2004'", ",", "expression", "=", "'AVG(2004)'", ",", ")", ")", "db", ".", "session", ".", "merge", "(", "obj", ")", "db", ".", "session", ".", "commit", "(", ")", "obj", ".", "fetch_metadata", "(", ")", "tbl", "=", "obj", "slice_data", "=", "{", "'granularity_sqla'", ":", "''", ",", "'since'", ":", "''", ",", "'until'", ":", "''", ",", "'where'", ":", "''", ",", "'viz_type'", ":", "'country_map'", ",", "'entity'", ":", "'DEPT_ID'", ",", "'metric'", ":", "{", "'expressionType'", ":", "'SIMPLE'", ",", "'column'", ":", "{", "'type'", ":", "'INT'", ",", "'column_name'", ":", "'2004'", ",", "}", ",", "'aggregate'", ":", "'AVG'", ",", "'label'", ":", "'Boys'", ",", "'optionName'", ":", "'metric_112342'", ",", "}", ",", "'row_limit'", ":", "500000", ",", "}", "print", "(", "'Creating a slice'", ")", "slc", "=", "Slice", "(", "slice_name", "=", "'Birth in France by department in 2016'", ",", "viz_type", "=", "'country_map'", ",", "datasource_type", "=", "'table'", ",", "datasource_id", "=", "tbl", ".", "id", ",", "params", "=", "get_slice_json", "(", "slice_data", ")", ",", ")", "misc_dash_slices", ".", "add", "(", "slc", ".", "slice_name", ")", "merge_slice", "(", "slc", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
ParsedQuery.get_statements
Returns a list of SQL statements as strings, stripped
superset/sql_parse.py
def get_statements(self):
    """Returns a list of SQL statements as strings, stripped"""
    statements = []
    for statement in self._parsed:
        if statement:
            sql = str(statement).strip(' \n;\t')
            if sql:
                statements.append(sql)
    return statements
[ "Returns", "a", "list", "of", "SQL", "statements", "as", "strings", "stripped" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/sql_parse.py#L67-L75
[ "def", "get_statements", "(", "self", ")", ":", "statements", "=", "[", "]", "for", "statement", "in", "self", ".", "_parsed", ":", "if", "statement", ":", "sql", "=", "str", "(", "statement", ")", ".", "strip", "(", "' \\n;\\t'", ")", "if", "sql", ":", "statements", ".", "append", "(", "sql", ")", "return", "statements" ]
ca2996c78f679260eb79c6008e276733df5fb653
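A short usage sketch for the method above, assuming ParsedQuery can be constructed directly from a SQL string as in superset.sql_parse:

from superset.sql_parse import ParsedQuery

multi_sql = "SELECT 1;\nSELECT 2;"
parsed = ParsedQuery(multi_sql)
# Each statement comes back stripped of trailing semicolons and whitespace.
print(parsed.get_statements())  # expected: ['SELECT 1', 'SELECT 2']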
train
ParsedQuery.as_create_table
Reformats the query into the create table as query. Works only for single SELECT SQL statements; in all other cases the sql query is not modified. :param superset_query: string, sql query that will be executed :param table_name: string, will contain the results of the query execution :param overwrite: boolean, table table_name will be dropped if true :return: string, create table as query
superset/sql_parse.py
def as_create_table(self, table_name, overwrite=False):
    """Reformats the query into the create table as query.

    Works only for single SELECT SQL statements; in all other cases
    the sql query is not modified.
    :param superset_query: string, sql query that will be executed
    :param table_name: string, will contain the results of the query execution
    :param overwrite: boolean, table table_name will be dropped if true
    :return: string, create table as query
    """
    exec_sql = ''
    sql = self.stripped()
    if overwrite:
        exec_sql = f'DROP TABLE IF EXISTS {table_name};\n'
    exec_sql += f'CREATE TABLE {table_name} AS \n{sql}'
    return exec_sql
[ "Reformats", "the", "query", "into", "the", "create", "table", "as", "query", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/sql_parse.py#L105-L121
[ "def", "as_create_table", "(", "self", ",", "table_name", ",", "overwrite", "=", "False", ")", ":", "exec_sql", "=", "''", "sql", "=", "self", ".", "stripped", "(", ")", "if", "overwrite", ":", "exec_sql", "=", "f'DROP TABLE IF EXISTS {table_name};\\n'", "exec_sql", "+=", "f'CREATE TABLE {table_name} AS \\n{sql}'", "return", "exec_sql" ]
ca2996c78f679260eb79c6008e276733df5fb653
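A sketch of how the CTAS wrapper above behaves, under the same assumption that ParsedQuery takes the raw SQL string:

from superset.sql_parse import ParsedQuery

parsed = ParsedQuery('SELECT * FROM some_table')
print(parsed.as_create_table('tmp_table', overwrite=True))
# expected output:
# DROP TABLE IF EXISTS tmp_table;
# CREATE TABLE tmp_table AS
# SELECT * FROM some_table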
train
ParsedQuery.get_query_with_new_limit
returns the query with the specified limit
superset/sql_parse.py
def get_query_with_new_limit(self, new_limit):
    """returns the query with the specified limit

    Does not change the underlying query.
    """
    if not self._limit:
        return self.sql + ' LIMIT ' + str(new_limit)
    limit_pos = None
    tokens = self._parsed[0].tokens
    # Find the position of the LIMIT keyword
    for pos, item in enumerate(tokens):
        if item.ttype in Keyword and item.value.lower() == 'limit':
            limit_pos = pos
            break
    limit = tokens[limit_pos + 2]
    if limit.ttype == sqlparse.tokens.Literal.Number.Integer:
        tokens[limit_pos + 2].value = new_limit
    elif limit.is_group:
        tokens[limit_pos + 2].value = (
            '{}, {}'.format(next(limit.get_identifiers()), new_limit)
        )
    str_res = ''
    for i in tokens:
        str_res += str(i.value)
    return str_res
[ "returns", "the", "query", "with", "the", "specified", "limit" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/sql_parse.py#L166-L189
[ "def", "get_query_with_new_limit", "(", "self", ",", "new_limit", ")", ":", "\"\"\"does not change the underlying query\"\"\"", "if", "not", "self", ".", "_limit", ":", "return", "self", ".", "sql", "+", "' LIMIT '", "+", "str", "(", "new_limit", ")", "limit_pos", "=", "None", "tokens", "=", "self", ".", "_parsed", "[", "0", "]", ".", "tokens", "# Add all items to before_str until there is a limit", "for", "pos", ",", "item", "in", "enumerate", "(", "tokens", ")", ":", "if", "item", ".", "ttype", "in", "Keyword", "and", "item", ".", "value", ".", "lower", "(", ")", "==", "'limit'", ":", "limit_pos", "=", "pos", "break", "limit", "=", "tokens", "[", "limit_pos", "+", "2", "]", "if", "limit", ".", "ttype", "==", "sqlparse", ".", "tokens", ".", "Literal", ".", "Number", ".", "Integer", ":", "tokens", "[", "limit_pos", "+", "2", "]", ".", "value", "=", "new_limit", "elif", "limit", ".", "is_group", ":", "tokens", "[", "limit_pos", "+", "2", "]", ".", "value", "=", "(", "'{}, {}'", ".", "format", "(", "next", "(", "limit", ".", "get_identifiers", "(", ")", ")", ",", "new_limit", ")", ")", "str_res", "=", "''", "for", "i", "in", "tokens", ":", "str_res", "+=", "str", "(", "i", ".", "value", ")", "return", "str_res" ]
ca2996c78f679260eb79c6008e276733df5fb653
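Two illustrative calls for the limit-rewriting method above (same ParsedQuery assumption): when no LIMIT is present one is appended, otherwise the existing literal is replaced in place.

from superset.sql_parse import ParsedQuery

print(ParsedQuery('SELECT * FROM t').get_query_with_new_limit(1000))
# expected: SELECT * FROM t LIMIT 1000
print(ParsedQuery('SELECT * FROM t LIMIT 10').get_query_with_new_limit(1000))
# expected: SELECT * FROM t LIMIT 1000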
train
url_param
Read a url or post parameter and use it in your SQL Lab query When in SQL Lab, it's possible to add arbitrary URL "query string" parameters, and use those in your SQL code. For instance you can alter your url and add `?foo=bar`, as in `{domain}/superset/sqllab?foo=bar`. Then if your query is something like SELECT * FROM foo = '{{ url_param('foo') }}', it will be parsed at runtime and replaced by the value in the URL. As you create a visualization from this SQL Lab query, you can pass parameters in the explore view as well as from the dashboard, and it should carry through to your queries. :param param: the parameter to lookup :type param: str :param default: the value to return in the absence of the parameter :type default: str
superset/jinja_context.py
def url_param(param, default=None):
    """Read a url or post parameter and use it in your SQL Lab query

    When in SQL Lab, it's possible to add arbitrary URL "query string"
    parameters, and use those in your SQL code. For instance you can
    alter your url and add `?foo=bar`, as in
    `{domain}/superset/sqllab?foo=bar`. Then if your query is something like
    SELECT * FROM foo = '{{ url_param('foo') }}', it will be parsed at
    runtime and replaced by the value in the URL.

    As you create a visualization from this SQL Lab query, you can pass
    parameters in the explore view as well as from the dashboard, and
    it should carry through to your queries.

    :param param: the parameter to lookup
    :type param: str
    :param default: the value to return in the absence of the parameter
    :type default: str
    """
    if request.args.get(param):
        return request.args.get(param, default)
    # Supporting POST as well as get
    if request.form.get('form_data'):
        form_data = json.loads(request.form.get('form_data'))
        url_params = form_data.get('url_params') or {}
        return url_params.get(param, default)
    return default
[ "Read", "a", "url", "or", "post", "parameter", "and", "use", "it", "in", "your", "SQL", "Lab", "query" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/jinja_context.py#L44-L70
[ "def", "url_param", "(", "param", ",", "default", "=", "None", ")", ":", "if", "request", ".", "args", ".", "get", "(", "param", ")", ":", "return", "request", ".", "args", ".", "get", "(", "param", ",", "default", ")", "# Supporting POST as well as get", "if", "request", ".", "form", ".", "get", "(", "'form_data'", ")", ":", "form_data", "=", "json", ".", "loads", "(", "request", ".", "form", ".", "get", "(", "'form_data'", ")", ")", "url_params", "=", "form_data", ".", "get", "(", "'url_params'", ")", "or", "{", "}", "return", "url_params", ".", "get", "(", "param", ",", "default", ")", "return", "default" ]
ca2996c78f679260eb79c6008e276733df5fb653
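An illustrative SQL Lab template that would exercise url_param; the table and column names here are made up for the example.

# In SQL Lab, with the page opened as {domain}/superset/sqllab?country=FR,
# this template renders with 'FR'; without the parameter it falls back to 'US'.
sql = """
SELECT *
FROM orders
WHERE country = '{{ url_param('country', 'US') }}'
"""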
train
filter_values
Gets the values for a particular filter as a list This is useful if: - you want to use a filter box to filter a query where the name of the filter box column doesn't match the one in the select statement - you want to be able to filter inside the main query for speed purposes This searches for "filters" and "extra_filters" in form_data for a match Usage example: SELECT action, count(*) as times FROM logs WHERE action in ( {{ "'" + "','".join(filter_values('action_type')) + "'" }} ) GROUP BY 1 :param column: column/filter name to lookup :type column: str :param default: default value to return if there are no matching columns :type default: str :return: returns a list of filter values :type: list
superset/jinja_context.py
def filter_values(column, default=None):
    """Gets the values for a particular filter as a list

    This is useful if:
    - you want to use a filter box to filter a query where the name of
      the filter box column doesn't match the one in the select statement
    - you want to be able to filter inside the main query for speed
      purposes

    This searches for "filters" and "extra_filters" in form_data for a match

    Usage example:
        SELECT action, count(*) as times
        FROM logs
        WHERE action in ( {{ "'" + "','".join(filter_values('action_type')) + "'" }} )
        GROUP BY 1

    :param column: column/filter name to lookup
    :type column: str
    :param default: default value to return if there are no matching columns
    :type default: str
    :return: returns a list of filter values
    :type: list
    """
    form_data = json.loads(request.form.get('form_data', '{}'))
    return_val = []
    for filter_type in ['filters', 'extra_filters']:
        if filter_type not in form_data:
            continue
        for f in form_data[filter_type]:
            if f['col'] == column:
                for v in f['val']:
                    return_val.append(v)
    if return_val:
        return return_val
    if default:
        return [default]
    else:
        return []
[ "Gets", "a", "values", "for", "a", "particular", "filter", "as", "a", "list" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/jinja_context.py#L85-L125
[ "def", "filter_values", "(", "column", ",", "default", "=", "None", ")", ":", "form_data", "=", "json", ".", "loads", "(", "request", ".", "form", ".", "get", "(", "'form_data'", ",", "'{}'", ")", ")", "return_val", "=", "[", "]", "for", "filter_type", "in", "[", "'filters'", ",", "'extra_filters'", "]", ":", "if", "filter_type", "not", "in", "form_data", ":", "continue", "for", "f", "in", "form_data", "[", "filter_type", "]", ":", "if", "f", "[", "'col'", "]", "==", "column", ":", "for", "v", "in", "f", "[", "'val'", "]", ":", "return_val", ".", "append", "(", "v", ")", "if", "return_val", ":", "return", "return_val", "if", "default", ":", "return", "[", "default", "]", "else", ":", "return", "[", "]" ]
ca2996c78f679260eb79c6008e276733df5fb653
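The fallback behavior of filter_values is worth spelling out: with no matching filter in form_data it returns an empty list, unless a default is supplied. A hypothetical illustration, assuming a request context whose form_data carries no 'action_type' filter:

filter_values('action_type')         # -> []
filter_values('action_type', 'all')  # -> ['all']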
train
BaseTemplateProcessor.process_template
Processes a sql template >>> sql = "SELECT '{{ datetime(2017, 1, 1).isoformat() }}'" >>> process_template(sql) "SELECT '2017-01-01T00:00:00'"
superset/jinja_context.py
def process_template(self, sql, **kwargs):
    """Processes a sql template

    >>> sql = "SELECT '{{ datetime(2017, 1, 1).isoformat() }}'"
    >>> process_template(sql)
    "SELECT '2017-01-01T00:00:00'"
    """
    template = self.env.from_string(sql)
    kwargs.update(self.context)
    return template.render(kwargs)
[ "Processes", "a", "sql", "template" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/jinja_context.py#L165-L174
[ "def", "process_template", "(", "self", ",", "sql", ",", "*", "*", "kwargs", ")", ":", "template", "=", "self", ".", "env", ".", "from_string", "(", "sql", ")", "kwargs", ".", "update", "(", "self", ".", "context", ")", "return", "template", ".", "render", "(", "kwargs", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
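A sketch of rendering a template end to end. The get_template_processor factory follows what superset.jinja_context exposes, and `database` is assumed to be an existing superset Database model instance.

from superset.jinja_context import get_template_processor

tp = get_template_processor(database=database)  # `database` is an assumption
rendered = tp.process_template("SELECT '{{ datetime(2017, 1, 1).isoformat() }}'")
print(rendered)  # expected: SELECT '2017-01-01T00:00:00'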
train
get_datasource_info
Compatibility layer for handling of datasource info datasource_id & datasource_type used to be passed in the URL directly, now they should come as part of the form_data. This function allows supporting both without duplicating code
superset/views/utils.py
def get_datasource_info(datasource_id, datasource_type, form_data):
    """Compatibility layer for handling of datasource info

    datasource_id & datasource_type used to be passed in the URL
    directly, now they should come as part of the form_data.
    This function allows supporting both without duplicating code"""
    datasource = form_data.get('datasource', '')
    if '__' in datasource:
        datasource_id, datasource_type = datasource.split('__')
        # The case where the datasource has been deleted
        datasource_id = None if datasource_id == 'None' else datasource_id

    if not datasource_id:
        raise Exception(
            'The datasource associated with this chart no longer exists')
    datasource_id = int(datasource_id)
    return datasource_id, datasource_type
[ "Compatibility", "layer", "for", "handling", "of", "datasource", "info" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/utils.py#L170-L186
[ "def", "get_datasource_info", "(", "datasource_id", ",", "datasource_type", ",", "form_data", ")", ":", "datasource", "=", "form_data", ".", "get", "(", "'datasource'", ",", "''", ")", "if", "'__'", "in", "datasource", ":", "datasource_id", ",", "datasource_type", "=", "datasource", ".", "split", "(", "'__'", ")", "# The case where the datasource has been deleted", "datasource_id", "=", "None", "if", "datasource_id", "==", "'None'", "else", "datasource_id", "if", "not", "datasource_id", ":", "raise", "Exception", "(", "'The datasource associated with this chart no longer exists'", ")", "datasource_id", "=", "int", "(", "datasource_id", ")", "return", "datasource_id", ",", "datasource_type" ]
ca2996c78f679260eb79c6008e276733df5fb653
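Two hedged examples of the compatibility behavior described above:

# New style: id and type ride along in form_data as 'id__type'.
get_datasource_info(None, None, {'datasource': '3__table'})  # -> (3, 'table')
# Legacy style: values passed separately, with an empty form_data.
get_datasource_info('7', 'druid', {})                        # -> (7, 'druid')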
train
SupersetSecurityManager.can_access
Protecting from has_access failing from missing perms/view
superset/security.py
def can_access(self, permission_name, view_name):
    """Protecting from has_access failing from missing perms/view"""
    user = g.user
    if user.is_anonymous:
        return self.is_item_public(permission_name, view_name)
    return self._has_view_access(user, permission_name, view_name)
[ "Protecting", "from", "has_access", "failing", "from", "missing", "perms", "/", "view" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/security.py#L106-L111
[ "def", "can_access", "(", "self", ",", "permission_name", ",", "view_name", ")", ":", "user", "=", "g", ".", "user", "if", "user", ".", "is_anonymous", ":", "return", "self", ".", "is_item_public", "(", "permission_name", ",", "view_name", ")", "return", "self", ".", "_has_view_access", "(", "user", ",", "permission_name", ",", "view_name", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
SupersetSecurityManager.create_missing_perms
Creates missing perms for datasources, schemas and metrics
superset/security.py
def create_missing_perms(self):
    """Creates missing perms for datasources, schemas and metrics"""
    from superset import db
    from superset.models import core as models

    logging.info(
        'Fetching a set of all perms to lookup which ones are missing')
    all_pvs = set()
    for pv in self.get_session.query(self.permissionview_model).all():
        if pv.permission and pv.view_menu:
            all_pvs.add((pv.permission.name, pv.view_menu.name))

    def merge_pv(view_menu, perm):
        """Create permission view menu only if it doesn't exist"""
        if view_menu and perm and (view_menu, perm) not in all_pvs:
            self.merge_perm(view_menu, perm)

    logging.info('Creating missing datasource permissions.')
    datasources = ConnectorRegistry.get_all_datasources(db.session)
    for datasource in datasources:
        merge_pv('datasource_access', datasource.get_perm())
        merge_pv('schema_access', datasource.schema_perm)

    logging.info('Creating missing database permissions.')
    databases = db.session.query(models.Database).all()
    for database in databases:
        merge_pv('database_access', database.perm)

    logging.info('Creating missing metrics permissions')
    metrics = []
    for datasource_class in ConnectorRegistry.sources.values():
        metrics += list(db.session.query(datasource_class.metric_class).all())

    for metric in metrics:
        if metric.is_restricted:
            merge_pv('metric_access', metric.perm)
[ "Creates", "missing", "perms", "for", "datasources", "schemas", "and", "metrics" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/security.py#L287-L322
[ "def", "create_missing_perms", "(", "self", ")", ":", "from", "superset", "import", "db", "from", "superset", ".", "models", "import", "core", "as", "models", "logging", ".", "info", "(", "'Fetching a set of all perms to lookup which ones are missing'", ")", "all_pvs", "=", "set", "(", ")", "for", "pv", "in", "self", ".", "get_session", ".", "query", "(", "self", ".", "permissionview_model", ")", ".", "all", "(", ")", ":", "if", "pv", ".", "permission", "and", "pv", ".", "view_menu", ":", "all_pvs", ".", "add", "(", "(", "pv", ".", "permission", ".", "name", ",", "pv", ".", "view_menu", ".", "name", ")", ")", "def", "merge_pv", "(", "view_menu", ",", "perm", ")", ":", "\"\"\"Create permission view menu only if it doesn't exist\"\"\"", "if", "view_menu", "and", "perm", "and", "(", "view_menu", ",", "perm", ")", "not", "in", "all_pvs", ":", "self", ".", "merge_perm", "(", "view_menu", ",", "perm", ")", "logging", ".", "info", "(", "'Creating missing datasource permissions.'", ")", "datasources", "=", "ConnectorRegistry", ".", "get_all_datasources", "(", "db", ".", "session", ")", "for", "datasource", "in", "datasources", ":", "merge_pv", "(", "'datasource_access'", ",", "datasource", ".", "get_perm", "(", ")", ")", "merge_pv", "(", "'schema_access'", ",", "datasource", ".", "schema_perm", ")", "logging", ".", "info", "(", "'Creating missing database permissions.'", ")", "databases", "=", "db", ".", "session", ".", "query", "(", "models", ".", "Database", ")", ".", "all", "(", ")", "for", "database", "in", "databases", ":", "merge_pv", "(", "'database_access'", ",", "database", ".", "perm", ")", "logging", ".", "info", "(", "'Creating missing metrics permissions'", ")", "metrics", "=", "[", "]", "for", "datasource_class", "in", "ConnectorRegistry", ".", "sources", ".", "values", "(", ")", ":", "metrics", "+=", "list", "(", "db", ".", "session", ".", "query", "(", "datasource_class", ".", "metric_class", ")", ".", "all", "(", ")", ")", "for", "metric", "in", "metrics", ":", "if", "metric", ".", "is_restricted", ":", "merge_pv", "(", "'metric_access'", ",", "metric", ".", "perm", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
SupersetSecurityManager.clean_perms
FAB leaves faulty permissions that need to be cleaned up
superset/security.py
def clean_perms(self):
    """FAB leaves faulty permissions that need to be cleaned up"""
    logging.info('Cleaning faulty perms')
    sesh = self.get_session
    pvms = (
        sesh.query(ab_models.PermissionView)
        .filter(or_(
            ab_models.PermissionView.permission == None,  # NOQA
            ab_models.PermissionView.view_menu == None,  # NOQA
        ))
    )
    deleted_count = pvms.delete()
    sesh.commit()
    if deleted_count:
        logging.info('Deleted {} faulty permissions'.format(deleted_count))
[ "FAB", "leaves", "faulty", "permissions", "that", "need", "to", "be", "cleaned", "up" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/security.py#L324-L338
[ "def", "clean_perms", "(", "self", ")", ":", "logging", ".", "info", "(", "'Cleaning faulty perms'", ")", "sesh", "=", "self", ".", "get_session", "pvms", "=", "(", "sesh", ".", "query", "(", "ab_models", ".", "PermissionView", ")", ".", "filter", "(", "or_", "(", "ab_models", ".", "PermissionView", ".", "permission", "==", "None", ",", "# NOQA", "ab_models", ".", "PermissionView", ".", "view_menu", "==", "None", ",", "# NOQA", ")", ")", ")", "deleted_count", "=", "pvms", ".", "delete", "(", ")", "sesh", ".", "commit", "(", ")", "if", "deleted_count", ":", "logging", ".", "info", "(", "'Deleted {} faulty permissions'", ".", "format", "(", "deleted_count", ")", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
SupersetSecurityManager.sync_role_definitions
Inits the Superset application with security roles and such
superset/security.py
def sync_role_definitions(self):
    """Inits the Superset application with security roles and such"""
    from superset import conf

    logging.info('Syncing role definition')

    self.create_custom_permissions()

    # Creating default roles
    self.set_role('Admin', self.is_admin_pvm)
    self.set_role('Alpha', self.is_alpha_pvm)
    self.set_role('Gamma', self.is_gamma_pvm)
    self.set_role('granter', self.is_granter_pvm)
    self.set_role('sql_lab', self.is_sql_lab_pvm)

    if conf.get('PUBLIC_ROLE_LIKE_GAMMA', False):
        self.set_role('Public', self.is_gamma_pvm)

    self.create_missing_perms()

    # commit role and view menu updates
    self.get_session.commit()
    self.clean_perms()
[ "Inits", "the", "Superset", "application", "with", "security", "roles", "and", "such" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/security.py#L340-L361
[ "def", "sync_role_definitions", "(", "self", ")", ":", "from", "superset", "import", "conf", "logging", ".", "info", "(", "'Syncing role definition'", ")", "self", ".", "create_custom_permissions", "(", ")", "# Creating default roles", "self", ".", "set_role", "(", "'Admin'", ",", "self", ".", "is_admin_pvm", ")", "self", ".", "set_role", "(", "'Alpha'", ",", "self", ".", "is_alpha_pvm", ")", "self", ".", "set_role", "(", "'Gamma'", ",", "self", ".", "is_gamma_pvm", ")", "self", ".", "set_role", "(", "'granter'", ",", "self", ".", "is_granter_pvm", ")", "self", ".", "set_role", "(", "'sql_lab'", ",", "self", ".", "is_sql_lab_pvm", ")", "if", "conf", ".", "get", "(", "'PUBLIC_ROLE_LIKE_GAMMA'", ",", "False", ")", ":", "self", ".", "set_role", "(", "'Public'", ",", "self", ".", "is_gamma_pvm", ")", "self", ".", "create_missing_perms", "(", ")", "# commit role and view menu updates", "self", ".", "get_session", ".", "commit", "(", ")", "self", ".", "clean_perms", "(", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
export_schema_to_dict
Exports the supported import/export schema to a dictionary
superset/utils/dict_import_export.py
def export_schema_to_dict(back_references):
    """Exports the supported import/export schema to a dictionary"""
    databases = [Database.export_schema(recursive=True,
                 include_parent_ref=back_references)]
    clusters = [DruidCluster.export_schema(recursive=True,
                include_parent_ref=back_references)]
    data = dict()
    if databases:
        data[DATABASES_KEY] = databases
    if clusters:
        data[DRUID_CLUSTERS_KEY] = clusters
    return data
[ "Exports", "the", "supported", "import", "/", "export", "schema", "to", "a", "dictionary" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/dict_import_export.py#L28-L39
[ "def", "export_schema_to_dict", "(", "back_references", ")", ":", "databases", "=", "[", "Database", ".", "export_schema", "(", "recursive", "=", "True", ",", "include_parent_ref", "=", "back_references", ")", "]", "clusters", "=", "[", "DruidCluster", ".", "export_schema", "(", "recursive", "=", "True", ",", "include_parent_ref", "=", "back_references", ")", "]", "data", "=", "dict", "(", ")", "if", "databases", ":", "data", "[", "DATABASES_KEY", "]", "=", "databases", "if", "clusters", ":", "data", "[", "DRUID_CLUSTERS_KEY", "]", "=", "clusters", "return", "data" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
export_to_dict
Exports databases and druid clusters to a dictionary
superset/utils/dict_import_export.py
def export_to_dict(session, recursive, back_references, include_defaults):
    """Exports databases and druid clusters to a dictionary"""
    logging.info('Starting export')
    dbs = session.query(Database)
    databases = [database.export_to_dict(recursive=recursive,
                 include_parent_ref=back_references,
                 include_defaults=include_defaults) for database in dbs]
    logging.info('Exported %d %s', len(databases), DATABASES_KEY)
    cls = session.query(DruidCluster)
    clusters = [cluster.export_to_dict(recursive=recursive,
                include_parent_ref=back_references,
                include_defaults=include_defaults) for cluster in cls]
    logging.info('Exported %d %s', len(clusters), DRUID_CLUSTERS_KEY)
    data = dict()
    if databases:
        data[DATABASES_KEY] = databases
    if clusters:
        data[DRUID_CLUSTERS_KEY] = clusters
    return data
[ "Exports", "databases", "and", "druid", "clusters", "to", "a", "dictionary" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/dict_import_export.py#L42-L63
[ "def", "export_to_dict", "(", "session", ",", "recursive", ",", "back_references", ",", "include_defaults", ")", ":", "logging", ".", "info", "(", "'Starting export'", ")", "dbs", "=", "session", ".", "query", "(", "Database", ")", "databases", "=", "[", "database", ".", "export_to_dict", "(", "recursive", "=", "recursive", ",", "include_parent_ref", "=", "back_references", ",", "include_defaults", "=", "include_defaults", ")", "for", "database", "in", "dbs", "]", "logging", ".", "info", "(", "'Exported %d %s'", ",", "len", "(", "databases", ")", ",", "DATABASES_KEY", ")", "cls", "=", "session", ".", "query", "(", "DruidCluster", ")", "clusters", "=", "[", "cluster", ".", "export_to_dict", "(", "recursive", "=", "recursive", ",", "include_parent_ref", "=", "back_references", ",", "include_defaults", "=", "include_defaults", ")", "for", "cluster", "in", "cls", "]", "logging", ".", "info", "(", "'Exported %d %s'", ",", "len", "(", "clusters", ")", ",", "DRUID_CLUSTERS_KEY", ")", "data", "=", "dict", "(", ")", "if", "databases", ":", "data", "[", "DATABASES_KEY", "]", "=", "databases", "if", "clusters", ":", "data", "[", "DRUID_CLUSTERS_KEY", "]", "=", "clusters", "return", "data" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
import_from_dict
Imports databases and druid clusters from dictionary
superset/utils/dict_import_export.py
def import_from_dict(session, data, sync=[]): """Imports databases and druid clusters from dictionary""" if isinstance(data, dict): logging.info('Importing %d %s', len(data.get(DATABASES_KEY, [])), DATABASES_KEY) for database in data.get(DATABASES_KEY, []): Database.import_from_dict(session, database, sync=sync) logging.info('Importing %d %s', len(data.get(DRUID_CLUSTERS_KEY, [])), DRUID_CLUSTERS_KEY) for datasource in data.get(DRUID_CLUSTERS_KEY, []): DruidCluster.import_from_dict(session, datasource, sync=sync) session.commit() else: logging.info('Supplied object is not a dictionary.')
def import_from_dict(session, data, sync=[]): """Imports databases and druid clusters from dictionary""" if isinstance(data, dict): logging.info('Importing %d %s', len(data.get(DATABASES_KEY, [])), DATABASES_KEY) for database in data.get(DATABASES_KEY, []): Database.import_from_dict(session, database, sync=sync) logging.info('Importing %d %s', len(data.get(DRUID_CLUSTERS_KEY, [])), DRUID_CLUSTERS_KEY) for datasource in data.get(DRUID_CLUSTERS_KEY, []): DruidCluster.import_from_dict(session, datasource, sync=sync) session.commit() else: logging.info('Supplied object is not a dictionary.')
[ "Imports", "databases", "and", "druid", "clusters", "from", "dictionary" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/dict_import_export.py#L66-L82
[ "def", "import_from_dict", "(", "session", ",", "data", ",", "sync", "=", "[", "]", ")", ":", "if", "isinstance", "(", "data", ",", "dict", ")", ":", "logging", ".", "info", "(", "'Importing %d %s'", ",", "len", "(", "data", ".", "get", "(", "DATABASES_KEY", ",", "[", "]", ")", ")", ",", "DATABASES_KEY", ")", "for", "database", "in", "data", ".", "get", "(", "DATABASES_KEY", ",", "[", "]", ")", ":", "Database", ".", "import_from_dict", "(", "session", ",", "database", ",", "sync", "=", "sync", ")", "logging", ".", "info", "(", "'Importing %d %s'", ",", "len", "(", "data", ".", "get", "(", "DRUID_CLUSTERS_KEY", ",", "[", "]", ")", ")", ",", "DRUID_CLUSTERS_KEY", ")", "for", "datasource", "in", "data", ".", "get", "(", "DRUID_CLUSTERS_KEY", ",", "[", "]", ")", ":", "DruidCluster", ".", "import_from_dict", "(", "session", ",", "datasource", ",", "sync", "=", "sync", ")", "session", ".", "commit", "(", ")", "else", ":", "logging", ".", "info", "(", "'Supplied object is not a dictionary.'", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
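The import direction expects the same two top-level lists. A sketch of a payload it would accept follows; note that `'databases'` and `'druid_clusters'` stand in for the module's `DATABASES_KEY` / `DRUID_CLUSTERS_KEY` constants (values not shown in this record), and the inner field names are illustrative assumptions rather than a documented schema.

```python
# Illustrative only: key strings and inner field names are assumptions.
payload = {
    'databases': [
        {'database_name': 'examples',
         'sqlalchemy_uri': 'sqlite:////tmp/examples.db'},
    ],
    'druid_clusters': [],
}
# import_from_dict(session, payload, sync=[]) would route each list entry to
# Database.import_from_dict / DruidCluster.import_from_dict and commit once.
```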
train
Api.query
Takes a query_obj constructed in the client and returns a payload data response for the given query_obj. params: query_context: json_blob
superset/views/api.py
def query(self): """ Takes a query_obj constructed in the client and returns payload data response for the given query_obj. params: query_context: json_blob """ query_context = QueryContext(**json.loads(request.form.get('query_context'))) security_manager.assert_datasource_permission(query_context.datasource) payload_json = query_context.get_payload() return json.dumps( payload_json, default=utils.json_int_dttm_ser, ignore_nan=True, )
def query(self): """ Takes a query_obj constructed in the client and returns payload data response for the given query_obj. params: query_context: json_blob """ query_context = QueryContext(**json.loads(request.form.get('query_context'))) security_manager.assert_datasource_permission(query_context.datasource) payload_json = query_context.get_payload() return json.dumps( payload_json, default=utils.json_int_dttm_ser, ignore_nan=True, )
[ "Takes", "a", "query_obj", "constructed", "in", "the", "client", "and", "returns", "payload", "data", "response", "for", "the", "given", "query_obj", ".", "params", ":", "query_context", ":", "json_blob" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/api.py#L38-L51
[ "def", "query", "(", "self", ")", ":", "query_context", "=", "QueryContext", "(", "*", "*", "json", ".", "loads", "(", "request", ".", "form", ".", "get", "(", "'query_context'", ")", ")", ")", "security_manager", ".", "assert_datasource_permission", "(", "query_context", ".", "datasource", ")", "payload_json", "=", "query_context", ".", "get_payload", "(", ")", "return", "json", ".", "dumps", "(", "payload_json", ",", "default", "=", "utils", ".", "json_int_dttm_ser", ",", "ignore_nan", "=", "True", ",", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
Api.query_form_data
Get the formdata stored in the database for an existing slice. params: slice_id: integer
superset/views/api.py
def query_form_data(self): """ Get the formdata stored in the database for existing slice. params: slice_id: integer """ form_data = {} slice_id = request.args.get('slice_id') if slice_id: slc = db.session.query(models.Slice).filter_by(id=slice_id).one_or_none() if slc: form_data = slc.form_data.copy() update_time_range(form_data) return json.dumps(form_data)
def query_form_data(self): """ Get the formdata stored in the database for existing slice. params: slice_id: integer """ form_data = {} slice_id = request.args.get('slice_id') if slice_id: slc = db.session.query(models.Slice).filter_by(id=slice_id).one_or_none() if slc: form_data = slc.form_data.copy() update_time_range(form_data) return json.dumps(form_data)
[ "Get", "the", "formdata", "stored", "in", "the", "database", "for", "existing", "slice", ".", "params", ":", "slice_id", ":", "integer" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/api.py#L58-L72
[ "def", "query_form_data", "(", "self", ")", ":", "form_data", "=", "{", "}", "slice_id", "=", "request", ".", "args", ".", "get", "(", "'slice_id'", ")", "if", "slice_id", ":", "slc", "=", "db", ".", "session", ".", "query", "(", "models", ".", "Slice", ")", ".", "filter_by", "(", "id", "=", "slice_id", ")", ".", "one_or_none", "(", ")", "if", "slc", ":", "form_data", "=", "slc", ".", "form_data", ".", "copy", "(", ")", "update_time_range", "(", "form_data", ")", "return", "json", ".", "dumps", "(", "form_data", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
load_css_templates
Loads 2 css templates to demonstrate the feature
superset/data/css_templates.py
def load_css_templates(): """Loads 2 css templates to demonstrate the feature""" print('Creating default CSS templates') obj = db.session.query(CssTemplate).filter_by(template_name='Flat').first() if not obj: obj = CssTemplate(template_name='Flat') css = textwrap.dedent("""\ .gridster div.widget { transition: background-color 0.5s ease; background-color: #FAFAFA; border: 1px solid #CCC; box-shadow: none; border-radius: 0px; } .gridster div.widget:hover { border: 1px solid #000; background-color: #EAEAEA; } .navbar { transition: opacity 0.5s ease; opacity: 0.05; } .navbar:hover { opacity: 1; } .chart-header .header{ font-weight: normal; font-size: 12px; } /* var bnbColors = [ //rausch hackb kazan babu lima beach tirol '#ff5a5f', '#7b0051', '#007A87', '#00d1c1', '#8ce071', '#ffb400', '#b4a76c', '#ff8083', '#cc0086', '#00a1b3', '#00ffeb', '#bbedab', '#ffd266', '#cbc29a', '#ff3339', '#ff1ab1', '#005c66', '#00b3a5', '#55d12e', '#b37e00', '#988b4e', ]; */ """) obj.css = css db.session.merge(obj) db.session.commit() obj = ( db.session.query(CssTemplate).filter_by(template_name='Courier Black').first()) if not obj: obj = CssTemplate(template_name='Courier Black') css = textwrap.dedent("""\ .gridster div.widget { transition: background-color 0.5s ease; background-color: #EEE; border: 2px solid #444; border-radius: 15px; box-shadow: none; } h2 { color: white; font-size: 52px; } .navbar { box-shadow: none; } .gridster div.widget:hover { border: 2px solid #000; background-color: #EAEAEA; } .navbar { transition: opacity 0.5s ease; opacity: 0.05; } .navbar:hover { opacity: 1; } .chart-header .header{ font-weight: normal; font-size: 12px; } .nvd3 text { font-size: 12px; font-family: inherit; } body{ background: #000; font-family: Courier, Monaco, monospace;; } /* var bnbColors = [ //rausch hackb kazan babu lima beach tirol '#ff5a5f', '#7b0051', '#007A87', '#00d1c1', '#8ce071', '#ffb400', '#b4a76c', '#ff8083', '#cc0086', '#00a1b3', '#00ffeb', '#bbedab', '#ffd266', '#cbc29a', '#ff3339', '#ff1ab1', '#005c66', '#00b3a5', '#55d12e', '#b37e00', '#988b4e', ]; */ """) obj.css = css db.session.merge(obj) db.session.commit()
def load_css_templates(): """Loads 2 css templates to demonstrate the feature""" print('Creating default CSS templates') obj = db.session.query(CssTemplate).filter_by(template_name='Flat').first() if not obj: obj = CssTemplate(template_name='Flat') css = textwrap.dedent("""\ .gridster div.widget { transition: background-color 0.5s ease; background-color: #FAFAFA; border: 1px solid #CCC; box-shadow: none; border-radius: 0px; } .gridster div.widget:hover { border: 1px solid #000; background-color: #EAEAEA; } .navbar { transition: opacity 0.5s ease; opacity: 0.05; } .navbar:hover { opacity: 1; } .chart-header .header{ font-weight: normal; font-size: 12px; } /* var bnbColors = [ //rausch hackb kazan babu lima beach tirol '#ff5a5f', '#7b0051', '#007A87', '#00d1c1', '#8ce071', '#ffb400', '#b4a76c', '#ff8083', '#cc0086', '#00a1b3', '#00ffeb', '#bbedab', '#ffd266', '#cbc29a', '#ff3339', '#ff1ab1', '#005c66', '#00b3a5', '#55d12e', '#b37e00', '#988b4e', ]; */ """) obj.css = css db.session.merge(obj) db.session.commit() obj = ( db.session.query(CssTemplate).filter_by(template_name='Courier Black').first()) if not obj: obj = CssTemplate(template_name='Courier Black') css = textwrap.dedent("""\ .gridster div.widget { transition: background-color 0.5s ease; background-color: #EEE; border: 2px solid #444; border-radius: 15px; box-shadow: none; } h2 { color: white; font-size: 52px; } .navbar { box-shadow: none; } .gridster div.widget:hover { border: 2px solid #000; background-color: #EAEAEA; } .navbar { transition: opacity 0.5s ease; opacity: 0.05; } .navbar:hover { opacity: 1; } .chart-header .header{ font-weight: normal; font-size: 12px; } .nvd3 text { font-size: 12px; font-family: inherit; } body{ background: #000; font-family: Courier, Monaco, monospace;; } /* var bnbColors = [ //rausch hackb kazan babu lima beach tirol '#ff5a5f', '#7b0051', '#007A87', '#00d1c1', '#8ce071', '#ffb400', '#b4a76c', '#ff8083', '#cc0086', '#00a1b3', '#00ffeb', '#bbedab', '#ffd266', '#cbc29a', '#ff3339', '#ff1ab1', '#005c66', '#00b3a5', '#55d12e', '#b37e00', '#988b4e', ]; */ """) obj.css = css db.session.merge(obj) db.session.commit()
[ "Loads", "2", "css", "templates", "to", "demonstrate", "the", "feature" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/data/css_templates.py#L23-L119
[ "def", "load_css_templates", "(", ")", ":", "print", "(", "'Creating default CSS templates'", ")", "obj", "=", "db", ".", "session", ".", "query", "(", "CssTemplate", ")", ".", "filter_by", "(", "template_name", "=", "'Flat'", ")", ".", "first", "(", ")", "if", "not", "obj", ":", "obj", "=", "CssTemplate", "(", "template_name", "=", "'Flat'", ")", "css", "=", "textwrap", ".", "dedent", "(", "\"\"\"\\\n .gridster div.widget {\n transition: background-color 0.5s ease;\n background-color: #FAFAFA;\n border: 1px solid #CCC;\n box-shadow: none;\n border-radius: 0px;\n }\n .gridster div.widget:hover {\n border: 1px solid #000;\n background-color: #EAEAEA;\n }\n .navbar {\n transition: opacity 0.5s ease;\n opacity: 0.05;\n }\n .navbar:hover {\n opacity: 1;\n }\n .chart-header .header{\n font-weight: normal;\n font-size: 12px;\n }\n /*\n var bnbColors = [\n //rausch hackb kazan babu lima beach tirol\n '#ff5a5f', '#7b0051', '#007A87', '#00d1c1', '#8ce071', '#ffb400', '#b4a76c',\n '#ff8083', '#cc0086', '#00a1b3', '#00ffeb', '#bbedab', '#ffd266', '#cbc29a',\n '#ff3339', '#ff1ab1', '#005c66', '#00b3a5', '#55d12e', '#b37e00', '#988b4e',\n ];\n */\n \"\"\"", ")", "obj", ".", "css", "=", "css", "db", ".", "session", ".", "merge", "(", "obj", ")", "db", ".", "session", ".", "commit", "(", ")", "obj", "=", "(", "db", ".", "session", ".", "query", "(", "CssTemplate", ")", ".", "filter_by", "(", "template_name", "=", "'Courier Black'", ")", ".", "first", "(", ")", ")", "if", "not", "obj", ":", "obj", "=", "CssTemplate", "(", "template_name", "=", "'Courier Black'", ")", "css", "=", "textwrap", ".", "dedent", "(", "\"\"\"\\\n .gridster div.widget {\n transition: background-color 0.5s ease;\n background-color: #EEE;\n border: 2px solid #444;\n border-radius: 15px;\n box-shadow: none;\n }\n h2 {\n color: white;\n font-size: 52px;\n }\n .navbar {\n box-shadow: none;\n }\n .gridster div.widget:hover {\n border: 2px solid #000;\n background-color: #EAEAEA;\n }\n .navbar {\n transition: opacity 0.5s ease;\n opacity: 0.05;\n }\n .navbar:hover {\n opacity: 1;\n }\n .chart-header .header{\n font-weight: normal;\n font-size: 12px;\n }\n .nvd3 text {\n font-size: 12px;\n font-family: inherit;\n }\n body{\n background: #000;\n font-family: Courier, Monaco, monospace;;\n }\n /*\n var bnbColors = [\n //rausch hackb kazan babu lima beach tirol\n '#ff5a5f', '#7b0051', '#007A87', '#00d1c1', '#8ce071', '#ffb400', '#b4a76c',\n '#ff8083', '#cc0086', '#00a1b3', '#00ffeb', '#bbedab', '#ffd266', '#cbc29a',\n '#ff3339', '#ff1ab1', '#005c66', '#00b3a5', '#55d12e', '#b37e00', '#988b4e',\n ];\n */\n \"\"\"", ")", "obj", ".", "css", "=", "css", "db", ".", "session", ".", "merge", "(", "obj", ")", "db", ".", "session", ".", "commit", "(", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
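The loader above leans on a simple get-or-create-then-merge pattern. Below is a self-contained rehearsal of that pattern against an in-memory SQLite database; the model is a stand-in mirroring the fields used above, not Superset's actual class.

```python
# Standalone sketch of the upsert pattern: query .first(), create if missing,
# merge, commit. Toy model, in-memory SQLite.
from sqlalchemy import Column, Integer, String, Text, create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker

Base = declarative_base()

class CssTemplate(Base):
    __tablename__ = 'css_templates'
    id = Column(Integer, primary_key=True)
    template_name = Column(String(250), unique=True)
    css = Column(Text)

engine = create_engine('sqlite://')
Base.metadata.create_all(engine)
session = sessionmaker(bind=engine)()

# Get-or-create, then always overwrite the css body and merge.
obj = session.query(CssTemplate).filter_by(template_name='Flat').first()
if not obj:
    obj = CssTemplate(template_name='Flat')
obj.css = '.navbar { opacity: 0.05; }'
session.merge(obj)
session.commit()
```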
train
ImportMixin._parent_foreign_key_mappings
Get a mapping from the local column names of the foreign keys to the names of the parent columns they reference
superset/models/helpers.py
def _parent_foreign_key_mappings(cls): """Get a mapping of foreign name to the local name of foreign keys""" parent_rel = cls.__mapper__.relationships.get(cls.export_parent) if parent_rel: return {l.name: r.name for (l, r) in parent_rel.local_remote_pairs} return {}
def _parent_foreign_key_mappings(cls): """Get a mapping of foreign name to the local name of foreign keys""" parent_rel = cls.__mapper__.relationships.get(cls.export_parent) if parent_rel: return {l.name: r.name for (l, r) in parent_rel.local_remote_pairs} return {}
[ "Get", "a", "mapping", "of", "foreign", "name", "to", "the", "local", "name", "of", "foreign", "keys" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/models/helpers.py#L60-L65
[ "def", "_parent_foreign_key_mappings", "(", "cls", ")", ":", "parent_rel", "=", "cls", ".", "__mapper__", ".", "relationships", ".", "get", "(", "cls", ".", "export_parent", ")", "if", "parent_rel", ":", "return", "{", "l", ".", "name", ":", "r", ".", "name", "for", "(", "l", ",", "r", ")", "in", "parent_rel", ".", "local_remote_pairs", "}", "return", "{", "}" ]
ca2996c78f679260eb79c6008e276733df5fb653
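A self-contained illustration of what `local_remote_pairs` yields for a child-to-parent relationship. The models here are toys, not Superset's; the introspection step is the same as in the helper above.

```python
# Toy models showing the (local, remote) column pairs of a relationship.
from sqlalchemy import Column, ForeignKey, Integer
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import relationship

Base = declarative_base()

class Parent(Base):
    __tablename__ = 'parents'
    id = Column(Integer, primary_key=True)

class Child(Base):
    __tablename__ = 'children'
    id = Column(Integer, primary_key=True)
    parent_id = Column(Integer, ForeignKey('parents.id'))
    parent = relationship(Parent)

rel = Child.__mapper__.relationships.get('parent')
# Maps the child's FK column name to the referenced parent column name.
print({l.name: r.name for (l, r) in rel.local_remote_pairs})  # {'parent_id': 'id'}
```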
train
ImportMixin._unique_constrains
Get all (single column and multi column) unique constraints
superset/models/helpers.py
def _unique_constrains(cls): """Get all (single column and multi column) unique constraints""" unique = [{c.name for c in u.columns} for u in cls.__table_args__ if isinstance(u, UniqueConstraint)] unique.extend({c.name} for c in cls.__table__.columns if c.unique) return unique
def _unique_constrains(cls): """Get all (single column and multi column) unique constraints""" unique = [{c.name for c in u.columns} for u in cls.__table_args__ if isinstance(u, UniqueConstraint)] unique.extend({c.name} for c in cls.__table__.columns if c.unique) return unique
[ "Get", "all", "(", "single", "column", "and", "multi", "column", ")", "unique", "constraints" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/models/helpers.py#L68-L73
[ "def", "_unique_constrains", "(", "cls", ")", ":", "unique", "=", "[", "{", "c", ".", "name", "for", "c", "in", "u", ".", "columns", "}", "for", "u", "in", "cls", ".", "__table_args__", "if", "isinstance", "(", "u", ",", "UniqueConstraint", ")", "]", "unique", ".", "extend", "(", "{", "c", ".", "name", "}", "for", "c", "in", "cls", ".", "__table__", ".", "columns", "if", "c", ".", "unique", ")", "return", "unique" ]
ca2996c78f679260eb79c6008e276733df5fb653
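A standalone sketch of the same introspection on a made-up model: table-level `UniqueConstraint`s come from `__table_args__`, and single `unique=True` columns are appended as one-element sets.

```python
# Toy model: one composite unique constraint plus one unique column.
from sqlalchemy import Column, Integer, String, UniqueConstraint
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Widget(Base):
    __tablename__ = 'widgets'
    __table_args__ = (UniqueConstraint('name', 'owner'),)
    id = Column(Integer, primary_key=True)
    name = Column(String(50))
    owner = Column(String(50))
    slug = Column(String(50), unique=True)

unique = [{c.name for c in u.columns}
          for u in Widget.__table_args__ if isinstance(u, UniqueConstraint)]
unique.extend({c.name} for c in Widget.__table__.columns if c.unique)
print(unique)  # [{'name', 'owner'}, {'slug'}] (order within each set may vary)
```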
train
ImportMixin.export_schema
Export schema as a dictionary
superset/models/helpers.py
def export_schema(cls, recursive=True, include_parent_ref=False): """Export schema as a dictionary""" parent_excludes = {} if not include_parent_ref: parent_ref = cls.__mapper__.relationships.get(cls.export_parent) if parent_ref: parent_excludes = {c.name for c in parent_ref.local_columns} def formatter(c): return ('{0} Default ({1})'.format( str(c.type), c.default.arg) if c.default else str(c.type)) schema = {c.name: formatter(c) for c in cls.__table__.columns if (c.name in cls.export_fields and c.name not in parent_excludes)} if recursive: for c in cls.export_children: child_class = cls.__mapper__.relationships[c].argument.class_ schema[c] = [child_class.export_schema(recursive=recursive, include_parent_ref=include_parent_ref)] return schema
def export_schema(cls, recursive=True, include_parent_ref=False): """Export schema as a dictionary""" parent_excludes = {} if not include_parent_ref: parent_ref = cls.__mapper__.relationships.get(cls.export_parent) if parent_ref: parent_excludes = {c.name for c in parent_ref.local_columns} def formatter(c): return ('{0} Default ({1})'.format( str(c.type), c.default.arg) if c.default else str(c.type)) schema = {c.name: formatter(c) for c in cls.__table__.columns if (c.name in cls.export_fields and c.name not in parent_excludes)} if recursive: for c in cls.export_children: child_class = cls.__mapper__.relationships[c].argument.class_ schema[c] = [child_class.export_schema(recursive=recursive, include_parent_ref=include_parent_ref)] return schema
[ "Export", "schema", "as", "a", "dictionary" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/models/helpers.py#L76-L96
[ "def", "export_schema", "(", "cls", ",", "recursive", "=", "True", ",", "include_parent_ref", "=", "False", ")", ":", "parent_excludes", "=", "{", "}", "if", "not", "include_parent_ref", ":", "parent_ref", "=", "cls", ".", "__mapper__", ".", "relationships", ".", "get", "(", "cls", ".", "export_parent", ")", "if", "parent_ref", ":", "parent_excludes", "=", "{", "c", ".", "name", "for", "c", "in", "parent_ref", ".", "local_columns", "}", "def", "formatter", "(", "c", ")", ":", "return", "(", "'{0} Default ({1})'", ".", "format", "(", "str", "(", "c", ".", "type", ")", ",", "c", ".", "default", ".", "arg", ")", "if", "c", ".", "default", "else", "str", "(", "c", ".", "type", ")", ")", "schema", "=", "{", "c", ".", "name", ":", "formatter", "(", "c", ")", "for", "c", "in", "cls", ".", "__table__", ".", "columns", "if", "(", "c", ".", "name", "in", "cls", ".", "export_fields", "and", "c", ".", "name", "not", "in", "parent_excludes", ")", "}", "if", "recursive", ":", "for", "c", "in", "cls", ".", "export_children", ":", "child_class", "=", "cls", ".", "__mapper__", ".", "relationships", "[", "c", "]", ".", "argument", ".", "class_", "schema", "[", "c", "]", "=", "[", "child_class", ".", "export_schema", "(", "recursive", "=", "recursive", ",", "include_parent_ref", "=", "include_parent_ref", ")", "]", "return", "schema" ]
ca2996c78f679260eb79c6008e276733df5fb653
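The interesting part of the schema export is the column formatter, which renders a column's type plus its scalar default if one is set. A self-contained rehearsal on a made-up model:

```python
# Toy model demonstrating the "TYPE Default (value)" formatting used above.
from sqlalchemy import Boolean, Column, Integer, String
from sqlalchemy.ext.declarative import declarative_base

Base = declarative_base()

class Gauge(Base):
    __tablename__ = 'gauges'
    id = Column(Integer, primary_key=True)
    name = Column(String(50))
    is_active = Column(Boolean, default=True)

def formatter(c):
    return ('{0} Default ({1})'.format(str(c.type), c.default.arg)
            if c.default else str(c.type))

print({c.name: formatter(c) for c in Gauge.__table__.columns})
# {'id': 'INTEGER', 'name': 'VARCHAR(50)', 'is_active': 'BOOLEAN Default (True)'}
```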
train
ImportMixin.import_from_dict
Import obj from a dictionary
superset/models/helpers.py
def import_from_dict(cls, session, dict_rep, parent=None, recursive=True, sync=[]): """Import obj from a dictionary""" parent_refs = cls._parent_foreign_key_mappings() export_fields = set(cls.export_fields) | set(parent_refs.keys()) new_children = {c: dict_rep.get(c) for c in cls.export_children if c in dict_rep} unique_constrains = cls._unique_constrains() filters = [] # Using these filters to check if obj already exists # Remove fields that should not get imported for k in list(dict_rep): if k not in export_fields: del dict_rep[k] if not parent: if cls.export_parent: for p in parent_refs.keys(): if p not in dict_rep: raise RuntimeError( '{0}: Missing field {1}'.format(cls.__name__, p)) else: # Set foreign keys to parent obj for k, v in parent_refs.items(): dict_rep[k] = getattr(parent, v) # Add filter for parent obj filters.extend([getattr(cls, k) == dict_rep.get(k) for k in parent_refs.keys()]) # Add filter for unique constraints ucs = [and_(*[getattr(cls, k) == dict_rep.get(k) for k in cs if dict_rep.get(k) is not None]) for cs in unique_constrains] filters.append(or_(*ucs)) # Check if object already exists in DB, break if more than one is found try: obj_query = session.query(cls).filter(and_(*filters)) obj = obj_query.one_or_none() except MultipleResultsFound as e: logging.error('Error importing %s \n %s \n %s', cls.__name__, str(obj_query), yaml.safe_dump(dict_rep)) raise e if not obj: is_new_obj = True # Create new DB object obj = cls(**dict_rep) logging.info('Importing new %s %s', obj.__tablename__, str(obj)) if cls.export_parent and parent: setattr(obj, cls.export_parent, parent) session.add(obj) else: is_new_obj = False logging.info('Updating %s %s', obj.__tablename__, str(obj)) # Update columns for k, v in dict_rep.items(): setattr(obj, k, v) # Recursively create children if recursive: for c in cls.export_children: child_class = cls.__mapper__.relationships[c].argument.class_ added = [] for c_obj in new_children.get(c, []): added.append(child_class.import_from_dict(session=session, dict_rep=c_obj, parent=obj, sync=sync)) # If children should get synced, delete the ones that did not # get updated. if c in sync and not is_new_obj: back_refs = child_class._parent_foreign_key_mappings() delete_filters = [getattr(child_class, k) == getattr(obj, back_refs.get(k)) for k in back_refs.keys()] to_delete = set(session.query(child_class).filter( and_(*delete_filters))).difference(set(added)) for o in to_delete: logging.info('Deleting %s %s', c, str(obj)) session.delete(o) return obj
def import_from_dict(cls, session, dict_rep, parent=None, recursive=True, sync=[]): """Import obj from a dictionary""" parent_refs = cls._parent_foreign_key_mappings() export_fields = set(cls.export_fields) | set(parent_refs.keys()) new_children = {c: dict_rep.get(c) for c in cls.export_children if c in dict_rep} unique_constrains = cls._unique_constrains() filters = [] # Using these filters to check if obj already exists # Remove fields that should not get imported for k in list(dict_rep): if k not in export_fields: del dict_rep[k] if not parent: if cls.export_parent: for p in parent_refs.keys(): if p not in dict_rep: raise RuntimeError( '{0}: Missing field {1}'.format(cls.__name__, p)) else: # Set foreign keys to parent obj for k, v in parent_refs.items(): dict_rep[k] = getattr(parent, v) # Add filter for parent obj filters.extend([getattr(cls, k) == dict_rep.get(k) for k in parent_refs.keys()]) # Add filter for unique constraints ucs = [and_(*[getattr(cls, k) == dict_rep.get(k) for k in cs if dict_rep.get(k) is not None]) for cs in unique_constrains] filters.append(or_(*ucs)) # Check if object already exists in DB, break if more than one is found try: obj_query = session.query(cls).filter(and_(*filters)) obj = obj_query.one_or_none() except MultipleResultsFound as e: logging.error('Error importing %s \n %s \n %s', cls.__name__, str(obj_query), yaml.safe_dump(dict_rep)) raise e if not obj: is_new_obj = True # Create new DB object obj = cls(**dict_rep) logging.info('Importing new %s %s', obj.__tablename__, str(obj)) if cls.export_parent and parent: setattr(obj, cls.export_parent, parent) session.add(obj) else: is_new_obj = False logging.info('Updating %s %s', obj.__tablename__, str(obj)) # Update columns for k, v in dict_rep.items(): setattr(obj, k, v) # Recursively create children if recursive: for c in cls.export_children: child_class = cls.__mapper__.relationships[c].argument.class_ added = [] for c_obj in new_children.get(c, []): added.append(child_class.import_from_dict(session=session, dict_rep=c_obj, parent=obj, sync=sync)) # If children should get synced, delete the ones that did not # get updated. if c in sync and not is_new_obj: back_refs = child_class._parent_foreign_key_mappings() delete_filters = [getattr(child_class, k) == getattr(obj, back_refs.get(k)) for k in back_refs.keys()] to_delete = set(session.query(child_class).filter( and_(*delete_filters))).difference(set(added)) for o in to_delete: logging.info('Deleting %s %s', c, str(obj)) session.delete(o) return obj
[ "Import", "obj", "from", "a", "dictionary" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/models/helpers.py#L99-L184
[ "def", "import_from_dict", "(", "cls", ",", "session", ",", "dict_rep", ",", "parent", "=", "None", ",", "recursive", "=", "True", ",", "sync", "=", "[", "]", ")", ":", "parent_refs", "=", "cls", ".", "_parent_foreign_key_mappings", "(", ")", "export_fields", "=", "set", "(", "cls", ".", "export_fields", ")", "|", "set", "(", "parent_refs", ".", "keys", "(", ")", ")", "new_children", "=", "{", "c", ":", "dict_rep", ".", "get", "(", "c", ")", "for", "c", "in", "cls", ".", "export_children", "if", "c", "in", "dict_rep", "}", "unique_constrains", "=", "cls", ".", "_unique_constrains", "(", ")", "filters", "=", "[", "]", "# Using these filters to check if obj already exists", "# Remove fields that should not get imported", "for", "k", "in", "list", "(", "dict_rep", ")", ":", "if", "k", "not", "in", "export_fields", ":", "del", "dict_rep", "[", "k", "]", "if", "not", "parent", ":", "if", "cls", ".", "export_parent", ":", "for", "p", "in", "parent_refs", ".", "keys", "(", ")", ":", "if", "p", "not", "in", "dict_rep", ":", "raise", "RuntimeError", "(", "'{0}: Missing field {1}'", ".", "format", "(", "cls", ".", "__name__", ",", "p", ")", ")", "else", ":", "# Set foreign keys to parent obj", "for", "k", ",", "v", "in", "parent_refs", ".", "items", "(", ")", ":", "dict_rep", "[", "k", "]", "=", "getattr", "(", "parent", ",", "v", ")", "# Add filter for parent obj", "filters", ".", "extend", "(", "[", "getattr", "(", "cls", ",", "k", ")", "==", "dict_rep", ".", "get", "(", "k", ")", "for", "k", "in", "parent_refs", ".", "keys", "(", ")", "]", ")", "# Add filter for unique constraints", "ucs", "=", "[", "and_", "(", "*", "[", "getattr", "(", "cls", ",", "k", ")", "==", "dict_rep", ".", "get", "(", "k", ")", "for", "k", "in", "cs", "if", "dict_rep", ".", "get", "(", "k", ")", "is", "not", "None", "]", ")", "for", "cs", "in", "unique_constrains", "]", "filters", ".", "append", "(", "or_", "(", "*", "ucs", ")", ")", "# Check if object already exists in DB, break if more than one is found", "try", ":", "obj_query", "=", "session", ".", "query", "(", "cls", ")", ".", "filter", "(", "and_", "(", "*", "filters", ")", ")", "obj", "=", "obj_query", ".", "one_or_none", "(", ")", "except", "MultipleResultsFound", "as", "e", ":", "logging", ".", "error", "(", "'Error importing %s \\n %s \\n %s'", ",", "cls", ".", "__name__", ",", "str", "(", "obj_query", ")", ",", "yaml", ".", "safe_dump", "(", "dict_rep", ")", ")", "raise", "e", "if", "not", "obj", ":", "is_new_obj", "=", "True", "# Create new DB object", "obj", "=", "cls", "(", "*", "*", "dict_rep", ")", "logging", ".", "info", "(", "'Importing new %s %s'", ",", "obj", ".", "__tablename__", ",", "str", "(", "obj", ")", ")", "if", "cls", ".", "export_parent", "and", "parent", ":", "setattr", "(", "obj", ",", "cls", ".", "export_parent", ",", "parent", ")", "session", ".", "add", "(", "obj", ")", "else", ":", "is_new_obj", "=", "False", "logging", ".", "info", "(", "'Updating %s %s'", ",", "obj", ".", "__tablename__", ",", "str", "(", "obj", ")", ")", "# Update columns", "for", "k", ",", "v", "in", "dict_rep", ".", "items", "(", ")", ":", "setattr", "(", "obj", ",", "k", ",", "v", ")", "# Recursively create children", "if", "recursive", ":", "for", "c", "in", "cls", ".", "export_children", ":", "child_class", "=", "cls", ".", "__mapper__", ".", "relationships", "[", "c", "]", ".", "argument", ".", "class_", "added", "=", "[", "]", "for", "c_obj", "in", "new_children", ".", "get", "(", "c", ",", "[", "]", ")", ":", "added", ".", "append", "(", "child_class", 
".", "import_from_dict", "(", "session", "=", "session", ",", "dict_rep", "=", "c_obj", ",", "parent", "=", "obj", ",", "sync", "=", "sync", ")", ")", "# If children should get synced, delete the ones that did not", "# get updated.", "if", "c", "in", "sync", "and", "not", "is_new_obj", ":", "back_refs", "=", "child_class", ".", "_parent_foreign_key_mappings", "(", ")", "delete_filters", "=", "[", "getattr", "(", "child_class", ",", "k", ")", "==", "getattr", "(", "obj", ",", "back_refs", ".", "get", "(", "k", ")", ")", "for", "k", "in", "back_refs", ".", "keys", "(", ")", "]", "to_delete", "=", "set", "(", "session", ".", "query", "(", "child_class", ")", ".", "filter", "(", "and_", "(", "*", "delete_filters", ")", ")", ")", ".", "difference", "(", "set", "(", "added", ")", ")", "for", "o", "in", "to_delete", ":", "logging", ".", "info", "(", "'Deleting %s %s'", ",", "c", ",", "str", "(", "obj", ")", ")", "session", ".", "delete", "(", "o", ")", "return", "obj" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
ImportMixin.export_to_dict
Export obj to dictionary
superset/models/helpers.py
def export_to_dict(self, recursive=True, include_parent_ref=False, include_defaults=False): """Export obj to dictionary""" cls = self.__class__ parent_excludes = {} if recursive and not include_parent_ref: parent_ref = cls.__mapper__.relationships.get(cls.export_parent) if parent_ref: parent_excludes = {c.name for c in parent_ref.local_columns} dict_rep = {c.name: getattr(self, c.name) for c in cls.__table__.columns if (c.name in self.export_fields and c.name not in parent_excludes and (include_defaults or ( getattr(self, c.name) is not None and (not c.default or getattr(self, c.name) != c.default.arg)))) } if recursive: for c in self.export_children: # sorting to make lists of children stable dict_rep[c] = sorted( [ child.export_to_dict( recursive=recursive, include_parent_ref=include_parent_ref, include_defaults=include_defaults, ) for child in getattr(self, c) ], key=lambda k: sorted(k.items())) return dict_rep
def export_to_dict(self, recursive=True, include_parent_ref=False, include_defaults=False): """Export obj to dictionary""" cls = self.__class__ parent_excludes = {} if recursive and not include_parent_ref: parent_ref = cls.__mapper__.relationships.get(cls.export_parent) if parent_ref: parent_excludes = {c.name for c in parent_ref.local_columns} dict_rep = {c.name: getattr(self, c.name) for c in cls.__table__.columns if (c.name in self.export_fields and c.name not in parent_excludes and (include_defaults or ( getattr(self, c.name) is not None and (not c.default or getattr(self, c.name) != c.default.arg)))) } if recursive: for c in self.export_children: # sorting to make lists of children stable dict_rep[c] = sorted( [ child.export_to_dict( recursive=recursive, include_parent_ref=include_parent_ref, include_defaults=include_defaults, ) for child in getattr(self, c) ], key=lambda k: sorted(k.items())) return dict_rep
[ "Export", "obj", "to", "dictionary" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/models/helpers.py#L186-L217
[ "def", "export_to_dict", "(", "self", ",", "recursive", "=", "True", ",", "include_parent_ref", "=", "False", ",", "include_defaults", "=", "False", ")", ":", "cls", "=", "self", ".", "__class__", "parent_excludes", "=", "{", "}", "if", "recursive", "and", "not", "include_parent_ref", ":", "parent_ref", "=", "cls", ".", "__mapper__", ".", "relationships", ".", "get", "(", "cls", ".", "export_parent", ")", "if", "parent_ref", ":", "parent_excludes", "=", "{", "c", ".", "name", "for", "c", "in", "parent_ref", ".", "local_columns", "}", "dict_rep", "=", "{", "c", ".", "name", ":", "getattr", "(", "self", ",", "c", ".", "name", ")", "for", "c", "in", "cls", ".", "__table__", ".", "columns", "if", "(", "c", ".", "name", "in", "self", ".", "export_fields", "and", "c", ".", "name", "not", "in", "parent_excludes", "and", "(", "include_defaults", "or", "(", "getattr", "(", "self", ",", "c", ".", "name", ")", "is", "not", "None", "and", "(", "not", "c", ".", "default", "or", "getattr", "(", "self", ",", "c", ".", "name", ")", "!=", "c", ".", "default", ".", "arg", ")", ")", ")", ")", "}", "if", "recursive", ":", "for", "c", "in", "self", ".", "export_children", ":", "# sorting to make lists of children stable", "dict_rep", "[", "c", "]", "=", "sorted", "(", "[", "child", ".", "export_to_dict", "(", "recursive", "=", "recursive", ",", "include_parent_ref", "=", "include_parent_ref", ",", "include_defaults", "=", "include_defaults", ",", ")", "for", "child", "in", "getattr", "(", "self", ",", "c", ")", "]", ",", "key", "=", "lambda", "k", ":", "sorted", "(", "k", ".", "items", "(", ")", ")", ")", "return", "dict_rep" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
ImportMixin.override
Overrides the plain fields of the object with those of the given one.
superset/models/helpers.py
def override(self, obj): """Overrides the plain fields of the dashboard.""" for field in obj.__class__.export_fields: setattr(self, field, getattr(obj, field))
def override(self, obj): """Overrides the plain fields of the dashboard.""" for field in obj.__class__.export_fields: setattr(self, field, getattr(obj, field))
[ "Overrides", "the", "plain", "fields", "of", "the", "dashboard", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/models/helpers.py#L219-L222
[ "def", "override", "(", "self", ",", "obj", ")", ":", "for", "field", "in", "obj", ".", "__class__", ".", "export_fields", ":", "setattr", "(", "self", ",", "field", ",", "getattr", "(", "obj", ",", "field", ")", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
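Since `override` only copies the attributes listed in `export_fields`, the behavior is easy to show with a self-contained toy class (made up for illustration):

```python
# Minimal standalone illustration of the copy-fields pattern.
class Dummy:
    export_fields = ('name', 'position')

    def __init__(self, name=None, position=None):
        self.name = name
        self.position = position

    def override(self, obj):
        # Copy only the declared export fields from obj onto self.
        for field in obj.__class__.export_fields:
            setattr(self, field, getattr(obj, field))

a, b = Dummy('a', 1), Dummy('b', 2)
a.override(b)
print(a.name, a.position)  # b 2
```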
train
update_time_range
Move since and until to time_range.
superset/legacy.py
def update_time_range(form_data): """Move since and until to time_range.""" if 'since' in form_data or 'until' in form_data: form_data['time_range'] = '{} : {}'.format( form_data.pop('since', '') or '', form_data.pop('until', '') or '', )
def update_time_range(form_data): """Move since and until to time_range.""" if 'since' in form_data or 'until' in form_data: form_data['time_range'] = '{} : {}'.format( form_data.pop('since', '') or '', form_data.pop('until', '') or '', )
[ "Move", "since", "and", "until", "to", "time_range", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/legacy.py#L21-L27
[ "def", "update_time_range", "(", "form_data", ")", ":", "if", "'since'", "in", "form_data", "or", "'until'", "in", "form_data", ":", "form_data", "[", "'time_range'", "]", "=", "'{} : {}'", ".", "format", "(", "form_data", ".", "pop", "(", "'since'", ",", "''", ")", "or", "''", ",", "form_data", ".", "pop", "(", "'until'", ",", "''", ")", "or", "''", ",", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
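Because `update_time_range` is a pure function over a dict, its behavior is easy to demonstrate; the body below is copied verbatim from the record above, followed by a usage example.

```python
def update_time_range(form_data):
    """Move since and until to time_range."""
    if 'since' in form_data or 'until' in form_data:
        form_data['time_range'] = '{} : {}'.format(
            form_data.pop('since', '') or '',
            form_data.pop('until', '') or '',
        )

fd = {'since': '7 days ago', 'until': 'now', 'viz_type': 'table'}
update_time_range(fd)
print(fd)  # {'viz_type': 'table', 'time_range': '7 days ago : now'}
```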
train
memoized_func
Use this decorator to cache functions that have a predefined first arg. Caching is enabled by default, unless cache = False is passed to the decorated function. force controls whether to force-refresh the cache; it is treated as False by default, unless force = True is passed to the decorated function. The cache timeout is 600 seconds by default, unless cache_timeout = {timeout in seconds} is passed to the decorated function. memoized_func uses simple_cache and stores the data in memory. key is a callable that takes the function arguments and returns the caching key.
superset/utils/cache.py
def memoized_func(key=view_cache_key, attribute_in_key=None): """Use this decorator to cache functions that have predefined first arg. enable_cache is treated as True by default, except enable_cache = False is passed to the decorated function. force means whether to force refresh the cache and is treated as False by default, except force = True is passed to the decorated function. timeout of cache is set to 600 seconds by default, except cache_timeout = {timeout in seconds} is passed to the decorated function. memoized_func uses simple_cache and stored the data in memory. Key is a callable function that takes function arguments and returns the caching key. """ def wrap(f): if tables_cache: def wrapped_f(self, *args, **kwargs): if not kwargs.get('cache', True): return f(self, *args, **kwargs) if attribute_in_key: cache_key = key(*args, **kwargs).format( getattr(self, attribute_in_key)) else: cache_key = key(*args, **kwargs) o = tables_cache.get(cache_key) if not kwargs.get('force') and o is not None: return o o = f(self, *args, **kwargs) tables_cache.set(cache_key, o, timeout=kwargs.get('cache_timeout')) return o else: # noop def wrapped_f(self, *args, **kwargs): return f(self, *args, **kwargs) return wrapped_f return wrap
def memoized_func(key=view_cache_key, attribute_in_key=None): """Use this decorator to cache functions that have predefined first arg. enable_cache is treated as True by default, except enable_cache = False is passed to the decorated function. force means whether to force refresh the cache and is treated as False by default, except force = True is passed to the decorated function. timeout of cache is set to 600 seconds by default, except cache_timeout = {timeout in seconds} is passed to the decorated function. memoized_func uses simple_cache and stored the data in memory. Key is a callable function that takes function arguments and returns the caching key. """ def wrap(f): if tables_cache: def wrapped_f(self, *args, **kwargs): if not kwargs.get('cache', True): return f(self, *args, **kwargs) if attribute_in_key: cache_key = key(*args, **kwargs).format( getattr(self, attribute_in_key)) else: cache_key = key(*args, **kwargs) o = tables_cache.get(cache_key) if not kwargs.get('force') and o is not None: return o o = f(self, *args, **kwargs) tables_cache.set(cache_key, o, timeout=kwargs.get('cache_timeout')) return o else: # noop def wrapped_f(self, *args, **kwargs): return f(self, *args, **kwargs) return wrapped_f return wrap
[ "Use", "this", "decorator", "to", "cache", "functions", "that", "have", "predefined", "first", "arg", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/utils/cache.py#L28-L67
[ "def", "memoized_func", "(", "key", "=", "view_cache_key", ",", "attribute_in_key", "=", "None", ")", ":", "def", "wrap", "(", "f", ")", ":", "if", "tables_cache", ":", "def", "wrapped_f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "if", "not", "kwargs", ".", "get", "(", "'cache'", ",", "True", ")", ":", "return", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "if", "attribute_in_key", ":", "cache_key", "=", "key", "(", "*", "args", ",", "*", "*", "kwargs", ")", ".", "format", "(", "getattr", "(", "self", ",", "attribute_in_key", ")", ")", "else", ":", "cache_key", "=", "key", "(", "*", "args", ",", "*", "*", "kwargs", ")", "o", "=", "tables_cache", ".", "get", "(", "cache_key", ")", "if", "not", "kwargs", ".", "get", "(", "'force'", ")", "and", "o", "is", "not", "None", ":", "return", "o", "o", "=", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "tables_cache", ".", "set", "(", "cache_key", ",", "o", ",", "timeout", "=", "kwargs", ".", "get", "(", "'cache_timeout'", ")", ")", "return", "o", "else", ":", "# noop", "def", "wrapped_f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", ":", "return", "f", "(", "self", ",", "*", "args", ",", "*", "*", "kwargs", ")", "return", "wrapped_f", "return", "wrap" ]
ca2996c78f679260eb79c6008e276733df5fb653
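A self-contained sketch of the same decorator pattern, with a plain dict standing in for `tables_cache` (so `cache_timeout` is simply ignored here). The key function and the `Store` class are made up for illustration.

```python
# Simplified standalone version: dict-backed cache, no timeout support.
_cache = {}

def memoized(key):
    def wrap(f):
        def wrapped_f(self, *args, **kwargs):
            if not kwargs.get('cache', True):   # cache=False bypasses entirely
                return f(self, *args, **kwargs)
            cache_key = key(*args, **kwargs)
            if not kwargs.get('force') and cache_key in _cache:
                return _cache[cache_key]        # cache hit
            result = f(self, *args, **kwargs)   # miss or forced refresh
            _cache[cache_key] = result
            return result
        return wrapped_f
    return wrap

class Store:
    @memoized(key=lambda name, **kwargs: f'tables:{name}')
    def tables(self, name, cache=True, force=False, cache_timeout=None):
        print('computing...')
        return [name.upper()]

s = Store()
s.tables('db1')              # computes
s.tables('db1')              # served from _cache
s.tables('db1', force=True)  # recomputes
```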
train
Query.name
Name property
superset/models/sql_lab.py
def name(self): """Name property""" ts = datetime.now().isoformat() ts = ts.replace('-', '').replace(':', '').split('.')[0] tab = (self.tab_name.replace(' ', '_').lower() if self.tab_name else 'notab') tab = re.sub(r'\W+', '', tab) return f'sqllab_{tab}_{ts}'
def name(self): """Name property""" ts = datetime.now().isoformat() ts = ts.replace('-', '').replace(':', '').split('.')[0] tab = (self.tab_name.replace(' ', '_').lower() if self.tab_name else 'notab') tab = re.sub(r'\W+', '', tab) return f'sqllab_{tab}_{ts}'
[ "Name", "property" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/models/sql_lab.py#L132-L139
[ "def", "name", "(", "self", ")", ":", "ts", "=", "datetime", ".", "now", "(", ")", ".", "isoformat", "(", ")", "ts", "=", "ts", ".", "replace", "(", "'-'", ",", "''", ")", ".", "replace", "(", "':'", ",", "''", ")", ".", "split", "(", "'.'", ")", "[", "0", "]", "tab", "=", "(", "self", ".", "tab_name", ".", "replace", "(", "' '", ",", "'_'", ")", ".", "lower", "(", ")", "if", "self", ".", "tab_name", "else", "'notab'", ")", "tab", "=", "re", ".", "sub", "(", "r'\\W+'", ",", "''", ",", "tab", ")", "return", "f'sqllab_{tab}_{ts}'" ]
ca2996c78f679260eb79c6008e276733df5fb653
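A standalone rehearsal of the naming logic (same steps as the property above, pulled out into a plain function for demonstration):

```python
import re
from datetime import datetime

def sqllab_name(tab_name):
    # Compact timestamp: strip dashes/colons, drop sub-second precision.
    ts = datetime.now().isoformat()
    ts = ts.replace('-', '').replace(':', '').split('.')[0]
    # Slugify the tab name; fall back to 'notab'.
    tab = tab_name.replace(' ', '_').lower() if tab_name else 'notab'
    tab = re.sub(r'\W+', '', tab)
    return f'sqllab_{tab}_{ts}'

print(sqllab_name('My Query!'))  # e.g. sqllab_my_query_20190101T120000
```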
train
check_datasource_perms
Check if user can access a cached response from explore_json. This function takes `self` since it must have the same signature as the decorated method.
superset/views/core.py
def check_datasource_perms(self, datasource_type=None, datasource_id=None): """ Check if user can access a cached response from explore_json. This function takes `self` since it must have the same signature as the the decorated method. """ form_data = get_form_data()[0] datasource_id, datasource_type = get_datasource_info( datasource_id, datasource_type, form_data) viz_obj = get_viz( datasource_type=datasource_type, datasource_id=datasource_id, form_data=form_data, force=False, ) security_manager.assert_datasource_permission(viz_obj.datasource)
def check_datasource_perms(self, datasource_type=None, datasource_id=None): """ Check if user can access a cached response from explore_json. This function takes `self` since it must have the same signature as the the decorated method. """ form_data = get_form_data()[0] datasource_id, datasource_type = get_datasource_info( datasource_id, datasource_type, form_data) viz_obj = get_viz( datasource_type=datasource_type, datasource_id=datasource_id, form_data=form_data, force=False, ) security_manager.assert_datasource_permission(viz_obj.datasource)
[ "Check", "if", "user", "can", "access", "a", "cached", "response", "from", "explore_json", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L106-L123
[ "def", "check_datasource_perms", "(", "self", ",", "datasource_type", "=", "None", ",", "datasource_id", "=", "None", ")", ":", "form_data", "=", "get_form_data", "(", ")", "[", "0", "]", "datasource_id", ",", "datasource_type", "=", "get_datasource_info", "(", "datasource_id", ",", "datasource_type", ",", "form_data", ")", "viz_obj", "=", "get_viz", "(", "datasource_type", "=", "datasource_type", ",", "datasource_id", "=", "datasource_id", ",", "form_data", "=", "form_data", ",", "force", "=", "False", ",", ")", "security_manager", ".", "assert_datasource_permission", "(", "viz_obj", ".", "datasource", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
check_slice_perms
Check if user can access a cached response from slice_json. This function takes `self` since it must have the same signature as the decorated method.
superset/views/core.py
def check_slice_perms(self, slice_id): """ Check if user can access a cached response from slice_json. This function takes `self` since it must have the same signature as the the decorated method. """ form_data, slc = get_form_data(slice_id, use_slice_data=True) datasource_type = slc.datasource.type datasource_id = slc.datasource.id viz_obj = get_viz( datasource_type=datasource_type, datasource_id=datasource_id, form_data=form_data, force=False, ) security_manager.assert_datasource_permission(viz_obj.datasource)
def check_slice_perms(self, slice_id): """ Check if user can access a cached response from slice_json. This function takes `self` since it must have the same signature as the the decorated method. """ form_data, slc = get_form_data(slice_id, use_slice_data=True) datasource_type = slc.datasource.type datasource_id = slc.datasource.id viz_obj = get_viz( datasource_type=datasource_type, datasource_id=datasource_id, form_data=form_data, force=False, ) security_manager.assert_datasource_permission(viz_obj.datasource)
[ "Check", "if", "user", "can", "access", "a", "cached", "response", "from", "slice_json", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L126-L143
[ "def", "check_slice_perms", "(", "self", ",", "slice_id", ")", ":", "form_data", ",", "slc", "=", "get_form_data", "(", "slice_id", ",", "use_slice_data", "=", "True", ")", "datasource_type", "=", "slc", ".", "datasource", ".", "type", "datasource_id", "=", "slc", ".", "datasource", ".", "id", "viz_obj", "=", "get_viz", "(", "datasource_type", "=", "datasource_type", ",", "datasource_id", "=", "datasource_id", ",", "form_data", "=", "form_data", ",", "force", "=", "False", ",", ")", "security_manager", ".", "assert_datasource_permission", "(", "viz_obj", ".", "datasource", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
apply_caching
Applies the configuration's http headers to all responses
superset/views/core.py
def apply_caching(response): """Applies the configuration's http headers to all responses""" for k, v in config.get('HTTP_HEADERS').items(): response.headers[k] = v return response
def apply_caching(response): """Applies the configuration's http headers to all responses""" for k, v in config.get('HTTP_HEADERS').items(): response.headers[k] = v return response
[ "Applies", "the", "configuration", "s", "http", "headers", "to", "all", "responses" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L3017-L3021
[ "def", "apply_caching", "(", "response", ")", ":", "for", "k", ",", "v", "in", "config", ".", "get", "(", "'HTTP_HEADERS'", ")", ".", "items", "(", ")", ":", "response", ".", "headers", "[", "k", "]", "=", "v", "return", "response" ]
ca2996c78f679260eb79c6008e276733df5fb653
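A minimal sketch of how such a hook is typically registered in Flask; the `HTTP_HEADERS` value below is an assumption for demonstration, not Superset's default config.

```python
# Standalone Flask app registering the same after_request hook.
from flask import Flask

app = Flask(__name__)
config = {'HTTP_HEADERS': {'X-Frame-Options': 'SAMEORIGIN'}}

@app.after_request
def apply_caching(response):
    # Copy every configured header onto the outgoing response.
    for k, v in config.get('HTTP_HEADERS').items():
        response.headers[k] = v
    return response

with app.test_client() as client:
    # Runs even for a 404, since after_request applies to all responses.
    print(client.get('/').headers.get('X-Frame-Options'))  # SAMEORIGIN
```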
train
Superset.override_role_permissions
Updates the role with the given datasource permissions. Permissions not in the request will be revoked. This endpoint should be available to admins only. Expects JSON in the format: { 'role_name': '{role_name}', 'database': [{ 'datasource_type': '{table|druid}', 'name': '{database_name}', 'schema': [{ 'name': '{schema_name}', 'datasources': ['{datasource name}, {datasource name}'] }] }] }
superset/views/core.py
def override_role_permissions(self): """Updates the role with the give datasource permissions. Permissions not in the request will be revoked. This endpoint should be available to admins only. Expects JSON in the format: { 'role_name': '{role_name}', 'database': [{ 'datasource_type': '{table|druid}', 'name': '{database_name}', 'schema': [{ 'name': '{schema_name}', 'datasources': ['{datasource name}, {datasource name}'] }] }] } """ data = request.get_json(force=True) role_name = data['role_name'] databases = data['database'] db_ds_names = set() for dbs in databases: for schema in dbs['schema']: for ds_name in schema['datasources']: fullname = utils.get_datasource_full_name( dbs['name'], ds_name, schema=schema['name']) db_ds_names.add(fullname) existing_datasources = ConnectorRegistry.get_all_datasources(db.session) datasources = [ d for d in existing_datasources if d.full_name in db_ds_names] role = security_manager.find_role(role_name) # remove all permissions role.permissions = [] # grant permissions to the list of datasources granted_perms = [] for datasource in datasources: view_menu_perm = security_manager.find_permission_view_menu( view_menu_name=datasource.perm, permission_name='datasource_access') # prevent creating empty permissions if view_menu_perm and view_menu_perm.view_menu: role.permissions.append(view_menu_perm) granted_perms.append(view_menu_perm.view_menu.name) db.session.commit() return self.json_response({ 'granted': granted_perms, 'requested': list(db_ds_names), }, status=201)
def override_role_permissions(self): """Updates the role with the give datasource permissions. Permissions not in the request will be revoked. This endpoint should be available to admins only. Expects JSON in the format: { 'role_name': '{role_name}', 'database': [{ 'datasource_type': '{table|druid}', 'name': '{database_name}', 'schema': [{ 'name': '{schema_name}', 'datasources': ['{datasource name}, {datasource name}'] }] }] } """ data = request.get_json(force=True) role_name = data['role_name'] databases = data['database'] db_ds_names = set() for dbs in databases: for schema in dbs['schema']: for ds_name in schema['datasources']: fullname = utils.get_datasource_full_name( dbs['name'], ds_name, schema=schema['name']) db_ds_names.add(fullname) existing_datasources = ConnectorRegistry.get_all_datasources(db.session) datasources = [ d for d in existing_datasources if d.full_name in db_ds_names] role = security_manager.find_role(role_name) # remove all permissions role.permissions = [] # grant permissions to the list of datasources granted_perms = [] for datasource in datasources: view_menu_perm = security_manager.find_permission_view_menu( view_menu_name=datasource.perm, permission_name='datasource_access') # prevent creating empty permissions if view_menu_perm and view_menu_perm.view_menu: role.permissions.append(view_menu_perm) granted_perms.append(view_menu_perm.view_menu.name) db.session.commit() return self.json_response({ 'granted': granted_perms, 'requested': list(db_ds_names), }, status=201)
[ "Updates", "the", "role", "with", "the", "give", "datasource", "permissions", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L862-L911
[ "def", "override_role_permissions", "(", "self", ")", ":", "data", "=", "request", ".", "get_json", "(", "force", "=", "True", ")", "role_name", "=", "data", "[", "'role_name'", "]", "databases", "=", "data", "[", "'database'", "]", "db_ds_names", "=", "set", "(", ")", "for", "dbs", "in", "databases", ":", "for", "schema", "in", "dbs", "[", "'schema'", "]", ":", "for", "ds_name", "in", "schema", "[", "'datasources'", "]", ":", "fullname", "=", "utils", ".", "get_datasource_full_name", "(", "dbs", "[", "'name'", "]", ",", "ds_name", ",", "schema", "=", "schema", "[", "'name'", "]", ")", "db_ds_names", ".", "add", "(", "fullname", ")", "existing_datasources", "=", "ConnectorRegistry", ".", "get_all_datasources", "(", "db", ".", "session", ")", "datasources", "=", "[", "d", "for", "d", "in", "existing_datasources", "if", "d", ".", "full_name", "in", "db_ds_names", "]", "role", "=", "security_manager", ".", "find_role", "(", "role_name", ")", "# remove all permissions", "role", ".", "permissions", "=", "[", "]", "# grant permissions to the list of datasources", "granted_perms", "=", "[", "]", "for", "datasource", "in", "datasources", ":", "view_menu_perm", "=", "security_manager", ".", "find_permission_view_menu", "(", "view_menu_name", "=", "datasource", ".", "perm", ",", "permission_name", "=", "'datasource_access'", ")", "# prevent creating empty permissions", "if", "view_menu_perm", "and", "view_menu_perm", ".", "view_menu", ":", "role", ".", "permissions", ".", "append", "(", "view_menu_perm", ")", "granted_perms", ".", "append", "(", "view_menu_perm", ".", "view_menu", ".", "name", ")", "db", ".", "session", ".", "commit", "(", ")", "return", "self", ".", "json_response", "(", "{", "'granted'", ":", "granted_perms", ",", "'requested'", ":", "list", "(", "db_ds_names", ")", ",", "}", ",", "status", "=", "201", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
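For reference, a request payload matching the JSON shape documented in the docstring above. The role, database, schema, and datasource names are placeholders, and the endpoint's URL route is not shown in this record.

```python
# Example payload shaped per the docstring; all names are placeholders.
payload = {
    'role_name': 'Gamma',
    'database': [{
        'datasource_type': 'table',
        'name': 'examples',
        'schema': [{
            'name': 'public',
            'datasources': ['birth_names', 'energy_usage'],
        }],
    }],
}
```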
train
Superset.explore_json
Serves all requests that GET or POST form_data. This endpoint evolved to be the entry point of many different requests that GET or POST a form_data. `self.generate_json` receives this input and returns different payloads based on the request args in the first block. TODO: break into one endpoint for each return shape
superset/views/core.py
def explore_json(self, datasource_type=None, datasource_id=None): """Serves all request that GET or POST form_data This endpoint evolved to be the entry point of many different requests that GETs or POSTs a form_data. `self.generate_json` receives this input and returns different payloads based on the request args in the first block TODO: break into one endpoint for each return shape""" csv = request.args.get('csv') == 'true' query = request.args.get('query') == 'true' results = request.args.get('results') == 'true' samples = request.args.get('samples') == 'true' force = request.args.get('force') == 'true' form_data = get_form_data()[0] datasource_id, datasource_type = get_datasource_info( datasource_id, datasource_type, form_data) viz_obj = get_viz( datasource_type=datasource_type, datasource_id=datasource_id, form_data=form_data, force=force, ) return self.generate_json( viz_obj, csv=csv, query=query, results=results, samples=samples, )
def explore_json(self, datasource_type=None, datasource_id=None): """Serves all request that GET or POST form_data This endpoint evolved to be the entry point of many different requests that GETs or POSTs a form_data. `self.generate_json` receives this input and returns different payloads based on the request args in the first block TODO: break into one endpoint for each return shape""" csv = request.args.get('csv') == 'true' query = request.args.get('query') == 'true' results = request.args.get('results') == 'true' samples = request.args.get('samples') == 'true' force = request.args.get('force') == 'true' form_data = get_form_data()[0] datasource_id, datasource_type = get_datasource_info( datasource_id, datasource_type, form_data) viz_obj = get_viz( datasource_type=datasource_type, datasource_id=datasource_id, form_data=form_data, force=force, ) return self.generate_json( viz_obj, csv=csv, query=query, results=results, samples=samples, )
[ "Serves", "all", "request", "that", "GET", "or", "POST", "form_data" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1233-L1265
[ "def", "explore_json", "(", "self", ",", "datasource_type", "=", "None", ",", "datasource_id", "=", "None", ")", ":", "csv", "=", "request", ".", "args", ".", "get", "(", "'csv'", ")", "==", "'true'", "query", "=", "request", ".", "args", ".", "get", "(", "'query'", ")", "==", "'true'", "results", "=", "request", ".", "args", ".", "get", "(", "'results'", ")", "==", "'true'", "samples", "=", "request", ".", "args", ".", "get", "(", "'samples'", ")", "==", "'true'", "force", "=", "request", ".", "args", ".", "get", "(", "'force'", ")", "==", "'true'", "form_data", "=", "get_form_data", "(", ")", "[", "0", "]", "datasource_id", ",", "datasource_type", "=", "get_datasource_info", "(", "datasource_id", ",", "datasource_type", ",", "form_data", ")", "viz_obj", "=", "get_viz", "(", "datasource_type", "=", "datasource_type", ",", "datasource_id", "=", "datasource_id", ",", "form_data", "=", "form_data", ",", "force", "=", "force", ",", ")", "return", "self", ".", "generate_json", "(", "viz_obj", ",", "csv", "=", "csv", ",", "query", "=", "query", ",", "results", "=", "results", ",", "samples", "=", "samples", ",", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
Superset.import_dashboards
Overrides the dashboards using json instances from the file.
superset/views/core.py
def import_dashboards(self): """Overrides the dashboards using json instances from the file.""" f = request.files.get('file') if request.method == 'POST' and f: dashboard_import_export.import_dashboards(db.session, f.stream) return redirect('/dashboard/list/') return self.render_template('superset/import_dashboards.html')
def import_dashboards(self): """Overrides the dashboards using json instances from the file.""" f = request.files.get('file') if request.method == 'POST' and f: dashboard_import_export.import_dashboards(db.session, f.stream) return redirect('/dashboard/list/') return self.render_template('superset/import_dashboards.html')
[ "Overrides", "the", "dashboards", "using", "json", "instances", "from", "the", "file", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1270-L1276
[ "def", "import_dashboards", "(", "self", ")", ":", "f", "=", "request", ".", "files", ".", "get", "(", "'file'", ")", "if", "request", ".", "method", "==", "'POST'", "and", "f", ":", "dashboard_import_export", ".", "import_dashboards", "(", "db", ".", "session", ",", "f", ".", "stream", ")", "return", "redirect", "(", "'/dashboard/list/'", ")", "return", "self", ".", "render_template", "(", "'superset/import_dashboards.html'", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
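A sketch of driving this endpoint from a script; the host and session are assumptions, and the file is one previously produced by Superset's dashboard export.

import requests

session = requests.Session()  # assumed authenticated
with open('dashboards.json', 'rb') as f:
    session.post(
        'http://localhost:8088/superset/import_dashboards',
        files={'file': f},  # lands in request.files.get('file') in the handler
    )
# A GET (or a POST without a file) just renders the upload form instead.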
train
Superset.explorev2
Deprecated endpoint, here for backward compatibility of urls
superset/views/core.py
def explorev2(self, datasource_type, datasource_id): """Deprecated endpoint, here for backward compatibility of urls""" return redirect(url_for( 'Superset.explore', datasource_type=datasource_type, datasource_id=datasource_id, **request.args))
def explorev2(self, datasource_type, datasource_id): """Deprecated endpoint, here for backward compatibility of urls""" return redirect(url_for( 'Superset.explore', datasource_type=datasource_type, datasource_id=datasource_id, **request.args))
[ "Deprecated", "endpoint", "here", "for", "backward", "compatibility", "of", "urls" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1281-L1287
[ "def", "explorev2", "(", "self", ",", "datasource_type", ",", "datasource_id", ")", ":", "return", "redirect", "(", "url_for", "(", "'Superset.explore'", ",", "datasource_type", "=", "datasource_type", ",", "datasource_id", "=", "datasource_id", ",", "*", "*", "request", ".", "args", ")", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
Superset.filter
Endpoint to retrieve values for specified column. :param datasource_type: Type of datasource e.g. table :param datasource_id: Datasource id :param column: Column name to retrieve values for :return:
superset/views/core.py
def filter(self, datasource_type, datasource_id, column): """ Endpoint to retrieve values for specified column. :param datasource_type: Type of datasource e.g. table :param datasource_id: Datasource id :param column: Column name to retrieve values for :return: """ # TODO: Cache endpoint by user, datasource and column datasource = ConnectorRegistry.get_datasource( datasource_type, datasource_id, db.session) if not datasource: return json_error_response(DATASOURCE_MISSING_ERR) security_manager.assert_datasource_permission(datasource) payload = json.dumps( datasource.values_for_column( column, config.get('FILTER_SELECT_ROW_LIMIT', 10000), ), default=utils.json_int_dttm_ser) return json_success(payload)
def filter(self, datasource_type, datasource_id, column): """ Endpoint to retrieve values for specified column. :param datasource_type: Type of datasource e.g. table :param datasource_id: Datasource id :param column: Column name to retrieve values for :return: """ # TODO: Cache endpoint by user, datasource and column datasource = ConnectorRegistry.get_datasource( datasource_type, datasource_id, db.session) if not datasource: return json_error_response(DATASOURCE_MISSING_ERR) security_manager.assert_datasource_permission(datasource) payload = json.dumps( datasource.values_for_column( column, config.get('FILTER_SELECT_ROW_LIMIT', 10000), ), default=utils.json_int_dttm_ser) return json_success(payload)
[ "Endpoint", "to", "retrieve", "values", "for", "specified", "column", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1394-L1415
[ "def", "filter", "(", "self", ",", "datasource_type", ",", "datasource_id", ",", "column", ")", ":", "# TODO: Cache endpoint by user, datasource and column", "datasource", "=", "ConnectorRegistry", ".", "get_datasource", "(", "datasource_type", ",", "datasource_id", ",", "db", ".", "session", ")", "if", "not", "datasource", ":", "return", "json_error_response", "(", "DATASOURCE_MISSING_ERR", ")", "security_manager", ".", "assert_datasource_permission", "(", "datasource", ")", "payload", "=", "json", ".", "dumps", "(", "datasource", ".", "values_for_column", "(", "column", ",", "config", ".", "get", "(", "'FILTER_SELECT_ROW_LIMIT'", ",", "10000", ")", ",", ")", ",", "default", "=", "utils", ".", "json_int_dttm_ser", ")", "return", "json_success", "(", "payload", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
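An illustrative GET against this endpoint; the host and the datasource/column values are assumptions, and the path segments mirror the handler's (datasource_type, datasource_id, column) signature.

import requests

session = requests.Session()  # assumed authenticated
resp = session.get('http://localhost:8088/superset/filter/table/1/gender/')
# The handler returns at most FILTER_SELECT_ROW_LIMIT (default 10000) distinct
# values, serialized with a datetime-aware JSON encoder.
values = resp.json()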
train
Superset.save_or_overwrite_slice
Save or overwrite a slice
superset/views/core.py
def save_or_overwrite_slice( self, args, slc, slice_add_perm, slice_overwrite_perm, slice_download_perm, datasource_id, datasource_type, datasource_name): """Save or overwrite a slice""" slice_name = args.get('slice_name') action = args.get('action') form_data = get_form_data()[0] if action in ('saveas'): if 'slice_id' in form_data: form_data.pop('slice_id') # don't save old slice_id slc = models.Slice(owners=[g.user] if g.user else []) slc.params = json.dumps(form_data, indent=2, sort_keys=True) slc.datasource_name = datasource_name slc.viz_type = form_data['viz_type'] slc.datasource_type = datasource_type slc.datasource_id = datasource_id slc.slice_name = slice_name if action in ('saveas') and slice_add_perm: self.save_slice(slc) elif action == 'overwrite' and slice_overwrite_perm: self.overwrite_slice(slc) # Adding slice to a dashboard if requested dash = None if request.args.get('add_to_dash') == 'existing': dash = ( db.session.query(models.Dashboard) .filter_by(id=int(request.args.get('save_to_dashboard_id'))) .one() ) # check edit dashboard permissions dash_overwrite_perm = check_ownership(dash, raise_if_false=False) if not dash_overwrite_perm: return json_error_response( _('You don\'t have the rights to ') + _('alter this ') + _('dashboard'), status=400) flash( _('Chart [{}] was added to dashboard [{}]').format( slc.slice_name, dash.dashboard_title), 'info') elif request.args.get('add_to_dash') == 'new': # check create dashboard permissions dash_add_perm = security_manager.can_access('can_add', 'DashboardModelView') if not dash_add_perm: return json_error_response( _('You don\'t have the rights to ') + _('create a ') + _('dashboard'), status=400) dash = models.Dashboard( dashboard_title=request.args.get('new_dashboard_name'), owners=[g.user] if g.user else []) flash( _('Dashboard [{}] just got created and chart [{}] was added ' 'to it').format( dash.dashboard_title, slc.slice_name), 'info') if dash and slc not in dash.slices: dash.slices.append(slc) db.session.commit() response = { 'can_add': slice_add_perm, 'can_download': slice_download_perm, 'can_overwrite': is_owner(slc, g.user), 'form_data': slc.form_data, 'slice': slc.data, 'dashboard_id': dash.id if dash else None, } if request.args.get('goto_dash') == 'true': response.update({'dashboard': dash.url}) return json_success(json.dumps(response))
def save_or_overwrite_slice( self, args, slc, slice_add_perm, slice_overwrite_perm, slice_download_perm, datasource_id, datasource_type, datasource_name): """Save or overwrite a slice""" slice_name = args.get('slice_name') action = args.get('action') form_data = get_form_data()[0] if action in ('saveas'): if 'slice_id' in form_data: form_data.pop('slice_id') # don't save old slice_id slc = models.Slice(owners=[g.user] if g.user else []) slc.params = json.dumps(form_data, indent=2, sort_keys=True) slc.datasource_name = datasource_name slc.viz_type = form_data['viz_type'] slc.datasource_type = datasource_type slc.datasource_id = datasource_id slc.slice_name = slice_name if action in ('saveas') and slice_add_perm: self.save_slice(slc) elif action == 'overwrite' and slice_overwrite_perm: self.overwrite_slice(slc) # Adding slice to a dashboard if requested dash = None if request.args.get('add_to_dash') == 'existing': dash = ( db.session.query(models.Dashboard) .filter_by(id=int(request.args.get('save_to_dashboard_id'))) .one() ) # check edit dashboard permissions dash_overwrite_perm = check_ownership(dash, raise_if_false=False) if not dash_overwrite_perm: return json_error_response( _('You don\'t have the rights to ') + _('alter this ') + _('dashboard'), status=400) flash( _('Chart [{}] was added to dashboard [{}]').format( slc.slice_name, dash.dashboard_title), 'info') elif request.args.get('add_to_dash') == 'new': # check create dashboard permissions dash_add_perm = security_manager.can_access('can_add', 'DashboardModelView') if not dash_add_perm: return json_error_response( _('You don\'t have the rights to ') + _('create a ') + _('dashboard'), status=400) dash = models.Dashboard( dashboard_title=request.args.get('new_dashboard_name'), owners=[g.user] if g.user else []) flash( _('Dashboard [{}] just got created and chart [{}] was added ' 'to it').format( dash.dashboard_title, slc.slice_name), 'info') if dash and slc not in dash.slices: dash.slices.append(slc) db.session.commit() response = { 'can_add': slice_add_perm, 'can_download': slice_download_perm, 'can_overwrite': is_owner(slc, g.user), 'form_data': slc.form_data, 'slice': slc.data, 'dashboard_id': dash.id if dash else None, } if request.args.get('goto_dash') == 'true': response.update({'dashboard': dash.url}) return json_success(json.dumps(response))
[ "Save", "or", "overwrite", "a", "slice" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1417-L1498
[ "def", "save_or_overwrite_slice", "(", "self", ",", "args", ",", "slc", ",", "slice_add_perm", ",", "slice_overwrite_perm", ",", "slice_download_perm", ",", "datasource_id", ",", "datasource_type", ",", "datasource_name", ")", ":", "slice_name", "=", "args", ".", "get", "(", "'slice_name'", ")", "action", "=", "args", ".", "get", "(", "'action'", ")", "form_data", "=", "get_form_data", "(", ")", "[", "0", "]", "if", "action", "in", "(", "'saveas'", ")", ":", "if", "'slice_id'", "in", "form_data", ":", "form_data", ".", "pop", "(", "'slice_id'", ")", "# don't save old slice_id", "slc", "=", "models", ".", "Slice", "(", "owners", "=", "[", "g", ".", "user", "]", "if", "g", ".", "user", "else", "[", "]", ")", "slc", ".", "params", "=", "json", ".", "dumps", "(", "form_data", ",", "indent", "=", "2", ",", "sort_keys", "=", "True", ")", "slc", ".", "datasource_name", "=", "datasource_name", "slc", ".", "viz_type", "=", "form_data", "[", "'viz_type'", "]", "slc", ".", "datasource_type", "=", "datasource_type", "slc", ".", "datasource_id", "=", "datasource_id", "slc", ".", "slice_name", "=", "slice_name", "if", "action", "in", "(", "'saveas'", ")", "and", "slice_add_perm", ":", "self", ".", "save_slice", "(", "slc", ")", "elif", "action", "==", "'overwrite'", "and", "slice_overwrite_perm", ":", "self", ".", "overwrite_slice", "(", "slc", ")", "# Adding slice to a dashboard if requested", "dash", "=", "None", "if", "request", ".", "args", ".", "get", "(", "'add_to_dash'", ")", "==", "'existing'", ":", "dash", "=", "(", "db", ".", "session", ".", "query", "(", "models", ".", "Dashboard", ")", ".", "filter_by", "(", "id", "=", "int", "(", "request", ".", "args", ".", "get", "(", "'save_to_dashboard_id'", ")", ")", ")", ".", "one", "(", ")", ")", "# check edit dashboard permissions", "dash_overwrite_perm", "=", "check_ownership", "(", "dash", ",", "raise_if_false", "=", "False", ")", "if", "not", "dash_overwrite_perm", ":", "return", "json_error_response", "(", "_", "(", "'You don\\'t have the rights to '", ")", "+", "_", "(", "'alter this '", ")", "+", "_", "(", "'dashboard'", ")", ",", "status", "=", "400", ")", "flash", "(", "_", "(", "'Chart [{}] was added to dashboard [{}]'", ")", ".", "format", "(", "slc", ".", "slice_name", ",", "dash", ".", "dashboard_title", ")", ",", "'info'", ")", "elif", "request", ".", "args", ".", "get", "(", "'add_to_dash'", ")", "==", "'new'", ":", "# check create dashboard permissions", "dash_add_perm", "=", "security_manager", ".", "can_access", "(", "'can_add'", ",", "'DashboardModelView'", ")", "if", "not", "dash_add_perm", ":", "return", "json_error_response", "(", "_", "(", "'You don\\'t have the rights to '", ")", "+", "_", "(", "'create a '", ")", "+", "_", "(", "'dashboard'", ")", ",", "status", "=", "400", ")", "dash", "=", "models", ".", "Dashboard", "(", "dashboard_title", "=", "request", ".", "args", ".", "get", "(", "'new_dashboard_name'", ")", ",", "owners", "=", "[", "g", ".", "user", "]", "if", "g", ".", "user", "else", "[", "]", ")", "flash", "(", "_", "(", "'Dashboard [{}] just got created and chart [{}] was added '", "'to it'", ")", ".", "format", "(", "dash", ".", "dashboard_title", ",", "slc", ".", "slice_name", ")", ",", "'info'", ")", "if", "dash", "and", "slc", "not", "in", "dash", ".", "slices", ":", "dash", ".", "slices", ".", "append", "(", "slc", ")", "db", ".", "session", ".", "commit", "(", ")", "response", "=", "{", "'can_add'", ":", "slice_add_perm", ",", "'can_download'", ":", "slice_download_perm", ",", "'can_overwrite'", ":", "is_owner", "(", "slc", ",", "g", ".", "user", ")", ",", "'form_data'", ":", "slc", ".", "form_data", ",", "'slice'", ":", "slc", ".", "data", ",", "'dashboard_id'", ":", "dash", ".", "id", "if", "dash", "else", "None", ",", "}", "if", "request", ".", "args", ".", "get", "(", "'goto_dash'", ")", "==", "'true'", ":", "response", ".", "update", "(", "{", "'dashboard'", ":", "dash", ".", "url", "}", ")", "return", "json_success", "(", "json", ".", "dumps", "(", "response", ")", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
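A subtlety worth flagging in this record: `action in ('saveas')` tests membership in a plain string, because parentheses without a trailing comma do not make a tuple. It behaves for the exact values this endpoint receives ('saveas' / 'overwrite'), but any substring of 'saveas' would also match. A standalone demonstration:

# ('saveas') is just the string 'saveas'; a one-element tuple needs a comma.
assert ('saveas') == 'saveas'
assert 'saveas' in ('saveas')       # exact value matches, which the handler relies on
assert 'save' in ('saveas')         # substring also matches -- the surprising part
assert 'save' not in ('saveas',)    # the tuple form matches exact elements only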
train
Superset.checkbox
endpoint for checking/unchecking any boolean in a sqla model
superset/views/core.py
def checkbox(self, model_view, id_, attr, value): """endpoint for checking/unchecking any boolean in a sqla model""" modelview_to_model = { '{}ColumnInlineView'.format(name.capitalize()): source.column_class for name, source in ConnectorRegistry.sources.items() } model = modelview_to_model[model_view] col = db.session.query(model).filter_by(id=id_).first() checked = value == 'true' if col: setattr(col, attr, checked) if checked: metrics = col.get_metrics().values() col.datasource.add_missing_metrics(metrics) db.session.commit() return json_success('OK')
def checkbox(self, model_view, id_, attr, value): """endpoint for checking/unchecking any boolean in a sqla model""" modelview_to_model = { '{}ColumnInlineView'.format(name.capitalize()): source.column_class for name, source in ConnectorRegistry.sources.items() } model = modelview_to_model[model_view] col = db.session.query(model).filter_by(id=id_).first() checked = value == 'true' if col: setattr(col, attr, checked) if checked: metrics = col.get_metrics().values() col.datasource.add_missing_metrics(metrics) db.session.commit() return json_success('OK')
[ "endpoint", "for", "checking", "/", "unchecking", "any", "boolean", "in", "a", "sqla", "model" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1517-L1532
[ "def", "checkbox", "(", "self", ",", "model_view", ",", "id_", ",", "attr", ",", "value", ")", ":", "modelview_to_model", "=", "{", "'{}ColumnInlineView'", ".", "format", "(", "name", ".", "capitalize", "(", ")", ")", ":", "source", ".", "column_class", "for", "name", ",", "source", "in", "ConnectorRegistry", ".", "sources", ".", "items", "(", ")", "}", "model", "=", "modelview_to_model", "[", "model_view", "]", "col", "=", "db", ".", "session", ".", "query", "(", "model", ")", ".", "filter_by", "(", "id", "=", "id_", ")", ".", "first", "(", ")", "checked", "=", "value", "==", "'true'", "if", "col", ":", "setattr", "(", "col", ",", "attr", ",", "checked", ")", "if", "checked", ":", "metrics", "=", "col", ".", "get_metrics", "(", ")", ".", "values", "(", ")", "col", ".", "datasource", ".", "add_missing_metrics", "(", "metrics", ")", "db", ".", "session", ".", "commit", "(", ")", "return", "json_success", "(", "'OK'", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
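The view-to-model mapping in this record is built purely by naming convention. A toy reconstruction with a stand-in registry (the class names here are illustrative, not Superset's real connectors):

class TableColumn: pass
class DruidColumn: pass

# Stand-in for ConnectorRegistry.sources: connector name -> source exposing column_class.
sources = {
    'table': type('Source', (), {'column_class': TableColumn}),
    'druid': type('Source', (), {'column_class': DruidColumn}),
}

modelview_to_model = {
    '{}ColumnInlineView'.format(name.capitalize()): source.column_class
    for name, source in sources.items()
}
assert modelview_to_model['TableColumnInlineView'] is TableColumn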
train
Superset.tables
Endpoint to fetch the list of tables for given database
superset/views/core.py
def tables(self, db_id, schema, substr, force_refresh='false'): """Endpoint to fetch the list of tables for given database""" db_id = int(db_id) force_refresh = force_refresh.lower() == 'true' schema = utils.js_string_to_python(schema) substr = utils.js_string_to_python(substr) database = db.session.query(models.Database).filter_by(id=db_id).one() if schema: table_names = database.all_table_names_in_schema( schema=schema, force=force_refresh, cache=database.table_cache_enabled, cache_timeout=database.table_cache_timeout) view_names = database.all_view_names_in_schema( schema=schema, force=force_refresh, cache=database.table_cache_enabled, cache_timeout=database.table_cache_timeout) else: table_names = database.all_table_names_in_database( cache=True, force=False, cache_timeout=24 * 60 * 60) view_names = database.all_view_names_in_database( cache=True, force=False, cache_timeout=24 * 60 * 60) table_names = security_manager.accessible_by_user(database, table_names, schema) view_names = security_manager.accessible_by_user(database, view_names, schema) if substr: table_names = [tn for tn in table_names if substr in tn] view_names = [vn for vn in view_names if substr in vn] if not schema and database.default_schemas: def get_schema(tbl_or_view_name): return tbl_or_view_name.split('.')[0] if '.' in tbl_or_view_name else None user_schema = g.user.email.split('@')[0] valid_schemas = set(database.default_schemas + [user_schema]) table_names = [tn for tn in table_names if get_schema(tn) in valid_schemas] view_names = [vn for vn in view_names if get_schema(vn) in valid_schemas] max_items = config.get('MAX_TABLE_NAMES') or len(table_names) total_items = len(table_names) + len(view_names) max_tables = len(table_names) max_views = len(view_names) if total_items and substr: max_tables = max_items * len(table_names) // total_items max_views = max_items * len(view_names) // total_items table_options = [{'value': tn, 'label': tn} for tn in table_names[:max_tables]] table_options.extend([{'value': vn, 'label': '[view] {}'.format(vn)} for vn in view_names[:max_views]]) payload = { 'tableLength': len(table_names) + len(view_names), 'options': table_options, } return json_success(json.dumps(payload))
def tables(self, db_id, schema, substr, force_refresh='false'): """Endpoint to fetch the list of tables for given database""" db_id = int(db_id) force_refresh = force_refresh.lower() == 'true' schema = utils.js_string_to_python(schema) substr = utils.js_string_to_python(substr) database = db.session.query(models.Database).filter_by(id=db_id).one() if schema: table_names = database.all_table_names_in_schema( schema=schema, force=force_refresh, cache=database.table_cache_enabled, cache_timeout=database.table_cache_timeout) view_names = database.all_view_names_in_schema( schema=schema, force=force_refresh, cache=database.table_cache_enabled, cache_timeout=database.table_cache_timeout) else: table_names = database.all_table_names_in_database( cache=True, force=False, cache_timeout=24 * 60 * 60) view_names = database.all_view_names_in_database( cache=True, force=False, cache_timeout=24 * 60 * 60) table_names = security_manager.accessible_by_user(database, table_names, schema) view_names = security_manager.accessible_by_user(database, view_names, schema) if substr: table_names = [tn for tn in table_names if substr in tn] view_names = [vn for vn in view_names if substr in vn] if not schema and database.default_schemas: def get_schema(tbl_or_view_name): return tbl_or_view_name.split('.')[0] if '.' in tbl_or_view_name else None user_schema = g.user.email.split('@')[0] valid_schemas = set(database.default_schemas + [user_schema]) table_names = [tn for tn in table_names if get_schema(tn) in valid_schemas] view_names = [vn for vn in view_names if get_schema(vn) in valid_schemas] max_items = config.get('MAX_TABLE_NAMES') or len(table_names) total_items = len(table_names) + len(view_names) max_tables = len(table_names) max_views = len(view_names) if total_items and substr: max_tables = max_items * len(table_names) // total_items max_views = max_items * len(view_names) // total_items table_options = [{'value': tn, 'label': tn} for tn in table_names[:max_tables]] table_options.extend([{'value': vn, 'label': '[view] {}'.format(vn)} for vn in view_names[:max_views]]) payload = { 'tableLength': len(table_names) + len(view_names), 'options': table_options, } return json_success(json.dumps(payload))
[ "Endpoint", "to", "fetch", "the", "list", "of", "tables", "for", "given", "database" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1564-L1619
[ "def", "tables", "(", "self", ",", "db_id", ",", "schema", ",", "substr", ",", "force_refresh", "=", "'false'", ")", ":", "db_id", "=", "int", "(", "db_id", ")", "force_refresh", "=", "force_refresh", ".", "lower", "(", ")", "==", "'true'", "schema", "=", "utils", ".", "js_string_to_python", "(", "schema", ")", "substr", "=", "utils", ".", "js_string_to_python", "(", "substr", ")", "database", "=", "db", ".", "session", ".", "query", "(", "models", ".", "Database", ")", ".", "filter_by", "(", "id", "=", "db_id", ")", ".", "one", "(", ")", "if", "schema", ":", "table_names", "=", "database", ".", "all_table_names_in_schema", "(", "schema", "=", "schema", ",", "force", "=", "force_refresh", ",", "cache", "=", "database", ".", "table_cache_enabled", ",", "cache_timeout", "=", "database", ".", "table_cache_timeout", ")", "view_names", "=", "database", ".", "all_view_names_in_schema", "(", "schema", "=", "schema", ",", "force", "=", "force_refresh", ",", "cache", "=", "database", ".", "table_cache_enabled", ",", "cache_timeout", "=", "database", ".", "table_cache_timeout", ")", "else", ":", "table_names", "=", "database", ".", "all_table_names_in_database", "(", "cache", "=", "True", ",", "force", "=", "False", ",", "cache_timeout", "=", "24", "*", "60", "*", "60", ")", "view_names", "=", "database", ".", "all_view_names_in_database", "(", "cache", "=", "True", ",", "force", "=", "False", ",", "cache_timeout", "=", "24", "*", "60", "*", "60", ")", "table_names", "=", "security_manager", ".", "accessible_by_user", "(", "database", ",", "table_names", ",", "schema", ")", "view_names", "=", "security_manager", ".", "accessible_by_user", "(", "database", ",", "view_names", ",", "schema", ")", "if", "substr", ":", "table_names", "=", "[", "tn", "for", "tn", "in", "table_names", "if", "substr", "in", "tn", "]", "view_names", "=", "[", "vn", "for", "vn", "in", "view_names", "if", "substr", "in", "vn", "]", "if", "not", "schema", "and", "database", ".", "default_schemas", ":", "def", "get_schema", "(", "tbl_or_view_name", ")", ":", "return", "tbl_or_view_name", ".", "split", "(", "'.'", ")", "[", "0", "]", "if", "'.'", "in", "tbl_or_view_name", "else", "None", "user_schema", "=", "g", ".", "user", ".", "email", ".", "split", "(", "'@'", ")", "[", "0", "]", "valid_schemas", "=", "set", "(", "database", ".", "default_schemas", "+", "[", "user_schema", "]", ")", "table_names", "=", "[", "tn", "for", "tn", "in", "table_names", "if", "get_schema", "(", "tn", ")", "in", "valid_schemas", "]", "view_names", "=", "[", "vn", "for", "vn", "in", "view_names", "if", "get_schema", "(", "vn", ")", "in", "valid_schemas", "]", "max_items", "=", "config", ".", "get", "(", "'MAX_TABLE_NAMES'", ")", "or", "len", "(", "table_names", ")", "total_items", "=", "len", "(", "table_names", ")", "+", "len", "(", "view_names", ")", "max_tables", "=", "len", "(", "table_names", ")", "max_views", "=", "len", "(", "view_names", ")", "if", "total_items", "and", "substr", ":", "max_tables", "=", "max_items", "*", "len", "(", "table_names", ")", "//", "total_items", "max_views", "=", "max_items", "*", "len", "(", "view_names", ")", "//", "total_items", "table_options", "=", "[", "{", "'value'", ":", "tn", ",", "'label'", ":", "tn", "}", "for", "tn", "in", "table_names", "[", ":", "max_tables", "]", "]", "table_options", ".", "extend", "(", "[", "{", "'value'", ":", "vn", ",", "'label'", ":", "'[view] {}'", ".", "format", "(", "vn", ")", "}", "for", "vn", "in", "view_names", "[", ":", "max_views", "]", "]", ")", "payload", "=", "{", "'tableLength'", ":", "len", "(", "table_names", ")", "+", "len", "(", "view_names", ")", ",", "'options'", ":", "table_options", ",", "}", "return", "json_success", "(", "json", ".", "dumps", "(", "payload", ")", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
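When a substring filter is active, the handler splits the MAX_TABLE_NAMES budget between tables and views in proportion to their counts, via integer division. A worked, self-contained sketch of that arithmetic (all values illustrative):

table_names = ['t{}'.format(i) for i in range(30)]
view_names = ['v{}'.format(i) for i in range(10)]
max_items = 20  # stand-in for config MAX_TABLE_NAMES

total_items = len(table_names) + len(view_names)
max_tables = max_items * len(table_names) // total_items  # 20 * 30 // 40 == 15
max_views = max_items * len(view_names) // total_items    # 20 * 10 // 40 == 5
assert (max_tables, max_views) == (15, 5)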
train
Superset.copy_dash
Copy dashboard
superset/views/core.py
def copy_dash(self, dashboard_id): """Copy dashboard""" session = db.session() data = json.loads(request.form.get('data')) dash = models.Dashboard() original_dash = ( session .query(models.Dashboard) .filter_by(id=dashboard_id).first()) dash.owners = [g.user] if g.user else [] dash.dashboard_title = data['dashboard_title'] if data['duplicate_slices']: # Duplicating slices as well, mapping old ids to new ones old_to_new_sliceids = {} for slc in original_dash.slices: new_slice = slc.clone() new_slice.owners = [g.user] if g.user else [] session.add(new_slice) session.flush() new_slice.dashboards.append(dash) old_to_new_sliceids['{}'.format(slc.id)] = \ '{}'.format(new_slice.id) # update chartId of layout entities # in v2_dash positions json data, chartId should be integer, # while in older version slice_id is string type for value in data['positions'].values(): if ( isinstance(value, dict) and value.get('meta') and value.get('meta').get('chartId') ): old_id = '{}'.format(value.get('meta').get('chartId')) new_id = int(old_to_new_sliceids[old_id]) value['meta']['chartId'] = new_id else: dash.slices = original_dash.slices dash.params = original_dash.params self._set_dash_metadata(dash, data) session.add(dash) session.commit() dash_json = json.dumps(dash.data) session.close() return json_success(dash_json)
def copy_dash(self, dashboard_id): """Copy dashboard""" session = db.session() data = json.loads(request.form.get('data')) dash = models.Dashboard() original_dash = ( session .query(models.Dashboard) .filter_by(id=dashboard_id).first()) dash.owners = [g.user] if g.user else [] dash.dashboard_title = data['dashboard_title'] if data['duplicate_slices']: # Duplicating slices as well, mapping old ids to new ones old_to_new_sliceids = {} for slc in original_dash.slices: new_slice = slc.clone() new_slice.owners = [g.user] if g.user else [] session.add(new_slice) session.flush() new_slice.dashboards.append(dash) old_to_new_sliceids['{}'.format(slc.id)] = \ '{}'.format(new_slice.id) # update chartId of layout entities # in v2_dash positions json data, chartId should be integer, # while in older version slice_id is string type for value in data['positions'].values(): if ( isinstance(value, dict) and value.get('meta') and value.get('meta').get('chartId') ): old_id = '{}'.format(value.get('meta').get('chartId')) new_id = int(old_to_new_sliceids[old_id]) value['meta']['chartId'] = new_id else: dash.slices = original_dash.slices dash.params = original_dash.params self._set_dash_metadata(dash, data) session.add(dash) session.commit() dash_json = json.dumps(dash.data) session.close() return json_success(dash_json)
[ "Copy", "dashboard" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1624-L1669
[ "def", "copy_dash", "(", "self", ",", "dashboard_id", ")", ":", "session", "=", "db", ".", "session", "(", ")", "data", "=", "json", ".", "loads", "(", "request", ".", "form", ".", "get", "(", "'data'", ")", ")", "dash", "=", "models", ".", "Dashboard", "(", ")", "original_dash", "=", "(", "session", ".", "query", "(", "models", ".", "Dashboard", ")", ".", "filter_by", "(", "id", "=", "dashboard_id", ")", ".", "first", "(", ")", ")", "dash", ".", "owners", "=", "[", "g", ".", "user", "]", "if", "g", ".", "user", "else", "[", "]", "dash", ".", "dashboard_title", "=", "data", "[", "'dashboard_title'", "]", "if", "data", "[", "'duplicate_slices'", "]", ":", "# Duplicating slices as well, mapping old ids to new ones", "old_to_new_sliceids", "=", "{", "}", "for", "slc", "in", "original_dash", ".", "slices", ":", "new_slice", "=", "slc", ".", "clone", "(", ")", "new_slice", ".", "owners", "=", "[", "g", ".", "user", "]", "if", "g", ".", "user", "else", "[", "]", "session", ".", "add", "(", "new_slice", ")", "session", ".", "flush", "(", ")", "new_slice", ".", "dashboards", ".", "append", "(", "dash", ")", "old_to_new_sliceids", "[", "'{}'", ".", "format", "(", "slc", ".", "id", ")", "]", "=", "'{}'", ".", "format", "(", "new_slice", ".", "id", ")", "# update chartId of layout entities", "# in v2_dash positions json data, chartId should be integer,", "# while in older version slice_id is string type", "for", "value", "in", "data", "[", "'positions'", "]", ".", "values", "(", ")", ":", "if", "(", "isinstance", "(", "value", ",", "dict", ")", "and", "value", ".", "get", "(", "'meta'", ")", "and", "value", ".", "get", "(", "'meta'", ")", ".", "get", "(", "'chartId'", ")", ")", ":", "old_id", "=", "'{}'", ".", "format", "(", "value", ".", "get", "(", "'meta'", ")", ".", "get", "(", "'chartId'", ")", ")", "new_id", "=", "int", "(", "old_to_new_sliceids", "[", "old_id", "]", ")", "value", "[", "'meta'", "]", "[", "'chartId'", "]", "=", "new_id", "else", ":", "dash", ".", "slices", "=", "original_dash", ".", "slices", "dash", ".", "params", "=", "original_dash", ".", "params", "self", ".", "_set_dash_metadata", "(", "dash", ",", "data", ")", "session", ".", "add", "(", "dash", ")", "session", ".", "commit", "(", ")", "dash_json", "=", "json", ".", "dumps", "(", "dash", ".", "data", ")", "session", ".", "close", "(", ")", "return", "json_success", "(", "dash_json", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
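The slice-duplication branch keys its old-to-new id map by string (older layouts stored slice_id as a string) and converts back to int when rewriting chartId in the v2 positions JSON. A self-contained sketch of that remapping step, with illustrative ids:

old_to_new_sliceids = {'101': '201', '102': '202'}  # old slice id -> new, as strings

positions = {
    'CHART-a': {'meta': {'chartId': 101}},
    'ROW-1': {'children': []},  # non-chart layout entities carry no meta.chartId
}
for value in positions.values():
    if isinstance(value, dict) and value.get('meta') and value['meta'].get('chartId'):
        old_id = '{}'.format(value['meta']['chartId'])
        value['meta']['chartId'] = int(old_to_new_sliceids[old_id])

assert positions['CHART-a']['meta']['chartId'] == 201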
train
Superset.save_dash
Save a dashboard's metadata
superset/views/core.py
def save_dash(self, dashboard_id): """Save a dashboard's metadata""" session = db.session() dash = (session .query(models.Dashboard) .filter_by(id=dashboard_id).first()) check_ownership(dash, raise_if_false=True) data = json.loads(request.form.get('data')) self._set_dash_metadata(dash, data) session.merge(dash) session.commit() session.close() return json_success(json.dumps({'status': 'SUCCESS'}))
def save_dash(self, dashboard_id): """Save a dashboard's metadata""" session = db.session() dash = (session .query(models.Dashboard) .filter_by(id=dashboard_id).first()) check_ownership(dash, raise_if_false=True) data = json.loads(request.form.get('data')) self._set_dash_metadata(dash, data) session.merge(dash) session.commit() session.close() return json_success(json.dumps({'status': 'SUCCESS'}))
[ "Save", "a", "dashboard", "s", "metadata" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1674-L1686
[ "def", "save_dash", "(", "self", ",", "dashboard_id", ")", ":", "session", "=", "db", ".", "session", "(", ")", "dash", "=", "(", "session", ".", "query", "(", "models", ".", "Dashboard", ")", ".", "filter_by", "(", "id", "=", "dashboard_id", ")", ".", "first", "(", ")", ")", "check_ownership", "(", "dash", ",", "raise_if_false", "=", "True", ")", "data", "=", "json", ".", "loads", "(", "request", ".", "form", ".", "get", "(", "'data'", ")", ")", "self", ".", "_set_dash_metadata", "(", "dash", ",", "data", ")", "session", ".", "merge", "(", "dash", ")", "session", ".", "commit", "(", ")", "session", ".", "close", "(", ")", "return", "json_success", "(", "json", ".", "dumps", "(", "{", "'status'", ":", "'SUCCESS'", "}", ")", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
Superset.add_slices
Add and save slices to a dashboard
superset/views/core.py
def add_slices(self, dashboard_id): """Add and save slices to a dashboard""" data = json.loads(request.form.get('data')) session = db.session() Slice = models.Slice # noqa dash = ( session.query(models.Dashboard).filter_by(id=dashboard_id).first()) check_ownership(dash, raise_if_false=True) new_slices = session.query(Slice).filter( Slice.id.in_(data['slice_ids'])) dash.slices += new_slices session.merge(dash) session.commit() session.close() return 'SLICES ADDED'
def add_slices(self, dashboard_id): """Add and save slices to a dashboard""" data = json.loads(request.form.get('data')) session = db.session() Slice = models.Slice # noqa dash = ( session.query(models.Dashboard).filter_by(id=dashboard_id).first()) check_ownership(dash, raise_if_false=True) new_slices = session.query(Slice).filter( Slice.id.in_(data['slice_ids'])) dash.slices += new_slices session.merge(dash) session.commit() session.close() return 'SLICES ADDED'
[ "Add", "and", "save", "slices", "to", "a", "dashboard" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1749-L1763
[ "def", "add_slices", "(", "self", ",", "dashboard_id", ")", ":", "data", "=", "json", ".", "loads", "(", "request", ".", "form", ".", "get", "(", "'data'", ")", ")", "session", "=", "db", ".", "session", "(", ")", "Slice", "=", "models", ".", "Slice", "# noqa", "dash", "=", "(", "session", ".", "query", "(", "models", ".", "Dashboard", ")", ".", "filter_by", "(", "id", "=", "dashboard_id", ")", ".", "first", "(", ")", ")", "check_ownership", "(", "dash", ",", "raise_if_false", "=", "True", ")", "new_slices", "=", "session", ".", "query", "(", "Slice", ")", ".", "filter", "(", "Slice", ".", "id", ".", "in_", "(", "data", "[", "'slice_ids'", "]", ")", ")", "dash", ".", "slices", "+=", "new_slices", "session", ".", "merge", "(", "dash", ")", "session", ".", "commit", "(", ")", "session", ".", "close", "(", ")", "return", "'SLICES ADDED'" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
Superset.recent_activity
Recent activity (actions) for a given user
superset/views/core.py
def recent_activity(self, user_id): """Recent activity (actions) for a given user""" M = models # noqa if request.args.get('limit'): limit = int(request.args.get('limit')) else: limit = 1000 qry = ( db.session.query(M.Log, M.Dashboard, M.Slice) .outerjoin( M.Dashboard, M.Dashboard.id == M.Log.dashboard_id, ) .outerjoin( M.Slice, M.Slice.id == M.Log.slice_id, ) .filter( sqla.and_( ~M.Log.action.in_(('queries', 'shortner', 'sql_json')), M.Log.user_id == user_id, ), ) .order_by(M.Log.dttm.desc()) .limit(limit) ) payload = [] for log in qry.all(): item_url = None item_title = None if log.Dashboard: item_url = log.Dashboard.url item_title = log.Dashboard.dashboard_title elif log.Slice: item_url = log.Slice.slice_url item_title = log.Slice.slice_name payload.append({ 'action': log.Log.action, 'item_url': item_url, 'item_title': item_title, 'time': log.Log.dttm, }) return json_success( json.dumps(payload, default=utils.json_int_dttm_ser))
def recent_activity(self, user_id): """Recent activity (actions) for a given user""" M = models # noqa if request.args.get('limit'): limit = int(request.args.get('limit')) else: limit = 1000 qry = ( db.session.query(M.Log, M.Dashboard, M.Slice) .outerjoin( M.Dashboard, M.Dashboard.id == M.Log.dashboard_id, ) .outerjoin( M.Slice, M.Slice.id == M.Log.slice_id, ) .filter( sqla.and_( ~M.Log.action.in_(('queries', 'shortner', 'sql_json')), M.Log.user_id == user_id, ), ) .order_by(M.Log.dttm.desc()) .limit(limit) ) payload = [] for log in qry.all(): item_url = None item_title = None if log.Dashboard: item_url = log.Dashboard.url item_title = log.Dashboard.dashboard_title elif log.Slice: item_url = log.Slice.slice_url item_title = log.Slice.slice_name payload.append({ 'action': log.Log.action, 'item_url': item_url, 'item_title': item_title, 'time': log.Log.dttm, }) return json_success( json.dumps(payload, default=utils.json_int_dttm_ser))
[ "Recent", "activity", "(", "actions", ")", "for", "a", "given", "user" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1826-L1872
[ "def", "recent_activity", "(", "self", ",", "user_id", ")", ":", "M", "=", "models", "# noqa", "if", "request", ".", "args", ".", "get", "(", "'limit'", ")", ":", "limit", "=", "int", "(", "request", ".", "args", ".", "get", "(", "'limit'", ")", ")", "else", ":", "limit", "=", "1000", "qry", "=", "(", "db", ".", "session", ".", "query", "(", "M", ".", "Log", ",", "M", ".", "Dashboard", ",", "M", ".", "Slice", ")", ".", "outerjoin", "(", "M", ".", "Dashboard", ",", "M", ".", "Dashboard", ".", "id", "==", "M", ".", "Log", ".", "dashboard_id", ",", ")", ".", "outerjoin", "(", "M", ".", "Slice", ",", "M", ".", "Slice", ".", "id", "==", "M", ".", "Log", ".", "slice_id", ",", ")", ".", "filter", "(", "sqla", ".", "and_", "(", "~", "M", ".", "Log", ".", "action", ".", "in_", "(", "(", "'queries'", ",", "'shortner'", ",", "'sql_json'", ")", ")", ",", "M", ".", "Log", ".", "user_id", "==", "user_id", ",", ")", ",", ")", ".", "order_by", "(", "M", ".", "Log", ".", "dttm", ".", "desc", "(", ")", ")", ".", "limit", "(", "limit", ")", ")", "payload", "=", "[", "]", "for", "log", "in", "qry", ".", "all", "(", ")", ":", "item_url", "=", "None", "item_title", "=", "None", "if", "log", ".", "Dashboard", ":", "item_url", "=", "log", ".", "Dashboard", ".", "url", "item_title", "=", "log", ".", "Dashboard", ".", "dashboard_title", "elif", "log", ".", "Slice", ":", "item_url", "=", "log", ".", "Slice", ".", "slice_url", "item_title", "=", "log", ".", "Slice", ".", "slice_name", "payload", ".", "append", "(", "{", "'action'", ":", "log", ".", "Log", ".", "action", ",", "'item_url'", ":", "item_url", ",", "'item_title'", ":", "item_title", ",", "'time'", ":", "log", ".", "Log", ".", "dttm", ",", "}", ")", "return", "json_success", "(", "json", ".", "dumps", "(", "payload", ",", "default", "=", "utils", ".", "json_int_dttm_ser", ")", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
Superset.fave_dashboards_by_username
This lets us use a user's username to pull favourite dashboards
superset/views/core.py
def fave_dashboards_by_username(self, username): """This lets us use a user's username to pull favourite dashboards""" user = security_manager.find_user(username=username) return self.fave_dashboards(user.get_id())
def fave_dashboards_by_username(self, username): """This lets us use a user's username to pull favourite dashboards""" user = security_manager.find_user(username=username) return self.fave_dashboards(user.get_id())
[ "This", "lets", "us", "use", "a", "user", "s", "username", "to", "pull", "favourite", "dashboards" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1900-L1903
[ "def", "fave_dashboards_by_username", "(", "self", ",", "username", ")", ":", "user", "=", "security_manager", ".", "find_user", "(", "username", "=", "username", ")", "return", "self", ".", "fave_dashboards", "(", "user", ".", "get_id", "(", ")", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
Superset.user_slices
List of slices a user created, or faved
superset/views/core.py
def user_slices(self, user_id=None): """List of slices a user created, or faved""" if not user_id: user_id = g.user.id Slice = models.Slice # noqa FavStar = models.FavStar # noqa qry = ( db.session.query(Slice, FavStar.dttm).join( models.FavStar, sqla.and_( models.FavStar.user_id == int(user_id), models.FavStar.class_name == 'slice', models.Slice.id == models.FavStar.obj_id, ), isouter=True).filter( sqla.or_( Slice.created_by_fk == user_id, Slice.changed_by_fk == user_id, FavStar.user_id == user_id, ), ) .order_by(Slice.slice_name.asc()) ) payload = [{ 'id': o.Slice.id, 'title': o.Slice.slice_name, 'url': o.Slice.slice_url, 'data': o.Slice.form_data, 'dttm': o.dttm if o.dttm else o.Slice.changed_on, 'viz_type': o.Slice.viz_type, } for o in qry.all()] return json_success( json.dumps(payload, default=utils.json_int_dttm_ser))
def user_slices(self, user_id=None): """List of slices a user created, or faved""" if not user_id: user_id = g.user.id Slice = models.Slice # noqa FavStar = models.FavStar # noqa qry = ( db.session.query(Slice, FavStar.dttm).join( models.FavStar, sqla.and_( models.FavStar.user_id == int(user_id), models.FavStar.class_name == 'slice', models.Slice.id == models.FavStar.obj_id, ), isouter=True).filter( sqla.or_( Slice.created_by_fk == user_id, Slice.changed_by_fk == user_id, FavStar.user_id == user_id, ), ) .order_by(Slice.slice_name.asc()) ) payload = [{ 'id': o.Slice.id, 'title': o.Slice.slice_name, 'url': o.Slice.slice_url, 'data': o.Slice.form_data, 'dttm': o.dttm if o.dttm else o.Slice.changed_on, 'viz_type': o.Slice.viz_type, } for o in qry.all()] return json_success( json.dumps(payload, default=utils.json_int_dttm_ser))
[ "List", "of", "slices", "a", "user", "created", "or", "faved" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L1977-L2010
[ "def", "user_slices", "(", "self", ",", "user_id", "=", "None", ")", ":", "if", "not", "user_id", ":", "user_id", "=", "g", ".", "user", ".", "id", "Slice", "=", "models", ".", "Slice", "# noqa", "FavStar", "=", "models", ".", "FavStar", "# noqa", "qry", "=", "(", "db", ".", "session", ".", "query", "(", "Slice", ",", "FavStar", ".", "dttm", ")", ".", "join", "(", "models", ".", "FavStar", ",", "sqla", ".", "and_", "(", "models", ".", "FavStar", ".", "user_id", "==", "int", "(", "user_id", ")", ",", "models", ".", "FavStar", ".", "class_name", "==", "'slice'", ",", "models", ".", "Slice", ".", "id", "==", "models", ".", "FavStar", ".", "obj_id", ",", ")", ",", "isouter", "=", "True", ")", ".", "filter", "(", "sqla", ".", "or_", "(", "Slice", ".", "created_by_fk", "==", "user_id", ",", "Slice", ".", "changed_by_fk", "==", "user_id", ",", "FavStar", ".", "user_id", "==", "user_id", ",", ")", ",", ")", ".", "order_by", "(", "Slice", ".", "slice_name", ".", "asc", "(", ")", ")", ")", "payload", "=", "[", "{", "'id'", ":", "o", ".", "Slice", ".", "id", ",", "'title'", ":", "o", ".", "Slice", ".", "slice_name", ",", "'url'", ":", "o", ".", "Slice", ".", "slice_url", ",", "'data'", ":", "o", ".", "Slice", ".", "form_data", ",", "'dttm'", ":", "o", ".", "dttm", "if", "o", ".", "dttm", "else", "o", ".", "Slice", ".", "changed_on", ",", "'viz_type'", ":", "o", ".", "Slice", ".", "viz_type", ",", "}", "for", "o", "in", "qry", ".", "all", "(", ")", "]", "return", "json_success", "(", "json", ".", "dumps", "(", "payload", ",", "default", "=", "utils", ".", "json_int_dttm_ser", ")", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
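The query in this record outer-joins FavStar so a single pass returns slices the user created, changed, or faved (with the fav timestamp when present). A rough in-memory analogue of the visibility predicate, with plain dicts standing in for ORM rows:

user_id = 7  # illustrative
slices = [
    {'id': 1, 'created_by_fk': 7, 'changed_by_fk': 2},
    {'id': 2, 'created_by_fk': 3, 'changed_by_fk': 3},
]
fav_user_ids_by_slice = {2: {7}}  # slice id -> user ids who faved it

def visible(slc):
    return (
        slc['created_by_fk'] == user_id
        or slc['changed_by_fk'] == user_id
        or user_id in fav_user_ids_by_slice.get(slc['id'], set())
    )

assert [s['id'] for s in slices if visible(s)] == [1, 2]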
train
Superset.created_slices
List of slices created by this user
superset/views/core.py
def created_slices(self, user_id=None): """List of slices created by this user""" if not user_id: user_id = g.user.id Slice = models.Slice # noqa qry = ( db.session.query(Slice) .filter( sqla.or_( Slice.created_by_fk == user_id, Slice.changed_by_fk == user_id, ), ) .order_by(Slice.changed_on.desc()) ) payload = [{ 'id': o.id, 'title': o.slice_name, 'url': o.slice_url, 'dttm': o.changed_on, 'viz_type': o.viz_type, } for o in qry.all()] return json_success( json.dumps(payload, default=utils.json_int_dttm_ser))
def created_slices(self, user_id=None): """List of slices created by this user""" if not user_id: user_id = g.user.id Slice = models.Slice # noqa qry = ( db.session.query(Slice) .filter( sqla.or_( Slice.created_by_fk == user_id, Slice.changed_by_fk == user_id, ), ) .order_by(Slice.changed_on.desc()) ) payload = [{ 'id': o.id, 'title': o.slice_name, 'url': o.slice_url, 'dttm': o.changed_on, 'viz_type': o.viz_type, } for o in qry.all()] return json_success( json.dumps(payload, default=utils.json_int_dttm_ser))
[ "List", "of", "slices", "created", "by", "this", "user" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L2016-L2039
[ "def", "created_slices", "(", "self", ",", "user_id", "=", "None", ")", ":", "if", "not", "user_id", ":", "user_id", "=", "g", ".", "user", ".", "id", "Slice", "=", "models", ".", "Slice", "# noqa", "qry", "=", "(", "db", ".", "session", ".", "query", "(", "Slice", ")", ".", "filter", "(", "sqla", ".", "or_", "(", "Slice", ".", "created_by_fk", "==", "user_id", ",", "Slice", ".", "changed_by_fk", "==", "user_id", ",", ")", ",", ")", ".", "order_by", "(", "Slice", ".", "changed_on", ".", "desc", "(", ")", ")", ")", "payload", "=", "[", "{", "'id'", ":", "o", ".", "id", ",", "'title'", ":", "o", ".", "slice_name", ",", "'url'", ":", "o", ".", "slice_url", ",", "'dttm'", ":", "o", ".", "changed_on", ",", "'viz_type'", ":", "o", ".", "viz_type", ",", "}", "for", "o", "in", "qry", ".", "all", "(", ")", "]", "return", "json_success", "(", "json", ".", "dumps", "(", "payload", ",", "default", "=", "utils", ".", "json_int_dttm_ser", ")", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
Superset.fave_slices
Favorite slices for a user
superset/views/core.py
def fave_slices(self, user_id=None): """Favorite slices for a user""" if not user_id: user_id = g.user.id qry = ( db.session.query( models.Slice, models.FavStar.dttm, ) .join( models.FavStar, sqla.and_( models.FavStar.user_id == int(user_id), models.FavStar.class_name == 'slice', models.Slice.id == models.FavStar.obj_id, ), ) .order_by( models.FavStar.dttm.desc(), ) ) payload = [] for o in qry.all(): d = { 'id': o.Slice.id, 'title': o.Slice.slice_name, 'url': o.Slice.slice_url, 'dttm': o.dttm, 'viz_type': o.Slice.viz_type, } if o.Slice.created_by: user = o.Slice.created_by d['creator'] = str(user) d['creator_url'] = '/superset/profile/{}/'.format( user.username) payload.append(d) return json_success( json.dumps(payload, default=utils.json_int_dttm_ser))
def fave_slices(self, user_id=None): """Favorite slices for a user""" if not user_id: user_id = g.user.id qry = ( db.session.query( models.Slice, models.FavStar.dttm, ) .join( models.FavStar, sqla.and_( models.FavStar.user_id == int(user_id), models.FavStar.class_name == 'slice', models.Slice.id == models.FavStar.obj_id, ), ) .order_by( models.FavStar.dttm.desc(), ) ) payload = [] for o in qry.all(): d = { 'id': o.Slice.id, 'title': o.Slice.slice_name, 'url': o.Slice.slice_url, 'dttm': o.dttm, 'viz_type': o.Slice.viz_type, } if o.Slice.created_by: user = o.Slice.created_by d['creator'] = str(user) d['creator_url'] = '/superset/profile/{}/'.format( user.username) payload.append(d) return json_success( json.dumps(payload, default=utils.json_int_dttm_ser))
[ "Favorite", "slices", "for", "a", "user" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L2045-L2082
[ "def", "fave_slices", "(", "self", ",", "user_id", "=", "None", ")", ":", "if", "not", "user_id", ":", "user_id", "=", "g", ".", "user", ".", "id", "qry", "=", "(", "db", ".", "session", ".", "query", "(", "models", ".", "Slice", ",", "models", ".", "FavStar", ".", "dttm", ",", ")", ".", "join", "(", "models", ".", "FavStar", ",", "sqla", ".", "and_", "(", "models", ".", "FavStar", ".", "user_id", "==", "int", "(", "user_id", ")", ",", "models", ".", "FavStar", ".", "class_name", "==", "'slice'", ",", "models", ".", "Slice", ".", "id", "==", "models", ".", "FavStar", ".", "obj_id", ",", ")", ",", ")", ".", "order_by", "(", "models", ".", "FavStar", ".", "dttm", ".", "desc", "(", ")", ",", ")", ")", "payload", "=", "[", "]", "for", "o", "in", "qry", ".", "all", "(", ")", ":", "d", "=", "{", "'id'", ":", "o", ".", "Slice", ".", "id", ",", "'title'", ":", "o", ".", "Slice", ".", "slice_name", ",", "'url'", ":", "o", ".", "Slice", ".", "slice_url", ",", "'dttm'", ":", "o", ".", "dttm", ",", "'viz_type'", ":", "o", ".", "Slice", ".", "viz_type", ",", "}", "if", "o", ".", "Slice", ".", "created_by", ":", "user", "=", "o", ".", "Slice", ".", "created_by", "d", "[", "'creator'", "]", "=", "str", "(", "user", ")", "d", "[", "'creator_url'", "]", "=", "'/superset/profile/{}/'", ".", "format", "(", "user", ".", "username", ")", "payload", ".", "append", "(", "d", ")", "return", "json_success", "(", "json", ".", "dumps", "(", "payload", ",", "default", "=", "utils", ".", "json_int_dttm_ser", ")", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
train
Superset.warm_up_cache
Warms up the cache for the slice or table. Note for slices a force refresh occurs.
superset/views/core.py
def warm_up_cache(self): """Warms up the cache for the slice or table. Note for slices a force refresh occurs. """ slices = None session = db.session() slice_id = request.args.get('slice_id') table_name = request.args.get('table_name') db_name = request.args.get('db_name') if not slice_id and not (table_name and db_name): return json_error_response(__( 'Malformed request. slice_id or table_name and db_name ' 'arguments are expected'), status=400) if slice_id: slices = session.query(models.Slice).filter_by(id=slice_id).all() if not slices: return json_error_response(__( 'Chart %(id)s not found', id=slice_id), status=404) elif table_name and db_name: SqlaTable = ConnectorRegistry.sources['table'] table = ( session.query(SqlaTable) .join(models.Database) .filter( models.Database.database_name == db_name or SqlaTable.table_name == table_name) ).first() if not table: return json_error_response(__( "Table %(t)s wasn't found in the database %(d)s", t=table_name, s=db_name), status=404) slices = session.query(models.Slice).filter_by( datasource_id=table.id, datasource_type=table.type).all() for slc in slices: try: form_data = get_form_data(slc.id, use_slice_data=True)[0] obj = get_viz( datasource_type=slc.datasource.type, datasource_id=slc.datasource.id, form_data=form_data, force=True, ) obj.get_json() except Exception as e: return json_error_response(utils.error_msg_from_exception(e)) return json_success(json.dumps( [{'slice_id': slc.id, 'slice_name': slc.slice_name} for slc in slices]))
def warm_up_cache(self): """Warms up the cache for the slice or table. Note for slices a force refresh occurs. """ slices = None session = db.session() slice_id = request.args.get('slice_id') table_name = request.args.get('table_name') db_name = request.args.get('db_name') if not slice_id and not (table_name and db_name): return json_error_response(__( 'Malformed request. slice_id or table_name and db_name ' 'arguments are expected'), status=400) if slice_id: slices = session.query(models.Slice).filter_by(id=slice_id).all() if not slices: return json_error_response(__( 'Chart %(id)s not found', id=slice_id), status=404) elif table_name and db_name: SqlaTable = ConnectorRegistry.sources['table'] table = ( session.query(SqlaTable) .join(models.Database) .filter( models.Database.database_name == db_name or SqlaTable.table_name == table_name) ).first() if not table: return json_error_response(__( "Table %(t)s wasn't found in the database %(d)s", t=table_name, s=db_name), status=404) slices = session.query(models.Slice).filter_by( datasource_id=table.id, datasource_type=table.type).all() for slc in slices: try: form_data = get_form_data(slc.id, use_slice_data=True)[0] obj = get_viz( datasource_type=slc.datasource.type, datasource_id=slc.datasource.id, form_data=form_data, force=True, ) obj.get_json() except Exception as e: return json_error_response(utils.error_msg_from_exception(e)) return json_success(json.dumps( [{'slice_id': slc.id, 'slice_name': slc.slice_name} for slc in slices]))
[ "Warms", "up", "the", "cache", "for", "the", "slice", "or", "table", "." ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L2087-L2138
[ "def", "warm_up_cache", "(", "self", ")", ":", "slices", "=", "None", "session", "=", "db", ".", "session", "(", ")", "slice_id", "=", "request", ".", "args", ".", "get", "(", "'slice_id'", ")", "table_name", "=", "request", ".", "args", ".", "get", "(", "'table_name'", ")", "db_name", "=", "request", ".", "args", ".", "get", "(", "'db_name'", ")", "if", "not", "slice_id", "and", "not", "(", "table_name", "and", "db_name", ")", ":", "return", "json_error_response", "(", "__", "(", "'Malformed request. slice_id or table_name and db_name '", "'arguments are expected'", ")", ",", "status", "=", "400", ")", "if", "slice_id", ":", "slices", "=", "session", ".", "query", "(", "models", ".", "Slice", ")", ".", "filter_by", "(", "id", "=", "slice_id", ")", ".", "all", "(", ")", "if", "not", "slices", ":", "return", "json_error_response", "(", "__", "(", "'Chart %(id)s not found'", ",", "id", "=", "slice_id", ")", ",", "status", "=", "404", ")", "elif", "table_name", "and", "db_name", ":", "SqlaTable", "=", "ConnectorRegistry", ".", "sources", "[", "'table'", "]", "table", "=", "(", "session", ".", "query", "(", "SqlaTable", ")", ".", "join", "(", "models", ".", "Database", ")", ".", "filter", "(", "models", ".", "Database", ".", "database_name", "==", "db_name", "or", "SqlaTable", ".", "table_name", "==", "table_name", ")", ")", ".", "first", "(", ")", "if", "not", "table", ":", "return", "json_error_response", "(", "__", "(", "\"Table %(t)s wasn't found in the database %(d)s\"", ",", "t", "=", "table_name", ",", "s", "=", "db_name", ")", ",", "status", "=", "404", ")", "slices", "=", "session", ".", "query", "(", "models", ".", "Slice", ")", ".", "filter_by", "(", "datasource_id", "=", "table", ".", "id", ",", "datasource_type", "=", "table", ".", "type", ")", ".", "all", "(", ")", "for", "slc", "in", "slices", ":", "try", ":", "form_data", "=", "get_form_data", "(", "slc", ".", "id", ",", "use_slice_data", "=", "True", ")", "[", "0", "]", "obj", "=", "get_viz", "(", "datasource_type", "=", "slc", ".", "datasource", ".", "type", ",", "datasource_id", "=", "slc", ".", "datasource", ".", "id", ",", "form_data", "=", "form_data", ",", "force", "=", "True", ",", ")", "obj", ".", "get_json", "(", ")", "except", "Exception", "as", "e", ":", "return", "json_error_response", "(", "utils", ".", "error_msg_from_exception", "(", "e", ")", ")", "return", "json_success", "(", "json", ".", "dumps", "(", "[", "{", "'slice_id'", ":", "slc", ".", "id", ",", "'slice_name'", ":", "slc", ".", "slice_name", "}", "for", "slc", "in", "slices", "]", ")", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
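Two slips worth flagging in this record's table lookup. First, `.filter(models.Database.database_name == db_name or SqlaTable.table_name == table_name)` combines two SQLAlchemy expressions with Python's `or`, which never builds a SQL OR; given that the query joins the table to its database, the intent was presumably that both conditions hold. Second, the not-found message interpolates `%(d)s` but passes the keyword `s=db_name`, so the database name never renders. A corrected sketch under those assumptions, reusing the names already in scope in the record:

# Multiple positional arguments to .filter() are AND-ed together at the SQL level.
table = (
    session.query(SqlaTable)
    .join(models.Database)
    .filter(
        models.Database.database_name == db_name,
        SqlaTable.table_name == table_name,
    )
).first()
if not table:
    return json_error_response(__(
        "Table %(t)s wasn't found in the database %(d)s",
        t=table_name, d=db_name), status=404)  # keyword now matches the placeholder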
train
Superset.favstar
Toggle favorite stars on Slices and Dashboard
superset/views/core.py
def favstar(self, class_name, obj_id, action): """Toggle favorite stars on Slices and Dashboard""" session = db.session() FavStar = models.FavStar # noqa count = 0 favs = session.query(FavStar).filter_by( class_name=class_name, obj_id=obj_id, user_id=g.user.get_id()).all() if action == 'select': if not favs: session.add( FavStar( class_name=class_name, obj_id=obj_id, user_id=g.user.get_id(), dttm=datetime.now(), ), ) count = 1 elif action == 'unselect': for fav in favs: session.delete(fav) else: count = len(favs) session.commit() return json_success(json.dumps({'count': count}))
def favstar(self, class_name, obj_id, action): """Toggle favorite stars on Slices and Dashboard""" session = db.session() FavStar = models.FavStar # noqa count = 0 favs = session.query(FavStar).filter_by( class_name=class_name, obj_id=obj_id, user_id=g.user.get_id()).all() if action == 'select': if not favs: session.add( FavStar( class_name=class_name, obj_id=obj_id, user_id=g.user.get_id(), dttm=datetime.now(), ), ) count = 1 elif action == 'unselect': for fav in favs: session.delete(fav) else: count = len(favs) session.commit() return json_success(json.dumps({'count': count}))
[ "Toggle", "favorite", "stars", "on", "Slices", "and", "Dashboard" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L2142-L2167
[ "def", "favstar", "(", "self", ",", "class_name", ",", "obj_id", ",", "action", ")", ":", "session", "=", "db", ".", "session", "(", ")", "FavStar", "=", "models", ".", "FavStar", "# noqa", "count", "=", "0", "favs", "=", "session", ".", "query", "(", "FavStar", ")", ".", "filter_by", "(", "class_name", "=", "class_name", ",", "obj_id", "=", "obj_id", ",", "user_id", "=", "g", ".", "user", ".", "get_id", "(", ")", ")", ".", "all", "(", ")", "if", "action", "==", "'select'", ":", "if", "not", "favs", ":", "session", ".", "add", "(", "FavStar", "(", "class_name", "=", "class_name", ",", "obj_id", "=", "obj_id", ",", "user_id", "=", "g", ".", "user", ".", "get_id", "(", ")", ",", "dttm", "=", "datetime", ".", "now", "(", ")", ",", ")", ",", ")", "count", "=", "1", "elif", "action", "==", "'unselect'", ":", "for", "fav", "in", "favs", ":", "session", ".", "delete", "(", "fav", ")", "else", ":", "count", "=", "len", "(", "favs", ")", "session", ".", "commit", "(", ")", "return", "json_success", "(", "json", ".", "dumps", "(", "{", "'count'", ":", "count", "}", ")", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
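favstar keys each star on the (class_name, obj_id, user_id) triple: 'select' inserts a FavStar row when none exists and reports a count of 1, 'unselect' deletes any matching rows, and any other action simply returns the current count. A minimal client-side sketch follows, assuming an authenticated requests.Session and that the view is exposed at /superset/favstar/<class_name>/<obj_id>/<action>/ (host and route are assumptions, not part of the record).

import requests

session = requests.Session()  # authenticate against the instance first

def toggle_star(base, class_name, obj_id, action):
    # action: 'select' to star, 'unselect' to unstar, anything else to count
    url = f'{base}/superset/favstar/{class_name}/{obj_id}/{action}/'
    return session.get(url).json()['count']

base = 'http://localhost:8088'  # hypothetical host
print(toggle_star(base, 'Dashboard', 7, 'select'))    # 1: starred (row added if missing)
print(toggle_star(base, 'Dashboard', 7, 'count'))     # 1: one matching FavStar row
print(toggle_star(base, 'Dashboard', 7, 'unselect'))  # 0: rows deleted, count stays 0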
train
Superset.dashboard
Server side rendering for a dashboard
superset/views/core.py
def dashboard(self, dashboard_id):
    """Server side rendering for a dashboard"""
    session = db.session()
    qry = session.query(models.Dashboard)
    if dashboard_id.isdigit():
        qry = qry.filter_by(id=int(dashboard_id))
    else:
        qry = qry.filter_by(slug=dashboard_id)

    dash = qry.one_or_none()
    if not dash:
        abort(404)
    datasources = set()
    for slc in dash.slices:
        datasource = slc.datasource
        if datasource:
            datasources.add(datasource)

    if config.get('ENABLE_ACCESS_REQUEST'):
        for datasource in datasources:
            if datasource and not security_manager.datasource_access(datasource):
                flash(
                    __(security_manager.get_datasource_access_error_msg(datasource)),
                    'danger')
                return redirect(
                    'superset/request_access/?'
                    f'dashboard_id={dash.id}&')

    dash_edit_perm = check_ownership(dash, raise_if_false=False) and \
        security_manager.can_access('can_save_dash', 'Superset')
    dash_save_perm = security_manager.can_access('can_save_dash', 'Superset')
    superset_can_explore = security_manager.can_access('can_explore', 'Superset')
    superset_can_csv = security_manager.can_access('can_csv', 'Superset')
    slice_can_edit = security_manager.can_access('can_edit', 'SliceModelView')

    standalone_mode = request.args.get('standalone') == 'true'
    edit_mode = request.args.get('edit') == 'true'

    # Hack to log the dashboard_id properly, even when getting a slug
    @log_this
    def dashboard(**kwargs):  # noqa
        pass

    dashboard(
        dashboard_id=dash.id,
        dashboard_version='v2',
        dash_edit_perm=dash_edit_perm,
        edit_mode=edit_mode)

    dashboard_data = dash.data
    dashboard_data.update({
        'standalone_mode': standalone_mode,
        'dash_save_perm': dash_save_perm,
        'dash_edit_perm': dash_edit_perm,
        'superset_can_explore': superset_can_explore,
        'superset_can_csv': superset_can_csv,
        'slice_can_edit': slice_can_edit,
    })

    bootstrap_data = {
        'user_id': g.user.get_id(),
        'dashboard_data': dashboard_data,
        'datasources': {ds.uid: ds.data for ds in datasources},
        'common': self.common_bootsrap_payload(),
        'editMode': edit_mode,
    }

    if request.args.get('json') == 'true':
        return json_success(json.dumps(bootstrap_data))

    return self.render_template(
        'superset/dashboard.html',
        entry='dashboard',
        standalone_mode=standalone_mode,
        title=dash.dashboard_title,
        bootstrap_data=json.dumps(bootstrap_data),
    )
def dashboard(self, dashboard_id):
    """Server side rendering for a dashboard"""
    session = db.session()
    qry = session.query(models.Dashboard)
    if dashboard_id.isdigit():
        qry = qry.filter_by(id=int(dashboard_id))
    else:
        qry = qry.filter_by(slug=dashboard_id)

    dash = qry.one_or_none()
    if not dash:
        abort(404)
    datasources = set()
    for slc in dash.slices:
        datasource = slc.datasource
        if datasource:
            datasources.add(datasource)

    if config.get('ENABLE_ACCESS_REQUEST'):
        for datasource in datasources:
            if datasource and not security_manager.datasource_access(datasource):
                flash(
                    __(security_manager.get_datasource_access_error_msg(datasource)),
                    'danger')
                return redirect(
                    'superset/request_access/?'
                    f'dashboard_id={dash.id}&')

    dash_edit_perm = check_ownership(dash, raise_if_false=False) and \
        security_manager.can_access('can_save_dash', 'Superset')
    dash_save_perm = security_manager.can_access('can_save_dash', 'Superset')
    superset_can_explore = security_manager.can_access('can_explore', 'Superset')
    superset_can_csv = security_manager.can_access('can_csv', 'Superset')
    slice_can_edit = security_manager.can_access('can_edit', 'SliceModelView')

    standalone_mode = request.args.get('standalone') == 'true'
    edit_mode = request.args.get('edit') == 'true'

    # Hack to log the dashboard_id properly, even when getting a slug
    @log_this
    def dashboard(**kwargs):  # noqa
        pass

    dashboard(
        dashboard_id=dash.id,
        dashboard_version='v2',
        dash_edit_perm=dash_edit_perm,
        edit_mode=edit_mode)

    dashboard_data = dash.data
    dashboard_data.update({
        'standalone_mode': standalone_mode,
        'dash_save_perm': dash_save_perm,
        'dash_edit_perm': dash_edit_perm,
        'superset_can_explore': superset_can_explore,
        'superset_can_csv': superset_can_csv,
        'slice_can_edit': slice_can_edit,
    })

    bootstrap_data = {
        'user_id': g.user.get_id(),
        'dashboard_data': dashboard_data,
        'datasources': {ds.uid: ds.data for ds in datasources},
        'common': self.common_bootsrap_payload(),
        'editMode': edit_mode,
    }

    if request.args.get('json') == 'true':
        return json_success(json.dumps(bootstrap_data))

    return self.render_template(
        'superset/dashboard.html',
        entry='dashboard',
        standalone_mode=standalone_mode,
        title=dash.dashboard_title,
        bootstrap_data=json.dumps(bootstrap_data),
    )
[ "Server", "side", "rendering", "for", "a", "dashboard" ]
apache/incubator-superset
python
https://github.com/apache/incubator-superset/blob/ca2996c78f679260eb79c6008e276733df5fb653/superset/views/core.py#L2171-L2246
[ "def", "dashboard", "(", "self", ",", "dashboard_id", ")", ":", "session", "=", "db", ".", "session", "(", ")", "qry", "=", "session", ".", "query", "(", "models", ".", "Dashboard", ")", "if", "dashboard_id", ".", "isdigit", "(", ")", ":", "qry", "=", "qry", ".", "filter_by", "(", "id", "=", "int", "(", "dashboard_id", ")", ")", "else", ":", "qry", "=", "qry", ".", "filter_by", "(", "slug", "=", "dashboard_id", ")", "dash", "=", "qry", ".", "one_or_none", "(", ")", "if", "not", "dash", ":", "abort", "(", "404", ")", "datasources", "=", "set", "(", ")", "for", "slc", "in", "dash", ".", "slices", ":", "datasource", "=", "slc", ".", "datasource", "if", "datasource", ":", "datasources", ".", "add", "(", "datasource", ")", "if", "config", ".", "get", "(", "'ENABLE_ACCESS_REQUEST'", ")", ":", "for", "datasource", "in", "datasources", ":", "if", "datasource", "and", "not", "security_manager", ".", "datasource_access", "(", "datasource", ")", ":", "flash", "(", "__", "(", "security_manager", ".", "get_datasource_access_error_msg", "(", "datasource", ")", ")", ",", "'danger'", ")", "return", "redirect", "(", "'superset/request_access/?'", "f'dashboard_id={dash.id}&'", ")", "dash_edit_perm", "=", "check_ownership", "(", "dash", ",", "raise_if_false", "=", "False", ")", "and", "security_manager", ".", "can_access", "(", "'can_save_dash'", ",", "'Superset'", ")", "dash_save_perm", "=", "security_manager", ".", "can_access", "(", "'can_save_dash'", ",", "'Superset'", ")", "superset_can_explore", "=", "security_manager", ".", "can_access", "(", "'can_explore'", ",", "'Superset'", ")", "superset_can_csv", "=", "security_manager", ".", "can_access", "(", "'can_csv'", ",", "'Superset'", ")", "slice_can_edit", "=", "security_manager", ".", "can_access", "(", "'can_edit'", ",", "'SliceModelView'", ")", "standalone_mode", "=", "request", ".", "args", ".", "get", "(", "'standalone'", ")", "==", "'true'", "edit_mode", "=", "request", ".", "args", ".", "get", "(", "'edit'", ")", "==", "'true'", "# Hack to log the dashboard_id properly, even when getting a slug", "@", "log_this", "def", "dashboard", "(", "*", "*", "kwargs", ")", ":", "# noqa", "pass", "dashboard", "(", "dashboard_id", "=", "dash", ".", "id", ",", "dashboard_version", "=", "'v2'", ",", "dash_edit_perm", "=", "dash_edit_perm", ",", "edit_mode", "=", "edit_mode", ")", "dashboard_data", "=", "dash", ".", "data", "dashboard_data", ".", "update", "(", "{", "'standalone_mode'", ":", "standalone_mode", ",", "'dash_save_perm'", ":", "dash_save_perm", ",", "'dash_edit_perm'", ":", "dash_edit_perm", ",", "'superset_can_explore'", ":", "superset_can_explore", ",", "'superset_can_csv'", ":", "superset_can_csv", ",", "'slice_can_edit'", ":", "slice_can_edit", ",", "}", ")", "bootstrap_data", "=", "{", "'user_id'", ":", "g", ".", "user", ".", "get_id", "(", ")", ",", "'dashboard_data'", ":", "dashboard_data", ",", "'datasources'", ":", "{", "ds", ".", "uid", ":", "ds", ".", "data", "for", "ds", "in", "datasources", "}", ",", "'common'", ":", "self", ".", "common_bootsrap_payload", "(", ")", ",", "'editMode'", ":", "edit_mode", ",", "}", "if", "request", ".", "args", ".", "get", "(", "'json'", ")", "==", "'true'", ":", "return", "json_success", "(", "json", ".", "dumps", "(", "bootstrap_data", ")", ")", "return", "self", ".", "render_template", "(", "'superset/dashboard.html'", ",", "entry", "=", "'dashboard'", ",", "standalone_mode", "=", "standalone_mode", ",", "title", "=", "dash", ".", "dashboard_title", ",", "bootstrap_data", "=", "json", ".", "dumps", 
"(", "bootstrap_data", ")", ",", ")" ]
ca2996c78f679260eb79c6008e276733df5fb653
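Besides rendering superset/dashboard.html, the dashboard view short-circuits when json=true is passed and returns the bootstrap payload (user_id, dashboard_data, datasources, common, editMode) as JSON, and the isdigit() branch means the path segment can be either a numeric id or a slug. A minimal sketch of fetching that payload, assuming an authenticated requests.Session and the /superset/dashboard/<dashboard_id>/ route (host, slug, and route are assumptions, not part of the record):

import requests

session = requests.Session()  # authenticate against the instance first
base = 'http://localhost:8088'  # hypothetical host

# 'json=true' returns the bootstrap payload instead of the rendered template.
payload = session.get(
    f'{base}/superset/dashboard/world_health/',  # numeric id works too
    params={'json': 'true'},
).json()

print(payload['dashboard_data']['dash_edit_perm'])
print(sorted(payload['datasources']))  # uids of the datasources backing the slices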