INSTRUCTION
stringlengths
1
8.43k
RESPONSE
stringlengths
75
104k
Check the old password is valid and set the new password.
def update(self, instance, validated_data): """Check the old password is valid and set the new password.""" if not instance.check_password(validated_data['old_password']): msg = _('Invalid password.') raise serializers.ValidationError({'old_password': msg}) instance.set_password(validated_data['new_password']) instance.save() return instance
Set the new password for the user.
def update(self, instance, validated_data): """Set the new password for the user.""" instance.set_password(validated_data['new_password']) instance.save() return instance
Validate if email exists and requires a verification.
def validate_email(self, email): """ Validate if email exists and requires a verification. `validate_email` will set a `user` attribute on the instance allowing the view to send an email confirmation. """ try: self.user = User.objects.get_by_natural_key(email) except User.DoesNotExist: msg = _('A user with this email address does not exist.') raise serializers.ValidationError(msg) if self.user.email_verified: msg = _('User email address is already verified.') raise serializers.ValidationError(msg) return email
Create auth token. Differs from DRF in that it always creates a new token rather than re-using an existing one.
def post(self, request): """Create auth token. Differs from DRF that it always creates new token but not re-using them.""" serializer = self.serializer_class(data=request.data) if serializer.is_valid(): user = serializer.validated_data['user'] signals.user_logged_in.send(type(self), user=user, request=request) token = self.model.objects.create(user=user) token.update_expiry() return response.Response({'token': token.key}) return response.Response( serializer.errors, status=status.HTTP_400_BAD_REQUEST)
Delete auth token when delete request was issued.
def delete(self, request, *args, **kwargs): """Delete auth token when `delete` request was issued.""" # Logic repeated from DRF because one cannot easily reuse it auth = get_authorization_header(request).split() if not auth or auth[0].lower() != b'token': return response.Response(status=status.HTTP_400_BAD_REQUEST) if len(auth) == 1: msg = 'Invalid token header. No credentials provided.' return response.Response(msg, status=status.HTTP_400_BAD_REQUEST) elif len(auth) > 2: msg = 'Invalid token header. Token string should not contain spaces.' return response.Response(msg, status=status.HTTP_400_BAD_REQUEST) try: token = self.model.objects.get(key=auth[1]) except self.model.DoesNotExist: pass else: token.delete() signals.user_logged_out.send( type(self), user=token.user, request=request, ) return response.Response(status=status.HTTP_204_NO_CONTENT)
Disallow users other than the user whose email is being reset.
def initial(self, request, *args, **kwargs): """Disallow users other than the user whose email is being reset.""" email = request.data.get('email') if request.user.is_authenticated() and email != request.user.email: raise PermissionDenied() return super(ResendConfirmationEmail, self).initial( request, *args, **kwargs )
Validate email and send a request to confirm it.
def post(self, request, *args, **kwargs): """Validate `email` and send a request to confirm it.""" serializer = self.serializer_class(data=request.data) if not serializer.is_valid(): return response.Response( serializer.errors, status=status.HTTP_400_BAD_REQUEST, ) serializer.user.send_validation_email() msg = _('Email confirmation sent.') return response.Response(msg, status=status.HTTP_204_NO_CONTENT)
Since User.email is unique, this check is redundant, but it sets a nicer error message than the ORM. See #13147.
def clean_email(self): """ Since User.email is unique, this check is redundant, but it sets a nicer error message than the ORM. See #13147. """ email = self.cleaned_data['email'] try: User._default_manager.get(email__iexact=email) except User.DoesNotExist: return email.lower() raise forms.ValidationError(self.error_messages['duplicate_email'])
Update token's expiration datetime on every auth action.
def update_expiry(self, commit=True): """Update token's expiration datetime on every auth action.""" self.expires = update_expiry(self.created) if commit: self.save()
Email context to reset a user password.
def password_reset_email_context(notification): """Email context to reset a user password.""" return { 'protocol': 'https', 'uid': notification.user.generate_uid(), 'token': notification.user.generate_token(), 'site': notification.site, }
Send a notification by email.
def email_handler(notification, email_context): """Send a notification by email.""" incuna_mail.send( to=notification.user.email, subject=notification.email_subject, template_name=notification.text_email_template, html_template_name=notification.html_email_template, context=email_context(notification), headers=getattr(notification, 'headers', {}), )
Password reset email handler.
def password_reset_email_handler(notification): """Password reset email handler.""" base_subject = _('{domain} password reset').format(domain=notification.site.domain) subject = getattr(settings, 'DUM_PASSWORD_RESET_SUBJECT', base_subject) notification.email_subject = subject email_handler(notification, password_reset_email_context)
Validation email handler.
def validation_email_handler(notification): """Validation email handler.""" base_subject = _('{domain} account validate').format(domain=notification.site.domain) subject = getattr(settings, 'DUM_VALIDATE_EMAIL_SUBJECT', base_subject) notification.email_subject = subject email_handler(notification, validation_email_context)
Authenticate a user from a token form field
def authenticate(self, request): """ Authenticate a user from a token form field Errors thrown here will be swallowed by django-rest-framework, and it expects us to return None if authentication fails. """ try: key = request.data['token'] except KeyError: return try: token = AuthToken.objects.get(key=key) except AuthToken.DoesNotExist: return return (token.user, token)
Custom authentication to check if auth token has expired.
def authenticate_credentials(self, key): """Custom authentication to check if auth token has expired.""" user, token = super(TokenAuthentication, self).authenticate_credentials(key) if token.expires < timezone.now(): msg = _('Token has expired.') raise exceptions.AuthenticationFailed(msg) # Update the token's expiration date token.update_expiry() return (user, token)
Displays bokeh output inside a notebook.
def notebook_show(obj, doc, comm): """ Displays bokeh output inside a notebook. """ target = obj.ref['id'] load_mime = 'application/vnd.holoviews_load.v0+json' exec_mime = 'application/vnd.holoviews_exec.v0+json' # Publish plot HTML bokeh_script, bokeh_div, _ = bokeh.embed.notebook.notebook_content(obj, comm.id) publish_display_data(data={'text/html': encode_utf8(bokeh_div)}) # Publish comm manager JS = '\n'.join([PYVIZ_PROXY, JupyterCommManager.js_manager]) publish_display_data(data={load_mime: JS, 'application/javascript': JS}) # Publish bokeh plot JS msg_handler = bokeh_msg_handler.format(plot_id=target) comm_js = comm.js_template.format(plot_id=target, comm_id=comm.id, msg_handler=msg_handler) bokeh_js = '\n'.join([comm_js, bokeh_script]) # Note: extension should be altered so text/html is not required publish_display_data(data={exec_mime: '', 'text/html': '', 'application/javascript': bokeh_js}, metadata={exec_mime: {'id': target}})
Temporary fix to patch HoloViews plot comms
def process_hv_plots(widgets, plots): """ Temporary fix to patch HoloViews plot comms """ bokeh_plots = [] for plot in plots: if hasattr(plot, '_update_callbacks'): for subplot in plot.traverse(lambda x: x): subplot.comm = widgets.server_comm for cb in subplot.callbacks: for c in cb.callbacks: c.code = c.code.replace(plot.id, widgets.plot_id) plot = plot.state bokeh_plots.append(plot) return bokeh_plots
Returns a CustomJS callback that can be attached to send the widget state across the notebook comms.
def _get_customjs(self, change, p_name): """ Returns a CustomJS callback that can be attached to send the widget state across the notebook comms. """ data_template = "data = {{p_name: '{p_name}', value: cb_obj['{change}']}};" fetch_data = data_template.format(change=change, p_name=p_name) self_callback = JS_CALLBACK.format(comm_id=self.comm.id, timeout=self.timeout, debounce=self.debounce, plot_id=self.plot_id) js_callback = CustomJS(code='\n'.join([fetch_data, self_callback])) return js_callback
Get widget for param_name
def widget(self, param_name): """Get widget for param_name""" if param_name not in self._widgets: self._widgets[param_name] = self._make_widget(param_name) return self._widgets[param_name]
Return name, widget boxes for all parameters (i.e., a property sheet).
def widgets(self): """Return name,widget boxes for all parameters (i.e., a property sheet)""" params = self.parameterized.params().items() key_fn = lambda x: x[1].precedence if x[1].precedence is not None else self.p.default_precedence sorted_precedence = sorted(params, key=key_fn) outputs = [k for k, p in sorted_precedence if isinstance(p, _View)] filtered = [(k,p) for (k,p) in sorted_precedence if ((p.precedence is None) or (p.precedence >= self.p.display_threshold)) and k not in outputs] groups = itertools.groupby(filtered, key=key_fn) sorted_groups = [sorted(grp) for (k,grp) in groups] ordered_params = [el[0] for group in sorted_groups for el in group] # Format name specially ordered_params.pop(ordered_params.index('name')) widgets = [Div(text='<b>{0}</b>'.format(self.parameterized.name))] def format_name(pname): p = self.parameterized.params(pname) # omit name for buttons, which already show the name on the button name = "" if issubclass(type(p),param.Action) else pname return Div(text=name) if self.p.show_labels: widgets += [self.widget(pname) for pname in ordered_params] else: widgets += [self.widget(pname) for pname in ordered_params] if self.p.button and not (self.p.callback is None and self.p.next_n==0): display_button = Button(label=self.p.button_text) def click_cb(): # Execute and clear changes since last button press try: self.execute(self._changed) except Exception as e: self._changed.clear() raise e self._changed.clear() display_button.on_click(click_cb) widgets.append(display_button) outputs = [self.widget(pname) for pname in outputs] return widgets, outputs
The default Renderer function which handles HoloViews objects.
def render_function(obj, view): """ The default Renderer function which handles HoloViews objects. """ try: import holoviews as hv except: hv = None if hv and isinstance(obj, hv.core.Dimensioned): renderer = hv.renderer('bokeh') if not view._notebook: renderer = renderer.instance(mode='server') plot = renderer.get_plot(obj, doc=view._document) if view._notebook: plot.comm = view._comm plot.document = view._document return plot.state return obj
Forces a parameter value to be text
def TextWidget(*args, **kw): """Forces a parameter value to be text""" kw['value'] = str(kw['value']) kw.pop('options', None) return TextInput(*args,**kw)
Given a list of objects returns a dictionary mapping from string name for the object to the object itself.
def named_objs(objlist): """ Given a list of objects, returns a dictionary mapping from string name for the object to the object itself. """ objs = [] for k, obj in objlist: if hasattr(k, '__name__'): k = k.__name__ else: k = as_unicode(k) objs.append((k, obj)) return objs
Returns the instance owning the supplied instancemethod or the class owning the supplied classmethod.
def get_method_owner(meth): """ Returns the instance owning the supplied instancemethod or the class owning the supplied classmethod. """ if inspect.ismethod(meth): if sys.version_info < (3,0): return meth.im_class if meth.im_self is None else meth.im_self else: return meth.__self__
Take the http_auth value and split it into the attributes that carry the http auth username and password
def _assign_auth_values(self, http_auth): """Take the http_auth value and split it into the attributes that carry the http auth username and password :param str|tuple http_auth: The http auth value """ if not http_auth: pass elif isinstance(http_auth, (tuple, list)): self._auth_user, self._auth_password = http_auth elif isinstance(http_auth, str): self._auth_user, self._auth_password = http_auth.split(':') else: raise ValueError('HTTP Auth Credentials should be str or ' 'tuple, not %s' % type(http_auth))
Returns True if the cluster is up False otherwise.
def ping(self, params=None): """ Returns True if the cluster is up, False otherwise. """ try: self.transport.perform_request('HEAD', '/', params=params) except TransportError: raise gen.Return(False) raise gen.Return(True)
Get the basic info from the current cluster.
def info(self, params=None): """Get the basic info from the current cluster. :rtype: dict """ _, data = yield self.transport.perform_request('GET', '/', params=params) raise gen.Return(data)
Coroutine. Queries cluster Health API.
def health(self, params=None): """Coroutine. Queries cluster Health API. Returns a 2-tuple, where first element is request status, and second element is a dictionary with response data. :param params: dictionary of query parameters, will be handed over to the underlying :class:`~torando_elasticsearch.AsyncHTTPConnection` class for serialization """ status, data = yield self.transport.perform_request( "GET", "/_cluster/health", params=params) raise gen.Return((status, data))
Adds a typed JSON document in a specific index, making it searchable. Behind the scenes this method calls index(..., op_type='create'). See <http://elasticsearch.org/guide/reference/api/index_/>.
def create(self, index, doc_type, body, id=None, params=None): """ Adds a typed JSON document in a specific index, making it searchable. Behind the scenes this method calls index(..., op_type='create') `<http://elasticsearch.org/guide/reference/api/index_/>`_ :arg index: The name of the index :arg doc_type: The type of the document :arg id: Document ID :arg body: The document :arg consistency: Explicit write consistency setting for the operation :arg id: Specific document ID (when the POST method is used) :arg parent: ID of the parent document :arg percolate: Percolator queries to execute while indexing the doc :arg refresh: Refresh the index after performing the operation :arg replication: Specific replication type (default: sync) :arg routing: Specific routing value :arg timeout: Explicit operation timeout :arg timestamp: Explicit timestamp for the document :arg ttl: Expiration time for the document :arg version: Explicit version number for concurrency control :arg version_type: Specific version type """ result = yield self.index(index, doc_type, body, id=id, params=params, op_type='create') raise gen.Return(result)
Adds or updates a typed JSON document in a specific index making it searchable. <http:// elasticsearch. org/ guide/ reference/ api/ index_/ > _
def index(self, index, doc_type, body, id=None, params=None): """ Adds or updates a typed JSON document in a specific index, making it searchable. `<http://elasticsearch.org/guide/reference/api/index_/>`_ :arg index: The name of the index :arg doc_type: The type of the document :arg body: The document :arg id: Document ID :arg consistency: Explicit write consistency setting for the operation :arg op_type: Explicit operation type (default: index) :arg parent: ID of the parent document :arg percolate: Percolator queries to execute while indexing the doc :arg refresh: Refresh the index after performing the operation :arg replication: Specific replication type (default: sync) :arg routing: Specific routing value :arg timeout: Explicit operation timeout :arg timestamp: Explicit timestamp for the document :arg ttl: Expiration time for the document :arg version: Explicit version number for concurrency control :arg version_type: Specific version type """ _, data = yield self.transport.perform_request( 'PUT' if id else 'POST', _make_path(index, doc_type, id), params=params, body=body) raise gen.Return(data)
Returns a boolean indicating whether or not given document exists in Elasticsearch. <http:// elasticsearch. org/ guide/ reference/ api/ get/ > _
def exists(self, index, id, doc_type='_all', params=None): """ Returns a boolean indicating whether or not given document exists in Elasticsearch. `<http://elasticsearch.org/guide/reference/api/get/>`_ :arg index: The name of the index :arg id: The document ID :arg doc_type: The type of the document (uses `_all` by default to fetch the first document matching the ID across all types) :arg parent: The ID of the parent document :arg preference: Specify the node or shard the operation should be performed on (default: random) :arg realtime: Specify whether to perform the operation in realtime or search mode :arg refresh: Refresh the shard containing the document before performing the operation :arg routing: Specific routing value """ try: self.transport.perform_request( 'HEAD', _make_path(index, doc_type, id), params=params) except exceptions.NotFoundError: return gen.Return(False) raise gen.Return(True)
Retrieve a specified alias. See <http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>. :arg index: a comma-separated list of index names to filter aliases. :arg name: a comma-separated list of alias names to return. :arg allow_no_indices: whether to ignore if a wildcard indices expression resolves into no concrete indices (this includes the `_all` string or when no indices have been specified). :arg expand_wildcards: whether to expand wildcard expressions to concrete indices that are open, closed, or both; default 'all', valid choices are 'open', 'closed', 'none', 'all'. :arg ignore_unavailable: whether specified concrete indices should be ignored when unavailable (missing or closed). :arg local: return local information; do not retrieve the state from the master node (default: false).
def get_alias(self, index=None, name=None, params=None): """ Retrieve a specified alias. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-aliases.html>`_ :arg index: A comma-separated list of index names to filter aliases :arg name: A comma-separated list of alias names to return :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default 'all', valid choices are: 'open', 'closed', 'none', 'all' :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg local: Return local information, do not retrieve the state from master node (default: false) """ _, result = yield self.transport.perform_request( 'GET', _make_path(index, '_alias', name), params=params) raise gen.Return(result)
Execute a search query and get back search hits that match the query. <http:// www. elasticsearch. org/ guide/ reference/ api/ search/ > _
def search(self, index=None, doc_type=None, body=None, params=None):
    """
    Execute a search query and get back search hits that match the query.
    `<http://www.elasticsearch.org/guide/reference/api/search/>`_

    :arg index: A comma-separated list of index names to search; use
        `_all` or empty string to perform the operation on all indices
    :arg doc_type: A comma-separated list of document types to search;
        leave empty to perform the operation on all types
    :arg body: The search definition using the Query DSL
    :arg params: additional query args (e.g. _source, analyzer,
        default_operator, explain, fields, from_, preference, q, routing,
        scroll, search_type, size, sort, stats, suggest_*, timeout,
        version)
    """
    # `from` is a reserved word so it cannot be used; `from_` is accepted
    # instead.  BUG FIX: guard against params being None (the default)
    # before the membership test, which previously raised TypeError.
    if params and 'from_' in params:
        params['from'] = params.pop('from_')
    if doc_type and not index:
        index = '_all'
    _, data = yield self.transport.perform_request(
        'GET', _make_path(index, doc_type, '_search'),
        params=params, body=body)
    raise gen.Return(data)
Scroll a search request created by specifying the scroll parameter. <http:// www. elasticsearch. org/ guide/ reference/ api/ search/ scroll/ > _
def scroll(self, scroll_id, scroll, params=None): """ Scroll a search request created by specifying the scroll parameter. `<http://www.elasticsearch.org/guide/reference/api/search/scroll/>`_ :arg scroll_id: The scroll ID :arg scroll: Specify how long a consistent view of the index should be maintained for scrolled search """ body = { "scroll": scroll, "scroll_id": scroll_id } if params: if "scroll" in params.keys(): params.pop("scroll") if "scroll_id" in params.keys(): params.pop("scroll_id") _, data = yield self.transport.perform_request('POST', _make_path('_search', 'scroll'), body=body, params=params) raise gen.Return(data)
Clear the scroll request created by specifying the scroll parameter to search. <http:// www. elasticsearch. org/ guide/ reference/ api/ search/ scroll/ > _
def clear_scroll(self, scroll_id, params=None): """ Clear the scroll request created by specifying the scroll parameter to search. `<http://www.elasticsearch.org/guide/reference/api/search/scroll/>`_ :arg scroll_id: The scroll ID or a list of scroll IDs """ if not isinstance(scroll_id, list): scroll_id = [scroll_id] body = { "scroll_id": scroll_id } if params and "scroll_id" in params.keys(): params.pop("scroll_id") _, data = yield self.transport.perform_request('DELETE', _make_path('_search', 'scroll'), body=body, params=params) raise gen.Return(data)
Retrieve the mapping definition of an index or index/type. See <http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-mapping.html>. :arg index: a comma-separated list of index names. :arg doc_type: a comma-separated list of document types. :arg allow_no_indices: whether to ignore if a wildcard indices expression resolves into no concrete indices (this includes the `_all` string or when no indices have been specified). :arg expand_wildcards: whether to expand wildcard expressions to concrete indices that are open, closed, or both; default 'open', valid choices are 'open', 'closed', 'none', 'all'. :arg ignore_unavailable: whether specified concrete indices should be ignored when unavailable (missing or closed). :arg local: return local information; do not retrieve the state from the master node (default: false).
def get_mapping(self, index=None, doc_type=None, params=None): """ Retrieve mapping definition of index or index/type. `<http://www.elastic.co/guide/en/elasticsearch/reference/current/indices-get-mapping.html>`_ :arg index: A comma-separated list of index names :arg doc_type: A comma-separated list of document types :arg allow_no_indices: Whether to ignore if a wildcard indices expression resolves into no concrete indices. (This includes `_all` string or when no indices have been specified) :arg expand_wildcards: Whether to expand wildcard expression to concrete indices that are open, closed or both., default 'open', valid choices are: 'open', 'closed', 'none', 'all' :arg ignore_unavailable: Whether specified concrete indices should be ignored when unavailable (missing or closed) :arg local: Return local information, do not retrieve the state from master node (default: false) """ _, data = yield self.transport.perform_request('GET', _make_path(index, '_mapping', doc_type), params=params) raise gen.Return(data)
The suggest feature suggests similar looking terms based on a provided text by using a suggester. <http:// elasticsearch. org/ guide/ reference/ api/ search/ suggest/ > _
def suggest(self, index=None, body=None, params=None): """ The suggest feature suggests similar looking terms based on a provided text by using a suggester. `<http://elasticsearch.org/guide/reference/api/search/suggest/>`_ :arg index: A comma-separated list of index names to restrict the operation; use `_all` or empty string to perform the operation on all indices :arg body: The request definition :arg ignore_indices: When performed on multiple indices, allows to ignore `missing` ones (default: none) :arg preference: Specify the node or shard the operation should be performed on (default: random) :arg routing: Specific routing value :arg source: The URL-encoded request definition (instead of using request body) """ _, data = yield self.transport.perform_request('POST', _make_path(index, '_suggest'), params=params, body=body) raise gen.Return(data)
Converts bytes to a human readable format
def bytes_to_readable(num): """Converts bytes to a human readable format""" if num < 512: return "0 Kb" elif num < 1024: return "1 Kb" for unit in ['', 'Kb', 'Mb', 'Gb', 'Tb', 'Pb', 'Eb', 'Zb']: if abs(num) < 1024.0: return "%3.1f%s" % (num, unit) num /= 1024.0 return "%.1f%s" % (num, 'Yb')
Total CPU load for Synology DSM
def cpu_total_load(self): """Total CPU load for Synology DSM""" system_load = self.cpu_system_load user_load = self.cpu_user_load other_load = self.cpu_other_load if system_load is not None and \ user_load is not None and \ other_load is not None: return system_load + user_load + other_load
Total Memory Size of Synology DSM
def memory_size(self, human_readable=True): """Total Memory Size of Synology DSM""" if self._data is not None: # Memory is actually returned in KB's so multiply before converting return_data = int(self._data["memory"]["memory_size"]) * 1024 if human_readable: return SynoFormatHelper.bytes_to_readable( return_data) else: return return_data
Function to get a specific network (eth0, total, etc.).
def _get_network(self, network_id): """Function to get specific network (eth0, total, etc)""" if self._data is not None: for network in self._data["network"]: if network["device"] == network_id: return network
Total upload speed being used
def network_up(self, human_readable=True): """Total upload speed being used""" network = self._get_network("total") if network is not None: return_data = int(network["tx"]) if human_readable: return SynoFormatHelper.bytes_to_readable( return_data) else: return return_data
Returns all available volumes
def volumes(self): """Returns all available volumes""" if self._data is not None: volumes = [] for volume in self._data["volumes"]: volumes.append(volume["id"]) return volumes
Returns a specific volume
def _get_volume(self, volume_id): """Returns a specific volume""" if self._data is not None: for volume in self._data["volumes"]: if volume["id"] == volume_id: return volume
Total size of volume
def volume_size_total(self, volume, human_readable=True): """Total size of volume""" volume = self._get_volume(volume) if volume is not None: return_data = int(volume["size"]["total"]) if human_readable: return SynoFormatHelper.bytes_to_readable( return_data) else: return return_data
Total used size in percentage for volume
def volume_percentage_used(self, volume): """Total used size in percentage for volume""" volume = self._get_volume(volume) if volume is not None: total = int(volume["size"]["total"]) used = int(volume["size"]["used"]) if used is not None and used > 0 and \ total is not None and total > 0: return round((float(used) / float(total)) * 100.0, 1)
Average temperature of all disks making up the volume
def volume_disk_temp_avg(self, volume): """Average temperature of all disks making up the volume""" volume = self._get_volume(volume) if volume is not None: vol_disks = volume["disks"] if vol_disks is not None: total_temp = 0 total_disks = 0 for vol_disk in vol_disks: disk_temp = self.disk_temp(vol_disk) if disk_temp is not None: total_disks += 1 total_temp += disk_temp if total_temp > 0 and total_disks > 0: return round(total_temp / total_disks, 0)
Maximum temperature of all disks making up the volume
def volume_disk_temp_max(self, volume): """Maximum temperature of all disks making up the volume""" volume = self._get_volume(volume) if volume is not None: vol_disks = volume["disks"] if vol_disks is not None: max_temp = 0 for vol_disk in vol_disks: disk_temp = self.disk_temp(vol_disk) if disk_temp is not None and disk_temp > max_temp: max_temp = disk_temp return max_temp
Returns all available (internal) disks.
def disks(self): """Returns all available (internal) disks""" if self._data is not None: disks = [] for disk in self._data["disks"]: disks.append(disk["id"]) return disks
Returns a specific disk
def _get_disk(self, disk_id): """Returns a specific disk""" if self._data is not None: for disk in self._data["disks"]: if disk["id"] == disk_id: return disk
Build and execute login request
def _login(self): """Build and execute login request""" api_path = "%s/auth.cgi?api=SYNO.API.Auth&version=2" % ( self.base_url, ) login_path = "method=login&%s" % (self._encode_credentials()) url = "%s&%s&session=Core&format=cookie" % ( api_path, login_path) result = self._execute_get_url(url, False) # Parse Result if valid if result is not None: self.access_token = result["data"]["sid"] self._debuglog("Authentication Succesfull, token: " + str(self.access_token)) return True else: self._debuglog("Authentication Failed") return False
Function to handle sessions for a GET request
def _get_url(self, url, retry_on_error=True):
    """Function to handle sessions for a GET request.

    Recreates the session (and re-authenticates) when no valid session
    exists, then performs the request.  On failure the request is retried
    once with a fresh session.
    """
    # (Re)create the session when there is no token yet, no session
    # object, or the previous request flagged a session error.
    if self.access_token is None or \
       self._session is None or \
       self._session_error:
        # Clear Access Token en reset session error
        self.access_token = None
        self._session_error = False
        # First Reset the session
        if self._session is not None:
            self._session = None
        self._debuglog("Creating New Session")
        self._session = requests.Session()
        # disable SSL certificate verification (NAS often self-signed)
        if self._use_https:
            self._session.verify = False
        # We Created a new Session so login
        if self._login() is False:
            self._session_error = True
            self._debuglog("Login Failed, unable to process request")
            return
    # Now request the data
    response = self._execute_get_url(url)
    if (self._session_error or response is None) and retry_on_error:
        self._debuglog("Error occured, retrying...")
        # BUGFIX: the retried response was previously discarded and the
        # failed (None) response returned instead.
        return self._get_url(url, False)
    return response
Function to execute and handle a GET request
def _execute_get_url(self, request_url, append_sid=True):
    """Function to execute and handle a GET request.

    Returns the parsed JSON payload on success, otherwise None.  Session
    related API error codes set ``self._session_error`` so the caller can
    re-authenticate.
    """
    # Prepare Request
    self._debuglog("Requesting URL: '" + request_url + "'")
    if append_sid:
        self._debuglog("Appending access_token (SID: " +
                       self.access_token + ") to url")
        request_url = "%s&_sid=%s" % (request_url, self.access_token)
    # Execute Request
    try:
        resp = self._session.get(request_url)
        self._debuglog("Request executed: " + str(resp.status_code))
        if resp.status_code == 200:
            # We got a response
            json_data = json.loads(resp.text)
            if json_data["success"]:
                self._debuglog("Succesfull returning data")
                self._debuglog(str(json_data))
                return json_data
            if json_data["error"]["code"] in {105, 106, 107, 119}:
                # Session-related error codes: flag for re-login
                self._debuglog("Session error: " +
                               str(json_data["error"]["code"]))
                self._session_error = True
            else:
                self._debuglog("Failed: " + resp.text)
        else:
            # We got a 404 or 401
            return None
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; any request/parse failure yields None.
        return None
Updates the various instanced modules
def update(self):
    """Updates the various instanced modules"""
    # (module, API name, API method) for every instantiated helper
    targets = (
        (self._utilisation, "SYNO.Core.System.Utilization", "get"),
        (self._storage, "SYNO.Storage.CGI.Storage", "load_info"),
    )
    for module, api, method in targets:
        if module is not None:
            url = "%s/entry.cgi?api=%s&version=1&method=%s&_sid=%s" % (
                self.base_url, api, method, self.access_token)
            module.update(self._get_url(url))
Getter for various Utilisation variables
def utilisation(self):
    """Getter for various Utilisation variables"""
    # Lazily create the helper on first access
    if self._utilisation is None:
        url = "%s/entry.cgi?api=%s&version=1&method=get" % (
            self.base_url, "SYNO.Core.System.Utilization")
        self._utilisation = SynoUtilization(self._get_url(url))
    return self._utilisation
Getter for various Storage variables
def storage(self):
    """Getter for various Storage variables"""
    # Lazily create the helper on first access
    if self._storage is None:
        url = "%s/entry.cgi?api=%s&version=1&method=load_info" % (
            self.base_url, "SYNO.Storage.CGI.Storage")
        self._storage = SynoStorage(self._get_url(url))
    return self._storage
Creates the context for a specific request.
def for_request(request, body=None):
    """Creates the context for a specific request."""
    tenant, jwt_data = Tenant.objects.for_request(request, body)
    webhook_sender_id = jwt_data.get('sub')
    # Prefer sender info from the webhook payload, falling back to the
    # JWT subject.
    sender_data = None
    if body and 'item' in body:
        item = body['item']
        if 'sender' in item:
            sender_data = item['sender']
        elif 'message' in item and 'from' in item['message']:
            sender_data = item['message']['from']
    if sender_data is None:
        if webhook_sender_id is None:
            raise BadTenantError('Cannot identify sender in tenant')
        sender_data = {'id': webhook_sender_id}
    sender = HipchatUser(
        id=sender_data.get('id'),
        name=sender_data.get('name'),
        mention_name=sender_data.get('mention_name'),
    )
    return Context(
        tenant=tenant,
        sender=sender,
        signed_request=request.GET.get('signed_request'),
        context=jwt_data.get('context') or {},
    )
The cached token of the current tenant.
def tenant_token(self):
    """The cached token of the current tenant."""
    token = getattr(self, '_tenant_token', None)
    if token is None:
        # Fetch once and memoize on the instance
        token = self.tenant.get_token()
        self._tenant_token = token
    return token
Helper function for building an attribute dictionary.
def build_attrs(self, extra_attrs=None, **kwargs):
    """Helper function for building an attribute dictionary.

    Delegates to the wrapped widget.  BUGFIX: the original forwarded a
    hard-coded ``extra_attrs=None``, silently dropping any attributes the
    caller passed in.
    """
    self.attrs = self.widget.build_attrs(extra_attrs=extra_attrs, **kwargs)
    return self.attrs
Class decorator that makes sure the passed apps are present in INSTALLED_APPS.
def with_apps(*apps):
    """
    Class decorator that makes sure the passed apps are present in
    INSTALLED_APPS.

    Preserves the existing INSTALLED_APPS ordering and appends only the
    missing apps (the original used a ``set``, which destroyed the order;
    app order matters to Django for template/static lookups).
    """
    installed = list(settings.INSTALLED_APPS)
    installed.extend(app for app in apps if app not in installed)
    return override_settings(INSTALLED_APPS=installed)
Class decorator that makes sure the passed apps are not present in INSTALLED_APPS.
def without_apps(*apps):
    """
    Class decorator that makes sure the passed apps are not present in
    INSTALLED_APPS.
    """
    removed = set(apps)
    remaining = [app for app in settings.INSTALLED_APPS
                 if app not in removed]
    return override_settings(INSTALLED_APPS=remaining)
Return a dictionary of all global_settings values.
def get_global_settings(self):
    """
    Return a dictionary of all global_settings values.
    """
    # Settings are the upper-case attributes by Django convention
    return {name: getattr(global_settings, name)
            for name in dir(global_settings)
            if name.isupper()}
Handle the retrieval of the code
def do_GET(self):
    """
    Handle the retrieval of the code

    Serves two paths: the OAuth2 redirect path (extracts the ``code``
    query parameter and stores it on the server object for the waiting
    thread) and a link path (serves a small HTML page pointing at the
    authorize URL).  Anything else gets a 404.
    """
    parsed_url = urlparse(self.path)
    if parsed_url[2] == "/" + SERVER_REDIRECT_PATH:  # 2 = Path
        parsed_query = parse_qs(parsed_url[4])  # 4 = Query
        if "code" not in parsed_query:
            # Redirected back without a "code" parameter: tell the user
            # and abort this request.
            self.send_response(200)
            self.send_header("Content-Type", "text/plain")
            self.end_headers()
            self.wfile.write("No code found, try again!".encode("utf-8"))
            return
        # Store the authorization code where _wait_for_response() polls
        self.server.response_code = parsed_query["code"][0]
        self.send_response(200)
        self.send_header("Content-Type", "text/plain")
        self.end_headers()
        self.wfile.write(
            "Thank you for using OAuth2Util. The authorization was successful, "
            "you can now close this window.".encode("utf-8"))
    elif parsed_url[2] == "/" + SERVER_LINK_PATH:  # 2 = Path
        # Serve a page linking to the authorize URL (server mode)
        self.send_response(200)
        self.send_header("Content-Type", "text/html")
        self.end_headers()
        self.wfile.write("<html><body>Hey there!<br/>Click <a href=\"{0}\">here</a> to claim your prize.</body></html>"
                         .format(self.server.authorize_url).encode("utf-8"))
    else:
        # Unknown path
        self.send_response(404)
        self.send_header("Content-Type", "text/plain")
        self.end_headers()
        self.wfile.write("404 not found".encode("utf-8"))
Set the app info ( id & secret ) read from the config file on the Reddit object
def _set_app_info(self):
    """
    Set the app info (id & secret) read from the config file on the
    Reddit object
    """
    redirect_url = "http://{0}:{1}/{2}".format(
        SERVER_URL, SERVER_PORT, SERVER_REDIRECT_PATH)
    app_id = self._get_value(CONFIGKEY_APP_KEY)
    app_secret = self._get_value(CONFIGKEY_APP_SECRET)
    self.r.set_oauth_app_info(app_id, app_secret, redirect_url)
Helper method to get a value from the config
def _get_value(self, key, func=None, split_val=None, as_boolean=False,
               exception_default=None):
    """
    Helper method to get a value from the config.

    ``key`` is a (section, option) pair.  Optionally splits the value on
    ``split_val`` and/or passes it through ``func``.  Missing keys return
    ``exception_default`` when given, otherwise raise KeyError.
    """
    section, option = key[0], key[1]
    try:
        if as_boolean:
            return self.config.getboolean(section, option)
        value = self.config.get(section, option)
        if split_val is not None:
            value = value.split(split_val)
        return func(value) if func is not None else value
    except (KeyError, configparser.NoSectionError,
            configparser.NoOptionError) as e:
        if exception_default is not None:
            return exception_default
        raise KeyError(e)
Change the value of the given key in the given file to the given value
def _change_value(self, key, value):
    """
    Change the value of the given key in the given file to the given value

    ``key`` is a (section, option) pair; the section is created when
    missing and the whole config is rewritten to ``self.configfile``.
    """
    section, option = key[0], key[1]
    if not self.config.has_section(section):
        self.config.add_section(section)
    self.config.set(section, option, str(value))
    with open(self.configfile, "w") as f:
        self.config.write(f)
Migrates the old config file format to the new one
def _migrate_config(self, oldname=DEFAULT_CONFIG, newname=DEFAULT_CONFIG):
    """
    Migrates the old config file format to the new one
    """
    self._log("Your OAuth2Util config file is in an old format and needs "
              "to be changed. I tried as best as I could to migrate it.",
              logging.WARNING)
    # Same open order as before: the old file is opened for reading
    # before the new one is truncated for writing.
    with open(oldname, "r") as old, open(newname, "w") as new:
        new.write("[app]\n")
        new.write(old.read())
Start the webserver that will receive the code
def _start_webserver(self, authorize_url=None):
    """
    Start the webserver that will receive the code
    """
    self.server = HTTPServer((SERVER_URL, SERVER_PORT),
                             OAuth2UtilRequestHandler)
    self.server.response_code = None
    self.server.authorize_url = authorize_url
    # Daemon thread so the interpreter can exit without a join
    worker = Thread(target=self.server.serve_forever)
    worker.daemon = True
    worker.start()
Wait until the user accepted or rejected the request
def _wait_for_response(self):
    """
    Wait until the user accepted or rejected the request
    """
    # Poll until the request handler stored a code on the server object
    while not self.server.response_code:
        time.sleep(2)
    # Give the handler a moment to finish sending its response page
    # before tearing the server down
    time.sleep(5)
    self.server.shutdown()
Request new access information from reddit using the built in webserver
def _get_new_access_information(self):
    """
    Request new access information from reddit using the built in
    webserver

    Starts the local webserver, directs the user to reddit's authorize
    page (via browser or printed link depending on server mode), waits
    for the OAuth2 redirect, exchanges the code for tokens and persists
    them to the config file.

    :raises AttributeError: when the PRAW session has no OAuth app info.
    :raises praw.errors.OAuthException: when the token exchange fails.
    """
    if not self.r.has_oauth_app_info:
        self._log('Cannot obtain authorize url from PRAW. Please check your configuration.', logging.ERROR)
        raise AttributeError('Reddit Session invalid, please check your designated config file.')
    # Build the authorize URL from the configured scope and refreshability
    url = self.r.get_authorize_url('UsingOAuth2Util',
                                   self._get_value(CONFIGKEY_SCOPE, set, split_val=','),
                                   self._get_value(CONFIGKEY_REFRESHABLE, as_boolean=True))
    self._start_webserver(url)
    if not self._get_value(CONFIGKEY_SERVER_MODE, as_boolean=True):
        # Local mode: open the user's browser directly
        webbrowser.open(url)
    else:
        # Server mode: the user must visit the served link themselves
        print("Webserver is waiting for you :D. Please open {0}:{1}/{2} "
              "in your browser"
              .format(SERVER_URL, SERVER_PORT, SERVER_LINK_PATH))
    self._wait_for_response()
    try:
        access_information = self.r.get_access_information(
            self.server.response_code)
    except praw.errors.OAuthException:
        self._log("Can not authenticate, maybe the app infos (e.g. secret) are wrong.", logging.ERROR)
        raise
    # Persist tokens and expiry so other instances can reuse them
    self._change_value(CONFIGKEY_TOKEN, access_information["access_token"])
    self._change_value(CONFIGKEY_REFRESH_TOKEN, access_information["refresh_token"])
    self._change_value(CONFIGKEY_VALID_UNTIL, time.time() + TOKEN_VALID_DURATION)
Check whether the tokens are set and request new ones if not
def _check_token_present(self):
    """
    Check whether the tokens are set and request new ones if not
    """
    try:
        # Probe each required config key; _get_value raises KeyError
        # for any that is missing.
        for key in (CONFIGKEY_TOKEN, CONFIGKEY_REFRESH_TOKEN,
                    CONFIGKEY_REFRESHABLE):
            self._get_value(key)
    except KeyError:
        self._log("Request new Token (CTP)")
        self._get_new_access_information()
Set the token on the Reddit Object again
def set_access_credentials(self, _retry=0):
    """
    Set the token on the Reddit Object again

    :param _retry: internal recursion guard; gives up after 5 attempts.
    :raises ConnectionAbortedError: when reddit stays unreachable.
    """
    if _retry >= 5:
        raise ConnectionAbortedError('Reddit is not accessible right now, cannot refresh OAuth2 tokens.')
    self._check_token_present()
    try:
        self.r.set_access_credentials(self._get_value(CONFIGKEY_SCOPE, set, split_val=","),
                                      self._get_value(CONFIGKEY_TOKEN),
                                      self._get_value(CONFIGKEY_REFRESH_TOKEN))
    except (praw.errors.OAuthInvalidToken, praw.errors.HTTPException) as e:
        # todo check e status code
        # self._log('Retrying in 5s.')
        # time.sleep(5)
        # self.set_access_credentials(_retry=_retry + 1)
        # Invalid token: fall back to a full interactive re-authorization
        self._log("Request new Token (SAC)")
        self._get_new_access_information()
Check if the token is still valid and requests a new if it is not valid anymore
def refresh(self, force=False, _retry=0):
    """
    Check if the token is still valid and requests a new if it is not
    valid anymore

    Call this method before a call to praw
    if there might have passed more than one hour

    force: if true, a new token will be retrieved no matter what

    :param _retry: internal recursion guard; gives up after 5 attempts.
    :raises ConnectionAbortedError: when reddit stays unreachable.
    """
    if _retry >= 5:
        raise ConnectionAbortedError('Reddit is not accessible right now, cannot refresh OAuth2 tokens.')
    self._check_token_present()
    # We check whether another instance already refreshed the token
    if time.time() > self._get_value(CONFIGKEY_VALID_UNTIL, float, exception_default=0) - REFRESH_MARGIN:
        # Re-read the config file in case another process updated it
        self.config.read(self.configfile)
        if time.time() < self._get_value(CONFIGKEY_VALID_UNTIL, float, exception_default=0) - REFRESH_MARGIN:
            self._log("Found new token")
            self.set_access_credentials()
    # Still (or forcibly) expired: refresh through reddit
    if force or time.time() > self._get_value(CONFIGKEY_VALID_UNTIL, float, exception_default=0) - REFRESH_MARGIN:
        self._log("Refresh Token")
        try:
            new_token = self.r.refresh_access_information(self._get_value(CONFIGKEY_REFRESH_TOKEN))
            self._change_value(CONFIGKEY_TOKEN, new_token["access_token"])
            self._change_value(CONFIGKEY_VALID_UNTIL, time.time() + TOKEN_VALID_DURATION)
            self.set_access_credentials()
        except (praw.errors.OAuthInvalidToken, praw.errors.HTTPException) as e:
            # todo check e status code
            # self._log('Retrying in 5s.')
            # time.sleep(5)
            # self.refresh(_retry=_retry + 1)
            # Refresh failed: request a brand-new token interactively
            self._log("Request new Token (REF)")
            self._get_new_access_information()
Create DynamoDB table for run manifests
def create_manifest_table(dynamodb_client, table_name):
    """Create DynamoDB table for run manifests

    Arguments:
    dynamodb_client - boto3 DynamoDB client (not service)
    table_name - string representing existing table name
    """
    key_attribute = {
        'AttributeName': DYNAMODB_RUNID_ATTRIBUTE,
        'AttributeType': 'S'
    }
    key_schema = {
        'AttributeName': DYNAMODB_RUNID_ATTRIBUTE,
        'KeyType': 'HASH'
    }
    throughput = {
        'ReadCapacityUnits': 5,
        'WriteCapacityUnits': 5
    }
    try:
        dynamodb_client.create_table(
            AttributeDefinitions=[key_attribute],
            TableName=table_name,
            KeySchema=[key_schema],
            ProvisionedThroughput=throughput
        )
        dynamodb_client.get_waiter('table_exists').wait(TableName=table_name)
    except ClientError as e:
        # Table already exists -- that is fine; anything else propagates
        if e.response['Error']['Code'] != 'ResourceInUseException':
            raise e
Return a list of all run ids inside an S3 folder. It does not respect S3 pagination (`MaxKeys`) and returns **all** keys from the bucket, and won't list any prefixes with objects archived to AWS Glacier.
def list_runids(s3_client, full_path):
    """Return list of all run ids inside S3 folder.  It does not respect
    S3 pagination (`MaxKeys`) and returns **all** keys from bucket,
    and won't list any prefixes with objects archived to AWS Glacier

    Arguments:
    s3_client - boto3 S3 client (not service)
    full_path - full valid S3 path to events (such as enriched-archive)
                example: s3://acme-events-bucket/main-pipeline/enriched-archive
    """
    (bucket, prefix) = split_full_path(full_path)
    run_ids = []
    continuation_token = None
    while True:
        # clean_dict drops the ContinuationToken key on the first call
        options = clean_dict({
            'Bucket': bucket,
            'Prefix': prefix,
            'Delimiter': '/',
            'ContinuationToken': continuation_token
        })
        response = s3_client.list_objects_v2(**options)
        for common_prefix in response.get('CommonPrefixes', []):
            run_id = extract_run_id(common_prefix['Prefix'])
            if run_id is not None:
                run_ids.append(run_id)
        continuation_token = response.get('NextContinuationToken', None)
        if not response['IsTruncated']:
            break
    # Drop prefixes whose objects were archived to Glacier
    return [run_id for run_id in run_ids
            if not is_glacier(s3_client, bucket, run_id)]
Return pair of bucket without protocol and path
def split_full_path(path):
    """Return pair of bucket without protocol and path

    Arguments:
    path - valid S3 path, such as s3://somebucket/events

    >>> split_full_path('s3://mybucket/path-to-events')
    ('mybucket', 'path-to-events/')
    >>> split_full_path('s3://mybucket')
    ('mybucket', None)
    >>> split_full_path('s3n://snowplow-bucket/some/prefix/')
    ('snowplow-bucket', 'some/prefix/')
    """
    for scheme in ('s3://', 's3n://', 's3a://'):
        if path.startswith(scheme):
            path = path[len(scheme):]
            break
    else:
        raise ValueError("S3 path should start with s3://, s3n:// or "
                         "s3a:// prefix")
    # Everything before the first slash is the bucket
    bucket, _, remainder = path.partition('/')
    return bucket, normalize_prefix(remainder)
Check if prefix is archived in Glacier by checking storage class of first object inside that prefix
def is_glacier(s3_client, bucket, prefix):
    """Check if prefix is archived in Glacier, by checking storage class
    of first object inside that prefix

    Arguments:
    s3_client - boto3 S3 client (not service)
    bucket - valid extracted bucket (without protocol and prefix)
             example: sowplow-events-data
    prefix - valid S3 prefix (usually, run_id)
             example: snowplow-archive/enriched/archive/
    """
    # MaxKeys=3 so a leading _SUCCESS marker cannot mask the data objects
    response = s3_client.list_objects_v2(Bucket=bucket, Prefix=prefix,
                                         MaxKeys=3)
    # BUGFIX: an empty prefix has no 'Contents' key at all; the original
    # raised KeyError here.  Treat it as not archived.
    for key in response.get('Contents', []):
        if key.get('StorageClass', 'STANDARD') == 'GLACIER':
            return True
    return False
Extract date part from run id
def extract_run_id(key):
    """Extract date part from run id

    Arguments:
    key - full key name, such as shredded-archive/run=2012-12-11-01-31-33/
          (trailing slash is required)

    >>> extract_run_id('shredded-archive/run=2012-12-11-01-11-33/')
    'shredded-archive/run=2012-12-11-01-11-33/'
    >>> extract_run_id('shredded-archive/run=2012-12-11-01-11-33')
    >>> extract_run_id('shredded-archive/run=2012-13-11-01-11-33/')
    """
    filename = key.split('/')[-2]  # -1 element is empty string
    # BUGFIX: str.lstrip('run=') strips any of the characters r/u/n/=
    # (a character set), not the literal prefix.  Strip the exact prefix.
    if filename.startswith('run='):
        run_id = filename[len('run='):]
    else:
        run_id = filename
    try:
        # Validate the remainder is a well-formed timestamp
        datetime.strptime(run_id, '%Y-%m-%d-%H-%M-%S')
        return key
    except ValueError:
        return None
Remove all keys with Nones as values
def clean_dict(dict):
    """Remove all keys with Nones as values

    >>> clean_dict({'key': None})
    {}
    >>> clean_dict({'empty_s': ''})
    {'empty_s': ''}
    """
    # iteritems() on Python 2, items() on Python 3
    items = dict.iteritems() if sys.version_info[0] < 3 else dict.items()
    return {key: value for key, value in items if value is not None}
Add run_id into DynamoDB manifest table
def add_to_manifest(dynamodb_client, table_name, run_id):
    """Add run_id into DynamoDB manifest table

    Arguments:
    dynamodb_client - boto3 DynamoDB client (not service)
    table_name - string representing existing table name
    run_id - string representing run_id to store
    """
    item = {DYNAMODB_RUNID_ATTRIBUTE: {'S': run_id}}
    dynamodb_client.put_item(TableName=table_name, Item=item)
Check if run_id is stored in DynamoDB table. Return True if run_id is stored or False otherwise.
def is_in_manifest(dynamodb_client, table_name, run_id):
    """Check if run_id is stored in DynamoDB table.
    Return True if run_id is stored or False otherwise.

    Arguments:
    dynamodb_client - boto3 DynamoDB client (not service)
    table_name - string representing existing table name
    run_id - string representing run_id to store
    """
    lookup_key = {DYNAMODB_RUNID_ATTRIBUTE: {'S': run_id}}
    response = dynamodb_client.get_item(TableName=table_name,
                                        Key=lookup_key)
    # get_item omits 'Item' entirely when the key is absent
    return response.get('Item') is not None
Extracts Schema information from Iglu URI
def extract_schema(uri):
    """
    Extracts Schema information from Iglu URI

    >>> extract_schema("iglu:com.acme-corporation_underscore/event_name-dash/jsonschema/1-10-1")['vendor']
    'com.acme-corporation_underscore'
    """
    match = re.match(SCHEMA_URI_REGEX, uri)
    if not match:
        raise SnowplowEventTransformationException([
            "Schema {} does not conform to regular expression {}".format(uri, SCHEMA_URI)
        ])
    vendor, name, format_, version = match.group(1, 2, 3, 4)
    return {
        'vendor': vendor,
        'name': name,
        'format': format_,
        'version': version
    }
Create an Elasticsearch field name from a schema string
def fix_schema(prefix, schema):
    """
    Create an Elasticsearch field name from a schema string.

    The vendor becomes snake_case, CamelCase names get underscores, and
    only the major model number of the version is kept.
    """
    schema_dict = extract_schema(schema)
    snake_case_organization = schema_dict['vendor'].replace('.', '_').lower()
    # BUGFIX: raw strings -- '\g<1>' in a plain literal is an invalid
    # escape sequence (DeprecationWarning, SyntaxError on future Pythons).
    snake_case_name = re.sub(r'([^A-Z_])([A-Z])', r'\g<1>_\g<2>',
                             schema_dict['name']).lower()
    model = schema_dict['version'].split('-')[0]
    return "{}_{}_{}_{}".format(prefix, snake_case_organization,
                                snake_case_name, model)
Convert a contexts JSON to an Elasticsearch-compatible list of key-value pairs. For example, the JSON:
def parse_contexts(contexts):
    """
    Convert a contexts JSON to an Elasticsearch-compatible list of
    key-value pairs.  Contexts sharing a schema are grouped under one
    field name, e.g.

        {"data": [
            {"data": {"value": 1}, "schema": "iglu:com.acme/duplicated/jsonschema/1-0-0"},
            {"data": {"value": 2}, "schema": "iglu:com.acme/duplicated/jsonschema/1-0-0"}
        ], ...}

    becomes

        [("contexts_com_acme_duplicated_1", [{"value": 1}, {"value": 2}])]
    """
    grouped = {}
    for context in json.loads(contexts)['data']:
        field_name = fix_schema("contexts", context['schema'])
        # Group every payload under its Elasticsearch field name
        grouped.setdefault(field_name, []).append(context['data'])
    return list(grouped.items())
Convert an unstructured event JSON to a list containing one Elasticsearch-compatible key-value pair. For example, the JSON:
def parse_unstruct(unstruct):
    """
    Convert an unstructured event JSON to a list containing one
    Elasticsearch-compatible key-value pair, e.g.

        [("unstruct_com_snowplowanalytics_snowplow_link_click_1",
          {"key": "value"})]
    """
    data = json.loads(unstruct)['data']
    schema = data['schema']
    if 'data' not in data:
        raise SnowplowEventTransformationException(
            ["Could not extract inner data field from unstructured event"])
    return [(fix_schema("unstruct_event", schema), data['data'])]
Convert a Snowplow enriched event TSV into a JSON
def transform(line, known_fields=ENRICHED_EVENT_FIELD_TYPES, add_geolocation_data=True):
    """
    Convert a Snowplow enriched event TSV into a JSON
    """
    fields = line.split('\t')
    return jsonify_good_event(fields, known_fields, add_geolocation_data)
Convert a Snowplow enriched event in the form of an array of fields into a JSON
def jsonify_good_event(event, known_fields=ENRICHED_EVENT_FIELD_TYPES, add_geolocation_data=True):
    """
    Convert a Snowplow enriched event in the form of an array of fields
    into a JSON

    :param event: list of TSV field values, one per entry in known_fields.
    :param known_fields: sequence of (field_name, converter) pairs; each
        converter returns an iterable of (key, value) pairs for the output.
    :param add_geolocation_data: when True and both lat/lon fields are
        non-empty, adds a combined "geo_location" entry.
    :raises SnowplowEventTransformationException: on field-count mismatch,
        or with all accumulated per-field conversion errors.
    """
    if len(event) != len(known_fields):
        raise SnowplowEventTransformationException(
            ["Expected {} fields, received {} fields.".format(len(known_fields), len(event))]
        )
    else:
        output = {}
        errors = []
        if add_geolocation_data and event[LATITUDE_INDEX] != '' and event[LONGITUDE_INDEX] != '':
            output['geo_location'] = event[LATITUDE_INDEX] + ',' + event[LONGITUDE_INDEX]
        for i in range(len(event)):
            key = known_fields[i][0]
            # Empty TSV fields are simply omitted from the output
            if event[i] != '':
                try:
                    # A converter may emit several key/value pairs
                    kvpairs = known_fields[i][1](key, event[i])
                    for kvpair in kvpairs:
                        output[kvpair[0]] = kvpair[1]
                except SnowplowEventTransformationException as sete:
                    errors += sete.error_messages
                except Exception as e:
                    errors += ["Unexpected exception parsing field with key {} and value {}: {}".format(
                        known_fields[i][0], event[i], repr(e)
                    )]
        if errors:
            raise SnowplowEventTransformationException(errors)
        else:
            return output
Extract the used view from the TemplateResponse context ( ContextMixin )
def _get_view_data(self, context_data):
    """
    Extract the used view from the TemplateResponse context (ContextMixin)
    """
    view = context_data.get('view')
    if not isinstance(view, View):
        view = None
    # Denote interesting objects in the template context
    template_context = [
        (key, _format_path(obj.__class__))
        for key, obj in context_data.items()
        if isinstance(obj, (BaseForm, BaseFormSet, Model))
    ]
    return {
        'model': _get_view_model(view),
        'form': _get_form_class(view),
        'template_context': template_context,
    }
Get the template used in a TemplateResponse. This returns a tuple of active choice all choices
def get_used_template(response):
    """
    Get the template used in a TemplateResponse.
    This returns a tuple of "active choice, all choices"
    """
    template = getattr(response, 'template_name', None)
    if template is None:
        return None, None
    if isinstance(template, (list, tuple)):
        # See which template name was really used.
        if len(template) == 1:
            return template[0], None
        return _get_used_template_name(template), template
    if isinstance(template, six.string_types):
        # Single string
        return template, None
    # Template object.
    filename = _get_template_filename(template)
    if filename:
        return '<template object from {0}>'.format(filename), None
    return '<template object>', None
Print the entire template context
def print_context(self, context):
    """ Print the entire template context """
    blocks = [CONTEXT_TITLE]
    for num, scope in enumerate(context):
        dump1 = linebreaksbr(pformat_django_context_html(scope))
        dump2 = pformat_dict_summary_html(scope)
        # Collapse long objects by default (e.g. request, LANGUAGES and sql_queries)
        if len(scope) <= 3 and dump1.count('<br />') > 20:
            dump1, dump2 = dump2, dump1
        blocks.append(CONTEXT_BLOCK.format(
            style=PRE_STYLE, num=num, dump1=dump1, dump2=dump2
        ))
    return u''.join(blocks)
Print a set of variables
def print_variables(self, context):
    """ Print a set of variables

    Renders each configured (name, expression) pair against *context*.
    On the first unresolvable variable, rendering stops and an error
    block listing the available context variables is returned instead.
    """
    text = []
    for name, expr in self.variables:
        # Some extended resolving, to handle unknown variables
        data = ''
        try:
            if isinstance(expr.var, Variable):
                data = expr.var.resolve(context)
            else:
                data = expr.resolve(context)  # could return TEMPLATE_STRING_IF_INVALID
        except VariableDoesNotExist as e:
            # Failed to resolve, display exception inline
            keys = []
            for scope in context:
                keys += scope.keys()
            keys = sorted(set(keys))  # Remove duplicates, e.g. csrf_token
            # NOTE: returns immediately, abandoning any variables already rendered
            return ERROR_TYPE_BLOCK.format(style=PRE_ALERT_STYLE, error=escape(u"Variable '{0}' not found! Available context variables are:\n\n{1}".format(expr, u', '.join(keys))))
        else:
            # Regular format
            textdata = linebreaksbr(pformat_django_context_html(data))
            # At top level, prefix class name if it's a longer result
            if isinstance(data, SHORT_NAME_TYPES):
                text.append(BASIC_TYPE_BLOCK.format(style=PRE_STYLE, name=name, value=textdata))
            else:
                text.append(OBJECT_TYPE_BLOCK.format(style=PRE_STYLE, name=name, type=data.__class__.__name__, value=textdata))
    return u''.join(text)
Highlight common SQL words in a string.
def pformat_sql_html(sql):
    """ Highlight common SQL words in a string. """
    escaped = escape(sql)
    # Break before major clauses, then bold the keywords
    with_breaks = RE_SQL_NL.sub(u'<br>\n\\1', escaped)
    return RE_SQL.sub(u'<strong>\\1</strong>', with_breaks)
Dump a variable to a HTML string with sensible output for template context fields. It filters out all fields which are not usable in a template context.
def pformat_django_context_html(object):
    """
    Dump a variable to a HTML string with sensible output for template
    context fields.
    It filters out all fields which are not usable in a template context.
    """
    if isinstance(object, QuerySet):
        # Render up to 20 items; the 21st triggers a truncation marker
        text = ''
        lineno = 0
        for item in object.all()[:21]:
            lineno += 1
            if lineno >= 21:
                text += u' (remaining items truncated...)'
                break
            text += u' {0}\n'.format(escape(repr(item)))
        return text
    elif isinstance(object, Manager):
        return mark_safe(u' (use <kbd>.all</kbd> to read it)')
    elif isinstance(object, six.string_types):
        return escape(repr(object))
    elif isinstance(object, Promise):
        # lazy() object
        return escape(_format_lazy(object))
    elif isinstance(object, dict):
        # This can also be a ContextDict
        return _format_dict(object)
    elif isinstance(object, list):
        return _format_list(object)
    elif hasattr(object, '__dict__'):
        return _format_object(object)
    else:
        # Use regular pprint as fallback.
        text = DebugPrettyPrinter(width=200).pformat(object)
        return _style_text(text)
Briefly print the dictionary keys.
def pformat_dict_summary_html(dict):
    """ Briefly print the dictionary keys. """
    if not dict:
        return ' {}'
    lines = []
    for key, value in sorted(six.iteritems(dict)):
        # Only simple values are shown; everything else collapses to '...'
        shown = value if isinstance(value, DICT_EXPANDED_TYPES) else '...'
        lines.append(_format_dict_item(key, shown))
    return mark_safe(u'<br/>'.join(lines))
Apply some HTML highlighting to the contents. This can't be done in the template itself.
def _style_text(text):
    """
    Apply some HTML highlighting to the contents.
    The text is escaped and decorated with markup here, then marked safe.
    """
    # Escape text and apply some formatting.
    # To have really good highlighting, pprint would have to be re-implemented.
    text = escape(text)
    text = text.replace(' &lt;iterator object&gt;', " <small>&lt;<var>this object can be used in a 'for' loop</var>&gt;</small>")
    text = text.replace(' &lt;dynamic item&gt;', ' <small>&lt;<var>this object may have extra field names</var>&gt;</small>')
    text = text.replace(' &lt;dynamic attribute&gt;', ' <small>&lt;<var>this object may have extra field names</var>&gt;</small>')
    # BUGFIX: replacement templates are raw strings now -- '\g<1>' in a
    # plain literal is an invalid escape sequence on modern Pythons.
    text = RE_PROXY.sub(r'\g<1><small>&lt;<var>proxy object</var>&gt;</small>', text)
    text = RE_FUNCTION.sub(r'\g<1><small>&lt;<var>object method</var>&gt;</small>', text)
    text = RE_GENERATOR.sub(r"\g<1><small>&lt;<var>generator, use 'for' to traverse it</var>&gt;</small>", text)
    text = RE_OBJECT_ADDRESS.sub(r'\g<1><small>&lt;<var>\g<2> object</var>&gt;</small>', text)
    text = RE_MANAGER.sub(r'\g<1><small>&lt;<var>manager, use <kbd>.all</kbd> to traverse it</var>&gt;</small>', text)
    text = RE_CLASS_REPR.sub(r'\g<1><small>&lt;<var>\g<2> class</var>&gt;</small>', text)
    # Since Django's WSGIRequest does a pprint like format for it's __repr__, make that styling consistent
    # (not raw: this template intentionally contains a real '\n' newline)
    text = RE_REQUEST_FIELDNAME.sub('\\g<1>:\n <strong style="color: #222;">\\g<2></strong>: ', text)
    text = RE_REQUEST_CLEANUP1.sub(r'\g<1>', text)
    text = RE_REQUEST_CLEANUP2.sub(')', text)
    return mark_safe(text)
Instead of just printing `<SomeType at 0xfoobar>`, expand the fields.
def _format_object(object):
    """
    Instead of just printing ``<SomeType at 0xfoobar>``, expand the fields.

    Collects instance and class attributes that are meaningful in a
    template context (skipping private names, data-altering callables and
    framework noise), resolves properties/descriptors safely, and renders
    the result via ``_format_dict``.
    """
    attrs = iter(object.__dict__.items())
    if object.__class__:
        # Add class members too.
        attrs = chain(attrs, iter(object.__class__.__dict__.items()))
    # Remove private and protected variables
    # Filter needless exception classes which are added to each model.
    # Filter unremoved form.Meta (unline model.Meta) which makes no sense either
    is_model = isinstance(object, Model)
    is_form = isinstance(object, BaseForm)
    attrs = dict(
        (k, v)
        for k, v in attrs
        if not k.startswith('_')
        and not getattr(v, 'alters_data', False)
        and not (is_model and k in ('DoesNotExist', 'MultipleObjectsReturned'))
        and not (is_form and k in ('Meta',))
    )
    # Add members which are not found in __dict__.
    # This includes values such as auto_id, c, errors in a form.
    for member in dir(object):
        try:
            if member.startswith('_') or not hasattr(object, member):
                continue
        except HANDLED_EXCEPTIONS as e:
            # hasattr() itself may trigger evaluation that raises
            attrs[member] = _format_exception(e)
            continue
        value = getattr(object, member)
        if callable(value) or member in attrs or getattr(value, 'alters_data', False):
            continue
        attrs[member] = value
    # Format property objects
    for name, value in list(attrs.items()):  # not iteritems(), so can delete.
        if isinstance(value, property):
            attrs[name] = _try_call(lambda: getattr(object, name))
        elif isinstance(value, types.FunctionType):
            if PY3:
                spec = inspect.getfullargspec(value)
            else:
                spec = inspect.getargspec(value)
            if len(spec.args) == 1 or len(spec.args) == len(spec.defaults or ()) + 1:
                if _is_unsafe_name(name):
                    # The delete and save methods should have an alters_data = True set.
                    # however, when delete or save methods are overridden, this is often missed.
                    attrs[name] = LiteralStr('<Skipped for safety reasons (could alter the database)>')
                else:
                    # should be simple method(self) signature to be callable in the template
                    # function may have args (e.g. BoundField.as_textarea) as long as they have defaults.
                    attrs[name] = _try_call(lambda: value(object))
            else:
                del attrs[name]
        elif hasattr(value, '__get__'):
            # fetched the descriptor, e.g. django.db.models.fields.related.ForeignRelatedObjectsDescriptor
            attrs[name] = value = _try_call(lambda: getattr(object, name), return_exceptions=True)
            if isinstance(value, Manager):
                attrs[name] = LiteralStr('<{0} manager>'.format(value.__class__.__name__))
            elif isinstance(value, AttributeError):
                del attrs[name]  # e.g. Manager isn't accessible via Model instances.
            elif isinstance(value, HANDLED_EXCEPTIONS):
                attrs[name] = _format_exception(value)
    # Include representations which are relevant in template context.
    if getattr(object, '__str__', None) is not object.__str__:
        attrs['__str__'] = _try_call(lambda: smart_str(object))
    elif getattr(object, '__unicode__', None) is not object.__unicode__:
        attrs['__unicode__'] = _try_call(lambda: smart_str(object))
    if hasattr(object, '__getattr__'):
        attrs['__getattr__'] = LiteralStr('<dynamic attribute>')
    if hasattr(object, '__getitem__'):
        attrs['__getitem__'] = LiteralStr('<dynamic item>')
    if hasattr(object, '__iter__'):
        attrs['__iter__'] = LiteralStr('<iterator object>')
    if hasattr(object, '__len__'):
        attrs['__len__'] = len(object)
    # Add known __getattr__ members which are useful for template designers.
    if isinstance(object, BaseForm):
        for field_name in list(object.fields.keys()):
            attrs[field_name] = object[field_name]
        del attrs['__getitem__']
    return _format_dict(attrs)
Expand a _ ( TEST ) call to something meaningful.
def _format_lazy(value):
    """ Expand a _("TEST") call to something meaningful. """
    args = value._proxy____args
    kw = value._proxy____kw
    # Found one of the Xgettext_lazy() calls.
    if not kw and len(args) == 1 and isinstance(args[0], six.string_types):
        return LiteralStr(u'ugettext_lazy({0})'.format(repr(args[0])))
    # Prints <django.functional.utils.__proxy__ object at ..>
    return value
Call a method, converting anticipated exceptions into a printable form (or returning them directly when requested).
def _try_call(func, extra_exceptions=(), return_exceptions=False):
    """
    Call *func*, converting anticipated exceptions into a printable form.

    :param func: zero-argument callable to invoke.
    :param extra_exceptions: additional exception classes to handle
        besides ``HANDLED_EXCEPTIONS``.
    :param return_exceptions: when True, return the caught exception
        object itself instead of its formatted representation.
    :return: the result of ``func()``, or the caught exception /
        its formatted representation.
    """
    try:
        return func()
    except HANDLED_EXCEPTIONS as e:
        if return_exceptions:
            return e
        else:
            return _format_exception(e)
    except extra_exceptions as e:
        if return_exceptions:
            return e
        else:
            return _format_exception(e)
Format an item in the result. Could be a dictionary key value etc..
def format(self, object, context, maxlevels, level):
    """ Format an item in the result. Could be a dictionary key, value, etc..

    Falls back to a formatted exception message when repr-ing the object
    raises one of the anticipated exceptions.
    """
    try:
        return PrettyPrinter.format(self, object, context, maxlevels, level)
    except HANDLED_EXCEPTIONS as e:
        # (formatted, readable, recursive) tuple as PrettyPrinter expects
        return _format_exception(e), True, False
Recursive part of the formatting
def _format(self, object, stream, indent, allowance, context, level):
    """ Recursive part of the formatting """
    try:
        PrettyPrinter._format(self, object, stream, indent, allowance, context, level)
    except Exception as e:
        # NOTE(review): broader than the HANDLED_EXCEPTIONS used by the
        # sibling format() -- presumably intentional so any failure while
        # recursing is rendered inline rather than aborting the whole
        # dump; confirm before narrowing.
        stream.write(_format_exception(e))