Search is not available for this dataset
text
stringlengths
75
104k
def timestamp_YmdHMS(value):
    """Convert a '%Y%m%d%H%M%S' timestamp string to seconds since epoch.

    Timestamp strings like '20130618120000' are able to be converted by
    this function.

    Args:
        value: A timestamp string in the format '%Y%m%d%H%M%S'.

    Returns:
        The time in seconds since epoch as an integer.

    Raises:
        ValueError: If the timestamp is invalid.

    Note:
        The timezone is assumed to be UTC/GMT.
    """
    rest = int(value)
    # Peel the six date/time fields off the integer, least significant first.
    rest, second = divmod(rest, 100)
    rest, minute = divmod(rest, 100)
    rest, hour = divmod(rest, 100)
    rest, day = divmod(rest, 100)
    year, month = divmod(rest, 100)
    return int(calendar.timegm(
        (year % 10000, month, day, hour, minute, second, 0, 0, 0)
    ))
def datetimeobj_YmdHMS(value):
    """Convert a '%Y%m%d%H%M%S' timestamp string to a datetime object.

    Timestamp strings like '20130618120000' are able to be converted by
    this function.

    Args:
        value: A timestamp string in the format '%Y%m%d%H%M%S'.

    Returns:
        A datetime object.

    Raises:
        ValueError: If the timestamp is invalid.

    Note:
        The timezone is assumed to be UTC/GMT.
    """
    rest = int(value)
    # Peel the six date/time fields off the integer, least significant first.
    rest, second = divmod(rest, 100)
    rest, minute = divmod(rest, 100)
    rest, hour = divmod(rest, 100)
    rest, day = divmod(rest, 100)
    year, month = divmod(rest, 100)
    return datetime.datetime(
        year % 10000, month, day, hour, minute, second, tzinfo=TZ_GMT
    )
def datetimeobj_epoch(value):
    """Convert a seconds-since-epoch string to a datetime object.

    Timestamp strings like '1383470155' are able to be converted by this
    function.

    Args:
        value: A timestamp string as seconds since epoch.

    Returns:
        A datetime object (timezone set to GMT).

    Raises:
        ValueError: If the timestamp is invalid.
    """
    seconds = int(value)
    return datetime.datetime.utcfromtimestamp(seconds).replace(tzinfo=TZ_GMT)
def timestamp_fmt(value, fmt):
    """Convert a timestamp string to seconds since epoch using a format.

    Wraps datetime.datetime.strptime(). This is slow — use the other
    timestamp_*() functions if possible.

    Args:
        value: A timestamp string.
        fmt: A timestamp format string.

    Returns:
        The time in seconds since epoch as an integer.
    """
    parsed = datetime.datetime.strptime(value, fmt)
    return int(calendar.timegm(parsed.utctimetuple()))
def timestamp_any(value):
    """Convert an arbitrary timestamp string to seconds since epoch.

    Most timestamp strings are supported; this wraps
    dateutil.parser.parse(). This is SLOW — use the other timestamp_*()
    functions if possible.

    Args:
        value: A timestamp string.

    Returns:
        The time in seconds since epoch as an integer.
    """
    parsed = dateutil.parser.parse(value)
    return int(calendar.timegm(parsed.utctimetuple()))
def timestamp(value, fmt=None):
    """Parse a datetime string to a unix timestamp.

    Uses fast custom parsing for common datetime formats or the slow
    dateutil parser for other formats. This is a trade-off between ease
    of use and speed, useful when the timestamp format is varied or
    unknown prior to parsing. Common formats include:

        1 Feb 2010 12:00:00 GMT
        Mon, 1 Feb 2010 22:00:00 +1000
        20100201120000
        1383470155 (seconds since epoch)

    See the other timestamp_*() functions for more details.

    Args:
        value: A string representing a datetime.
        fmt: A timestamp format string like for time.strptime().

    Returns:
        The time in seconds since epoch as an integer.
    """
    if fmt:
        # Dispatch to a dedicated fast parser when one is registered
        # for this format, else fall back to strptime-based parsing.
        handler = _timestamp_formats.get(fmt, lambda v: timestamp_fmt(v, fmt))
        return handler(value)
    length = len(value)
    if 19 <= length <= 24 and value[3] == " ":
        # '%d %b %Y %H:%M:%Sxxxx'
        try:
            return timestamp_d_b_Y_H_M_S(value)
        except (KeyError, ValueError, OverflowError):
            pass
    if 30 <= length <= 31:
        # '%a, %d %b %Y %H:%M:%S %z'
        try:
            return timestamp_a__d_b_Y_H_M_S_z(value)
        except (KeyError, ValueError, OverflowError):
            pass
    if length == 14:
        # '%Y%m%d%H%M%S'
        try:
            return timestamp_YmdHMS(value)
        except (ValueError, OverflowError):
            pass
    # Plain seconds-since-epoch.
    try:
        return timestamp_epoch(value)
    except ValueError:
        pass
    # Slow fallback: dateutil handles nearly anything.
    return timestamp_any(value)
def datetimeobj(value, fmt=None):
    """Parse a datetime string to a datetime object.

    Uses fast custom parsing for common datetime formats or the slow
    dateutil parser for other formats. This is a trade-off between ease
    of use and speed, useful when the timestamp format is varied or
    unknown prior to parsing. Common formats include:

        1 Feb 2010 12:00:00 GMT
        Mon, 1 Feb 2010 22:00:00 +1000
        20100201120000
        1383470155 (seconds since epoch)

    See the other datetimeobj_*() functions for more details.

    Args:
        value: A string representing a datetime.
        fmt: A timestamp format string like for time.strptime().

    Returns:
        A datetime object.
    """
    if fmt:
        # Dispatch to a dedicated fast parser when one is registered
        # for this format, else fall back to strptime-based parsing.
        handler = _datetimeobj_formats.get(
            fmt, lambda v: datetimeobj_fmt(v, fmt))
        return handler(value)
    length = len(value)
    if 19 <= length <= 24 and value[3] == " ":
        # '%d %b %Y %H:%M:%Sxxxx'
        try:
            return datetimeobj_d_b_Y_H_M_S(value)
        except (KeyError, ValueError):
            pass
    if 30 <= length <= 31:
        # '%a, %d %b %Y %H:%M:%S %z'
        try:
            return datetimeobj_a__d_b_Y_H_M_S_z(value)
        except (KeyError, ValueError):
            pass
    if length == 14:
        # '%Y%m%d%H%M%S'
        try:
            return datetimeobj_YmdHMS(value)
        except ValueError:
            pass
    # Plain seconds-since-epoch.
    try:
        return datetimeobj_epoch(value)
    except ValueError:
        pass
    # Slow fallback: dateutil handles nearly anything.
    return datetimeobj_any(value)
def _fix_alert_config_dict(alert_config): """ Fix the alert config .args() dict for the correct key name """ data = alert_config.args() data['params_set'] = data.get('args') del data['args'] return data
def _get_login_payload(self, username, password): """ returns the payload the login page expects :rtype: dict """ payload = { 'csrfmiddlewaretoken': self._get_csrf_token(), 'ajax': '1', 'next': '/app/', 'username': username, 'password': password } return payload
def _api_post(self, url, **kwargs):
    """POST to the API and return the decoded JSON response.

    :param url: The URL to post to
    :raises ServerException: if the response status is not OK
    :rtype: dict
    """
    response = self.session.post(
        url=url,
        headers=self._get_api_headers(),
        **kwargs
    )
    if not response.ok:
        # Prefer the response body for the error detail; fall back to
        # the HTTP reason phrase when the body is empty.
        detail = response.text or response.reason
        raise ServerException('{0}: {1}'.format(response.status_code, detail))
    return response.json()
def _api_delete(self, url, **kwargs):
    """DELETE against the API and return the raw response.

    :param url: The URL to issue the DELETE to
    :raises ServerException: if the response status is not OK
    """
    response = self.session.delete(
        url=url,
        headers=self._get_api_headers(),
        **kwargs
    )
    if not response.ok:
        # Prefer the response body for the error detail; fall back to
        # the HTTP reason phrase when the body is empty.
        detail = response.text or response.reason
        raise ServerException('{0}: {1}'.format(response.status_code, detail))
    return response
def _api_get(self, url, **kwargs):
    """GET from the API and return the decoded JSON response.

    :param url: The URL to get
    :raises ServerException: if the response status is not OK
    :rtype: dict
    """
    response = self.session.get(
        url=url,
        headers=self._get_api_headers(),
        **kwargs
    )
    if not response.ok:
        # Prefer the response body for the error detail; fall back to
        # the HTTP reason phrase when the body is empty.
        detail = response.text or response.reason
        raise ServerException('{0}: {1}'.format(response.status_code, detail))
    return response.json()
def _login(self, username, password):
    """Log in to Logentries and return the account's url id.

    ._login() makes three requests:

    * One to the /login/ page to get a CSRF cookie
    * One to /login/ajax/ to get a logged-in session cookie
    * One to /app/ to get the beginning of the account id

    :param username: A valid username (email)
    :type username: str
    :param password: A valid password
    :type password: str

    :return: The account's url id
    :rtype: str

    :raises ServerException: if either login request fails
    """
    login_url = 'https://logentries.com/login/'
    # The initial GET primes self.session with a CSRF cookie.
    login_page_response = self.session.get(url=login_url, headers=self.default_headers)
    if not login_page_response.ok:
        raise ServerException(login_page_response.text)
    # The AJAX login endpoint expects a Referer header and the XHR marker.
    login_headers = {
        'Referer': login_url,
        'X-Requested-With': 'XMLHttpRequest',
    }
    login_headers.update(self.default_headers)
    login_response = self.session.post(
        'https://logentries.com/login/ajax/',
        headers=login_headers,
        data=self._get_login_payload(
            username, password),
    )
    if not login_response.ok:
        raise ServerException(login_response.text)
    # /app/ redirects to a URL whose final path segment is the account id.
    app_response = self.session.get('https://logentries.com/app/', headers=self.default_headers)
    return app_response.url.split('/')[-1]
def list_scheduled_queries(self):
    """List all scheduled queries.

    :return: A list of all scheduled query dicts
    :rtype: list of dict

    :raises: This will raise a
        :class:`ServerException<logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    url = (
        'https://logentries.com/rest/{account_id}/api/scheduled_queries/'
        .format(account_id=self.account_id)
    )
    return self._api_get(url=url).get('scheduled_searches')
def list_tags(self):
    """List all tags for the account.

    The response differs from ``Hooks().list()``, in that tag dicts for
    anomaly alerts include a 'scheduled_query_id' key whose value is the
    UUID for the associated scheduled query.

    :return: A list of all tag dicts
    :rtype: list of dict

    :raises: This will raise a
        :class:`ServerException<logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    url = (
        'https://logentries.com/rest/{account_id}/api/tags/'
        .format(account_id=self.account_id)
    )
    return self._api_get(url=url).get('tags')
def get(self, name_or_id):
    """Get alerts by name or id.

    :param name_or_id: The alert's name or id
    :type name_or_id: str

    :return: A list of matching tags. An empty list is returned if
        there are not any matches
    :rtype: list of dict

    :raises: This will raise a
        :class:`ServerException<logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    matches = []
    for tag in self.list_tags():
        # Match either the tag's id or its display name.
        if name_or_id in (tag.get('id'), tag.get('name')):
            matches.append(tag)
    return matches
def create(self, name, patterns, logs, trigger_config, alert_reports):
    """Create an inactivity alert.

    :param name: A name for the inactivity alert
    :type name: str
    :param patterns: A list of regexes to match
    :type patterns: list of str
    :param logs: A list of log UUIDs (the 'key' key of a log)
    :type logs: list of str
    :param trigger_config: An AlertTriggerConfig describing how far back
        to look for inactivity
    :type trigger_config: :class:`AlertTriggerConfig<logentries_api.special_alerts.AlertTriggerConfig>`
    :param alert_reports: A list of AlertReportConfigs to send alerts to
    :type alert_reports: list of :class:`AlertReportConfig<logentries_api.special_alerts.AlertReportConfig>`

    :return: The API response
    :rtype: dict

    :raises: This will raise a
        :class:`ServerException<logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    tag = {
        'actions': [report.to_dict() for report in alert_reports],
        'name': name,
        'patterns': patterns,
        'sources': [{'id': log} for log in logs],
        'sub_type': 'InactivityAlert',
        'type': 'AlertNotify',
    }
    # The trigger config supplies the look-back window keys.
    tag.update(trigger_config.to_dict())
    payload = {'tag': tag}
    return self._api_post(
        url=self.url_template.format(account_id=self.account_id),
        data=json.dumps(payload, sort_keys=True)
    )
def delete(self, tag_id):
    """Delete the specified InactivityAlert.

    :param tag_id: The tag ID to delete
    :type tag_id: str

    :raises: This will raise a
        :class:`ServerException <logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    url = 'https://logentries.com/rest/{account_id}/api/tags/{tag_id}'.format(
        account_id=self.account_id,
        tag_id=tag_id
    )
    self._api_delete(url=url)
def _create_scheduled_query(self, query, change, scope_unit, scope_count): """ Create the scheduled query """ query_data = { 'scheduled_query': { 'name': 'ForAnomalyReport', 'query': query, 'threshold_type': '%', 'threshold_value': change, 'time_period': scope_unit.title(), 'time_value': scope_count, } } query_url = 'https://logentries.com/rest/{account_id}/api/scheduled_queries' return self._api_post( url=query_url.format(account_id=self.account_id), data=json.dumps(query_data, sort_keys=True) )
def create(self, name, query, scope_count, scope_unit, increase_positive, percentage_change, trigger_config, logs, alert_reports):
    """Create an anomaly alert.

    This call makes 2 requests: one to create a "scheduled_query", and
    another to create the alert.

    :param name: The name for the alert
    :type name: str

    :param query: The `LEQL`_ query to use for detecting anomalies.
        Must result in a numerical value, so it should look something
        like ``where(...) calculate(COUNT)``
    :type query: str

    :param scope_count: How many ``scope_unit`` s to inspect for
        detecting an anomaly
    :type scope_count: int

    :param scope_unit: How far to look back in detecting an anomaly.
        Must be one of "hour", "day", or "week"
    :type scope_unit: str

    :param increase_positive: Detect a positive increase for the
        anomaly. A value of ``False`` results in detecting a decrease
        for the anomaly.
    :type increase_positive: bool

    :param percentage_change: The percentage of change to detect. Must
        be a number between 0 and 100 (inclusive).
    :type percentage_change: int

    :param trigger_config: An AlertTriggerConfig describing how far
        back to look to compare to the anomaly scope.
    :type trigger_config: :class:`AlertTriggerConfig<logentries_api.special_alerts.AlertTriggerConfig>`

    :param logs: A list of log UUID's. (The 'key' key of a log)
    :type logs: list of str

    :param alert_reports: A list of AlertReportConfig to send alerts to
    :type alert_reports: list of :class:`AlertReportConfig<logentries_api.special_alerts.AlertReportConfig>`

    :return: The API response of the alert creation
    :rtype: dict

    :raises: This will raise a
        :class:`ServerException <logentries_api.exceptions.ServerException>`
        if there is an error from Logentries

    .. _LEQL: https://blog.logentries.com/2015/06/introducing-leql/
    """
    # Signed percentage string, e.g. '+10' or '-25'.
    change = '{pos}{change}'.format(
        pos='+' if increase_positive else '-',
        change=str(percentage_change)
    )
    # Request 1: create the scheduled query the alert will watch.
    query_response = self._create_scheduled_query(
        query=query,
        change=change,
        scope_unit=scope_unit,
        scope_count=scope_count,
    )
    scheduled_query_id = query_response.get('scheduled_query', {}).get('id')
    # Request 2: create the alert tag referencing that query.
    tag_data = {
        'tag': {
            'actions': [
                alert_report.to_dict()
                for alert_report in alert_reports
            ],
            'name': name,
            'scheduled_query_id': scheduled_query_id,
            'sources': [
                {'id': log}
                for log in logs
            ],
            'sub_type': 'AnomalyAlert',
            'type': 'AlertNotify'
        }
    }
    # The trigger config supplies the comparison-window keys.
    tag_data['tag'].update(trigger_config.to_dict())
    tag_url = 'https://logentries.com/rest/{account_id}/api/tags'.format(
        account_id=self.account_id
    )
    return self._api_post(
        url=tag_url,
        data=json.dumps(tag_data, sort_keys=True),
    )
def delete(self, tag_id):
    """Delete a specified anomaly alert tag and its scheduled query.

    This method makes 3 requests:

    * One to get the associated scheduled_query_id
    * One to delete the alert
    * One to delete the scheduled query

    :param tag_id: The tag ID to delete
    :type tag_id: str

    :raises: This will raise a
        :class:`ServerException <logentries_api.exceptions.ServerException>`
        if there is an error from Logentries
    """
    matching = [tag for tag in self.list_tags() if tag.get('id') == tag_id]
    if not matching:
        # Nothing to delete.
        return
    query_id = matching[0].get('scheduled_query_id')
    tag_url = 'https://logentries.com/rest/{account_id}/api/tags/{tag_id}'
    self._api_delete(
        url=tag_url.format(account_id=self.account_id, tag_id=tag_id)
    )
    query_url = ('https://logentries.com/rest/{account_id}'
                 '/api/scheduled_queries/{query_id}')
    self._api_delete(
        url=query_url.format(account_id=self.account_id, query_id=query_id)
    )
def unparse_range(obj):
    """Unparse a range argument into an NNTP range string.

    Args:
        obj: An article range. Either an integer specifying a single
            article or a tuple specifying an article range. If the
            range doesn't give a start article then all articles up to
            the specified last article are included. If the range
            doesn't specify a last article then all articles from the
            first specified article up to the current last article for
            the group are included.

    Returns:
        The range as a string that can be used by an NNTP command.

    Raises:
        ValueError: If obj is neither an integer nor a tuple.

    Note:
        Sample valid formats: 4678, (,5234), (4245,), (4245, 5234)
    """
    if isinstance(obj, (int, long)):
        return str(obj)
    if isinstance(obj, tuple):
        pieces = [str(obj[0]), "-"]
        if len(obj) > 1:
            pieces.append(str(obj[1]))
        return "".join(pieces)
    raise ValueError("Must be an integer or tuple")
def parse_newsgroup(line):
    """Parse a newsgroup info line to python types.

    Args:
        line: An info response line containing newsgroup info.

    Returns:
        A tuple of (group name, low-water int, high-water int,
        posting status).

    Raises:
        ValueError: If the newsgroup info cannot be parsed.

    Note:
        Posting status is a character, one of (but not limited to):
        "y" posting allowed, "n" posting not allowed,
        "m" posting is moderated.
    """
    fields = line.split()
    try:
        return fields[0], int(fields[1]), int(fields[2]), fields[3]
    except (IndexError, ValueError):
        raise ValueError("Invalid newsgroup info")
def parse_header(line):
    """Parse a single header line.

    Args:
        line: A header line as a string.

    Returns:
        None if the end of headers is found. A string giving the
        continuation line if a continuation is found. A tuple of
        (name, value) when a header line is found.

    Raises:
        ValueError: If the line cannot be parsed as a header.
    """
    # Blank line (or bare CRLF) marks the end of the header section.
    if not line or line == "\r\n":
        return None
    # Leading whitespace marks a continuation of the previous header.
    if line[0] in " \t":
        return line[1:].rstrip()
    name, value = line.split(":", 1)
    return (name.strip(), value.strip())
def parse_headers(obj):
    """Parse a string or iterable object (including file-like objects)
    to a dictionary of headers.

    Args:
        obj: A string or an iterable object, including file-like objects.

    Returns:
        A dictionary of headers. If a header is repeated then the last
        value for that header is given.

    Raises:
        ValueError: If the first line is a continuation line or the
            headers cannot be parsed.
    """
    # Python 2: wrap a plain string so it can be iterated line by line.
    if isinstance(obj, basestring):
        obj = cStringIO.StringIO(obj)
    hdrs = []
    for line in obj:
        hdr = parse_header(line)
        if not hdr:
            # Blank line: end of the header section.
            break
        if isinstance(hdr, basestring):
            # Continuation line: append to the previous header's value.
            if not hdrs:
                raise ValueError("First header is a continuation")
            hdrs[-1] = (hdrs[-1][0], hdrs[-1][1] + hdr)
            continue
        hdrs.append(hdr)
    # IODict keeps case-insensitive lookup; later duplicates win.
    return iodict.IODict(hdrs)
def unparse_headers(hdrs):
    """Unparse a dictionary of headers to a string.

    Args:
        hdrs: A dictionary of headers.

    Returns:
        The headers as a string (terminated by a blank line) that can
        be used in an NNTP POST.
    """
    lines = [unparse_header(name, value) for name, value in hdrs.items()]
    return "".join(lines) + "\r\n"
def do_POST(self):
    """Handle the POST request sent by a Boundary Url Action.

    Always replies 200 OK, then echoes the client address, headers,
    path, and body to stdout for debugging.
    """
    self.send_response(urllib2.httplib.OK)
    self.end_headers()
    # Content-Length tells us exactly how much of the stream to read.
    content_length = int(self.headers['Content-Length'])
    body = self.rfile.read(content_length)
    print("Client: {0}".format(str(self.client_address)))
    print("headers: {0}".format(self.headers))
    print("path: {0}".format(self.path))
    print("body: {0}".format(body))
def run(tests=(), reporter=None, stop_after=None):
    """Run the tests that are loaded by each of the strings provided.

    Arguments:
        tests (iterable): the collection of tests (specified as `str` s)
            to run
        reporter (Reporter): a `Reporter` to use for the run. If
            unprovided, the default is to return a
            `virtue.reporters.Counter` (which produces no output).
        stop_after (int): a number of non-successful tests to allow
            before stopping the run.
    """
    if reporter is None:
        reporter = Counter()
    if stop_after is not None:
        # Wrap the reporter so the run halts after `stop_after` failures.
        reporter = _StopAfterWrapper(reporter=reporter, limit=stop_after)
    locator = ObjectLocator()
    cases = (
        case
        for test in tests
        for loader in locator.locate_by_name(name=test)
        for case in loader.load()
    )
    suite = unittest.TestSuite(cases)
    # start/stopTestRun are optional on the reporter protocol.
    start = getattr(reporter, "startTestRun", None)
    if start is not None:
        start()
    suite.run(reporter)
    stop = getattr(reporter, "stopTestRun", None)
    if stop is not None:
        stop()
    return reporter
def defaults_docstring(defaults, header=None, indent=None, footer=None):
    """Return a docstring section built from a list of defaults.

    Args:
        defaults: iterable of (key, value, description) triples.
        header: text placed before the entries (default '').
        indent: indentation prefix for each entry (default '').
        footer: text appended at the end (default '').

    Returns:
        The assembled docstring section as a string.
    """
    if indent is None:
        indent = ''
    if header is None:
        header = ''
    if footer is None:
        footer = ''
    # Separator line (an earlier revision used a 60-char '=' bar here).
    hbar = '\n'
    chunks = [hbar, header, hbar]
    for key, value, desc in defaults:
        # Quote string values; show callables by name.
        if isinstance(value, basestring):
            value = "'" + value + "'"
        if hasattr(value, '__call__'):
            value = "<" + value.__name__ + ">"
        chunks.append(indent + '%-12s\n' % ("%s :" % key))
        # Continuation lines of the description are re-indented.
        chunks.append(indent + indent + (indent + 23 * ' ').join(desc.split('\n')))
        chunks.append(' [%s]\n\n' % str(value))
    chunks.append(hbar)
    chunks.append(footer)
    return ''.join(chunks)
def defaults_decorator(defaults):
    """Decorator factory appending default kwargs documentation to a
    function's docstring.
    """
    def decorator(func):
        """Append the rendered defaults section to func.__doc__."""
        doc = defaults_docstring(
            defaults,
            header='Keyword arguments\n-----------------\n',
            indent='  ',
            footer='\n',
        )
        # Functions without a docstring get an empty one to append to.
        func.__doc__ = (func.__doc__ or '') + doc
        return func
    return decorator
def _load(self, **kwargs): """Load kwargs key,value pairs into __dict__ """ defaults = dict([(d[0], d[1]) for d in self.defaults]) # Require kwargs are in defaults for k in kwargs: if k not in defaults: msg = "Unrecognized attribute of %s: %s" % ( self.__class__.__name__, k) raise AttributeError(msg) defaults.update(kwargs) # This doesn't overwrite the properties self.__dict__.update(defaults) # This should now be set self.check_type(self.__dict__['default']) # This sets the underlying property values (i.e., __value__) self.set(**defaults)
def defaults_docstring(cls, header=None, indent=None, footer=None):
    """Render the class's declared defaults as a docstring section."""
    section = defaults_docstring(cls.defaults, header=header,
                                 indent=indent, footer=footer)
    return section
def set_value(self, value):
    """Set the value.

    This invokes hooks for type-checking and bounds-checking that
    may be implemented by sub-classes.
    """
    self.check_bounds(value)
    self.check_type(value)
    # Cache the validated value.
    self.__value__ = value
def check_type(self, value):
    """Hook for type-checking, invoked during assignment.

    Raises:
        TypeError: if neither value nor self.dtype is None and they
            do not match. No exception is raised if either value or
            self.dtype is None.
    """
    dtype = self.__dict__['dtype']
    if dtype is None or value is None or isinstance(value, dtype):
        return
    raise TypeError("Value of type %s, when %s was expected." % (
        type(value), dtype))
def value(self):
    """Return the current value, computing and caching it on first use.

    If `self.__value__` is not cached (i.e., is None) the `loader`
    function is invoked to compute it, and the result is cached.

    Raises:
        AttributeError: if no loader is defined and the value is unset.
        TypeError: if the loader returns a value of the wrong type.
    """
    if self.__value__ is not None:
        return self.__value__
    try:
        loader = self.__dict__['loader']
    except KeyError:
        raise AttributeError("Loader is not defined")
    # Run the loader; don't catch its exceptions here — let the Model
    # class figure those out.
    val = loader()
    # Cache through set_value so type checks apply.
    try:
        self.set_value(val)
    except TypeError:
        msg = "Loader must return variable of type %s or None, got %s" % (
            self.__dict__['dtype'], type(val))
        raise TypeError(msg)
    return self.__value__
def check_type(self, value):
    """Hook for type-checking, invoked during assignment.

    Allows size-1 numpy arrays and lists, but raises TypeError if the
    value cannot be cast to a scalar.
    """
    try:
        scalar = asscalar(value)
    except ValueError as err:
        # Non-castable inputs surface as a type error to the caller.
        raise TypeError(err)
    super(Parameter, self).check_type(scalar)
def symmetric_error(self):
    """Return the symmetric error.

    Zero implies no error estimate; otherwise this is either the
    symmetric error itself or the average of the (low, high)
    asymmetric errors.
    """
    errors = self.__errors__
    # ADW: Should this be `np.nan`?
    if errors is None:
        return 0.
    if np.isscalar(errors):
        return errors
    # Average of the low/high asymmetric errors.
    return 0.5 * (errors[0] + errors[1])
def set_free(self, free):
    """Set the free/fixed status (None means fixed)."""
    self.__free__ = bool(free) if free is not None else False
def set_errors(self, errors):
    """Set the parameter error estimate (None clears it)."""
    if errors is None:
        self.__errors__ = None
    else:
        # Coerce each entry to a plain scalar.
        self.__errors__ = [asscalar(e) for e in errors]
def set(self, **kwargs):
    """Set value/bounds/free/errors from the corresponding kwargs.

    This invokes hooks for type-checking and bounds-checking that may
    be implemented by sub-classes.
    """
    # Probably want to reset bounds if set fails.
    # Dispatch in a fixed order: bounds first so a subsequent value
    # assignment is checked against the new bounds.
    for key, setter in (('bounds', self.set_bounds),
                        ('free', self.set_free),
                        ('errors', self.set_errors),
                        ('value', self.set_value)):
        if key in kwargs:
            setter(kwargs.pop(key))
def load_and_parse(self):
    """Load and parse the JSON metrics file at ``self.file_path``.

    Stores the parsed object on ``self.metrics``.
    """
    # ``with`` guarantees the file handle is closed; the original
    # leaked the descriptor on every call (and on parse errors).
    with open(self.file_path, "r") as f:
        self.metrics = json.load(f)
def import_metrics(self):
    """Create or update metric definitions via the API.

    1) Get command line arguments
    2) Read the JSON file
    3) Parse into a dictionary
    4) Create or update definitions using API call
    """
    self.v2Metrics = self.metricDefinitionV2(self.metrics)
    # v2 payloads are a flat dict; v1 nests the list under 'result'.
    metrics = self.metrics if self.v2Metrics else self.metrics['result']
    # Loop through the metrics and call the API to create/update each.
    for m in metrics:
        if self.v2Metrics:
            metric = metrics[m]
            metric['name'] = m
        else:
            metric = m
        self.create_update(metric)
def create_from_pytz(cls, tz_info):
    """Create an instance using the result of the timezone() call in
    "pytz".

    Copies the zone name, pytz's private transition tables, the fixed
    utcoffset/tzname (present on static timezones), and the name of the
    immediate pytz base class, converting timedeltas to plain seconds.
    """
    zone_name = tz_info.zone
    # pytz private attributes; absent on some tzinfo classes, hence getattr.
    utc_transition_times_list_raw = getattr(tz_info, '_utc_transition_times', None)
    utc_transition_times_list = [tuple(utt.timetuple()) for utt in utc_transition_times_list_raw] \
        if utc_transition_times_list_raw is not None \
        else None
    transition_info_list_raw = getattr(tz_info, '_transition_info', None)
    # Each entry is (utcoffset, dst, tzname); timedeltas become seconds.
    transition_info_list = [(utcoffset_td.total_seconds(), dst_td.total_seconds(), tzname) for (utcoffset_td, dst_td, tzname) in transition_info_list_raw] \
        if transition_info_list_raw is not None \
        else None
    try:
        utcoffset_dt = tz_info._utcoffset
    except AttributeError:
        utcoffset = None
    else:
        utcoffset = utcoffset_dt.total_seconds()
    tzname = getattr(tz_info, '_tzname', None)
    # Name of the immediate base class of the pytz tzinfo instance.
    parent_class_name = getmro(tz_info.__class__)[1].__name__
    return cls(zone_name, parent_class_name, utc_transition_times_list, transition_info_list, utcoffset, tzname)
def extract_dictionary(self, metrics):
    """Extract the required fields from a list of metric dicts.

    Args:
        metrics: iterable of metric dicts, each with a 'name' key.

    Returns:
        A dict mapping each metric's name to its extracted fields.
        If a name repeats, the last occurrence wins (as before).
    """
    # Dict comprehension replaces the manual build-by-assignment loop.
    return {m['name']: self.extract_fields(m) for m in metrics}
def filter(self):
    """Apply the filter criteria to keep only the required metrics.

    Replaces ``self.metrics`` with the extracted dictionary of the
    metrics whose names match ``self.filter_expression`` (all metrics
    when no expression is set).
    """
    metrics = self.metrics['result']
    if self.filter_expression is not None:
        kept = [m for m in metrics
                if self.filter_expression.search(m['name'])]
    else:
        kept = metrics
    self.metrics = self.extract_dictionary(kept)
def get_arguments(self):
    """Extract the CLI arguments specific to this command and set the
    API path for the selected host group.
    """
    ApiCli.get_arguments(self)
    if self.args.hostGroupId is not None:
        self.hostGroupId = self.args.hostGroupId
    self.path = "v1/hostgroup/%s" % self.hostGroupId
def get_arguments(self):
    """Extract the specific arguments of this CLI and build the JSON
    event payload (``self.data``) and request headers.
    """
    ApiCli.get_arguments(self)
    # Copy each parsed CLI arg onto the instance only when provided.
    if self.args.tenant_id is not None:
        self._tenant_id = self.args.tenant_id
    if self.args.fingerprint_fields is not None:
        self._fingerprint_fields = self.args.fingerprint_fields
    if self.args.title is not None:
        self._title = self.args.title
    if self.args.source is not None:
        self._source = self.args.source
    if self.args.severity is not None:
        self._severity = self.args.severity
    if self.args.message is not None:
        self._message = self.args.message
    # Build the event body from whichever fields are set.
    event = {}
    if self._title is not None:
        event['title'] = self._title
    if self._severity is not None:
        event['severity'] = self._severity
    if self._message is not None:
        event['message'] = self._message
    if self._source is not None:
        if 'source' not in event:
            event['source'] = {}
        # First element is the source ref; optional second is its type.
        if len(self._source) >= 1:
            event['source']['ref'] = self._source[0]
        if len(self._source) >= 2:
            event['source']['type'] = self._source[1]
    self._process_properties(self.args.properties)
    if self._properties is not None:
        event['properties'] = self._properties
    if self._fingerprint_fields is not None:
        event['fingerprintFields'] = self._fingerprint_fields
    # Serialize deterministically and mark the request body as JSON.
    self.data = json.dumps(event, sort_keys=True)
    self.headers = {'Content-Type': 'application/json'}
def _call_api(self):
    """Make a call to the meter via JSON RPC.

    Connects to (self.rpc_host, self.rpc_port), sends the encoded RPC
    message, and appends the raw response to ``self.rpc_data``.
    """
    # Allocate a socket and connect to the meter
    sockobj = socket(AF_INET, SOCK_STREAM)
    try:
        sockobj.connect((self.rpc_host, self.rpc_port))
        self.get_json()
        message = [self.rpc_message.encode('utf-8')]
        for line in message:
            # sendall() retries until the whole payload is written;
            # plain send() may transmit only part of the buffer.
            sockobj.sendall(line)
        data = sockobj.recv(self.MAX_LINE)
        print(data)
        self.rpc_data.append(data)
    finally:
        # Always release the socket, even when connect/send fails
        # (the original leaked it on error).
        sockobj.close()
def get_arguments(self):
    """Extract the CLI arguments specific to this command and set the
    API path for the selected host group.
    """
    HostgroupModify.get_arguments(self)
    if self.args.host_group_id is not None:
        self.host_group_id = self.args.host_group_id
    self.path = "v1/hostgroup/{0}".format(self.host_group_id)
def identifier(self, text):
    """identifier = alpha_character | "_" . {alpha_character | "_" | digit} ;

    Matches a leading letter or underscore followed by any number of
    letters, underscores, or digits; whitespace is significant.
    """
    self._attempting(text)
    return concatenation([
        # First character: letter or underscore.
        alternation([
            self.alpha_character,
            "_"
        ]),
        # Remaining characters: letters, underscores, or digits.
        zero_or_more(
            alternation([
                self.alpha_character,
                "_",
                self.digit
            ])
        )
    ], ignore_whitespace=False)(text).compressed(TokenType.identifier)
def expression(self, text):
    """expression = number , op_mult , expression
                  | expression_terminal , op_mult , number , [operator , expression]
                  | expression_terminal , op_add , [operator , expression]
                  | expression_terminal , [operator , expression] ;

    NOTE(review): the op_mult/op_add alternatives precede the generic
    [operator, expression] form — presumably alternation tries them in
    order so repetition markers aren't swallowed; confirm against the
    combinator semantics.
    """
    self._attempting(text)
    return alternation([
        # number , op_mult , expression
        concatenation([
            self.number,
            self.op_mult,
            self.expression
        ], ignore_whitespace=True),
        # expression_terminal , op_mult , number , [operator , expression]
        concatenation([
            self.expression_terminal,
            self.op_mult,
            self.number,
            option(
                concatenation([
                    self.operator,
                    self.expression
                ], ignore_whitespace=True)
            )
        ], ignore_whitespace=True),
        # expression_terminal , op_add , [operator , expression]
        concatenation([
            self.expression_terminal,
            self.op_add,
            option(
                concatenation([
                    self.operator,
                    self.expression
                ], ignore_whitespace=True)
            )
        ], ignore_whitespace=True),
        # expression_terminal , [operator , expression]
        concatenation([
            self.expression_terminal,
            option(
                concatenation([
                    self.operator,
                    self.expression
                ], ignore_whitespace=True)
            )
        ], ignore_whitespace=True)
    ])(text).retyped(TokenType.expression)
def expression_terminal(self, text):
    """expression_terminal = identifier | terminal | option_group |
    repetition_group | grouping_group | special_handling ;
    """
    self._attempting(text)
    choices = [
        self.identifier,
        self.terminal,
        self.option_group,
        self.repetition_group,
        self.grouping_group,
        self.special_handling,
    ]
    return alternation(choices)(text)
def option_group(self, text):
    """option_group = "[" , expression , "]" ;"""
    self._attempting(text)
    parser = concatenation(
        ["[", self.expression, "]"], ignore_whitespace=True)
    return parser(text).retyped(TokenType.option_group)
def terminal(self, text):
    """terminal = '"' . (printable - '"') + . '"'
                | "'" . (printable - "'") + . "'" ;

    One or more printable characters enclosed in matching double or
    single quotes; the quote character itself is excluded from the
    interior. Whitespace is significant.
    """
    self._attempting(text)
    return alternation([
        # Double-quoted terminal.
        concatenation([
            '"',
            one_or_more(
                exclusion(self.printable, '"')
            ),
            '"'
        ], ignore_whitespace=False),
        # Single-quoted terminal.
        concatenation([
            "'",
            one_or_more(
                exclusion(self.printable,"'")
            ),
            "'"
        ], ignore_whitespace=False)
    ])(text).compressed(TokenType.terminal)
def operator(self, text):
    """operator = "|" | "." | "," | "-";"""
    self._attempting(text)
    ops = ["|", ".", ",", "-"]
    return alternation(ops)(text).retyped(TokenType.operator)
def op_mult(self, text):
    """op_mult = "*" ;"""
    self._attempting(text)
    result = terminal("*")(text)
    return result.retyped(TokenType.op_mult)
def op_add(self, text):
    """op_add = "+" ;"""
    self._attempting(text)
    result = terminal("+")(text)
    return result.retyped(TokenType.op_add)
def setp(self, name, clear_derived=True, value=None, bounds=None, free=None, errors=None):
    """Set the value (and bounds) of the named parameter.

    Parameters
    ----------
    name : str
        The parameter name.
    clear_derived : bool
        Flag to clear derived objects in this model.
    value :
        The value of the parameter; if None, it is not changed.
    bounds : tuple or None
        The bounds on the parameter; if None, they are not set.
    free : bool or None
        Flag to say if parameter is fixed or free in fitting;
        if None, it is not changed.
    errors : tuple or None
        Uncertainties on the parameter; if None, they are not changed.
    """
    # Translate a possible alias to the canonical parameter name.
    name = self._mapping.get(name, name)
    try:
        self.params[name].set(
            value=value, bounds=bounds, free=free, errors=errors)
    except TypeError as msg:
        # NOTE(review): a TypeError here is only printed, not re-raised —
        # presumably deliberate best-effort behavior; confirm.
        print(msg, name)
    if clear_derived:
        self.clear_derived()
    self._cache(name)
def set_attributes(self, **kwargs):
    """Set a group of attributes (parameters and members).

    Calls `setp` directly, so kwargs can include more than just the
    parameter value (e.g., bounds, free, etc.).

    Raises:
        ValueError: if any required properties remain unset afterwards.
    """
    self.clear_derived()
    kwargs = dict(kwargs)
    for name, value in kwargs.items():
        # Warn (don't raise) if the parameter is not found.
        try:
            self.getp(name)
        except KeyError:
            print ("Warning: %s does not have attribute %s" %
                   (type(self), name))
        # Try progressively looser ways of applying the value: as
        # keyword args, as positional args, as a plain value, and
        # finally as an ordinary attribute.
        try:
            self.setp(name, clear_derived=False, **value)
        except TypeError:
            try:
                self.setp(name, clear_derived=False, *value)
            except (TypeError, KeyError):
                try:
                    self.setp(name, clear_derived=False, value=value)
                except (TypeError, KeyError):
                    self.__setattr__(name, value)
        # Pop this attribute off the list of missing properties.
        self._missing.pop(name, None)
    # Check to make sure we got all the required properties.
    if self._missing:
        raise ValueError(
            "One or more required properties are missing ",
            self._missing.keys())
def _init_properties(self):
    """
    Loop through the list of Properties, extract the derived and
    required properties and do the appropriate book-keeping
    """
    # Parameters still awaiting a value; set_attributes pops entries
    # from this dict as they are assigned.
    self._missing = {}
    for k, p in self.params.items():
        if p.required:
            self._missing[k] = p
        if isinstance(p, Derived):
            if p.loader is None:
                # Default to using _<param_name>
                p.loader = self.__getattribute__("_%s" % k)
            elif isinstance(p.loader, str):
                # A string loader names a method on this object.
                p.loader = self.__getattribute__(p.loader)
def get_params(self, pnames=None):
    """Return the `Parameter` objects with the given names.

    Parameters
    ----------
    pnames : list or None
        Names of the parameters to fetch; if None, all parameters
        are considered.

    Returns
    -------
    params : list
        Matching `Parameter` objects (non-Parameter entries skipped).
    """
    names = self.params.keys() if pnames is None else pnames
    return [self.params[n] for n in names
            if isinstance(self.params[n], Parameter)]
def param_values(self, pnames=None):
    """Return an array with the values of the named parameters.

    Parameters
    ----------
    pnames : list or None
        Names of `Parameter` objects to read; if None, all of them.

    Returns
    -------
    values : `np.array`
        Parameter values
    """
    return np.array([p.value for p in self.get_params(pnames)])
def param_errors(self, pnames=None):
    """Return an array with the uncertainties of the named parameters.

    Parameters
    ----------
    pnames : list of string or none
        Names of `Parameter` objects to read; if None, all of them.

    Returns
    -------
    ~numpy.array of parameter errors
        Note that this is a N x 2 array.
    """
    return np.array([p.errors for p in self.get_params(pnames)])
def clear_derived(self):
    """Reset every Derived property's cached value to None.

    Called by setp (and by extension __setattr__).
    """
    derived = (p for p in self.params.values() if isinstance(p, Derived))
    for prop in derived:
        prop.clear_value()
def get_arguments(self):
    """Extracts the specific arguments of this CLI"""
    ApiCli.get_arguments(self)
    plugin_name = self.args.pluginName
    if plugin_name is not None:
        self.pluginName = plugin_name
    self.path = "v1/plugins/{0}/components".format(self.pluginName)
def method(self, value):
    """Validate *value* against the implemented HTTP methods, then assign.

    Raises AttributeError when the value is not a supported method.
    """
    allowed = self._methods.keys()
    if value in allowed:
        self._method = value
    else:
        raise AttributeError("Method value not in " + str(allowed))
def _get_environment(self):
    """Load API credentials and host from TSP_* environment variables.

    TSP_API_HOST falls back to the production endpoint when unset.
    """
    env = os.environ
    if 'TSP_EMAIL' in env:
        self._email = env['TSP_EMAIL']
    if 'TSP_API_TOKEN' in env:
        self._api_token = env['TSP_API_TOKEN']
    self._api_host = env.get('TSP_API_HOST', 'api.truesight.bmc.com')
def _get_url_parameters(self):
    """Encode the stored URL parameters into a query string.

    Returns
    -------
    str
        '?key=value&...' when parameters are set, '' otherwise.
    """
    # BUG FIX: `urllib.urlencode` only exists on Python 2; the rest of
    # this codebase uses Python 3 (`urllib.request`, print()).  Import
    # the correct function locally, keeping a Python 2 fallback.
    try:
        from urllib.parse import urlencode
    except ImportError:  # Python 2
        from urllib import urlencode
    url_parameters = ''
    if self._url_parameters is not None:
        url_parameters = '?' + urlencode(self._url_parameters)
    return url_parameters
def metric_get(self, enabled=False, custom=False):
    """
    Returns a metric definition identified by name

    :param enabled: Return only enabled metrics
    :param custom: Return only custom metrics
    :return Metrics:
    """
    # BUG FIX: the original format string 'enabled={0}&{1}' dropped the
    # 'custom=' key, producing e.g. '?enabled=False&False'.
    self.path = 'v1/metrics?enabled={0}&custom={1}'.format(enabled, custom)
    self._call_api()
    self._handle_results()
    return self.metrics
def _do_get(self):
    """Issue an HTTP GET to the API endpoint with basic auth."""
    credentials = (self._email, self._api_token)
    return requests.get(self._url, data=self._data,
                        headers=self._headers, auth=credentials)
def _do_delete(self):
    """Issue an HTTP DELETE to the API endpoint with basic auth."""
    credentials = (self._email, self._api_token)
    return requests.delete(self._url, data=self._data,
                           headers=self._headers, auth=credentials)
def _do_post(self):
    """Issue an HTTP POST to the API endpoint with basic auth."""
    credentials = (self._email, self._api_token)
    return requests.post(self._url, data=self._data,
                         headers=self._headers, auth=credentials)
def _do_put(self):
    """Issue an HTTP PUT to the API endpoint with basic auth."""
    credentials = (self._email, self._api_token)
    return requests.put(self._url, data=self._data,
                        headers=self._headers, auth=credentials)
def _call_api(self):
    """
    Make an API call to get the metric definition
    """
    self._url = self.form_url()
    # Trace whatever request pieces are present before sending.
    if self._headers is not None:
        logging.debug(self._headers)
    if self._data is not None:
        logging.debug(self._data)
    if len(self._get_url_parameters()) > 0:
        logging.debug(self._get_url_parameters())
    # Dispatch to the HTTP verb handler selected via the `method` property.
    result = self._methods[self._method]()
    # On a bad status code dump the request context for post-mortem debugging.
    if not self.good_response(result.status_code):
        logging.error(self._url)
        logging.error(self._method)
        if self._data is not None:
            logging.error(self._data)
        logging.error(result)
    self._api_result = result
def get_arguments(self):
    """Extracts the specific arguments of this CLI"""
    # Base-class argument handling was left disabled in the original.
    # ApiCli.get_arguments(self)
    file_name = self.args.file_name
    if file_name is not None:
        self.file_name = file_name
def execute(self):
    """Run the steps to execute the CLI"""
    # Environment loading was left disabled in the original.
    # self._get_environment()
    self.add_arguments()
    self._parse_args()
    self.get_arguments()
    if not self._validate_arguments():
        print(self._message)
        return
    self._plot_data()
def validate_sceneInfo(self):
    """Check scene name and whether remote file exists.

    Raises WrongSceneNameError if the scene name is wrong.
    """
    prefix = self.sceneInfo.prefix
    if prefix not in self.__satellitesMap:
        raise WrongSceneNameError(
            'USGS Downloader: Prefix of %s (%s) is invalid'
            % (self.sceneInfo.name, prefix))
def verify_type_product(self, satellite):
    """Map a satellite code to its USGS product id and ground stations.

    Returns a dict with keys 'id_satelite' and 'stations'; raises
    ProductInvalidError for anything other than 'L5', 'L7' or 'L8'.
    """
    catalog = {
        'L5': ('3119', ['GLC', 'ASA', 'KIR', 'MOR', 'KHC', 'PAC',
                        'KIS', 'CHM', 'LGS', 'MGR', 'COA', 'MPS']),
        'L7': ('3373', ['EDC', 'SGS', 'AGS', 'ASN', 'SG1']),
        'L8': ('4923', ['LGN']),
    }
    if satellite not in catalog:
        raise ProductInvalidError('Type product invalid. the permitted types are: L5, L7, L8. ')
    id_satellite, stations = catalog[satellite]
    return dict(id_satelite=id_satellite, stations=stations)
def get_remote_file_size(self, url):
    """Return the size in bytes of a remote file.

    On an HTTP error this re-authenticates with EarthExplorer and
    retries recursively, propagating the retried size to the caller.
    """
    try:
        req = urllib.request.urlopen(url)
        return int(req.getheader('Content-Length').strip())
    except urllib.error.HTTPError as error:
        logger.error('Error retrieving size of the remote file %s' % error)
        print('Error retrieving size of the remote file %s' % error)
        self.connect_earthexplorer()
        # BUG FIX: the original discarded the retry's result, so the
        # caller received None after any HTTP error.
        return self.get_remote_file_size(url)
def download(self, bands=None, download_dir=None, metadata=False):
    """Download remote .tar.bz file.

    Downloads the scene archive from EarthExplorer, extracts it, keeps
    the requested band images and deletes the rest.

    bands: list of band numbers (ints) and/or 'BQA'; None means 1-11 + 'BQA'.
    download_dir: destination directory; defaults to DOWNLOAD_DIR.
    metadata: unused by this method.

    Returns a list of [file_path, size] pairs for the extracted band
    images on success, otherwise the download-status dict.
    """
    if not download_dir:
        download_dir = DOWNLOAD_DIR
    if bands is None:
        bands = list(range(1, 12)) + ['BQA']
    else:
        self.validate_bands(bands)
    # Matches '<anything>_<band>.tif' / '.tiff' (case-insensitive) and
    # captures the band suffix for filtering below.
    pattern = re.compile('^[^\s]+_(.+)\.tiff?', re.I)
    band_list = ['B%i' % (i,) if isinstance(i, int) else i for i in bands]
    image_list = []
    # Connect Earth explore
    self.connect_earthexplorer()
    # tgz name
    tgzname = self.sceneInfo.name + '.tgz'
    dest_dir = check_create_folder(join(download_dir, self.sceneInfo.name))
    # Download File
    downloaded = self.download_file(self.url, dest_dir, tgzname)
    # Log
    logger.debug('Status downloaded %s' % downloaded)
    print('\n Status downloaded %s' % downloaded)
    if downloaded['sucess']:
        # Log
        print('\n Downloaded sucess')
        logger.debug('Downloaded sucess of scene: %s' % self.sceneInfo.name)
        try:
            # Extract the archive, delete it, then prune any image that
            # is not one of the requested bands.
            tar = tarfile.open(downloaded['file_path'], 'r')
            folder_path = join(download_dir, self.sceneInfo.name)
            tar.extractall(folder_path)
            remove(downloaded['file_path'])
            images_path = listdir(folder_path)
            for image_path in images_path:
                matched = pattern.match(image_path)
                file_path = join(folder_path, image_path)
                if matched and matched.group(1) in band_list:
                    image_list.append([file_path, getsize(file_path)])
                elif matched:
                    remove(file_path)
        except tarfile.ReadError as error:
            print('\nError when extracting files. %s' % error)
            logger.error('Error when extracting files. %s' % error)
        return image_list
    else:
        logger.debug('Info downloaded: %s' % downloaded)
        print('\n Info downloaded: %s' % downloaded)
        return downloaded
def validate_bands(bands): """Validate bands parameter.""" if not isinstance(bands, list): raise TypeError('Parameter bands must be a "list"') valid_bands = list(range(1, 12)) + ['BQA'] for band in bands: if band not in valid_bands: raise InvalidBandError('%s is not a valid band' % band)
def connect_earthexplorer(self):
    """
    Authenticate with USGS EarthExplorer (no proxy).

    Installs a cookie-aware opener, posts the credentials and checks the
    response body for the sign-in failure banner.  Raises
    AutenticationUSGSFailed when authentication is rejected; re-raises
    any other error after logging it.
    """
    logger.info("Establishing connection to Earthexplorer")
    print("\n Establishing connection to Earthexplorer")
    try:
        opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor())
        urllib.request.install_opener(opener)
        params = urllib.parse.urlencode(dict(username=self.user, password=self.password))
        params = params.encode('utf-8')
        f = opener.open("https://ers.cr.usgs.gov/login", params)
        data = f.read().decode('utf-8')
        f.close()
        if data.find(
                'You must sign in as a registered user to download data or place orders for USGS EROS products') > 0:
            print("\n Authentification failed")
            logger.error("Authentification failed")
            raise AutenticationUSGSFailed('Authentification USGS failed')
        print('User %s connected with USGS' % self.user)
        logger.debug('User %s connected with USGS' % self.user)
        return
    except Exception as e:
        print('\nError when trying to connect USGS: %s' % e)
        # BUG FIX: the original logged *after* `raise`, so this log call
        # was unreachable; log first, then re-raise.
        logger.error('Error when trying to connect USGS: %s' % e)
        raise
def download_file(self, url, download_dir, sceneName):
    """ Downloads large files in pieces

    Streams the remote file in 8 MiB chunks to download_dir/sceneName,
    printing a progress bar, and retries (recursively) on several
    transient failures.  Returns a dict with keys 'total_size', 'scene',
    'sucess' and 'file_path'.
    """
    try:
        # Log
        logger.info('\nStarting download..')
        print('\n Starting download..\n')
        # Request
        req = urllib.request.urlopen(url)
        try:
            # An HTML body instead of binary data signals an error page
            # (bad credentials or scene not found).
            if req.info().get_content_type() == 'text/html':
                logger.error("error : the file format is html")
                lines = req.read()
                if lines.find('Download Not Found') > 0:
                    raise TypeError('Download USGS not found for scene: %s' % self.sceneInfo.name)
                else:
                    print(lines)
                    print(sys.exit(-1))
        except Exception as e:
            logger.error('Erro in USGS download for scene %s error: %s' % (self.sceneInfo.name, e))
            raise CredentialsUsgsError('User or Password invalid ! ')
        total_size = int(req.getheader('Content-Length').strip())
        # Anything under ~50 kB cannot be a real Landsat archive.
        if total_size < 50000:
            logger.error("Error: The file is too small to be a Landsat Image for scene %s" % self.sceneInfo.name)
            raise SmallLandsatImageError("Error: The file is too small to be a Landsat Image")
        total_size_fmt = sizeof_fmt(total_size)
        downloaded = 0
        CHUNK = 1024 * 1024 * 8
        with open(download_dir + '/' + sceneName, 'wb') as fp:
            # NOTE(review): time.clock() was removed in Python 3.8;
            # time.perf_counter() is the modern equivalent -- confirm the
            # supported interpreter versions.
            start = time.clock()
            logger.debug('Downloading {0} ({1}):'.format(self.sceneInfo.name, total_size_fmt))
            print('Downloading {0} ({1}):'.format(self.sceneInfo.name, total_size_fmt))
            while True:
                chunk = req.read(CHUNK)
                downloaded += len(chunk)
                # 50-column progress bar plus percentage and rate.
                done = int(50 * downloaded / total_size)
                print('\r[{1}{2}]{0:3.0f}% {3}ps'.format(
                    floor((float(downloaded) / total_size) * 100),
                    '-' * done, ' ' * (50 - done),
                    sizeof_fmt((downloaded // (time.clock() - start)) / 8)))
                if not chunk:
                    logger.debug('Download {0} completed({1}):'.format(self.sceneInfo.name, total_size_fmt))
                    break
                fp.write(chunk)
    except urllib.error.HTTPError as e:
        if e.code == 500:
            logger.error("File doesn't exist")
            print("\n File doesn't exist: %s " % e)
            raise RemoteFileDoesntExist("File doesn't exist")
        elif e.code == 403:
            # 403 usually means the session expired: re-authenticate and retry.
            # NOTE(review): logger.error("HTTP Error:", e.code, url) treats the
            # extra args as %-format args with no placeholders -- probably
            # intended as logger.error("HTTP Error: %s %s", e.code, url).
            # Log celery
            logger.error("HTTP Error:", e.code, url)
            logger.debug('\n trying to download it again scene: %s' % self.sceneInfo.name)
            # Log shell
            print("\n HTTP Error:", e.code, url)
            print('\n trying to download it again scene: %s' % self.sceneInfo.name)
            self.connect_earthexplorer()
            self.download_file(url, download_dir, sceneName)
        else:
            logger.error("HTTP Error:", e)
            print("HTTP Error:", e.code, url)
            raise e
    except urllib.error.URLError as e:
        print("URL Error:", e.reason, url)
        logger.error("URL Error: %s in %s" % (e, url))
        raise e
    except ConnectionResetError as e:
        print('Error ConnectionResetError: %s' % e)
        logger.error('Error ConnectionResetError: %s' % e)
        print('\n trying to download it again scene: %s' % self.sceneInfo.name)
        logger.debug('trying to download it again scene: %s' % self.sceneInfo.name)
        self.download_file(url, download_dir, sceneName)
    except urllib.error.HTTPError as e:
        # NOTE(review): unreachable -- HTTPError is already handled by the
        # first `except urllib.error.HTTPError` clause above.
        print('\n HttpError: %s' % e)
        print('\n trying to download it again scene: %s' % self.sceneInfo.name)
        logger.error('HttpError: %s' % e)
        logger.debug('trying to download it again scene: %s' % self.sceneInfo.name)
        self.download_file(url, download_dir, sceneName)
    except Exception as error:
        logger.error('Error unknown %s in download %s at scene: %s' % (error, url, self.sceneInfo.name))
        print('Error unknown %s in download % at scene: %s' % (error, url, self.sceneInfo.name))
        logger.debug('trying to download it again scene: %s' % self.sceneInfo.name)
        self.download_file(url, download_dir, sceneName)
    # If the transfer stopped short of 100%, retry the whole download.
    # NOTE(review): `downloaded`/`total_size` are unbound here when the
    # failure happened before they were assigned -- confirm the retry
    # paths above always return or re-raise in that case.
    percent = floor((float(downloaded) / total_size) * 100) or 0
    if percent != 100:
        logger.debug('trying to download it again scene: %s' % self.sceneInfo.name)
        logger.error('Download interrupted in %s%%, trying to download it again scene: %s' % (
            percent, self.sceneInfo.name))
        print('\n Download interrupted in %s%%, trying to download it again scene: %s' % (
            percent, self.sceneInfo.name))
        self.download_file(url, download_dir, sceneName)
    path_item = download_dir + '/' + sceneName
    info = {'total_size': total_size_fmt, 'scene': self.sceneInfo.name,
            'sucess': verify_sucess(total_size, path_item), 'file_path': path_item}
    return info
def prefixed_by(prefix):
    """
    Make a callable returning True for names starting with the given prefix.

    The returned callable takes two arguments, the attribute or name of
    the object, and possibly its corresponding value (which is ignored),
    as suitable for use with :meth:`ObjectLocator.is_test_module` and
    :meth:`ObjectLocator.is_test_method`\ .
    """
    def predicate(name, value=None):
        return name.startswith(prefix)
    predicate.__name__ = "prefixed_by_" + prefix
    return predicate
def get_arguments(self):
    """Extracts the specific arguments of this CLI"""
    ApiCli.get_arguments(self)
    metric_name = self.args.metric_name
    if metric_name is not None:
        self._metric_name = metric_name
    self.path = "v1/metrics/{0}".format(self._metric_name)
def timezone(zone):
    r''' Return a datetime.tzinfo implementation for the given timezone

    >>> from datetime import datetime, timedelta
    >>> utc = timezone('UTC')
    >>> eastern = timezone('US/Eastern')
    >>> eastern.zone
    'US/Eastern'
    >>> timezone(unicode('US/Eastern')) is eastern
    True
    >>> utc_dt = datetime(2002, 10, 27, 6, 0, 0, tzinfo=utc)
    >>> loc_dt = utc_dt.astimezone(eastern)
    >>> fmt = '%Y-%m-%d %H:%M:%S %Z (%z)'
    >>> loc_dt.strftime(fmt)
    '2002-10-27 01:00:00 EST (-0500)'
    >>> (loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 00:50:00 EST (-0500)'
    >>> eastern.normalize(loc_dt - timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:50:00 EDT (-0400)'
    >>> (loc_dt + timedelta(minutes=10)).strftime(fmt)
    '2002-10-27 01:10:00 EST (-0500)'

    Raises UnknownTimeZoneError if passed an unknown zone.

    >>> try:
    ...     timezone('Asia/Shangri-La')
    ... except UnknownTimeZoneError:
    ...     print('Unknown')
    Unknown

    >>> try:
    ...     timezone(unicode('\N{TRADE MARK SIGN}'))
    ... except UnknownTimeZoneError:
    ...     print('Unknown')
    Unknown
    '''
    # UTC is a singleton; avoid the cache entirely.
    if zone.upper() == 'UTC':
        return utc
    try:
        # NOTE(review): on Python 3, ascii() returns a quoted repr rather
        # than an encoded string; this looks like a port of pytz's
        # ASCII-only check -- confirm the intended behavior.
        zone = ascii(zone)
    except UnicodeEncodeError:
        # All valid timezones are ASCII
        raise UnknownTimeZoneError(zone)
    zone = _unmunge_zone(zone)
    # Build each tzinfo at most once and serve subsequent calls from the
    # module-level cache.
    if zone not in _tzinfo_cache:
        if zone in all_timezones_set:
            # fp = open_resource(zone)
            # try:
            _tzinfo_cache[zone] = build_tzinfo(zone)  # , fp)
            # finally:
            #     fp.close()
        else:
            raise UnknownTimeZoneError(zone)
    return _tzinfo_cache[zone]
def normalize(self, dt, is_dst=False):
    """Correct the timezone information on the given datetime.

    Returns *dt* with its tzinfo replaced by this timezone; raises
    ValueError if *dt* is naive (no tzinfo set).  *is_dst* is accepted
    for interface compatibility and ignored.
    """
    if dt.tzinfo is not None:
        return dt.replace(tzinfo=self)
    raise ValueError('Naive time - no tzinfo set')
def get_arguments(self):
    """Extracts the specific arguments of this CLI"""
    ApiCli.get_arguments(self)
    args = self.args
    if args.hostGroupId is not None:
        self.hostGroupId = args.hostGroupId
    if args.force is not None:
        self.force = args.force
    if self.force:
        # Ask the API to remove the host group even if it is in use.
        self.url_parameters = {"forceRemove": True}
    self.path = "v1/hostgroup/{0}".format(str(self.hostGroupId))
def get_arguments(self):
    """Extracts the specific arguments of this CLI"""
    ApiCli.get_arguments(self)
    # Each CLI option maps straight onto a same-named private attribute;
    # the original's `x if x is not None else None` is plain assignment.
    for option in ('actions', 'alarm_name', 'metric', 'aggregate',
                   'operation', 'threshold', 'trigger_interval',
                   'host_group_id', 'note', 'per_host_notify',
                   'is_disabled', 'notify_clear', 'notify_set',
                   'timeout_interval'):
        setattr(self, '_' + option, getattr(self.args, option))
def esc_split(text, delimiter=" ", maxsplit=-1, escape="\\", *, ignore_empty=False):
    """Escape-aware text splitting.

    Yields the fields of *text* split on any character in *delimiter*;
    a character preceded by *escape* is taken literally.  At most
    *maxsplit* splits are made when maxsplit >= 0.  Empty fields are
    suppressed when *ignore_empty* is true (the final field is always
    yielded).
    """
    current = []
    escaped = False
    splits_done = 0
    for ch in text:
        if escaped:
            escaped = False
            current.append(ch)
            continue
        if ch == escape:
            escaped = True
        elif ch in delimiter and splits_done != maxsplit:
            if current or not ignore_empty:
                yield "".join(current)
            splits_done += 1
            current = []
        else:
            current.append(ch)
    yield "".join(current)
def esc_join(iterable, delimiter=" ", escape="\\"):
    """Join items with *delimiter*, prefixing embedded delimiters with
    *escape* so the result round-trips through esc_split.
    """
    escaped_delim = escape + delimiter
    parts = [item.replace(delimiter, escaped_delim) for item in iterable]
    return delimiter.join(parts)
def get_newline_positions(text):
    """Return the offsets of every newline character in *text*.

    Used by get_line_and_char to efficiently map flat offsets to
    (line, char) coordinates.
    """
    return [i for i, ch in enumerate(text) if ch == "\n"]
def get_line_and_char(newline_positions, position):
    """Convert a flat text offset into (line, char) coordinates.

    *newline_positions* is the list produced by get_newline_positions
    for the same text; both coordinates are 0-based.
    """
    if not newline_positions:
        return (0, position)
    for line_no, nl_pos in enumerate(newline_positions):
        if nl_pos >= position:
            if line_no:
                return (line_no, position - newline_positions[line_no - 1] - 1)
            return (line_no, position)
    return (line_no + 1, position - newline_positions[-1] - 1)
def point_to_source(source, position, fmt=(2, True, "~~~~~", "^")): """Point to a position in source code. source is the text we're pointing in. position is a 2-tuple of (line_number, character_number) to point to. fmt is a 4-tuple of formatting parameters, they are: name default description ---- ------- ----------- surrounding_lines 2 the number of lines above and below the target line to print show_line_numbers True if true line numbers will be generated for the output_lines tail_body "~~~~~" the body of the tail pointer_char "^" the character that will point to the position """ surrounding_lines, show_line_numbers, tail_body, pointer_char = fmt line_no, char_no = position lines = source.split("\n") line = lines[line_no] if char_no >= len(tail_body): tail = " " * (char_no - len(tail_body)) + tail_body + pointer_char else: tail = " " * char_no + pointer_char + tail_body if show_line_numbers: line_no_width = int(math.ceil(math.log10(max(1, line_no + surrounding_lines))) + 1) line_fmt = "{0:" + str(line_no_width) + "}: {1}" else: line_fmt = "{1}" pivot = line_no + 1 output_lines = [(pivot, line), ("", tail)] for i in range(surrounding_lines): upper_ofst = i + 1 upper_idx = line_no + upper_ofst lower_ofst = -upper_ofst lower_idx = line_no + lower_ofst if lower_idx >= 0: output_lines.insert(0, (pivot + lower_ofst, lines[lower_idx])) if upper_idx < len(lines): output_lines.append((pivot + upper_ofst, lines[upper_idx])) return "\n".join(line_fmt.format(n, c) for n, c in output_lines)
def _dump_text(self):
    """Print each relay result row as '<name> <ISO-8601 UTC ts> <type> <msg>'."""
    for row in self._relay_output['result']:
        stamp = time.strftime("%Y-%m-%dT%H:%M:%SZ",
                              time.gmtime(int(row[1]['ts'])))
        print("{0} {1} {2} {3}".format(row[0], stamp,
                                       row[1]['type'], row[1]['msg']))
def _handle_results(self):
    """Call back function to be implemented by the CLI.

    Parses the API response body and dumps it as raw JSON or text;
    non-200 responses are ignored.
    """
    if self._api_result.status_code != requests.codes.ok:
        return
    self._relay_output = json.loads(self._api_result.text)
    if self._raw:
        self._dump_json()
    else:
        self._dump_text()
def get_arguments(self):
    """Extracts the specific arguments of this CLI"""
    PluginBase.get_arguments(self)
    args = self.args
    if args.organizationName is not None:
        self.organizationName = args.organizationName
    if args.repositoryName is not None:
        self.repositoryName = args.repositoryName
    self.path = "v1/plugins/private/{0}/{1}/{2}".format(
        self.pluginName, self.organizationName, self.repositoryName)
def extract_fields(self, metric):
    """Copy only the fields needed by the metric create/update API call."""
    wanted = ('name', 'description', 'displayName', 'displayNameShort',
              'unit', 'defaultAggregate', 'defaultResolutionMS',
              'isDisabled', 'isBuiltin', 'type')
    return {key: metric[key] for key in wanted if key in metric}
def get_arguments(self):
    """Extracts the specific arguments of this CLI"""
    AlarmModify.get_arguments(self)
    # `x if x is not None else None` collapses to plain assignment.
    self._alarm_id = self.args.alarm_id
    self.get_api_parameters()
def _filter(self):
    """Strip unwanted sections from the relay output per the CLI flags."""
    if not (self._metrics or self._control or self._plugins):
        return
    relays = self._relays['result']['relays']
    for name in relays:
        entry = relays[name]
        if self._metrics:
            del entry['metrics']
        if self._control:
            del entry['control']
        if self._plugins and 'plugins' in entry:
            del entry['plugins']