desc
stringlengths
3
26.7k
decl
stringlengths
11
7.89k
bodies
stringlengths
8
553k
def geodist(self, name, place1, place2, unit=None):
    """Return the distance between ``place1`` and ``place2`` members of
    the ``name`` key.

    ``unit`` must be one of m, km, mi or ft; meters are used by default.
    """
    pieces = [name, place1, place2]
    # Reject unknown units before talking to the server.
    if unit and unit not in ('m', 'km', 'mi', 'ft'):
        raise RedisError('GEODIST invalid unit')
    elif unit:
        pieces.append(unit)
    return self.execute_command('GEODIST', *pieces)
def geohash(self, name, *values):
    """Return the geo hash string for each member of ``values`` stored
    under the key ``name``."""
    return self.execute_command('GEOHASH', name, *values)
def geopos(self, name, *values):
    """Return the position of each member of ``values`` stored under the
    key ``name``, as (longitude, latitude) pairs."""
    return self.execute_command('GEOPOS', name, *values)
def georadius(self, name, longitude, latitude, radius, unit=None,
              withdist=False, withcoord=False, withhash=False, count=None,
              sort=None, store=None, store_dist=None):
    """Return members of key ``name`` within ``radius`` of the point
    (``longitude``, ``latitude``).

    ``unit`` must be one of m, km, mi or ft (meters by default).
    """
    return self._georadiusgeneric(
        'GEORADIUS', name, longitude, latitude, radius, unit=unit,
        withdist=withdist, withcoord=withcoord, withhash=withhash,
        count=count, sort=sort, store=store, store_dist=store_dist)
def georadiusbymember(self, name, member, radius, unit=None, withdist=False,
                      withcoord=False, withhash=False, count=None, sort=None,
                      store=None, store_dist=None):
    """Exactly like ``georadius`` except the center of the query area is
    an existing ``member`` of the geospatial index instead of an explicit
    longitude/latitude pair."""
    return self._georadiusgeneric(
        'GEORADIUSBYMEMBER', name, member, radius, unit=unit,
        withdist=withdist, withcoord=withcoord, withhash=withhash,
        count=count, sort=sort, store=store, store_dist=store_dist)
def pipeline(self, transaction=True, shard_hint=None):
    """Return a new Pipeline object that queues commands for later
    execution.

    ``transaction`` makes the queued commands execute atomically; apart
    from atomicity, pipelines reduce client/server round trips.
    """
    return Pipeline(self.connection_pool, self.response_callbacks,
                    transaction, shard_hint)
def setex(self, name, value, time):
    """Set key ``name`` to ``value`` with an expiry of ``time`` seconds.

    ``time`` may be an int or a ``datetime.timedelta``.
    """
    if isinstance(time, datetime.timedelta):
        # Whole seconds only; sub-second precision is dropped.
        time = time.seconds + time.days * 24 * 3600
    return self.execute_command('SETEX', name, time, value)
def lrem(self, name, value, num=0):
    """Remove the first ``num`` occurrences of ``value`` from list ``name``.

    num > 0 removes head-to-tail, num < 0 removes tail-to-head, and
    num = 0 removes every occurrence.
    """
    return self.execute_command('LREM', name, num, value)
'NOTE: The order of arguments differs from that of the official ZADD command. For backwards compatability, this method accepts arguments in the form of name1, score1, name2, score2, while the official Redis documents expects score1, name1, score2, name2. If you\'re looking to use the standard syntax, consider using the...
def zadd(self, name, *args, **kwargs):
pieces = [] if args: if ((len(args) % 2) != 0): raise RedisError('ZADD requires an equal number of values and scores') pieces.extend(reversed(args)) for pair in iteritems(kwargs): pieces.append(pair[1]) pieces.append(pair[0]) return sel...
'Re-subscribe to any channels and patterns previously subscribed to'
def on_connect(self, connection):
if self.channels: channels = {} for (k, v) in iteritems(self.channels): if (not self.decode_responses): k = k.decode(self.encoding, self.encoding_errors) channels[k] = v self.subscribe(**channels) if self.patterns: patterns = {} for...
def encode(self, value):
    """Encode ``value`` so it is identical to what will be read back off
    the connection."""
    if self.decode_responses and isinstance(value, bytes):
        value = value.decode(self.encoding, self.encoding_errors)
    elif not self.decode_responses and isinstance(value, unicode):
        value = value.encode(self.encoding, self.encoding_errors)
    return value
@property
def subscribed(self):
    """True while there is at least one channel or pattern subscription."""
    return bool(self.channels or self.patterns)
def execute_command(self, *args, **kwargs):
    """Execute a publish/subscribe command."""
    # Lazily grab a dedicated pub/sub connection on first use and hook up
    # the re-subscribe-on-reconnect callback.
    if self.connection is None:
        self.connection = self.connection_pool.get_connection(
            'pubsub', self.shard_hint)
        self.connection.register_connect_callback(self.on_connect)
    connection = self.connection
    self._execute(connection, connection.send_command, *args)
'Parse the response from a publish/subscribe command'
def parse_response(self, block=True, timeout=0):
connection = self.connection if (connection is None): raise RuntimeError('pubsub connection not set: did you forget to call subscribe() or psubscribe()?') if ((not block) and (not connection.can_read(timeout=timeout))): return None return self._execute(co...
'Subscribe to channel patterns. Patterns supplied as keyword arguments expect a pattern name as the key and a callable as the value. A pattern\'s callable will be invoked automatically when a message is received on that pattern rather than producing a message via ``listen()``.'
def psubscribe(self, *args, **kwargs):
if args: args = list_or_args(args[0], args[1:]) new_patterns = {} new_patterns.update(dict.fromkeys(imap(self.encode, args))) for (pattern, handler) in iteritems(kwargs): new_patterns[self.encode(pattern)] = handler ret_val = self.execute_command('PSUBSCRIBE', *iterkeys(new_patterns)...
def punsubscribe(self, *args):
    """Unsubscribe from the supplied patterns.  If empty, unsubscribe
    from all patterns."""
    if args:
        args = list_or_args(args[0], args[1:])
    return self.execute_command('PUNSUBSCRIBE', *args)
'Subscribe to channels. Channels supplied as keyword arguments expect a channel name as the key and a callable as the value. A channel\'s callable will be invoked automatically when a message is received on that channel rather than producing a message via ``listen()`` or ``get_message()``.'
def subscribe(self, *args, **kwargs):
if args: args = list_or_args(args[0], args[1:]) new_channels = {} new_channels.update(dict.fromkeys(imap(self.encode, args))) for (channel, handler) in iteritems(kwargs): new_channels[self.encode(channel)] = handler ret_val = self.execute_command('SUBSCRIBE', *iterkeys(new_channels))...
def unsubscribe(self, *args):
    """Unsubscribe from the supplied channels.  If empty, unsubscribe
    from all channels."""
    if args:
        args = list_or_args(args[0], args[1:])
    return self.execute_command('UNSUBSCRIBE', *args)
def listen(self):
    """Yield messages for channels this client is subscribed to, until
    all subscriptions are gone."""
    while self.subscribed:
        response = self.handle_message(self.parse_response(block=True))
        if response is not None:
            yield response
def get_message(self, ignore_subscribe_messages=False, timeout=0):
    """Return the next available message, or None.

    ``timeout`` (a float, in seconds) bounds how long to wait for data.
    """
    response = self.parse_response(block=False, timeout=timeout)
    if response:
        return self.handle_message(response, ignore_subscribe_messages)
    return None
'Parses a pub/sub message. If the channel or pattern was subscribed to with a message handler, the handler is invoked instead of a parsed message being returned.'
def handle_message(self, response, ignore_subscribe_messages=False):
message_type = nativestr(response[0]) if (message_type == 'pmessage'): message = {'type': message_type, 'pattern': response[1], 'channel': response[2], 'data': response[3]} else: message = {'type': message_type, 'pattern': None, 'channel': response[1], 'data': response[2]} if (message_ty...
def multi(self):
    """Start a transactional block of the pipeline after WATCH commands
    are issued; end it with ``execute``."""
    if self.explicit_transaction:
        raise RedisError('Cannot issue nested calls to MULTI')
    if self.command_stack:
        raise RedisError('Commands without an initial WATCH have already been issued')
    self.explicit_transaction = True
'Execute a command immediately, but don\'t auto-retry on a ConnectionError if we\'re already WATCHing a variable. Used when issuing WATCH or subsequent commands retrieving their values but before MULTI is called.'
def immediate_execute_command(self, *args, **options):
command_name = args[0] conn = self.connection if (not conn): conn = self.connection_pool.get_connection(command_name, self.shard_hint) self.connection = conn try: conn.send_command(*args) return self.parse_response(conn, command_name, **options) except (ConnectionErro...
def pipeline_execute_command(self, *args, **options):
    """Stage a command for the next ``execute()`` call.

    Returns the pipeline itself so calls can be chained.
    """
    self.command_stack.append((args, options))
    return self
'Execute all the commands in the current pipeline'
def execute(self, raise_on_error=True):
stack = self.command_stack if (not stack): return [] if self.scripts: self.load_scripts() if (self.transaction or self.explicit_transaction): execute = self._execute_transaction else: execute = self._execute_pipeline conn = self.connection if (not conn): ...
def watch(self, *names):
    """Watch the values at keys ``names``."""
    if self.explicit_transaction:
        raise RedisError('Cannot issue a WATCH after a MULTI')
    return self.execute_command('WATCH', *names)
def unwatch(self):
    """Unwatch all previously watched keys.

    A no-op (returning True) when nothing is being watched.
    """
    return (self.watching and self.execute_command('UNWATCH')) or True
'Execute the script, passing any required ``args``'
def __call__(self, keys=[], args=[], client=None):
if (client is None): client = self.registered_client args = (tuple(keys) + tuple(args)) if isinstance(client, BasePipeline): client.scripts.add(self) try: return client.evalsha(self.sha, len(keys), *args) except NoScriptError: self.sha = client.script_load(self.script...
@classmethod
def get_token(cls, value):
    """Return the cached Token for ``value``, creating and caching a new
    one on first use."""
    try:
        return cls._cache[value]
    except KeyError:
        token = Token(value)
        cls._cache[value] = token
        return token
'Parse an error response'
def parse_error(self, response):
error_code = response.split(' ')[0] if (error_code in self.EXCEPTION_CLASSES): response = response[(len(error_code) + 1):] exception_class = self.EXCEPTION_CLASSES[error_code] if isinstance(exception_class, dict): exception_class = exception_class.get(response, ResponseErr...
def on_connect(self, connection):
    """Called when the socket connects: adopt the socket, wrap it in a
    read buffer, and mirror the connection's response encoding."""
    self._sock = connection._sock
    self._buffer = SocketBuffer(self._sock, self.socket_read_size)
    if connection.decode_responses:
        self.encoding = connection.encoding
def on_disconnect(self):
    """Called when the socket disconnects: close and drop the socket, the
    read buffer and the encoding."""
    if self._sock is not None:
        self._sock.close()
        self._sock = None
    if self._buffer is not None:
        self._buffer.close()
        self._buffer = None
    self.encoding = None
'Connects to the Redis server if not already connected'
def connect(self):
if self._sock: return try: sock = self._connect() except socket.timeout: raise TimeoutError('Timeout connecting to server') except socket.error: e = sys.exc_info()[1] raise ConnectionError(self._error_message(e)) self._sock = sock try: sel...
'Create a TCP socket connection'
def _connect(self):
err = None for res in socket.getaddrinfo(self.host, self.port, 0, socket.SOCK_STREAM): (family, socktype, proto, canonname, socket_address) = res sock = None try: sock = socket.socket(family, socktype, proto) sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY,...
'Initialize the connection, authenticate and select a database'
def on_connect(self):
self._parser.on_connect(self) if self.password: self.send_command('AUTH', self.password) if (nativestr(self.read_response()) != 'OK'): raise AuthenticationError('Invalid Password') if self.db: self.send_command('SELECT', self.db) if (nativestr(self.read_respons...
def disconnect(self):
    """Disconnect from the Redis server, tolerating an already-dead
    socket."""
    self._parser.on_disconnect()
    if self._sock is None:
        return
    try:
        self._sock.shutdown(socket.SHUT_RDWR)
        self._sock.close()
    except socket.error:
        # Best effort: the socket may already be closed by the peer.
        pass
    self._sock = None
'Send an already packed command to the Redis server'
def send_packed_command(self, command):
if (not self._sock): self.connect() try: if isinstance(command, str): command = [command] for item in command: self._sock.sendall(item) except socket.timeout: self.disconnect() raise TimeoutError('Timeout writing to socket') except...
def send_command(self, *args):
    """Pack ``args`` into the Redis protocol and send them to the server."""
    self.send_packed_command(self.pack_command(*args))
def can_read(self, timeout=0):
    """Poll the socket (connecting first if needed) and report whether
    data is available to read."""
    sock = self._sock
    if not sock:
        self.connect()
        sock = self._sock
    # Buffered data in the parser counts as readable too.
    return self._parser.can_read() or bool(select([sock], [], [], timeout)[0])
def read_response(self):
    """Read the response to a previously sent command, disconnecting on
    any read/parse failure and raising error replies."""
    try:
        response = self._parser.read_response()
    except:  # noqa: E722 - intentionally blanket: always disconnect, then re-raise
        self.disconnect()
        raise
    if isinstance(response, ResponseError):
        # Redis error replies are surfaced as exceptions.
        raise response
    return response
'Return a bytestring representation of the value'
def encode(self, value):
if isinstance(value, Token): return value.encoded_value elif isinstance(value, bytes): return value elif isinstance(value, (int, long)): value = b(str(value)) elif isinstance(value, float): value = b(repr(value)) elif (not isinstance(value, basestring)): value...
'Pack a series of arguments into the Redis protocol'
def pack_command(self, *args):
output = [] command = args[0] if (' ' in command): args = (tuple([Token.get_token(s) for s in command.split()]) + args[1:]) else: args = ((Token.get_token(command),) + args[1:]) buff = SYM_EMPTY.join((SYM_STAR, b(str(len(args))), SYM_CRLF)) for arg in imap(self.encode, args): ...
'Pack multiple commands into the Redis protocol'
def pack_commands(self, commands):
output = [] pieces = [] buffer_length = 0 for cmd in commands: for chunk in self.pack_command(*cmd): pieces.append(chunk) buffer_length += len(chunk) if (buffer_length > 6000): output.append(SYM_EMPTY.join(pieces)) buffer_length = 0 ...
def _connect(self):
    """Create the TCP connection via the parent class, then wrap it with
    SSL."""
    sock = super(SSLConnection, self)._connect()
    sock = ssl.wrap_socket(sock,
                           cert_reqs=self.cert_reqs,
                           keyfile=self.keyfile,
                           certfile=self.certfile,
                           ca_certs=self.ca_certs)
    return sock
def _connect(self):
    """Create and return a connected Unix domain socket."""
    sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
    sock.settimeout(self.socket_timeout)
    sock.connect(self.path)
    return sock
'Return a connection pool configured from the given URL. For example:: redis://[:password]@localhost:6379/0 rediss://[:password]@localhost:6379/0 unix://[:password]@/path/to/socket.sock?db=0 Three URL schemes are supported: - ```redis://`` <http://www.iana.org/assignments/uri-schemes/prov/redis>`_ creates a normal TCP ...
@classmethod def from_url(cls, url, db=None, decode_components=False, **kwargs):
url_string = url url = urlparse(url) qs = '' if (('?' in url.path) and (not url.query)): qs = url.path.split('?', 1)[1] url = urlparse(url_string[:(- (len(qs) + 1))]) else: qs = url.query url_options = {} for (name, value) in iteritems(parse_qs(qs)): if (value...
'Create a connection pool. If max_connections is set, then this object raises redis.ConnectionError when the pool\'s limit is reached. By default, TCP connections are created unless connection_class is specified. Use redis.UnixDomainSocketConnection for unix sockets. Any additional keyword arguments are passed to the c...
def __init__(self, connection_class=Connection, max_connections=None, **connection_kwargs):
max_connections = (max_connections or (2 ** 31)) if ((not isinstance(max_connections, (int, long))) or (max_connections < 0)): raise ValueError('"max_connections" must be a positive integer') self.connection_class = connection_class self.connection_kwargs = connection_kwargs s...
'Get a connection from the pool'
def get_connection(self, command_name, *keys, **options):
self._checkpid() try: connection = self._available_connections.pop() except IndexError: connection = self.make_connection() self._in_use_connections.add(connection) return connection
def get_encoding(self):
    """Return the pool's encoding settings as a dict."""
    kwargs = self.connection_kwargs
    return {
        'encoding': kwargs.get('encoding', 'utf-8'),
        'encoding_errors': kwargs.get('encoding_errors', 'strict'),
        'decode_responses': kwargs.get('decode_responses', False),
    }
def make_connection(self):
    """Create a new connection, enforcing the pool's connection limit."""
    if self._created_connections >= self.max_connections:
        raise ConnectionError('Too many connections')
    self._created_connections += 1
    return self.connection_class(**self.connection_kwargs)
'Releases the connection back to the pool'
def release(self, connection):
self._checkpid() if (connection.pid != self.pid): return self._in_use_connections.remove(connection) self._available_connections.append(connection)
def disconnect(self):
    """Disconnect every connection in the pool, idle and in-use alike."""
    all_conns = chain(self._available_connections, self._in_use_connections)
    for connection in all_conns:
        connection.disconnect()
def make_connection(self):
    """Create a fresh connection and keep a reference to it for later
    cleanup."""
    connection = self.connection_class(**self.connection_kwargs)
    self._connections.append(connection)
    return connection
def get_connection(self, command_name, *keys, **options):
    """Block up to ``self.timeout`` seconds for a pooled connection.

    A ``None`` sentinel pulled from the queue means no connection has
    been created for that slot yet, so make one.
    """
    self._checkpid()
    connection = None
    try:
        connection = self.pool.get(block=True, timeout=self.timeout)
    except Empty:
        raise ConnectionError('No connection available.')
    if connection is None:
        connection = self.make_connection()
    return connection
def release(self, connection):
    """Return ``connection`` to the pool, silently dropping it when the
    pool is already full or the connection belongs to another process."""
    self._checkpid()
    if connection.pid != self.pid:
        return
    try:
        self.pool.put_nowait(connection)
    except Full:
        # Another process created extra connections; discard the surplus.
        pass
def disconnect(self):
    """Disconnect every connection this pool has ever created."""
    for connection in self._connections:
        connection.disconnect()
'Create a new Lock instance named ``name`` using the Redis client supplied by ``redis``. ``timeout`` indicates a maximum life for the lock. By default, it will remain locked until release() is called. ``timeout`` can be specified as a float or integer, both representing the number of seconds to wait. ``sleep`` indicate...
def __init__(self, redis, name, timeout=None, sleep=0.1, blocking=True, blocking_timeout=None, thread_local=True):
self.redis = redis self.name = name self.timeout = timeout self.sleep = sleep self.blocking = blocking self.blocking_timeout = blocking_timeout self.thread_local = bool(thread_local) self.local = (threading.local() if self.thread_local else dummy()) self.local.token = None if (se...
'Use Redis to hold a shared, distributed lock named ``name``. Returns True once the lock is acquired. If ``blocking`` is False, always return immediately. If the lock was acquired, return True, otherwise return False. ``blocking_timeout`` specifies the maximum number of seconds to wait trying to acquire the lock.'
def acquire(self, blocking=None, blocking_timeout=None):
sleep = self.sleep token = b(uuid.uuid1().hex) if (blocking is None): blocking = self.blocking if (blocking_timeout is None): blocking_timeout = self.blocking_timeout stop_trying_at = None if (blocking_timeout is not None): stop_trying_at = (mod_time.time() + blocking_tim...
def release(self):
    """Release the held lock, clearing the local token first so a failed
    do_release cannot be retried with a stale token."""
    expected_token = self.local.token
    if expected_token is None:
        raise LockError('Cannot release an unlocked lock')
    self.local.token = None
    self.do_release(expected_token)
def extend(self, additional_time):
    """Add ``additional_time`` seconds (int or float) to an already
    acquired lock."""
    if self.local.token is None:
        raise LockError('Cannot extend an unlocked lock')
    if self.timeout is None:
        raise LockError('Cannot extend a lock with no timeout')
    return self.do_extend(additional_time)
'Initialize the NewRelicPluginAgent object. :param argparse.Namespace args: Command line arguments :param str operating_system: The operating_system name'
def __init__(self, args, operating_system):
super(NewRelicPluginAgent, self).__init__(args, operating_system) self.derive_last_interval = dict() self.endpoint = self.PLATFORM_URL self.http_headers = {'Accept': 'application/json', 'Content-Type': 'application/json'} self.last_interval_start = None self.min_max_values = dict() self._wak...
'Setup the internal state for the controller class. This is invoked on Controller.run(). Items requiring the configuration object should be assigned here due to startup order of operations.'
def setup(self):
if hasattr(self.config.application, 'endpoint'): self.endpoint = self.config.application.endpoint self.http_headers['X-License-Key'] = self.license_key self.last_interval_start = time.time()
@property
def agent_data(self):
    """The agent section of the NewRelic Platform data payload."""
    return {'host': socket.gethostname(),
            'pid': os.getpid(),
            'version': __version__}
@property
def license_key(self):
    """The NewRelic license key taken from the application configuration."""
    return self.config.application.license_key
'Kick off a background thread to run the processing task. :param newrelic_plugin_agent.plugins.base.Plugin plugin: The plugin :param dict config: The config for the plugin'
def poll_plugin(self, plugin_name, plugin, config):
if (not isinstance(config, (list, tuple))): config = [config] for instance in config: thread = threading.Thread(target=self.thread_process, kwargs={'config': instance, 'name': plugin_name, 'plugin': plugin, 'poll_interval': int(self._wake_interval)}) thread.run() self.threads.app...
'This method is called after every sleep interval. If the intention is to use an IOLoop instead of sleep interval based daemon, override the run method.'
def process(self):
start_time = time.time() self.start_plugin_polling() while self.threads_running: time.sleep(1) self.threads = list() self.send_data_to_newrelic() duration = (time.time() - start_time) self.next_wake_interval = (self._wake_interval - duration) if (self.next_wake_interval < 1): ...
'Agent keeps track of previous values, so compute the differences for min/max values. :param dict component: The component to calc min/max values for'
def process_min_max_values(self, component):
guid = component['guid'] name = component['name'] if (guid not in self.min_max_values.keys()): self.min_max_values[guid] = dict() if (name not in self.min_max_values[guid].keys()): self.min_max_values[guid][name] = dict() for metric in component['metrics']: (min_val, max_val)...
@property
def proxies(self):
    """Proxy settings used to reach NewRelic, or None when unconfigured."""
    if 'proxy' in self.config.application:
        return {'http': self.config.application['proxy'],
                'https': self.config.application['proxy']}
    return None
'Create the headers and payload to send to NewRelic platform as a JSON encoded POST body.'
def send_components(self, components, metrics):
if (not metrics): LOGGER.warning('No metrics to send to NewRelic this interval') return LOGGER.info('Sending %i metrics to NewRelic', metrics) body = {'agent': self.agent_data, 'components': components} LOGGER.debug(body) try: response = reque...
'Given a qualified class name (eg. foo.bar.Foo), return the class :rtype: object'
@staticmethod def _get_plugin(plugin_path):
try: (package, class_name) = plugin_path.rsplit('.', 1) except ValueError: return None try: module_handle = importlib.import_module(package) class_handle = getattr(module_handle, class_name) return class_handle except ImportError: LOGGER.exception('Attempt...
'Iterate through each plugin and start the polling process.'
def start_plugin_polling(self):
for plugin in [key for key in self.config.application.keys() if (key not in self.IGNORE_KEYS)]: LOGGER.info('Enabling plugin: %s', plugin) plugin_class = None if (plugin in plugins.available): plugin_class = self._get_plugin(plugins.available[plugin]) elif ('.' in p...
@property
def threads_running(self):
    """Return True while any polling thread is still alive.

    :rtype: bool
    """
    # any() short-circuits exactly like the original explicit loop did.
    return any(thread.is_alive() for thread in self.threads)
def thread_process(self, name, plugin, config, poll_interval):
    """Run a single poll of ``plugin`` and publish its results.

    The (instance_name, values, derive_last_interval) triple is pushed on
    ``self.publish_queue`` for the main loop to collect.
    """
    instance_name = '%s:%s' % (name, config.get('name', 'unnamed'))
    obj = plugin(config, poll_interval,
                 self.derive_last_interval.get(instance_name))
    obj.poll()
    self.publish_queue.put((instance_name, obj.values(),
                            obj.derive_last_interval))
@property
def wake_interval(self):
    """Seconds to sleep before the next poll cycle."""
    return self.next_wake_interval
'Add all of the data points for a node :param dict stats: all of the nodes'
def add_datapoints(self, stats):
self.add_gauge_value('Clients/Blocked', 'clients', stats.get('blocked_clients', 0)) self.add_gauge_value('Clients/Connected', 'clients', stats.get('connected_clients', 0)) self.add_gauge_value('Slaves/Connected', 'slaves', stats.get('connected_slaves', 0)) self.add_gauge_value('Last master IO s...
'Top level interface to create a socket and connect it to the redis daemon. :rtype: socket'
def connect(self):
connection = super(Redis, self).connect() if (connection and self.config.get('password')): connection.send(('*2\r\n$4\r\nAUTH\r\n$%i\r\n%s\r\n' % (len(self.config['password']), self.config['password']))) buffer_value = connection.recv(self.SOCKET_RECV_MAX) if (buffer_value == '+OK\r\n'):...
'Loop in and read in all the data until we have received it all. :param socket connection: The connection :rtype: dict'
def fetch_data(self, connection):
connection.send('*0\r\ninfo\r\n') buffer_value = connection.recv(self.SOCKET_RECV_MAX) lines = buffer_value.split('\r\n') if (lines[0][0] == '$'): byte_size = int(lines[0][1:].strip()) else: return None while (len(buffer_value) < byte_size): buffer_value += connection.rec...
def add_datapoints(self, data):
    """Process the data points retrieved during the poll.

    Subclasses must override this.
    """
    raise NotImplementedError
'Add a value that will derive the current value from the difference between the last interval value and the current value. If this is the first time a stat is being added, it will report a 0 value until the next poll interval and it is able to calculate the derivative value. :param str metric_name: The name of the metr...
def add_derive_value(self, metric_name, units, value, count=None):
if (value is None): value = 0 metric = self.metric_name(metric_name, units) if (metric not in self.derive_last_interval.keys()): LOGGER.debug('Bypassing initial %s value for first run', metric) self.derive_values[metric] = self.metric_payload(0, count=0) else: ...
def add_derive_timing_value(self, metric_name, units, count, total_value,
                            last_value=None):
    """Record a derive timing metric.

    Without ``last_value`` a single metric is recorded; with it, paired
    Total and Last sub-metrics are recorded instead.
    """
    if last_value is None:
        return self.add_derive_value(metric_name, units, total_value, count)
    self.add_derive_value('%s/Total' % metric_name, units, total_value, count)
    self.add_derive_value('%s/Last' % metric_name, units, last_value, count)
def add_gauge_value(self, metric_name, units, value, min_val=None,
                    max_val=None, count=None, sum_of_squares=None):
    """Record an absolute gauge value (not a rolling counter) for
    ``metric_name`` in ``units``."""
    metric = self.metric_name(metric_name, units)
    self.gauge_values[metric] = self.metric_payload(value, min_val, max_val,
                                                   count, sum_of_squares)
    LOGGER.debug('%s: %r', metric_name, self.gauge_values[metric])
def component_data(self):
    """Build the component section of the NewRelic Platform payload.

    :rtype: dict
    """
    # Copy the derive metrics, then overlay the gauges — same precedence
    # as the original two dict.update() calls, without the needless
    # .items() round trip.
    metrics = dict(self.derive_values)
    metrics.update(self.gauge_values)
    return {'name': self.name,
            'guid': self.GUID,
            'duration': self.poll_interval,
            'metrics': metrics}
def error_message(self):
    """Log an error when a stats collection run produced nothing."""
    LOGGER.error('Error collecting stats data from %s. Please check '
                 'configuration and sure it conforms with YAML syntax',
                 self.__class__.__name__)
def finish(self):
    """Mark the end of a stats collection run, reporting an error when no
    values at all were collected."""
    if not self.derive_values and not self.gauge_values:
        self.error_message()
    else:
        LOGGER.info('%s poll successful, completed in %.2f seconds',
                    self.__class__.__name__,
                    time.time() - self.poll_start_time)
def initialize(self):
    """Reset the collection dictionaries and note the poll start time for
    a new polling interval."""
    self.poll_start_time = time.time()
    self.derive_values = dict()
    self.gauge_values = dict()
def initialize_counters(self, keys):
    """Return fresh (count, total, min_val, max_val, values) dicts, one
    entry per key, with min seeded to ``self.MAX_VAL``."""
    count, total, min_val, max_val, values = {}, {}, {}, {}, {}
    for key in keys:
        count[key] = 0
        total[key] = 0
        min_val[key] = self.MAX_VAL
        max_val[key] = 0
        values[key] = list()
    return count, total, min_val, max_val, values
def metric_name(self, metric, units):
    """Format ``metric`` (with optional ``units``) as a NewRelic platform
    component metric name."""
    if not units:
        return 'Component/%s' % metric
    return 'Component/%s[%s]' % (metric, units)
'Return the metric in the standard payload format for the NewRelic agent. :rtype: dict'
def metric_payload(self, value, min_value=None, max_value=None, count=None, squares=None):
if (not value): value = 0 if isinstance(value, basestring): value = 0 sum_of_squares = int((squares or (value * value))) if (sum_of_squares > self.MAX_VAL): sum_of_squares = 0 return {'min': min_value, 'max': max_value, 'total': value, 'count': (count or 1), 'sum_of_squares':...
@property
def name(self):
    """The component name, defaulting to the short local hostname."""
    return self.config.get('name', socket.gethostname().split('.')[0])
def poll(self):
    """Poll the server and record results in component format.

    Subclasses must override this.
    """
    raise NotImplementedError
def sum_of_squares(self, values):
    """Return the corrected sum of squares for ``values``.

    Computes sum(v^2) - (sum(v))^2 / n; returns 0 when the values sum to
    zero (which also covers the empty list, avoiding division by zero).

    :param list values: The values list
    :rtype: float
    """
    value_sum = sum(values)
    if not value_sum:
        return 0
    # Generator expression replaces the original build-a-list-and-append loop.
    return sum(v * v for v in values) - float(value_sum * value_sum) / len(values)
def values(self):
    """Return the poll results as a component data dict."""
    return self.component_data()
def connect(self):
    """Create a socket connected to the target service.

    Returns the socket, or None (implicitly) after logging a connection
    error.
    """
    try:
        connection = self.socket_connect()
    except socket.error as error:
        LOGGER.error('Error connecting to %s: %s',
                     self.__class__.__name__, error)
    else:
        return connection
def fetch_data(self, connection, read_till_empty=False):
    """Read data from ``connection``; with ``read_till_empty`` keep
    draining chunks until an empty read."""
    LOGGER.debug('Fetching data')
    received = connection.recv(self.SOCKET_RECV_MAX)
    while read_till_empty:
        chunk = connection.recv(self.SOCKET_RECV_MAX)
        if chunk:
            received += chunk
        else:
            break
    return received
'This method is called after every sleep interval. If the intention is to use an IOLoop instead of sleep interval based daemon, override the run method.'
def poll(self):
LOGGER.info('Polling %s', self.__class__.__name__) self.initialize() connection = self.connect() if (not connection): LOGGER.error('%s could not connect, skipping poll interval', self.__class__.__name__) return data = self.fetch_data(connection) connection.cl...
'Low level interface to create a socket and connect to it. :rtype: socket'
def socket_connect(self):
if ('path' in self.config): if path.exists(self.config['path']): LOGGER.debug('Connecting to UNIX domain socket: %s', self.config['path']) connection = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM) connection.connect(self.config['path']) else: ...
def fetch_data(self):
    """Return the response body from the stats URL, or '' when the
    request failed."""
    data = self.http_get()
    return data.content if data else ''
'Fetch the data from the stats URL or a specified one. :param str url: URL to fetch instead of the stats URL :rtype: requests.models.Response'
def http_get(self, url=None):
LOGGER.debug('Polling %s Stats at %s', self.__class__.__name__, (url or self.stats_url)) req_kwargs = self.request_kwargs req_kwargs.update(({'url': url} if url else {})) try: response = requests.get(**req_kwargs) except requests.ConnectionError as error: LOGGER.error('Er...
def poll(self):
    """Run one HTTP poll cycle: reset state, fetch stats, record data
    points (when any were returned) and finish up."""
    self.initialize()
    data = self.fetch_data()
    if data:
        self.add_datapoints(data)
    self.finish()
@property
def stats_url(self):
    """The stats URL assembled uniformly from the host/port/scheme/path/
    query configuration values."""
    netloc = self.config.get('host', 'localhost')
    if self.config.get('port'):
        netloc += ':%s' % self.config['port']
    return urlparse.urlunparse((self.config.get('scheme', 'http'),
                                netloc,
                                self.config.get('path', self.DEFAULT_PATH),
                                None,
                                self.config.get('query', self.DEFAULT_QUERY),
                                None))