Dataset schema:

| column | type |
|---|---|
| id | int32 (0 to 252k) |
| repo | string (7 to 55 chars) |
| path | string (4 to 127 chars) |
| func_name | string (1 to 88 chars) |
| original_string | string (75 to 19.8k chars) |
| language | string (1 class: python) |
| code | string (75 to 19.8k chars) |
| code_tokens | list |
| docstring | string (3 to 17.3k chars) |
| docstring_tokens | list |
| sha | string (40 chars) |
| url | string (87 to 242 chars) |

| 18,200 | sebdah/dynamic-dynamodb | dynamic_dynamodb/config/config_file_parser.py | __parse_options | python |

def __parse_options(config_file, section, options):
""" Parse the section options
:type config_file: ConfigParser object
:param config_file: The config file object to use
:type section: str
:param section: Which section to read in the configuration file
:type options: list of dicts
:param options:
A list of options to parse. Example list::
[{
'key': 'aws_access_key_id',
'option': 'aws-access-key-id',
'required': False,
            'type': 'str'
}]
:returns: dict
"""
configuration = {}
for option in options:
try:
if option.get('type') == 'str':
configuration[option.get('key')] = \
config_file.get(section, option.get('option'))
elif option.get('type') == 'int':
try:
configuration[option.get('key')] = \
config_file.getint(section, option.get('option'))
except ValueError:
print('Error: Expected an integer value for {0}'.format(
option.get('option')))
sys.exit(1)
elif option.get('type') == 'float':
try:
configuration[option.get('key')] = \
config_file.getfloat(section, option.get('option'))
except ValueError:
                    print('Error: Expected a float value for {0}'.format(
option.get('option')))
sys.exit(1)
elif option.get('type') == 'bool':
try:
configuration[option.get('key')] = \
config_file.getboolean(section, option.get('option'))
except ValueError:
                    print('Error: Expected a boolean value for {0}'.format(
option.get('option')))
sys.exit(1)
elif option.get('type') == 'dict':
configuration[option.get('key')] = \
ast.literal_eval(
config_file.get(section, option.get('option')))
else:
configuration[option.get('key')] = \
config_file.get(section, option.get('option'))
except ConfigParser.NoOptionError:
if option.get('required'):
print('Missing [{0}] option "{1}" in configuration'.format(
section, option.get('option')))
sys.exit(1)
return configuration

sha: bfd0ca806b1c3301e724696de90ef0f973410493
url: https://github.com/sebdah/dynamic-dynamodb/blob/bfd0ca806b1c3301e724696de90ef0f973410493/dynamic_dynamodb/config/config_file_parser.py#L392-L453
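
A minimal usage sketch for __parse_options above, assuming the Python 2 ConfigParser module the original code imports; the section and option names are hypothetical:

try:
    import ConfigParser  # Python 2 module used by the original code
except ImportError:
    import configparser as ConfigParser  # Python 3 equivalent

config_file = ConfigParser.RawConfigParser()
config_file.add_section('global')
config_file.set('global', 'check-interval', '300')

options = [{
    'key': 'check_interval',     # key under which the value is returned
    'option': 'check-interval',  # option name looked up in the section
    'required': False,
    'type': 'int',               # parsed with config_file.getint()
}]

# __parse_options(config_file, 'global', options)
# would return {'check_interval': 300}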

| 18,201 | sebdah/dynamic-dynamodb | dynamic_dynamodb/__init__.py | main | python |

def main():
""" Main function called from dynamic-dynamodb """
try:
if get_global_option('show_config'):
            print(json.dumps(config.get_configuration(), indent=2))
elif get_global_option('daemon'):
daemon = DynamicDynamoDBDaemon(
'{0}/dynamic-dynamodb.{1}.pid'.format(
get_global_option('pid_file_dir'),
get_global_option('instance')))
if get_global_option('daemon') == 'start':
logger.debug('Starting daemon')
try:
daemon.start()
logger.info('Daemon started')
except IOError as error:
logger.error('Could not create pid file: {0}'.format(error))
logger.error('Daemon not started')
elif get_global_option('daemon') == 'stop':
logger.debug('Stopping daemon')
daemon.stop()
logger.info('Daemon stopped')
sys.exit(0)
elif get_global_option('daemon') == 'restart':
logger.debug('Restarting daemon')
daemon.restart()
logger.info('Daemon restarted')
elif get_global_option('daemon') in ['foreground', 'fg']:
logger.debug('Starting daemon in foreground')
daemon.run()
logger.info('Daemon started in foreground')
else:
print(
'Valid options for --daemon are start, '
'stop, restart, and foreground')
sys.exit(1)
else:
if get_global_option('run_once'):
execute()
else:
while True:
execute()
except Exception as error:
logger.exception(error)

sha: bfd0ca806b1c3301e724696de90ef0f973410493
url: https://github.com/sebdah/dynamic-dynamodb/blob/bfd0ca806b1c3301e724696de90ef0f973410493/dynamic_dynamodb/__init__.py#L56-L104

| 18,202 | ransford/sllurp | sllurp/llrp_decoder.py | decode_tve_parameter | python |

def decode_tve_parameter(data):
"""Generic byte decoding function for TVE parameters.
Given an array of bytes, tries to interpret a TVE parameter from the
beginning of the array. Returns the decoded data and the number of bytes
it read."""
(nontve,) = struct.unpack(nontve_header, data[:nontve_header_len])
if nontve == 1023: # customparameter
(size,) = struct.unpack('!H',
data[nontve_header_len:nontve_header_len+2])
(subtype,) = struct.unpack('!H', data[size-4:size-2])
param_name, param_fmt = ext_param_formats[subtype]
(unpacked,) = struct.unpack(param_fmt, data[size-2:size])
return {param_name: unpacked}, size
# decode the TVE field's header (1 bit "reserved" + 7-bit type)
(msgtype,) = struct.unpack(tve_header, data[:tve_header_len])
if not msgtype & 0b10000000:
# not a TV-encoded param
return None, 0
msgtype = msgtype & 0x7f
try:
param_name, param_fmt = tve_param_formats[msgtype]
logger.debug('found %s (type=%s)', param_name, msgtype)
except KeyError:
return None, 0
# decode the body
nbytes = struct.calcsize(param_fmt)
end = tve_header_len + nbytes
try:
unpacked = struct.unpack(param_fmt, data[tve_header_len:end])
return {param_name: unpacked}, end
except struct.error:
return None, 0

sha: d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1
url: https://github.com/ransford/sllurp/blob/d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1/sllurp/llrp_decoder.py#L39-L74
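
A self-contained sketch of the TV-header decoding step performed by decode_tve_parameter; the parameter type and body format below are invented for illustration and are not sllurp's actual tve_param_formats table:

import struct

# Hypothetical TV-encoded parameter: TV flag (high bit) set, 7-bit type 12,
# followed by a 16-bit big-endian value.
data = struct.pack('!BH', 0b10000000 | 12, 500)

(header,) = struct.unpack('!B', data[:1])
assert header & 0b10000000                 # high bit set: TV-encoded param
param_type = header & 0x7f                 # 7-bit parameter type -> 12
(value,) = struct.unpack('!H', data[1:3])  # body per the type's format -> 500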

| 18,203 | ransford/sllurp | setup.py | read | python |

def read(filename):
"""
Get the long description from a file.
"""
fname = os.path.join(here, filename)
with codecs.open(fname, encoding='utf-8') as f:
return f.read()

sha: d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1
url: https://github.com/ransford/sllurp/blob/d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1/setup.py#L10-L16

| 18,204 | ransford/sllurp | sllurp/llrp.py | LLRPMessage.deserialize | python |

def deserialize(self):
"""Turns a sequence of bytes into a message dictionary."""
if self.msgbytes is None:
raise LLRPError('No message bytes to deserialize.')
data = self.msgbytes
msgtype, length, msgid = struct.unpack(self.full_hdr_fmt,
data[:self.full_hdr_len])
ver = (msgtype >> 10) & BITMASK(3)
msgtype = msgtype & BITMASK(10)
try:
name = Message_Type2Name[msgtype]
logger.debug('deserializing %s command', name)
decoder = Message_struct[name]['decode']
except KeyError:
raise LLRPError('Cannot find decoder for message type '
'{}'.format(msgtype))
body = data[self.full_hdr_len:length]
try:
self.msgdict = {
name: dict(decoder(body))
}
self.msgdict[name]['Ver'] = ver
self.msgdict[name]['Type'] = msgtype
self.msgdict[name]['ID'] = msgid
logger.debug('done deserializing %s command', name)
except ValueError:
logger.exception('Unable to decode body %s, %s', body,
decoder(body))
except LLRPError:
logger.exception('Problem with %s message format', name)
return ''
return ''

sha: d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1
url: https://github.com/ransford/sllurp/blob/d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1/sllurp/llrp.py#L66-L97
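
For reference, a sketch of the header split that deserialize performs, assuming full_hdr_fmt is '!HII' and that BITMASK(n) yields n low-order set bits (both inferred from the code above, not verified against sllurp):

import struct

def BITMASK(n):
    # assumed helper: n low-order bits set, e.g. BITMASK(10) == 0x3FF
    return (1 << n) - 1

full_hdr_fmt = '!HII'                         # ver/type word, length, msg ID
full_hdr_len = struct.calcsize(full_hdr_fmt)  # 10 bytes

# Hypothetical KEEPALIVE-style header: version 1, message type 62,
# total length 10 (header only), message ID 7.
word = (1 << 10) | 62
data = struct.pack(full_hdr_fmt, word, full_hdr_len, 7)

msgtype, length, msgid = struct.unpack(full_hdr_fmt, data[:full_hdr_len])
ver = (msgtype >> 10) & BITMASK(3)   # -> 1
msgtype = msgtype & BITMASK(10)      # -> 62
body = data[full_hdr_len:length]     # empty here; parsed by the type's decoder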

| 18,205 | ransford/sllurp | sllurp/llrp.py | LLRPClient.parseReaderConfig | python |

def parseReaderConfig(self, confdict):
"""Parse a reader configuration dictionary.
Examples:
{
Type: 23,
Data: b'\x00'
}
{
Type: 1023,
Vendor: 25882,
Subtype: 21,
Data: b'\x00'
}
"""
logger.debug('parseReaderConfig input: %s', confdict)
conf = {}
for k, v in confdict.items():
if not k.startswith('Parameter'):
continue
ty = v['Type']
data = v['Data']
vendor = None
subtype = None
try:
vendor, subtype = v['Vendor'], v['Subtype']
except KeyError:
pass
if ty == 1023:
if vendor == 25882 and subtype == 37:
tempc = struct.unpack('!H', data)[0]
conf.update(temperature=tempc)
else:
conf[ty] = data
return conf

sha: d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1
url: https://github.com/ransford/sllurp/blob/d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1/sllurp/llrp.py#L291-L326
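
A hypothetical input mirroring the docstring's second example, exercising the one case the method special-cases (custom type 1023, vendor 25882, subtype 37, a temperature encoded as a big-endian unsigned short):

import struct

confdict = {
    'Parameter1': {
        'Type': 1023,                   # custom parameter
        'Vendor': 25882,
        'Subtype': 37,
        'Data': struct.pack('!H', 31),  # 31 degrees C
    },
}
# parseReaderConfig(confdict) would return {'temperature': 31}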

| 18,206 | ransford/sllurp | sllurp/llrp.py | LLRPClient.parseCapabilities | python |

def parseCapabilities(self, capdict):
"""Parse a capabilities dictionary and adjust instance settings.
At the time this function is called, the user has requested some
settings (e.g., mode identifier), but we haven't yet asked the reader
whether those requested settings are within its capabilities. This
function's job is to parse the reader's capabilities, compare them
against any requested settings, and raise an error if there are any
incompatibilities.
Sets the following instance variables:
- self.antennas (list of antenna numbers, e.g., [1] or [1, 2])
- self.tx_power_table (list of dBm values)
- self.reader_mode (dictionary of mode settings, e.g., Tari)
Raises ReaderConfigurationError if the requested settings are not
within the reader's capabilities.
"""
# check requested antenna set
gdc = capdict['GeneralDeviceCapabilities']
max_ant = gdc['MaxNumberOfAntennaSupported']
if max(self.antennas) > max_ant:
reqd = ','.join(map(str, self.antennas))
avail = ','.join(map(str, range(1, max_ant + 1)))
errmsg = ('Invalid antenna set specified: requested={},'
' available={}; ignoring invalid antennas'.format(
reqd, avail))
raise ReaderConfigurationError(errmsg)
logger.debug('set antennas: %s', self.antennas)
# parse available transmit power entries, set self.tx_power
bandcap = capdict['RegulatoryCapabilities']['UHFBandCapabilities']
self.tx_power_table = self.parsePowerTable(bandcap)
logger.debug('tx_power_table: %s', self.tx_power_table)
self.setTxPower(self.tx_power)
# parse list of reader's supported mode identifiers
regcap = capdict['RegulatoryCapabilities']
modes = regcap['UHFBandCapabilities']['UHFRFModeTable']
mode_list = [modes[k] for k in sorted(modes.keys(), key=natural_keys)]
# select a mode by matching available modes to requested parameters
if self.mode_identifier is not None:
logger.debug('Setting mode from mode_identifier=%s',
self.mode_identifier)
try:
mode = [mo for mo in mode_list
if mo['ModeIdentifier'] == self.mode_identifier][0]
self.reader_mode = mode
except IndexError:
valid_modes = sorted(mo['ModeIdentifier'] for mo in mode_list)
errstr = ('Invalid mode_identifier; valid mode_identifiers'
' are {}'.format(valid_modes))
raise ReaderConfigurationError(errstr)
# if we're trying to set Tari explicitly, but the selected mode doesn't
# support the requested Tari, that's a configuration error.
if self.reader_mode and self.tari:
if self.reader_mode['MinTari'] < self.tari < self.reader_mode['MaxTari']:
logger.debug('Overriding mode Tari %s with requested Tari %s',
self.reader_mode['MaxTari'], self.tari)
else:
            errstr = ('Requested Tari {} is incompatible with selected '
                      'mode {}'.format(self.tari, self.reader_mode))
            raise ReaderConfigurationError(errstr)
logger.info('using reader mode: %s', self.reader_mode)

sha: d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1
url: https://github.com/ransford/sllurp/blob/d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1/sllurp/llrp.py#L328-L393

| 18,207 | ransford/sllurp | sllurp/llrp.py | LLRPClient.startInventory | python |

def startInventory(self, proto=None, force_regen_rospec=False):
"""Add a ROSpec to the reader and enable it."""
if self.state == LLRPClient.STATE_INVENTORYING:
logger.warn('ignoring startInventory() while already inventorying')
return None
rospec = self.getROSpec(force_new=force_regen_rospec)['ROSpec']
logger.info('starting inventory')
# upside-down chain of callbacks: add, enable, start ROSpec
# started_rospec = defer.Deferred()
# started_rospec.addCallback(self._setState_wrapper,
# LLRPClient.STATE_INVENTORYING)
# started_rospec.addErrback(self.panic, 'START_ROSPEC failed')
# logger.debug('made started_rospec')
enabled_rospec = defer.Deferred()
enabled_rospec.addCallback(self._setState_wrapper,
LLRPClient.STATE_INVENTORYING)
# enabled_rospec.addCallback(self.send_START_ROSPEC, rospec,
# onCompletion=started_rospec)
enabled_rospec.addErrback(self.panic, 'ENABLE_ROSPEC failed')
logger.debug('made enabled_rospec')
added_rospec = defer.Deferred()
added_rospec.addCallback(self.send_ENABLE_ROSPEC, rospec,
onCompletion=enabled_rospec)
added_rospec.addErrback(self.panic, 'ADD_ROSPEC failed')
logger.debug('made added_rospec')
self.send_ADD_ROSPEC(rospec, onCompletion=added_rospec)

sha: d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1
url: https://github.com/ransford/sllurp/blob/d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1/sllurp/llrp.py#L1038-L1069
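
The "upside-down chain of callbacks" comment above wires the last step's Deferred first and hands it to the step before it. A stripped-down sketch of that pattern with Twisted; the names are illustrative, not sllurp's API:

from twisted.internet import defer

def send_add_rospec(on_completion):
    # stand-in for send_ADD_ROSPEC: fires the hook when the response arrives
    on_completion.callback('ADD_ROSPEC_RESPONSE')

enabled_rospec = defer.Deferred()
enabled_rospec.addCallback(lambda _: print('state -> INVENTORYING'))

added_rospec = defer.Deferred()
added_rospec.addCallback(
    lambda _: enabled_rospec.callback('ENABLE_ROSPEC_RESPONSE'))

send_add_rospec(added_rospec)  # add -> enable -> inventorying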

| 18,208 | ransford/sllurp | sllurp/llrp.py | LLRPClient.stopPolitely | python |

def stopPolitely(self, disconnect=False):
"""Delete all active ROSpecs. Return a Deferred that will be called
when the DELETE_ROSPEC_RESPONSE comes back."""
logger.info('stopping politely')
if disconnect:
logger.info('will disconnect when stopped')
self.disconnecting = True
self.sendMessage({
'DELETE_ACCESSSPEC': {
'Ver': 1,
'Type': 41,
'ID': 0,
'AccessSpecID': 0 # all AccessSpecs
}})
self.setState(LLRPClient.STATE_SENT_DELETE_ACCESSSPEC)
d = defer.Deferred()
d.addCallback(self.stopAllROSpecs)
d.addErrback(self.panic, 'DELETE_ACCESSSPEC failed')
self._deferreds['DELETE_ACCESSSPEC_RESPONSE'].append(d)
return d

sha: d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1
url: https://github.com/ransford/sllurp/blob/d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1/sllurp/llrp.py#L1103-L1124

| 18,209 | ransford/sllurp | sllurp/llrp.py | LLRPClient.parsePowerTable | python |

def parsePowerTable(uhfbandcap):
"""Parse the transmit power table
@param uhfbandcap: Capability dictionary from
self.capabilities['RegulatoryCapabilities']['UHFBandCapabilities']
@return: a list of [0, dBm value, dBm value, ...]
>>> LLRPClient.parsePowerTable({'TransmitPowerLevelTableEntry1': \
{'Index': 1, 'TransmitPowerValue': 3225}})
[0, 32.25]
>>> LLRPClient.parsePowerTable({})
[0]
"""
bandtbl = {k: v for k, v in uhfbandcap.items()
if k.startswith('TransmitPowerLevelTableEntry')}
tx_power_table = [0] * (len(bandtbl) + 1)
for k, v in bandtbl.items():
idx = v['Index']
tx_power_table[idx] = int(v['TransmitPowerValue']) / 100.0
return tx_power_table

sha: d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1
url: https://github.com/ransford/sllurp/blob/d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1/sllurp/llrp.py#L1143-L1163

| 18,210 | ransford/sllurp | sllurp/llrp.py | LLRPClient.get_tx_power | python |

def get_tx_power(self, tx_power):
"""Validates tx_power against self.tx_power_table
@param tx_power: index into the self.tx_power_table list; if tx_power
is 0 then the max power from self.tx_power_table
@return: a dict {antenna: (tx_power_index, power_dbm)} from
self.tx_power_table
@raise: LLRPError if the requested index is out of range
"""
if not self.tx_power_table:
logger.warn('get_tx_power(): tx_power_table is empty!')
return {}
logger.debug('requested tx_power: %s', tx_power)
min_power = self.tx_power_table.index(min(self.tx_power_table))
max_power = self.tx_power_table.index(max(self.tx_power_table))
ret = {}
for antid, tx_power in tx_power.items():
if tx_power == 0:
# tx_power = 0 means max power
max_power_dbm = max(self.tx_power_table)
tx_power = self.tx_power_table.index(max_power_dbm)
ret[antid] = (tx_power, max_power_dbm)
try:
power_dbm = self.tx_power_table[tx_power]
ret[antid] = (tx_power, power_dbm)
except IndexError:
            raise LLRPError('Invalid tx_power for antenna {}: '
                            'requested={}, min_available={}, '
                            'max_available={}'.format(
                                antid, tx_power, min_power,
                                max_power))
return ret

sha: d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1
url: https://github.com/ransford/sllurp/blob/d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1/sllurp/llrp.py#L1165-L1199
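
A worked example under an assumed power table (dBm values; index 0 is the placeholder entry):

# tx_power_table = [0, 15.0, 25.5, 32.25]
#
# get_tx_power({1: 0})  -> {1: (3, 32.25)}  # 0 means "use max power"
# get_tx_power({1: 2})  -> {1: (2, 25.5)}   # index validated by table lookup
# get_tx_power({1: 9})  -> raises LLRPError (index outside the table)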

| 18,211 | ransford/sllurp | sllurp/llrp.py | LLRPClient.setTxPower | python |

def setTxPower(self, tx_power):
"""Set the transmission power for one or more antennas.
@param tx_power: index into self.tx_power_table
"""
tx_pow_validated = self.get_tx_power(tx_power)
logger.debug('tx_pow_validated: %s', tx_pow_validated)
needs_update = False
for ant, (tx_pow_idx, tx_pow_dbm) in tx_pow_validated.items():
if self.tx_power[ant] != tx_pow_idx:
self.tx_power[ant] = tx_pow_idx
needs_update = True
logger.debug('tx_power for antenna %s: %s (%s dBm)', ant,
tx_pow_idx, tx_pow_dbm)
if needs_update and self.state == LLRPClient.STATE_INVENTORYING:
logger.debug('changing tx power; will stop politely, then resume')
d = self.stopPolitely()
d.addCallback(self.startInventory, force_regen_rospec=True)

sha: d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1
url: https://github.com/ransford/sllurp/blob/d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1/sllurp/llrp.py#L1201-L1220

| 18,212 | ransford/sllurp | sllurp/llrp.py | LLRPClient.pause | python |

def pause(self, duration_seconds=0, force=False, force_regen_rospec=False):
"""Pause an inventory operation for a set amount of time."""
logger.debug('pause(%s)', duration_seconds)
if self.state != LLRPClient.STATE_INVENTORYING:
if not force:
logger.info('ignoring pause(); not inventorying (state==%s)',
self.getStateName(self.state))
return None
else:
logger.info('forcing pause()')
if duration_seconds:
logger.info('pausing for %s seconds', duration_seconds)
rospec = self.getROSpec(force_new=force_regen_rospec)['ROSpec']
self.sendMessage({
'DISABLE_ROSPEC': {
'Ver': 1,
'Type': 25,
'ID': 0,
'ROSpecID': rospec['ROSpecID']
}})
self.setState(LLRPClient.STATE_PAUSING)
d = defer.Deferred()
d.addCallback(self._setState_wrapper, LLRPClient.STATE_PAUSED)
d.addErrback(self.complain, 'pause() failed')
self._deferreds['DISABLE_ROSPEC_RESPONSE'].append(d)
if duration_seconds > 0:
startAgain = task.deferLater(reactor, duration_seconds,
lambda: None)
startAgain.addCallback(lambda _: self.resume())
return d

sha: d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1
url: https://github.com/ransford/sllurp/blob/d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1/sllurp/llrp.py#L1222-L1257

| 18,213 | ransford/sllurp | sllurp/llrp.py | LLRPClient.sendMessage | python |

def sendMessage(self, msg_dict):
"""Serialize and send a dict LLRP Message
    Note: this modifies the message IDs in the caller's msg_dict in place,
    since msg_dict is passed by reference; that is acceptable here.
"""
sent_ids = []
for name in msg_dict:
self.last_msg_id += 1
msg_dict[name]['ID'] = self.last_msg_id
sent_ids.append((name, self.last_msg_id))
llrp_msg = LLRPMessage(msgdict=msg_dict)
assert llrp_msg.msgbytes, "LLRPMessage is empty"
self.transport.write(llrp_msg.msgbytes)
return sent_ids

sha: d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1
url: https://github.com/ransford/sllurp/blob/d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1/sllurp/llrp.py#L1283-L1299

| 18,214 | ransford/sllurp | sllurp/llrp.py | LLRPClientFactory.buildProtocol | python |

def buildProtocol(self, addr):
"""Get a new LLRP client protocol object.
Consult self.antenna_dict to look up antennas to use.
"""
self.resetDelay() # reset reconnection backoff state
clargs = self.client_args.copy()
# optionally configure antennas from self.antenna_dict, which looks
# like {'10.0.0.1:5084': {'1': 'ant1', '2': 'ant2'}}
hostport = '{}:{}'.format(addr.host, addr.port)
logger.debug('Building protocol for %s', hostport)
if hostport in self.antenna_dict:
clargs['antennas'] = [
int(x) for x in self.antenna_dict[hostport].keys()]
elif addr.host in self.antenna_dict:
clargs['antennas'] = [
int(x) for x in self.antenna_dict[addr.host].keys()]
logger.debug('Antennas in buildProtocol: %s', clargs.get('antennas'))
logger.debug('%s start_inventory: %s', hostport,
clargs.get('start_inventory'))
if self.start_first and not self.protocols:
# this is the first protocol, so let's start it inventorying
clargs['start_inventory'] = True
proto = LLRPClient(factory=self, **clargs)
# register state-change callbacks with new client
for state, cbs in self._state_callbacks.items():
for cb in cbs:
proto.addStateCallback(state, cb)
# register message callbacks with new client
for msg_type, cbs in self._message_callbacks.items():
for cb in cbs:
proto.addMessageCallback(msg_type, cb)
return proto
|
python
|
def buildProtocol(self, addr):
"""Get a new LLRP client protocol object.
Consult self.antenna_dict to look up antennas to use.
"""
self.resetDelay() # reset reconnection backoff state
clargs = self.client_args.copy()
# optionally configure antennas from self.antenna_dict, which looks
# like {'10.0.0.1:5084': {'1': 'ant1', '2': 'ant2'}}
hostport = '{}:{}'.format(addr.host, addr.port)
logger.debug('Building protocol for %s', hostport)
if hostport in self.antenna_dict:
clargs['antennas'] = [
int(x) for x in self.antenna_dict[hostport].keys()]
elif addr.host in self.antenna_dict:
clargs['antennas'] = [
int(x) for x in self.antenna_dict[addr.host].keys()]
logger.debug('Antennas in buildProtocol: %s', clargs.get('antennas'))
logger.debug('%s start_inventory: %s', hostport,
clargs.get('start_inventory'))
if self.start_first and not self.protocols:
# this is the first protocol, so let's start it inventorying
clargs['start_inventory'] = True
proto = LLRPClient(factory=self, **clargs)
# register state-change callbacks with new client
for state, cbs in self._state_callbacks.items():
for cb in cbs:
proto.addStateCallback(state, cb)
# register message callbacks with new client
for msg_type, cbs in self._message_callbacks.items():
for cb in cbs:
proto.addMessageCallback(msg_type, cb)
return proto
|
[
"def",
"buildProtocol",
"(",
"self",
",",
"addr",
")",
":",
"self",
".",
"resetDelay",
"(",
")",
"# reset reconnection backoff state",
"clargs",
"=",
"self",
".",
"client_args",
".",
"copy",
"(",
")",
"# optionally configure antennas from self.antenna_dict, which looks",
"# like {'10.0.0.1:5084': {'1': 'ant1', '2': 'ant2'}}",
"hostport",
"=",
"'{}:{}'",
".",
"format",
"(",
"addr",
".",
"host",
",",
"addr",
".",
"port",
")",
"logger",
".",
"debug",
"(",
"'Building protocol for %s'",
",",
"hostport",
")",
"if",
"hostport",
"in",
"self",
".",
"antenna_dict",
":",
"clargs",
"[",
"'antennas'",
"]",
"=",
"[",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"self",
".",
"antenna_dict",
"[",
"hostport",
"]",
".",
"keys",
"(",
")",
"]",
"elif",
"addr",
".",
"host",
"in",
"self",
".",
"antenna_dict",
":",
"clargs",
"[",
"'antennas'",
"]",
"=",
"[",
"int",
"(",
"x",
")",
"for",
"x",
"in",
"self",
".",
"antenna_dict",
"[",
"addr",
".",
"host",
"]",
".",
"keys",
"(",
")",
"]",
"logger",
".",
"debug",
"(",
"'Antennas in buildProtocol: %s'",
",",
"clargs",
".",
"get",
"(",
"'antennas'",
")",
")",
"logger",
".",
"debug",
"(",
"'%s start_inventory: %s'",
",",
"hostport",
",",
"clargs",
".",
"get",
"(",
"'start_inventory'",
")",
")",
"if",
"self",
".",
"start_first",
"and",
"not",
"self",
".",
"protocols",
":",
"# this is the first protocol, so let's start it inventorying",
"clargs",
"[",
"'start_inventory'",
"]",
"=",
"True",
"proto",
"=",
"LLRPClient",
"(",
"factory",
"=",
"self",
",",
"*",
"*",
"clargs",
")",
"# register state-change callbacks with new client",
"for",
"state",
",",
"cbs",
"in",
"self",
".",
"_state_callbacks",
".",
"items",
"(",
")",
":",
"for",
"cb",
"in",
"cbs",
":",
"proto",
".",
"addStateCallback",
"(",
"state",
",",
"cb",
")",
"# register message callbacks with new client",
"for",
"msg_type",
",",
"cbs",
"in",
"self",
".",
"_message_callbacks",
".",
"items",
"(",
")",
":",
"for",
"cb",
"in",
"cbs",
":",
"proto",
".",
"addMessageCallback",
"(",
"msg_type",
",",
"cb",
")",
"return",
"proto"
] |
Get a new LLRP client protocol object.
Consult self.antenna_dict to look up antennas to use.
|
[
"Get",
"a",
"new",
"LLRP",
"client",
"protocol",
"object",
"."
] |
d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1
|
https://github.com/ransford/sllurp/blob/d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1/sllurp/llrp.py#L1339-L1376
|
18,215
|
ransford/sllurp
|
sllurp/llrp.py
|
LLRPClientFactory.setTxPower
|
def setTxPower(self, tx_power, peername=None):
"""Set the transmit power on one or all readers
If peername is None, set the transmit power for all readers.
Otherwise, set it for that specific reader.
"""
if peername:
protocols = [p for p in self.protocols
if p.peername[0] == peername]
else:
protocols = self.protocols
for proto in protocols:
proto.setTxPower(tx_power)
|
python
|
def setTxPower(self, tx_power, peername=None):
"""Set the transmit power on one or all readers
If peername is None, set the transmit power for all readers.
Otherwise, set it for that specific reader.
"""
if peername:
protocols = [p for p in self.protocols
if p.peername[0] == peername]
else:
protocols = self.protocols
for proto in protocols:
proto.setTxPower(tx_power)
|
[
"def",
"setTxPower",
"(",
"self",
",",
"tx_power",
",",
"peername",
"=",
"None",
")",
":",
"if",
"peername",
":",
"protocols",
"=",
"[",
"p",
"for",
"p",
"in",
"self",
".",
"protocols",
"if",
"p",
".",
"peername",
"[",
"0",
"]",
"==",
"peername",
"]",
"else",
":",
"protocols",
"=",
"self",
".",
"protocols",
"for",
"proto",
"in",
"protocols",
":",
"proto",
".",
"setTxPower",
"(",
"tx_power",
")"
] |
Set the transmit power on one or all readers
If peername is None, set the transmit power for all readers.
Otherwise, set it for that specific reader.
|
[
"Set",
"the",
"transmit",
"power",
"on",
"one",
"or",
"all",
"readers"
] |
d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1
|
https://github.com/ransford/sllurp/blob/d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1/sllurp/llrp.py#L1411-L1423
|
18,216
|
ransford/sllurp
|
sllurp/llrp.py
|
LLRPClientFactory.politeShutdown
|
def politeShutdown(self):
"""Stop inventory on all connected readers."""
protoDeferreds = []
for proto in self.protocols:
protoDeferreds.append(proto.stopPolitely(disconnect=True))
return defer.DeferredList(protoDeferreds)
|
python
|
def politeShutdown(self):
"""Stop inventory on all connected readers."""
protoDeferreds = []
for proto in self.protocols:
protoDeferreds.append(proto.stopPolitely(disconnect=True))
return defer.DeferredList(protoDeferreds)
|
[
"def",
"politeShutdown",
"(",
"self",
")",
":",
"protoDeferreds",
"=",
"[",
"]",
"for",
"proto",
"in",
"self",
".",
"protocols",
":",
"protoDeferreds",
".",
"append",
"(",
"proto",
".",
"stopPolitely",
"(",
"disconnect",
"=",
"True",
")",
")",
"return",
"defer",
".",
"DeferredList",
"(",
"protoDeferreds",
")"
] |
Stop inventory on all connected readers.
|
[
"Stop",
"inventory",
"on",
"all",
"connected",
"readers",
"."
] |
d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1
|
https://github.com/ransford/sllurp/blob/d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1/sllurp/llrp.py#L1425-L1430
|
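Because politeShutdown returns a DeferredList, it slots directly into Twisted's shutdown machinery. A sketch, assuming `fac` is an LLRPClientFactory wired to one or more readers:

from twisted.internet import reactor

# stop inventory on all connected readers before the reactor exits
reactor.addSystemEventTrigger('before', 'shutdown', fac.politeShutdown)
reactor.run()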
18,217
|
ransford/sllurp
|
sllurp/epc/sgtin_96.py
|
parse_sgtin_96
|
def parse_sgtin_96(sgtin_96):
'''Given a SGTIN-96 hex string, parse each segment.
Returns a dictionary of the segments.'''
if not sgtin_96:
raise Exception('Pass in a value.')
if not sgtin_96.startswith("30"):
# not a sgtin, not handled
raise Exception('Not SGTIN-96.')
binary = "{0:020b}".format(int(sgtin_96, 16)).zfill(96)
header = int(binary[:8], 2)
tag_filter = int(binary[8:11], 2)
partition = binary[11:14]
partition_value = int(partition, 2)
m, l, n, k = SGTIN_96_PARTITION_MAP[partition_value]
company_start = 8 + 3 + 3
company_end = company_start + m
company_data = int(binary[company_start:company_end], 2)
if company_data > pow(10, l):
# can't be too large
raise Exception('Company value is too large')
company_prefix = str(company_data).zfill(l)
item_start = company_end
item_end = item_start + n
item_data = binary[item_start:item_end]
item_number = int(item_data, 2)
item_reference = str(item_number).zfill(k)
serial = int(binary[-38:], 2)
return {
"header": header,
"filter": tag_filter,
"partition": partition,
"company_prefix": company_prefix,
"item_reference": item_reference,
"serial": serial
}
|
python
|
def parse_sgtin_96(sgtin_96):
'''Given a SGTIN-96 hex string, parse each segment.
Returns a dictionary of the segments.'''
if not sgtin_96:
raise Exception('Pass in a value.')
if not sgtin_96.startswith("30"):
# not a sgtin, not handled
raise Exception('Not SGTIN-96.')
binary = "{0:020b}".format(int(sgtin_96, 16)).zfill(96)
header = int(binary[:8], 2)
tag_filter = int(binary[8:11], 2)
partition = binary[11:14]
partition_value = int(partition, 2)
m, l, n, k = SGTIN_96_PARTITION_MAP[partition_value]
company_start = 8 + 3 + 3
company_end = company_start + m
company_data = int(binary[company_start:company_end], 2)
if company_data > pow(10, l):
# can't be too large
raise Exception('Company value is too large')
company_prefix = str(company_data).zfill(l)
item_start = company_end
item_end = item_start + n
item_data = binary[item_start:item_end]
item_number = int(item_data, 2)
item_reference = str(item_number).zfill(k)
serial = int(binary[-38:], 2)
return {
"header": header,
"filter": tag_filter,
"partition": partition,
"company_prefix": company_prefix,
"item_reference": item_reference,
"serial": serial
}
|
[
"def",
"parse_sgtin_96",
"(",
"sgtin_96",
")",
":",
"if",
"not",
"sgtin_96",
":",
"raise",
"Exception",
"(",
"'Pass in a value.'",
")",
"if",
"not",
"sgtin_96",
".",
"startswith",
"(",
"\"30\"",
")",
":",
"# not a sgtin, not handled",
"raise",
"Exception",
"(",
"'Not SGTIN-96.'",
")",
"binary",
"=",
"\"{0:020b}\"",
".",
"format",
"(",
"int",
"(",
"sgtin_96",
",",
"16",
")",
")",
".",
"zfill",
"(",
"96",
")",
"header",
"=",
"int",
"(",
"binary",
"[",
":",
"8",
"]",
",",
"2",
")",
"tag_filter",
"=",
"int",
"(",
"binary",
"[",
"8",
":",
"11",
"]",
",",
"2",
")",
"partition",
"=",
"binary",
"[",
"11",
":",
"14",
"]",
"partition_value",
"=",
"int",
"(",
"partition",
",",
"2",
")",
"m",
",",
"l",
",",
"n",
",",
"k",
"=",
"SGTIN_96_PARTITION_MAP",
"[",
"partition_value",
"]",
"company_start",
"=",
"8",
"+",
"3",
"+",
"3",
"company_end",
"=",
"company_start",
"+",
"m",
"company_data",
"=",
"int",
"(",
"binary",
"[",
"company_start",
":",
"company_end",
"]",
",",
"2",
")",
"if",
"company_data",
">",
"pow",
"(",
"10",
",",
"l",
")",
":",
"# can't be too large",
"raise",
"Exception",
"(",
"'Company value is too large'",
")",
"company_prefix",
"=",
"str",
"(",
"company_data",
")",
".",
"zfill",
"(",
"l",
")",
"item_start",
"=",
"company_end",
"item_end",
"=",
"item_start",
"+",
"n",
"item_data",
"=",
"binary",
"[",
"item_start",
":",
"item_end",
"]",
"item_number",
"=",
"int",
"(",
"item_data",
",",
"2",
")",
"item_reference",
"=",
"str",
"(",
"item_number",
")",
".",
"zfill",
"(",
"k",
")",
"serial",
"=",
"int",
"(",
"binary",
"[",
"-",
"38",
":",
"]",
",",
"2",
")",
"return",
"{",
"\"header\"",
":",
"header",
",",
"\"filter\"",
":",
"tag_filter",
",",
"\"partition\"",
":",
"partition",
",",
"\"company_prefix\"",
":",
"company_prefix",
",",
"\"item_reference\"",
":",
"item_reference",
",",
"\"serial\"",
":",
"serial",
"}"
] |
Given a SGTIN-96 hex string, parse each segment.
Returns a dictionary of the segments.
|
[
"Given",
"a",
"SGTIN",
"-",
"96",
"hex",
"string",
"parse",
"each",
"segment",
".",
"Returns",
"a",
"dictionary",
"of",
"the",
"segments",
"."
] |
d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1
|
https://github.com/ransford/sllurp/blob/d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1/sllurp/epc/sgtin_96.py#L27-L71
|
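A usage sketch for parse_sgtin_96; the EPC below is a hypothetical 96-bit value whose leading byte is the SGTIN-96 header 0x30:

epc = '3074257bf7194e4000001a85'  # hypothetical tag value
fields = parse_sgtin_96(epc)
print(fields['company_prefix'], fields['item_reference'], fields['serial'])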
18,218
|
ransford/sllurp
|
sllurp/llrp_proto.py
|
decode_param
|
def decode_param(data):
"""Decode any parameter to a byte sequence.
:param data: byte sequence representing an LLRP parameter.
:returns dict, bytes: where dict is {'Type': <decoded type>, 'Data':
<decoded data>} and bytes is the remaining bytes trailing the bytes we
could decode.
"""
logger.debug('decode_param data: %r', data)
header_len = struct.calcsize('!HH')
partype, parlen = struct.unpack('!HH', data[:header_len])
pardata = data[header_len:parlen]
logger.debug('decode_param pardata: %r', pardata)
ret = {
'Type': partype,
}
if partype == 1023:
vsfmt = '!II'
vendor, subtype = struct.unpack(vsfmt, pardata[:struct.calcsize(vsfmt)])
ret['Vendor'] = vendor
ret['Subtype'] = subtype
ret['Data'] = pardata[struct.calcsize(vsfmt):]
else:
        ret['Data'] = pardata
return ret, data[parlen:]
|
python
|
def decode_param(data):
"""Decode any parameter to a byte sequence.
:param data: byte sequence representing an LLRP parameter.
:returns dict, bytes: where dict is {'Type': <decoded type>, 'Data':
<decoded data>} and bytes is the remaining bytes trailing the bytes we
could decode.
"""
logger.debug('decode_param data: %r', data)
header_len = struct.calcsize('!HH')
partype, parlen = struct.unpack('!HH', data[:header_len])
pardata = data[header_len:parlen]
logger.debug('decode_param pardata: %r', pardata)
ret = {
'Type': partype,
}
if partype == 1023:
vsfmt = '!II'
vendor, subtype = struct.unpack(vsfmt, pardata[:struct.calcsize(vsfmt)])
ret['Vendor'] = vendor
ret['Subtype'] = subtype
ret['Data'] = pardata[struct.calcsize(vsfmt):]
else:
        ret['Data'] = pardata
return ret, data[parlen:]
|
[
"def",
"decode_param",
"(",
"data",
")",
":",
"logger",
".",
"debug",
"(",
"'decode_param data: %r'",
",",
"data",
")",
"header_len",
"=",
"struct",
".",
"calcsize",
"(",
"'!HH'",
")",
"partype",
",",
"parlen",
"=",
"struct",
".",
"unpack",
"(",
"'!HH'",
",",
"data",
"[",
":",
"header_len",
"]",
")",
"pardata",
"=",
"data",
"[",
"header_len",
":",
"parlen",
"]",
"logger",
".",
"debug",
"(",
"'decode_param pardata: %r'",
",",
"pardata",
")",
"ret",
"=",
"{",
"'Type'",
":",
"partype",
",",
"}",
"if",
"partype",
"==",
"1023",
":",
"vsfmt",
"=",
"'!II'",
"vendor",
",",
"subtype",
"=",
"struct",
".",
"unpack",
"(",
"vsfmt",
",",
"pardata",
"[",
":",
"struct",
".",
"calcsize",
"(",
"vsfmt",
")",
"]",
")",
"ret",
"[",
"'Vendor'",
"]",
"=",
"vendor",
"ret",
"[",
"'Subtype'",
"]",
"=",
"subtype",
"ret",
"[",
"'Data'",
"]",
"=",
"pardata",
"[",
"struct",
".",
"calcsize",
"(",
"vsfmt",
")",
":",
"]",
"else",
":",
"ret",
"[",
"'Data'",
"]",
"=",
"pardata",
",",
"return",
"ret",
",",
"data",
"[",
"parlen",
":",
"]"
] |
Decode any parameter to a byte sequence.
:param data: byte sequence representing an LLRP parameter.
:returns dict, bytes: where dict is {'Type': <decoded type>, 'Data':
<decoded data>} and bytes is the remaining bytes trailing the bytes we
could decode.
|
[
"Decode",
"any",
"parameter",
"to",
"a",
"byte",
"sequence",
"."
] |
d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1
|
https://github.com/ransford/sllurp/blob/d744b7e17d7ba64a24d9a31bde6cba65d91ad9b1/sllurp/llrp_proto.py#L341-L369
|
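decode_param treats its input as a TLV buffer whose first four bytes pack the type and total length as big-endian unsigned shorts. A minimal sketch that builds such a buffer by hand (the type code 240 is invented for illustration):

import struct

payload = b'\x12\x34'
buf = struct.pack('!HH', 240, 4 + len(payload)) + payload  # 4-byte header + payload
param, rest = decode_param(buf)
assert param == {'Type': 240, 'Data': payload} and rest == b''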
18,219
|
Amsterdam/objectstore
|
examples/handelsregister.py
|
download_files
|
def download_files(file_list):
"""Download the latest data. """
for _, source_data_file in file_list:
sql_gz_name = source_data_file['name'].split('/')[-1]
msg = 'Downloading: %s' % (sql_gz_name)
log.debug(msg)
new_data = objectstore.get_object(
handelsregister_conn, source_data_file, 'handelsregister')
# save output to file!
with open('data/{}'.format(sql_gz_name), 'wb') as outputzip:
outputzip.write(new_data)
|
python
|
def download_files(file_list):
"""Download the latest data. """
for _, source_data_file in file_list:
sql_gz_name = source_data_file['name'].split('/')[-1]
msg = 'Downloading: %s' % (sql_gz_name)
log.debug(msg)
new_data = objectstore.get_object(
handelsregister_conn, source_data_file, 'handelsregister')
# save output to file!
with open('data/{}'.format(sql_gz_name), 'wb') as outputzip:
outputzip.write(new_data)
|
[
"def",
"download_files",
"(",
"file_list",
")",
":",
"for",
"_",
",",
"source_data_file",
"in",
"file_list",
":",
"sql_gz_name",
"=",
"source_data_file",
"[",
"'name'",
"]",
".",
"split",
"(",
"'/'",
")",
"[",
"-",
"1",
"]",
"msg",
"=",
"'Downloading: %s'",
"%",
"(",
"sql_gz_name",
")",
"log",
".",
"debug",
"(",
"msg",
")",
"new_data",
"=",
"objectstore",
".",
"get_object",
"(",
"handelsregister_conn",
",",
"source_data_file",
",",
"'handelsregister'",
")",
"# save output to file!",
"with",
"open",
"(",
"'data/{}'",
".",
"format",
"(",
"sql_gz_name",
")",
",",
"'wb'",
")",
"as",
"outputzip",
":",
"outputzip",
".",
"write",
"(",
"new_data",
")"
] |
Download the latest data.
|
[
"Download",
"the",
"latest",
"data",
"."
] |
15852e8a3f159e4e6eb018866df0a84feb2c7f68
|
https://github.com/Amsterdam/objectstore/blob/15852e8a3f159e4e6eb018866df0a84feb2c7f68/examples/handelsregister.py#L48-L60
|
18,220
|
Amsterdam/objectstore
|
objectstore/objectstore.py
|
get_connection
|
def get_connection(store_settings: dict={}) -> Connection:
"""
    get an objectstore connection
"""
store = store_settings
if not store_settings:
store = make_config_from_env()
os_options = {
'tenant_id': store['TENANT_ID'],
'region_name': store['REGION_NAME'],
# 'endpoint_type': 'internalURL'
}
# when we are running in cloudvps we should use internal urls
use_internal = os.getenv('OBJECTSTORE_LOCAL', '')
if use_internal:
os_options['endpoint_type'] = 'internalURL'
connection = Connection(
authurl=store['AUTHURL'],
user=store['USER'],
key=store['PASSWORD'],
tenant_name=store['TENANT_NAME'],
auth_version=store['VERSION'],
os_options=os_options
)
return connection
|
python
|
def get_connection(store_settings: dict={}) -> Connection:
"""
    get an objectstore connection
"""
store = store_settings
if not store_settings:
store = make_config_from_env()
os_options = {
'tenant_id': store['TENANT_ID'],
'region_name': store['REGION_NAME'],
# 'endpoint_type': 'internalURL'
}
# when we are running in cloudvps we should use internal urls
use_internal = os.getenv('OBJECTSTORE_LOCAL', '')
if use_internal:
os_options['endpoint_type'] = 'internalURL'
connection = Connection(
authurl=store['AUTHURL'],
user=store['USER'],
key=store['PASSWORD'],
tenant_name=store['TENANT_NAME'],
auth_version=store['VERSION'],
os_options=os_options
)
return connection
|
[
"def",
"get_connection",
"(",
"store_settings",
":",
"dict",
"=",
"{",
"}",
")",
"->",
"Connection",
":",
"store",
"=",
"store_settings",
"if",
"not",
"store_settings",
":",
"store",
"=",
"make_config_from_env",
"(",
")",
"os_options",
"=",
"{",
"'tenant_id'",
":",
"store",
"[",
"'TENANT_ID'",
"]",
",",
"'region_name'",
":",
"store",
"[",
"'REGION_NAME'",
"]",
",",
"# 'endpoint_type': 'internalURL'",
"}",
"# when we are running in cloudvps we should use internal urls",
"use_internal",
"=",
"os",
".",
"getenv",
"(",
"'OBJECTSTORE_LOCAL'",
",",
"''",
")",
"if",
"use_internal",
":",
"os_options",
"[",
"'endpoint_type'",
"]",
"=",
"'internalURL'",
"connection",
"=",
"Connection",
"(",
"authurl",
"=",
"store",
"[",
"'AUTHURL'",
"]",
",",
"user",
"=",
"store",
"[",
"'USER'",
"]",
",",
"key",
"=",
"store",
"[",
"'PASSWORD'",
"]",
",",
"tenant_name",
"=",
"store",
"[",
"'TENANT_NAME'",
"]",
",",
"auth_version",
"=",
"store",
"[",
"'VERSION'",
"]",
",",
"os_options",
"=",
"os_options",
")",
"return",
"connection"
] |
get an objectstore connection
|
[
"get",
"an",
"objectsctore",
"connection"
] |
15852e8a3f159e4e6eb018866df0a84feb2c7f68
|
https://github.com/Amsterdam/objectstore/blob/15852e8a3f159e4e6eb018866df0a84feb2c7f68/objectstore/objectstore.py#L67-L96
|
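A sketch of the settings dict get_connection accepts when it is not reading from the environment; every value below is a placeholder:

settings = {
    'VERSION': '2.0',
    'AUTHURL': 'https://identity.example.net/v2.0',
    'USER': 'objectstore-user',
    'PASSWORD': 'secret',
    'TENANT_NAME': 'example-tenant',
    'TENANT_ID': '0123456789abcdef',
    'REGION_NAME': 'NL',
}
connection = get_connection(settings)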
18,221
|
Amsterdam/objectstore
|
objectstore/objectstore.py
|
get_object
|
def get_object(connection, object_meta_data: dict, dirname: str):
"""
Download object from objectstore.
    object_meta_data is an object returned when
using 'get_full_container_list'
"""
return connection.get_object(dirname, object_meta_data['name'])[1]
|
python
|
def get_object(connection, object_meta_data: dict, dirname: str):
"""
Download object from objectstore.
    object_meta_data is an object returned when
using 'get_full_container_list'
"""
return connection.get_object(dirname, object_meta_data['name'])[1]
|
[
"def",
"get_object",
"(",
"connection",
",",
"object_meta_data",
":",
"dict",
",",
"dirname",
":",
"str",
")",
":",
"return",
"connection",
".",
"get_object",
"(",
"dirname",
",",
"object_meta_data",
"[",
"'name'",
"]",
")",
"[",
"1",
"]"
] |
Download object from objectstore.
object_meta_data is an object returned when
using 'get_full_container_list'
|
[
"Download",
"object",
"from",
"objectstore",
".",
"object_meta_data",
"is",
"an",
"object",
"retured",
"when",
"using",
"get_full_container_list"
] |
15852e8a3f159e4e6eb018866df0a84feb2c7f68
|
https://github.com/Amsterdam/objectstore/blob/15852e8a3f159e4e6eb018866df0a84feb2c7f68/objectstore/objectstore.py#L117-L123
|
18,222
|
Amsterdam/objectstore
|
objectstore/objectstore.py
|
put_object
|
def put_object(
connection, container: str, object_name: str,
contents, content_type: str) -> None:
"""
Put file to objectstore
container == "path/in/store"
object_name = "your_file_name.txt"
    contents = the file data (a file object), e.g. open('ourfile', 'rb')
    content_type = 'csv' / 'application/json' etc.
"""
connection.put_object(
container, object_name, contents=contents,
content_type=content_type)
|
python
|
def put_object(
connection, container: str, object_name: str,
contents, content_type: str) -> None:
"""
Put file to objectstore
container == "path/in/store"
object_name = "your_file_name.txt"
    contents = the file data (a file object), e.g. open('ourfile', 'rb')
    content_type = 'csv' / 'application/json' etc.
"""
connection.put_object(
container, object_name, contents=contents,
content_type=content_type)
|
[
"def",
"put_object",
"(",
"connection",
",",
"container",
":",
"str",
",",
"object_name",
":",
"str",
",",
"contents",
",",
"content_type",
":",
"str",
")",
"->",
"None",
":",
"connection",
".",
"put_object",
"(",
"container",
",",
"object_name",
",",
"contents",
"=",
"contents",
",",
"content_type",
"=",
"content_type",
")"
] |
Put file to objectstore
container == "path/in/store"
object_name = "your_file_name.txt"
contents = the file data (a file object), e.g. open('ourfile', 'rb')
content_type = 'csv' / 'application/json' etc.
|
[
"Put",
"file",
"to",
"objectstore"
] |
15852e8a3f159e4e6eb018866df0a84feb2c7f68
|
https://github.com/Amsterdam/objectstore/blob/15852e8a3f159e4e6eb018866df0a84feb2c7f68/objectstore/objectstore.py#L126-L140
|
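A usage sketch for put_object that uploads a local CSV (the connection, container, and file names are hypothetical):

with open('dump.csv', 'rb') as source:
    put_object(connection, 'backups', 'dump.csv',
               contents=source, content_type='text/csv')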
18,223
|
Amsterdam/objectstore
|
objectstore/objectstore.py
|
delete_object
|
def delete_object(connection, container: str, object_meta_data: dict) -> None:
"""
Delete single object from objectstore
"""
connection.delete_object(container, object_meta_data['name'])
|
python
|
def delete_object(connection, container: str, object_meta_data: dict) -> None:
"""
Delete single object from objectstore
"""
connection.delete_object(container, object_meta_data['name'])
|
[
"def",
"delete_object",
"(",
"connection",
",",
"container",
":",
"str",
",",
"object_meta_data",
":",
"dict",
")",
"->",
"None",
":",
"connection",
".",
"delete_object",
"(",
"container",
",",
"object_meta_data",
"[",
"'name'",
"]",
")"
] |
Delete single object from objectstore
|
[
"Delete",
"single",
"object",
"from",
"objectstore"
] |
15852e8a3f159e4e6eb018866df0a84feb2c7f68
|
https://github.com/Amsterdam/objectstore/blob/15852e8a3f159e4e6eb018866df0a84feb2c7f68/objectstore/objectstore.py#L143-L147
|
18,224
|
Amsterdam/objectstore
|
objectstore/databasedumps.py
|
return_file_objects
|
def return_file_objects(connection, container, prefix='database'):
"""Given connecton and container find database dumps
"""
options = []
meta_data = objectstore.get_full_container_list(
        connection, container, prefix=prefix)
env = ENV.upper()
for o_info in meta_data:
expected_file = f'database.{ENV}'
if o_info['name'].startswith(expected_file):
dt = dateparser.parse(o_info['last_modified'])
now = datetime.datetime.now()
delta = now - dt
LOG.debug('AGE: %d %s', delta.days, expected_file)
options.append((dt, o_info))
options.sort()
return options
|
python
|
def return_file_objects(connection, container, prefix='database'):
"""Given connecton and container find database dumps
"""
options = []
meta_data = objectstore.get_full_container_list(
        connection, container, prefix=prefix)
env = ENV.upper()
for o_info in meta_data:
expected_file = f'database.{ENV}'
if o_info['name'].startswith(expected_file):
dt = dateparser.parse(o_info['last_modified'])
now = datetime.datetime.now()
delta = now - dt
LOG.debug('AGE: %d %s', delta.days, expected_file)
options.append((dt, o_info))
options.sort()
return options
|
[
"def",
"return_file_objects",
"(",
"connection",
",",
"container",
",",
"prefix",
"=",
"'database'",
")",
":",
"options",
"=",
"[",
"]",
"meta_data",
"=",
"objectstore",
".",
"get_full_container_list",
"(",
"connection",
",",
"container",
",",
"prefix",
"=",
"'database'",
")",
"env",
"=",
"ENV",
".",
"upper",
"(",
")",
"for",
"o_info",
"in",
"meta_data",
":",
"expected_file",
"=",
"f'database.{ENV}'",
"if",
"o_info",
"[",
"'name'",
"]",
".",
"startswith",
"(",
"expected_file",
")",
":",
"dt",
"=",
"dateparser",
".",
"parse",
"(",
"o_info",
"[",
"'last_modified'",
"]",
")",
"now",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"delta",
"=",
"now",
"-",
"dt",
"LOG",
".",
"debug",
"(",
"'AGE: %d %s'",
",",
"delta",
".",
"days",
",",
"expected_file",
")",
"options",
".",
"append",
"(",
"(",
"dt",
",",
"o_info",
")",
")",
"options",
".",
"sort",
"(",
")",
"return",
"options"
] |
Given connection and container find database dumps
|
[
"Given",
"connecton",
"and",
"container",
"find",
"database",
"dumps"
] |
15852e8a3f159e4e6eb018866df0a84feb2c7f68
|
https://github.com/Amsterdam/objectstore/blob/15852e8a3f159e4e6eb018866df0a84feb2c7f68/objectstore/databasedumps.py#L66-L91
|
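Since the result is sorted on the parsed timestamp, the newest dump is the last tuple. A sketch with a hypothetical container name:

options = return_file_objects(connection, 'backups')
if options:
    newest_dt, newest_info = options[-1]
    print('newest dump:', newest_info['name'], newest_dt)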
18,225
|
Amsterdam/objectstore
|
objectstore/databasedumps.py
|
remove_old_dumps
|
def remove_old_dumps(connection, container: str, days=None):
"""Remove dumps older than x days
"""
if not days:
return
if days < 20:
LOG.error('A minimum of 20 backups is stored')
return
options = return_file_objects(connection, container)
for dt, o_info in options:
now = datetime.datetime.now()
delta = now - dt
if delta.days > days:
LOG.info('Deleting %s', o_info['name'])
objectstore.delete_object(connection, container, o_info)
|
python
|
def remove_old_dumps(connection, container: str, days=None):
"""Remove dumps older than x days
"""
if not days:
return
if days < 20:
LOG.error('A minimum of 20 backups is stored')
return
options = return_file_objects(connection, container)
for dt, o_info in options:
now = datetime.datetime.now()
delta = now - dt
if delta.days > days:
LOG.info('Deleting %s', o_info['name'])
objectstore.delete_object(connection, container, o_info)
|
[
"def",
"remove_old_dumps",
"(",
"connection",
",",
"container",
":",
"str",
",",
"days",
"=",
"None",
")",
":",
"if",
"not",
"days",
":",
"return",
"if",
"days",
"<",
"20",
":",
"LOG",
".",
"error",
"(",
"'A minimum of 20 backups is stored'",
")",
"return",
"options",
"=",
"return_file_objects",
"(",
"connection",
",",
"container",
")",
"for",
"dt",
",",
"o_info",
"in",
"options",
":",
"now",
"=",
"datetime",
".",
"datetime",
".",
"now",
"(",
")",
"delta",
"=",
"now",
"-",
"dt",
"if",
"delta",
".",
"days",
">",
"days",
":",
"LOG",
".",
"info",
"(",
"'Deleting %s'",
",",
"o_info",
"[",
"'name'",
"]",
")",
"objectstore",
".",
"delete_object",
"(",
"connection",
",",
"container",
",",
"o_info",
")"
] |
Remove dumps older than x days
|
[
"Remove",
"dumps",
"older",
"than",
"x",
"days"
] |
15852e8a3f159e4e6eb018866df0a84feb2c7f68
|
https://github.com/Amsterdam/objectstore/blob/15852e8a3f159e4e6eb018866df0a84feb2c7f68/objectstore/databasedumps.py#L94-L112
|
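A usage sketch; the guard above refuses to prune anything when days is under 20, so a 30-day window is a safe illustrative value (container name is hypothetical):

remove_old_dumps(connection, 'backups', days=30)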
18,226
|
Amsterdam/objectstore
|
objectstore/databasedumps.py
|
download_database
|
def download_database(connection, container: str, target: str=""):
"""
Download database dump
"""
meta_data = objectstore.get_full_container_list(
connection, container, prefix='database')
options = return_file_objects(connection, container)
for o_info in meta_data:
expected_file = f'database.{ENV}'
LOG.info(o_info['name'])
if o_info['name'].startswith(expected_file):
dt = dateparser.parse(o_info['last_modified'])
# now = datetime.datetime.now()
options.append((dt, o_info))
options.sort()
if not options:
        LOG.error('Dumps missing? ENVIRONMENT wrong? (acceptance / production)')
        LOG.error(f'Environment {ENV}')
sys.exit(1)
newest = options[-1][1]
LOG.debug('Downloading: %s', (newest['name']))
target_file = os.path.join(target, expected_file)
LOG.info('TARGET: %s', target_file)
if os.path.exists(target_file):
LOG.info('Already downloaded')
return
    LOG.error('TARGET does not exist, downloading...')
new_data = objectstore.get_object(connection, newest, container)
# save output to file!
with open(target_file, 'wb') as outputzip:
outputzip.write(new_data)
|
python
|
def download_database(connection, container: str, target: str=""):
"""
Download database dump
"""
meta_data = objectstore.get_full_container_list(
connection, container, prefix='database')
options = return_file_objects(connection, container)
for o_info in meta_data:
expected_file = f'database.{ENV}'
LOG.info(o_info['name'])
if o_info['name'].startswith(expected_file):
dt = dateparser.parse(o_info['last_modified'])
# now = datetime.datetime.now()
options.append((dt, o_info))
options.sort()
if not options:
        LOG.error('Dumps missing? ENVIRONMENT wrong? (acceptance / production)')
        LOG.error(f'Environment {ENV}')
sys.exit(1)
newest = options[-1][1]
LOG.debug('Downloading: %s', (newest['name']))
target_file = os.path.join(target, expected_file)
LOG.info('TARGET: %s', target_file)
if os.path.exists(target_file):
LOG.info('Already downloaded')
return
    LOG.error('TARGET does not exist, downloading...')
new_data = objectstore.get_object(connection, newest, container)
# save output to file!
with open(target_file, 'wb') as outputzip:
outputzip.write(new_data)
|
[
"def",
"download_database",
"(",
"connection",
",",
"container",
":",
"str",
",",
"target",
":",
"str",
"=",
"\"\"",
")",
":",
"meta_data",
"=",
"objectstore",
".",
"get_full_container_list",
"(",
"connection",
",",
"container",
",",
"prefix",
"=",
"'database'",
")",
"options",
"=",
"return_file_objects",
"(",
"connection",
",",
"container",
")",
"for",
"o_info",
"in",
"meta_data",
":",
"expected_file",
"=",
"f'database.{ENV}'",
"LOG",
".",
"info",
"(",
"o_info",
"[",
"'name'",
"]",
")",
"if",
"o_info",
"[",
"'name'",
"]",
".",
"startswith",
"(",
"expected_file",
")",
":",
"dt",
"=",
"dateparser",
".",
"parse",
"(",
"o_info",
"[",
"'last_modified'",
"]",
")",
"# now = datetime.datetime.now()",
"options",
".",
"append",
"(",
"(",
"dt",
",",
"o_info",
")",
")",
"options",
".",
"sort",
"(",
")",
"if",
"not",
"options",
":",
"LOG",
".",
"error",
"(",
"'Dumps missing? ENVIRONMENT wrong? (acceptance / production'",
")",
"LOG",
".",
"error",
"(",
"'Environtment {ENV}'",
")",
"sys",
".",
"exit",
"(",
"1",
")",
"newest",
"=",
"options",
"[",
"-",
"1",
"]",
"[",
"1",
"]",
"LOG",
".",
"debug",
"(",
"'Downloading: %s'",
",",
"(",
"newest",
"[",
"'name'",
"]",
")",
")",
"target_file",
"=",
"os",
".",
"path",
".",
"join",
"(",
"target",
",",
"expected_file",
")",
"LOG",
".",
"info",
"(",
"'TARGET: %s'",
",",
"target_file",
")",
"if",
"os",
".",
"path",
".",
"exists",
"(",
"target_file",
")",
":",
"LOG",
".",
"info",
"(",
"'Already downloaded'",
")",
"return",
"LOG",
".",
"error",
"(",
"'TARGET does not exists downloading...'",
")",
"new_data",
"=",
"objectstore",
".",
"get_object",
"(",
"connection",
",",
"newest",
",",
"container",
")",
"# save output to file!",
"with",
"open",
"(",
"target_file",
",",
"'wb'",
")",
"as",
"outputzip",
":",
"outputzip",
".",
"write",
"(",
"new_data",
")"
] |
Download database dump
|
[
"Download",
"database",
"dump"
] |
15852e8a3f159e4e6eb018866df0a84feb2c7f68
|
https://github.com/Amsterdam/objectstore/blob/15852e8a3f159e4e6eb018866df0a84feb2c7f68/objectstore/databasedumps.py#L115-L160
|
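A usage sketch; the target directory must already exist, because the function opens target_file for writing directly (container and path are hypothetical):

download_database(connection, 'backups', target='/tmp')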
18,227
|
conwetlab/ckanext-oauth2
|
ckanext/oauth2/oauth2.py
|
OAuth2Helper.remember
|
def remember(self, user_name):
'''
Remember the authenticated identity.
This method simply delegates to another IIdentifier plugin if configured.
'''
log.debug('Repoze OAuth remember')
environ = toolkit.request.environ
rememberer = self._get_rememberer(environ)
identity = {'repoze.who.userid': user_name}
headers = rememberer.remember(environ, identity)
for header, value in headers:
toolkit.response.headers.add(header, value)
|
python
|
def remember(self, user_name):
'''
Remember the authenticated identity.
This method simply delegates to another IIdentifier plugin if configured.
'''
log.debug('Repoze OAuth remember')
environ = toolkit.request.environ
rememberer = self._get_rememberer(environ)
identity = {'repoze.who.userid': user_name}
headers = rememberer.remember(environ, identity)
for header, value in headers:
toolkit.response.headers.add(header, value)
|
[
"def",
"remember",
"(",
"self",
",",
"user_name",
")",
":",
"log",
".",
"debug",
"(",
"'Repoze OAuth remember'",
")",
"environ",
"=",
"toolkit",
".",
"request",
".",
"environ",
"rememberer",
"=",
"self",
".",
"_get_rememberer",
"(",
"environ",
")",
"identity",
"=",
"{",
"'repoze.who.userid'",
":",
"user_name",
"}",
"headers",
"=",
"rememberer",
".",
"remember",
"(",
"environ",
",",
"identity",
")",
"for",
"header",
",",
"value",
"in",
"headers",
":",
"toolkit",
".",
"response",
".",
"headers",
".",
"add",
"(",
"header",
",",
"value",
")"
] |
Remember the authenticated identity.
This method simply delegates to another IIdentifier plugin if configured.
|
[
"Remember",
"the",
"authenticated",
"identity",
"."
] |
e68dd2664229b7563d77b2c8fc869fe57b747c88
|
https://github.com/conwetlab/ckanext-oauth2/blob/e68dd2664229b7563d77b2c8fc869fe57b747c88/ckanext/oauth2/oauth2.py#L206-L218
|
18,228
|
conwetlab/ckanext-oauth2
|
ckanext/oauth2/oauth2.py
|
OAuth2Helper.redirect_from_callback
|
def redirect_from_callback(self):
'''Redirect to the callback URL after a successful authentication.'''
state = toolkit.request.params.get('state')
came_from = get_came_from(state)
toolkit.response.status = 302
toolkit.response.location = came_from
|
python
|
def redirect_from_callback(self):
'''Redirect to the callback URL after a successful authentication.'''
state = toolkit.request.params.get('state')
came_from = get_came_from(state)
toolkit.response.status = 302
toolkit.response.location = came_from
|
[
"def",
"redirect_from_callback",
"(",
"self",
")",
":",
"state",
"=",
"toolkit",
".",
"request",
".",
"params",
".",
"get",
"(",
"'state'",
")",
"came_from",
"=",
"get_came_from",
"(",
"state",
")",
"toolkit",
".",
"response",
".",
"status",
"=",
"302",
"toolkit",
".",
"response",
".",
"location",
"=",
"came_from"
] |
Redirect to the callback URL after a successful authentication.
|
[
"Redirect",
"to",
"the",
"callback",
"URL",
"after",
"a",
"successful",
"authentication",
"."
] |
e68dd2664229b7563d77b2c8fc869fe57b747c88
|
https://github.com/conwetlab/ckanext-oauth2/blob/e68dd2664229b7563d77b2c8fc869fe57b747c88/ckanext/oauth2/oauth2.py#L220-L225
|
18,229
|
pinax/pinax-documents
|
pinax/documents/hooks.py
|
DocumentsDefaultHookSet.can_share_folder
|
def can_share_folder(self, user, folder):
"""
Return True if `user` can share `folder`.
"""
return folder.parent_id is None and folder.author_id == user.id
|
python
|
def can_share_folder(self, user, folder):
"""
Return True if `user` can share `folder`.
"""
return folder.parent_id is None and folder.author_id == user.id
|
[
"def",
"can_share_folder",
"(",
"self",
",",
"user",
",",
"folder",
")",
":",
"return",
"folder",
".",
"parent_id",
"is",
"None",
"and",
"folder",
".",
"author_id",
"==",
"user",
".",
"id"
] |
Return True if `user` can share `folder`.
|
[
"Return",
"True",
"if",
"user",
"can",
"share",
"folder",
"."
] |
b8c6a748976ec4b22cff9b195eb426b46c7a2a1e
|
https://github.com/pinax/pinax-documents/blob/b8c6a748976ec4b22cff9b195eb426b46c7a2a1e/pinax/documents/hooks.py#L20-L24
|
18,230
|
pinax/pinax-documents
|
pinax/documents/hooks.py
|
DocumentsDefaultHookSet.storage_color
|
def storage_color(self, user_storage):
"""
Return labels indicating amount of storage used.
"""
p = user_storage.percentage
if p >= 0 and p < 60:
return "success"
if p >= 60 and p < 90:
return "warning"
if p >= 90 and p <= 100:
return "danger"
raise ValueError("percentage out of range")
|
python
|
def storage_color(self, user_storage):
"""
Return labels indicating amount of storage used.
"""
p = user_storage.percentage
if p >= 0 and p < 60:
return "success"
if p >= 60 and p < 90:
return "warning"
if p >= 90 and p <= 100:
return "danger"
raise ValueError("percentage out of range")
|
[
"def",
"storage_color",
"(",
"self",
",",
"user_storage",
")",
":",
"p",
"=",
"user_storage",
".",
"percentage",
"if",
"p",
">=",
"0",
"and",
"p",
"<",
"60",
":",
"return",
"\"success\"",
"if",
"p",
">=",
"60",
"and",
"p",
"<",
"90",
":",
"return",
"\"warning\"",
"if",
"p",
">=",
"90",
"and",
"p",
"<=",
"100",
":",
"return",
"\"danger\"",
"raise",
"ValueError",
"(",
"\"percentage out of range\"",
")"
] |
Return labels indicating amount of storage used.
|
[
"Return",
"labels",
"indicating",
"amount",
"of",
"storage",
"used",
"."
] |
b8c6a748976ec4b22cff9b195eb426b46c7a2a1e
|
https://github.com/pinax/pinax-documents/blob/b8c6a748976ec4b22cff9b195eb426b46c7a2a1e/pinax/documents/hooks.py#L26-L37
|
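The thresholds map [0, 60) to "success", [60, 90) to "warning" and [90, 100] to "danger". A sketch using a stand-in storage object, assuming the hookset takes no constructor arguments:

from types import SimpleNamespace

hookset = DocumentsDefaultHookSet()
print(hookset.storage_color(SimpleNamespace(percentage=75)))   # "warning"
print(hookset.storage_color(SimpleNamespace(percentage=100)))  # "danger"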
18,231
|
pinax/pinax-documents
|
pinax/documents/hooks.py
|
DocumentsDefaultHookSet.folder_created_message
|
def folder_created_message(self, request, folder):
"""
Send messages.success message after successful folder creation.
"""
messages.success(request, _("Folder {} was created".format(folder)))
|
python
|
def folder_created_message(self, request, folder):
"""
Send messages.success message after successful folder creation.
"""
messages.success(request, _("Folder {} was created".format(folder)))
|
[
"def",
"folder_created_message",
"(",
"self",
",",
"request",
",",
"folder",
")",
":",
"messages",
".",
"success",
"(",
"request",
",",
"_",
"(",
"\"Folder {} was created\"",
".",
"format",
"(",
"folder",
")",
")",
")"
] |
Send messages.success message after successful folder creation.
|
[
"Send",
"messages",
".",
"success",
"message",
"after",
"successful",
"folder",
"creation",
"."
] |
b8c6a748976ec4b22cff9b195eb426b46c7a2a1e
|
https://github.com/pinax/pinax-documents/blob/b8c6a748976ec4b22cff9b195eb426b46c7a2a1e/pinax/documents/hooks.py#L39-L43
|
18,232
|
pinax/pinax-documents
|
pinax/documents/hooks.py
|
DocumentsDefaultHookSet.document_created_message
|
def document_created_message(self, request, document):
"""
Send messages.success message after successful document creation.
"""
messages.success(request, _("Document {} was created".format(document)))
|
python
|
def document_created_message(self, request, document):
"""
Send messages.success message after successful document creation.
"""
messages.success(request, _("Document {} was created".format(document)))
|
[
"def",
"document_created_message",
"(",
"self",
",",
"request",
",",
"document",
")",
":",
"messages",
".",
"success",
"(",
"request",
",",
"_",
"(",
"\"Document {} was created\"",
".",
"format",
"(",
"document",
")",
")",
")"
] |
Send messages.success message after successful document creation.
|
[
"Send",
"messages",
".",
"success",
"message",
"after",
"successful",
"document",
"creation",
"."
] |
b8c6a748976ec4b22cff9b195eb426b46c7a2a1e
|
https://github.com/pinax/pinax-documents/blob/b8c6a748976ec4b22cff9b195eb426b46c7a2a1e/pinax/documents/hooks.py#L45-L49
|
18,233
|
pinax/pinax-documents
|
pinax/documents/hooks.py
|
DocumentsDefaultHookSet.folder_shared_message
|
def folder_shared_message(self, request, user, folder):
"""
Send messages.success message after successful share.
"""
messages.success(request, _("Folder {} is now shared with {}".format(folder, user)))
|
python
|
def folder_shared_message(self, request, user, folder):
"""
Send messages.success message after successful share.
"""
messages.success(request, _("Folder {} is now shared with {}".format(folder, user)))
|
[
"def",
"folder_shared_message",
"(",
"self",
",",
"request",
",",
"user",
",",
"folder",
")",
":",
"messages",
".",
"success",
"(",
"request",
",",
"_",
"(",
"\"Folder {} is now shared with {}\"",
".",
"format",
"(",
"folder",
",",
"user",
")",
")",
")"
] |
Send messages.success message after successful share.
|
[
"Send",
"messages",
".",
"success",
"message",
"after",
"successful",
"share",
"."
] |
b8c6a748976ec4b22cff9b195eb426b46c7a2a1e
|
https://github.com/pinax/pinax-documents/blob/b8c6a748976ec4b22cff9b195eb426b46c7a2a1e/pinax/documents/hooks.py#L51-L55
|
18,234
|
pinax/pinax-documents
|
pinax/documents/hooks.py
|
DocumentsDefaultHookSet.folder_pre_delete
|
def folder_pre_delete(self, request, folder):
"""
Perform folder operations prior to deletions. For example, deleting all contents.
"""
for m in folder.members():
if m.__class__ == folder.__class__:
self.folder_pre_delete(request, m)
m.delete()
|
python
|
def folder_pre_delete(self, request, folder):
"""
Perform folder operations prior to deletions. For example, deleting all contents.
"""
for m in folder.members():
if m.__class__ == folder.__class__:
self.folder_pre_delete(request, m)
m.delete()
|
[
"def",
"folder_pre_delete",
"(",
"self",
",",
"request",
",",
"folder",
")",
":",
"for",
"m",
"in",
"folder",
".",
"members",
"(",
")",
":",
"if",
"m",
".",
"__class__",
"==",
"folder",
".",
"__class__",
":",
"self",
".",
"folder_pre_delete",
"(",
"request",
",",
"m",
")",
"m",
".",
"delete",
"(",
")"
] |
Perform folder operations prior to deletions. For example, deleting all contents.
|
[
"Perform",
"folder",
"operations",
"prior",
"to",
"deletions",
".",
"For",
"example",
"deleting",
"all",
"contents",
"."
] |
b8c6a748976ec4b22cff9b195eb426b46c7a2a1e
|
https://github.com/pinax/pinax-documents/blob/b8c6a748976ec4b22cff9b195eb426b46c7a2a1e/pinax/documents/hooks.py#L75-L82
|
18,235
|
pinax/pinax-documents
|
pinax/documents/hooks.py
|
DocumentsDefaultHookSet.file_upload_to
|
def file_upload_to(self, instance, filename):
"""
Callable passed to the FileField's upload_to kwarg on Document.file
"""
ext = filename.split(".")[-1]
filename = "{}.{}".format(uuid.uuid4(), ext)
return os.path.join("document", filename)
|
python
|
def file_upload_to(self, instance, filename):
"""
Callable passed to the FileField's upload_to kwarg on Document.file
"""
ext = filename.split(".")[-1]
filename = "{}.{}".format(uuid.uuid4(), ext)
return os.path.join("document", filename)
|
[
"def",
"file_upload_to",
"(",
"self",
",",
"instance",
",",
"filename",
")",
":",
"ext",
"=",
"filename",
".",
"split",
"(",
"\".\"",
")",
"[",
"-",
"1",
"]",
"filename",
"=",
"\"{}.{}\"",
".",
"format",
"(",
"uuid",
".",
"uuid4",
"(",
")",
",",
"ext",
")",
"return",
"os",
".",
"path",
".",
"join",
"(",
"\"document\"",
",",
"filename",
")"
] |
Callable passed to the FileField's upload_to kwarg on Document.file
|
[
"Callable",
"passed",
"to",
"the",
"FileField",
"s",
"upload_to",
"kwarg",
"on",
"Document",
".",
"file"
] |
b8c6a748976ec4b22cff9b195eb426b46c7a2a1e
|
https://github.com/pinax/pinax-documents/blob/b8c6a748976ec4b22cff9b195eb426b46c7a2a1e/pinax/documents/hooks.py#L84-L90
|
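Since only the final dot-separated extension survives and the rest of the name is replaced by a UUID, uploaded files cannot collide. A sketch (the instance argument is unused, so None suffices; assumes a no-arg hookset constructor):

path = DocumentsDefaultHookSet().file_upload_to(None, 'report.final.pdf')
# path looks like 'document/3f2b7c9e-1a2b-....pdf'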
18,236
|
pinax/pinax-documents
|
pinax/documents/managers.py
|
FolderQuerySet.for_user
|
def for_user(self, user):
"""
All folders the given user can do something with.
"""
qs = SharedMemberQuerySet(model=self.model, using=self._db, user=user)
qs = qs.filter(Q(author=user) | Q(foldershareduser__user=user))
return qs.distinct() & self.distinct()
|
python
|
def for_user(self, user):
"""
All folders the given user can do something with.
"""
qs = SharedMemberQuerySet(model=self.model, using=self._db, user=user)
qs = qs.filter(Q(author=user) | Q(foldershareduser__user=user))
return qs.distinct() & self.distinct()
|
[
"def",
"for_user",
"(",
"self",
",",
"user",
")",
":",
"qs",
"=",
"SharedMemberQuerySet",
"(",
"model",
"=",
"self",
".",
"model",
",",
"using",
"=",
"self",
".",
"_db",
",",
"user",
"=",
"user",
")",
"qs",
"=",
"qs",
".",
"filter",
"(",
"Q",
"(",
"author",
"=",
"user",
")",
"|",
"Q",
"(",
"foldershareduser__user",
"=",
"user",
")",
")",
"return",
"qs",
".",
"distinct",
"(",
")",
"&",
"self",
".",
"distinct",
"(",
")"
] |
All folders the given user can do something with.
|
[
"All",
"folders",
"the",
"given",
"user",
"can",
"do",
"something",
"with",
"."
] |
b8c6a748976ec4b22cff9b195eb426b46c7a2a1e
|
https://github.com/pinax/pinax-documents/blob/b8c6a748976ec4b22cff9b195eb426b46c7a2a1e/pinax/documents/managers.py#L31-L37
|
18,237
|
qubole/qds-sdk-py
|
qds_sdk/cluster.py
|
Cluster._parse_list
|
def _parse_list(cls, args):
"""
Parse command line arguments to construct a dictionary of cluster
parameters that can be used to determine which clusters to list.
Args:
`args`: sequence of arguments
Returns:
Dictionary that can be used to determine which clusters to list
"""
argparser = ArgumentParser(prog="cluster list")
group = argparser.add_mutually_exclusive_group()
group.add_argument("--id", dest="cluster_id",
help="show cluster with this id")
group.add_argument("--label", dest="label",
help="show cluster with this label")
group.add_argument("--state", dest="state", action="store",
choices=['up', 'down', 'pending', 'terminating'],
help="list only clusters in the given state")
pagination_group = group.add_argument_group()
pagination_group.add_argument("--page", dest="page", action="store", type=int,
help="page number")
pagination_group.add_argument("--per-page", dest="per_page", action="store", type=int,
help="number of clusters to be retrieved per page")
arguments = argparser.parse_args(args)
return vars(arguments)
|
python
|
def _parse_list(cls, args):
"""
Parse command line arguments to construct a dictionary of cluster
parameters that can be used to determine which clusters to list.
Args:
`args`: sequence of arguments
Returns:
Dictionary that can be used to determine which clusters to list
"""
argparser = ArgumentParser(prog="cluster list")
group = argparser.add_mutually_exclusive_group()
group.add_argument("--id", dest="cluster_id",
help="show cluster with this id")
group.add_argument("--label", dest="label",
help="show cluster with this label")
group.add_argument("--state", dest="state", action="store",
choices=['up', 'down', 'pending', 'terminating'],
help="list only clusters in the given state")
pagination_group = group.add_argument_group()
pagination_group.add_argument("--page", dest="page", action="store", type=int,
help="page number")
pagination_group.add_argument("--per-page", dest="per_page", action="store", type=int,
help="number of clusters to be retrieved per page")
arguments = argparser.parse_args(args)
return vars(arguments)
|
[
"def",
"_parse_list",
"(",
"cls",
",",
"args",
")",
":",
"argparser",
"=",
"ArgumentParser",
"(",
"prog",
"=",
"\"cluster list\"",
")",
"group",
"=",
"argparser",
".",
"add_mutually_exclusive_group",
"(",
")",
"group",
".",
"add_argument",
"(",
"\"--id\"",
",",
"dest",
"=",
"\"cluster_id\"",
",",
"help",
"=",
"\"show cluster with this id\"",
")",
"group",
".",
"add_argument",
"(",
"\"--label\"",
",",
"dest",
"=",
"\"label\"",
",",
"help",
"=",
"\"show cluster with this label\"",
")",
"group",
".",
"add_argument",
"(",
"\"--state\"",
",",
"dest",
"=",
"\"state\"",
",",
"action",
"=",
"\"store\"",
",",
"choices",
"=",
"[",
"'up'",
",",
"'down'",
",",
"'pending'",
",",
"'terminating'",
"]",
",",
"help",
"=",
"\"list only clusters in the given state\"",
")",
"pagination_group",
"=",
"group",
".",
"add_argument_group",
"(",
")",
"pagination_group",
".",
"add_argument",
"(",
"\"--page\"",
",",
"dest",
"=",
"\"page\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"page number\"",
")",
"pagination_group",
".",
"add_argument",
"(",
"\"--per-page\"",
",",
"dest",
"=",
"\"per_page\"",
",",
"action",
"=",
"\"store\"",
",",
"type",
"=",
"int",
",",
"help",
"=",
"\"number of clusters to be retrieved per page\"",
")",
"arguments",
"=",
"argparser",
".",
"parse_args",
"(",
"args",
")",
"return",
"vars",
"(",
"arguments",
")"
] |
Parse command line arguments to construct a dictionary of cluster
parameters that can be used to determine which clusters to list.
Args:
`args`: sequence of arguments
Returns:
Dictionary that can be used to determine which clusters to list
|
[
"Parse",
"command",
"line",
"arguments",
"to",
"construct",
"a",
"dictionary",
"of",
"cluster",
"parameters",
"that",
"can",
"be",
"used",
"to",
"determine",
"which",
"clusters",
"to",
"list",
"."
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/cluster.py#L31-L62
|
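A sketch of what the parser produces; it is a classmethod, so it can be invoked through the class:

params = Cluster._parse_list(['--state', 'up'])
# roughly: {'cluster_id': None, 'label': None, 'state': 'up',
#           'page': None, 'per_page': None}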
18,238
|
qubole/qds-sdk-py
|
qds_sdk/cluster.py
|
Cluster._parse_cluster_manage_command
|
def _parse_cluster_manage_command(cls, args, action):
"""
Parse command line arguments for cluster manage commands.
"""
argparser = ArgumentParser(prog="cluster_manage_command")
group = argparser.add_mutually_exclusive_group(required=True)
group.add_argument("--id", dest="cluster_id",
help="execute on cluster with this id")
group.add_argument("--label", dest="label",
help="execute on cluster with this label")
if action == "remove" or action == "update":
argparser.add_argument("--private_dns",
help="the private_dns of the machine to be updated/removed", required=True)
if action == "update":
argparser.add_argument("--command",
help="the update command to be executed", required=True, choices=["replace"])
arguments = argparser.parse_args(args)
return arguments
|
python
|
def _parse_cluster_manage_command(cls, args, action):
"""
Parse command line arguments for cluster manage commands.
"""
argparser = ArgumentParser(prog="cluster_manage_command")
group = argparser.add_mutually_exclusive_group(required=True)
group.add_argument("--id", dest="cluster_id",
help="execute on cluster with this id")
group.add_argument("--label", dest="label",
help="execute on cluster with this label")
if action == "remove" or action == "update":
argparser.add_argument("--private_dns",
help="the private_dns of the machine to be updated/removed", required=True)
if action == "update":
argparser.add_argument("--command",
help="the update command to be executed", required=True, choices=["replace"])
arguments = argparser.parse_args(args)
return arguments
|
[
"def",
"_parse_cluster_manage_command",
"(",
"cls",
",",
"args",
",",
"action",
")",
":",
"argparser",
"=",
"ArgumentParser",
"(",
"prog",
"=",
"\"cluster_manage_command\"",
")",
"group",
"=",
"argparser",
".",
"add_mutually_exclusive_group",
"(",
"required",
"=",
"True",
")",
"group",
".",
"add_argument",
"(",
"\"--id\"",
",",
"dest",
"=",
"\"cluster_id\"",
",",
"help",
"=",
"\"execute on cluster with this id\"",
")",
"group",
".",
"add_argument",
"(",
"\"--label\"",
",",
"dest",
"=",
"\"label\"",
",",
"help",
"=",
"\"execute on cluster with this label\"",
")",
"if",
"action",
"==",
"\"remove\"",
"or",
"action",
"==",
"\"update\"",
":",
"argparser",
".",
"add_argument",
"(",
"\"--private_dns\"",
",",
"help",
"=",
"\"the private_dns of the machine to be updated/removed\"",
",",
"required",
"=",
"True",
")",
"if",
"action",
"==",
"\"update\"",
":",
"argparser",
".",
"add_argument",
"(",
"\"--command\"",
",",
"help",
"=",
"\"the update command to be executed\"",
",",
"required",
"=",
"True",
",",
"choices",
"=",
"[",
"\"replace\"",
"]",
")",
"arguments",
"=",
"argparser",
".",
"parse_args",
"(",
"args",
")",
"return",
"arguments"
] |
Parse command line arguments for cluster manage commands.
|
[
"Parse",
"command",
"line",
"arguments",
"for",
"cluster",
"manage",
"commands",
"."
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/cluster.py#L530-L553
|
18,239
|
qubole/qds-sdk-py
|
qds_sdk/cluster.py
|
Cluster._parse_reassign_label
|
def _parse_reassign_label(cls, args):
"""
Parse command line arguments for reassigning label.
"""
argparser = ArgumentParser(prog="cluster reassign_label")
argparser.add_argument("destination_cluster",
metavar="destination_cluster_id_label",
help="id/label of the cluster to move the label to")
argparser.add_argument("label",
help="label to be moved from the source cluster")
arguments = argparser.parse_args(args)
return arguments
|
python
|
def _parse_reassign_label(cls, args):
"""
Parse command line arguments for reassigning label.
"""
argparser = ArgumentParser(prog="cluster reassign_label")
argparser.add_argument("destination_cluster",
metavar="destination_cluster_id_label",
help="id/label of the cluster to move the label to")
argparser.add_argument("label",
help="label to be moved from the source cluster")
arguments = argparser.parse_args(args)
return arguments
|
[
"def",
"_parse_reassign_label",
"(",
"cls",
",",
"args",
")",
":",
"argparser",
"=",
"ArgumentParser",
"(",
"prog",
"=",
"\"cluster reassign_label\"",
")",
"argparser",
".",
"add_argument",
"(",
"\"destination_cluster\"",
",",
"metavar",
"=",
"\"destination_cluster_id_label\"",
",",
"help",
"=",
"\"id/label of the cluster to move the label to\"",
")",
"argparser",
".",
"add_argument",
"(",
"\"label\"",
",",
"help",
"=",
"\"label to be moved from the source cluster\"",
")",
"arguments",
"=",
"argparser",
".",
"parse_args",
"(",
"args",
")",
"return",
"arguments"
] |
Parse command line arguments for reassigning label.
|
[
"Parse",
"command",
"line",
"arguments",
"for",
"reassigning",
"label",
"."
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/cluster.py#L556-L570
|
18,240
|
qubole/qds-sdk-py
|
qds_sdk/cluster.py
|
Cluster.reassign_label
|
def reassign_label(cls, destination_cluster, label):
"""
Reassign a label from one cluster to another.
Args:
`destination_cluster`: id/label of the cluster to move the label to
`label`: label to be moved from the source cluster
"""
conn = Qubole.agent(version=Cluster.api_version)
data = {
"destination_cluster": destination_cluster,
"label": label
}
return conn.put(cls.rest_entity_path + "/reassign-label", data)
|
python
|
def reassign_label(cls, destination_cluster, label):
"""
Reassign a label from one cluster to another.
Args:
`destination_cluster`: id/label of the cluster to move the label to
`label`: label to be moved from the source cluster
"""
conn = Qubole.agent(version=Cluster.api_version)
data = {
"destination_cluster": destination_cluster,
"label": label
}
return conn.put(cls.rest_entity_path + "/reassign-label", data)
|
[
"def",
"reassign_label",
"(",
"cls",
",",
"destination_cluster",
",",
"label",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
"version",
"=",
"Cluster",
".",
"api_version",
")",
"data",
"=",
"{",
"\"destination_cluster\"",
":",
"destination_cluster",
",",
"\"label\"",
":",
"label",
"}",
"return",
"conn",
".",
"put",
"(",
"cls",
".",
"rest_entity_path",
"+",
"\"/reassign-label\"",
",",
"data",
")"
] |
Reassign a label from one cluster to another.
Args:
`destination_cluster`: id/label of the cluster to move the label to
`label`: label to be moved from the source cluster
|
[
"Reassign",
"a",
"label",
"from",
"one",
"cluster",
"to",
"another",
"."
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/cluster.py#L573-L587
|
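A usage sketch, assuming Qubole.configure(...) has already been called with valid credentials (cluster and label names are placeholders):

Cluster.reassign_label('prod-cluster-2', 'etl-nightly')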
18,241
|
qubole/qds-sdk-py
|
qds_sdk/cluster.py
|
Cluster._parse_snapshot_restore_command
|
def _parse_snapshot_restore_command(cls, args, action):
"""
Parse command line arguments for snapshot command.
"""
argparser = ArgumentParser(prog="cluster %s" % action)
group = argparser.add_mutually_exclusive_group(required=True)
group.add_argument("--id", dest="cluster_id",
help="execute on cluster with this id")
group.add_argument("--label", dest="label",
help="execute on cluster with this label")
argparser.add_argument("--s3_location",
help="s3_location where backup is stored", required=True)
if action == "snapshot":
argparser.add_argument("--backup_type",
help="backup_type: full/incremental, default is full")
elif action == "restore_point":
argparser.add_argument("--backup_id",
help="back_id from which restoration will be done", required=True)
argparser.add_argument("--table_names",
help="table(s) which are to be restored", required=True)
argparser.add_argument("--no-overwrite", action="store_false",
help="With this option, restore overwrites to the existing table if theres any in restore target")
argparser.add_argument("--no-automatic", action="store_false",
help="With this option, all the dependencies are automatically restored together with this backup image following the correct order")
arguments = argparser.parse_args(args)
return arguments
|
python
|
def _parse_snapshot_restore_command(cls, args, action):
"""
Parse command line arguments for snapshot command.
"""
argparser = ArgumentParser(prog="cluster %s" % action)
group = argparser.add_mutually_exclusive_group(required=True)
group.add_argument("--id", dest="cluster_id",
help="execute on cluster with this id")
group.add_argument("--label", dest="label",
help="execute on cluster with this label")
argparser.add_argument("--s3_location",
help="s3_location where backup is stored", required=True)
if action == "snapshot":
argparser.add_argument("--backup_type",
help="backup_type: full/incremental, default is full")
elif action == "restore_point":
argparser.add_argument("--backup_id",
help="back_id from which restoration will be done", required=True)
argparser.add_argument("--table_names",
help="table(s) which are to be restored", required=True)
argparser.add_argument("--no-overwrite", action="store_false",
help="With this option, restore overwrites to the existing table if theres any in restore target")
argparser.add_argument("--no-automatic", action="store_false",
help="With this option, all the dependencies are automatically restored together with this backup image following the correct order")
arguments = argparser.parse_args(args)
return arguments
|
[
"def",
"_parse_snapshot_restore_command",
"(",
"cls",
",",
"args",
",",
"action",
")",
":",
"argparser",
"=",
"ArgumentParser",
"(",
"prog",
"=",
"\"cluster %s\"",
"%",
"action",
")",
"group",
"=",
"argparser",
".",
"add_mutually_exclusive_group",
"(",
"required",
"=",
"True",
")",
"group",
".",
"add_argument",
"(",
"\"--id\"",
",",
"dest",
"=",
"\"cluster_id\"",
",",
"help",
"=",
"\"execute on cluster with this id\"",
")",
"group",
".",
"add_argument",
"(",
"\"--label\"",
",",
"dest",
"=",
"\"label\"",
",",
"help",
"=",
"\"execute on cluster with this label\"",
")",
"argparser",
".",
"add_argument",
"(",
"\"--s3_location\"",
",",
"help",
"=",
"\"s3_location where backup is stored\"",
",",
"required",
"=",
"True",
")",
"if",
"action",
"==",
"\"snapshot\"",
":",
"argparser",
".",
"add_argument",
"(",
"\"--backup_type\"",
",",
"help",
"=",
"\"backup_type: full/incremental, default is full\"",
")",
"elif",
"action",
"==",
"\"restore_point\"",
":",
"argparser",
".",
"add_argument",
"(",
"\"--backup_id\"",
",",
"help",
"=",
"\"back_id from which restoration will be done\"",
",",
"required",
"=",
"True",
")",
"argparser",
".",
"add_argument",
"(",
"\"--table_names\"",
",",
"help",
"=",
"\"table(s) which are to be restored\"",
",",
"required",
"=",
"True",
")",
"argparser",
".",
"add_argument",
"(",
"\"--no-overwrite\"",
",",
"action",
"=",
"\"store_false\"",
",",
"help",
"=",
"\"With this option, restore overwrites to the existing table if theres any in restore target\"",
")",
"argparser",
".",
"add_argument",
"(",
"\"--no-automatic\"",
",",
"action",
"=",
"\"store_false\"",
",",
"help",
"=",
"\"With this option, all the dependencies are automatically restored together with this backup image following the correct order\"",
")",
"arguments",
"=",
"argparser",
".",
"parse_args",
"(",
"args",
")",
"return",
"arguments"
] |
Parse command line arguments for snapshot command.
|
[
"Parse",
"command",
"line",
"arguments",
"for",
"snapshot",
"command",
"."
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/cluster.py#L598-L625
|
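
A behavioral sketch of the parser above for the restore_point action, relying only on standard argparse semantics; every argument value is illustrative. argparse derives the destinations no_overwrite and no_automatic from the flag names, so with action="store_false" they default to True and flip to False when the flag is given:

from qds_sdk.cluster import Cluster

args = Cluster._parse_snapshot_restore_command(
    ['--id', '1234',
     '--s3_location', 's3://bucket/backups',
     '--backup_id', '42',
     '--table_names', 'events',
     '--no-overwrite'],
    'restore_point')
print(args.no_overwrite)   # False, because the flag was passed
print(args.no_automatic)   # True, the store_false default
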
18,242
|
qubole/qds-sdk-py
|
qds_sdk/cluster.py
|
Cluster.restore_point
|
def restore_point(cls, cluster_id_label, s3_location, backup_id, table_names, overwrite=True, automatic=True):
"""
    Restore a cluster from a given HBase snapshot ID
"""
conn = Qubole.agent(version=Cluster.api_version)
parameters = {}
parameters['s3_location'] = s3_location
parameters['backup_id'] = backup_id
parameters['table_names'] = table_names
parameters['overwrite'] = overwrite
parameters['automatic'] = automatic
return conn.post(cls.element_path(cluster_id_label) + "/restore_point", data=parameters)
|
python
|
def restore_point(cls, cluster_id_label, s3_location, backup_id, table_names, overwrite=True, automatic=True):
"""
    Restore a cluster from a given HBase snapshot ID
"""
conn = Qubole.agent(version=Cluster.api_version)
parameters = {}
parameters['s3_location'] = s3_location
parameters['backup_id'] = backup_id
parameters['table_names'] = table_names
parameters['overwrite'] = overwrite
parameters['automatic'] = automatic
return conn.post(cls.element_path(cluster_id_label) + "/restore_point", data=parameters)
|
[
"def",
"restore_point",
"(",
"cls",
",",
"cluster_id_label",
",",
"s3_location",
",",
"backup_id",
",",
"table_names",
",",
"overwrite",
"=",
"True",
",",
"automatic",
"=",
"True",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
"version",
"=",
"Cluster",
".",
"api_version",
")",
"parameters",
"=",
"{",
"}",
"parameters",
"[",
"'s3_location'",
"]",
"=",
"s3_location",
"parameters",
"[",
"'backup_id'",
"]",
"=",
"backup_id",
"parameters",
"[",
"'table_names'",
"]",
"=",
"table_names",
"parameters",
"[",
"'overwrite'",
"]",
"=",
"overwrite",
"parameters",
"[",
"'automatic'",
"]",
"=",
"automatic",
"return",
"conn",
".",
"post",
"(",
"cls",
".",
"element_path",
"(",
"cluster_id_label",
")",
"+",
"\"/restore_point\"",
",",
"data",
"=",
"parameters",
")"
] |
Restore a cluster from a given HBase snapshot ID
|
[
"Restoring",
"cluster",
"from",
"a",
"given",
"hbase",
"snapshot",
"id"
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/cluster.py#L682-L693
|
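
A call sketch for restore_point above, assuming a configured SDK; the cluster label, S3 path, backup id and table name are placeholders:

from qds_sdk.qubole import Qubole
from qds_sdk.cluster import Cluster

Qubole.configure(api_token='YOUR_API_TOKEN')  # placeholder
# Restore without overwriting an existing table in the target.
Cluster.restore_point('hbase-prod', 's3://bucket/backups', '42',
                      'events', overwrite=False)
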
18,243
|
qubole/qds-sdk-py
|
qds_sdk/cluster.py
|
Cluster.update_snapshot_schedule
|
def update_snapshot_schedule(cls, cluster_id_label, s3_location=None, frequency_unit=None, frequency_num=None, status=None):
"""
    Update the snapshot schedule
"""
conn = Qubole.agent(version=Cluster.api_version)
data = {}
if s3_location is not None:
data["s3_location"] = s3_location
if frequency_unit is not None:
data["frequency_unit"] = frequency_unit
if frequency_num is not None:
data["frequency_num"] = frequency_num
if status is not None:
data["status"] = status
return conn.put(cls.element_path(cluster_id_label) + "/snapshot_schedule", data)
|
python
|
def update_snapshot_schedule(cls, cluster_id_label, s3_location=None, frequency_unit=None, frequency_num=None, status=None):
"""
    Update the snapshot schedule
"""
conn = Qubole.agent(version=Cluster.api_version)
data = {}
if s3_location is not None:
data["s3_location"] = s3_location
if frequency_unit is not None:
data["frequency_unit"] = frequency_unit
if frequency_num is not None:
data["frequency_num"] = frequency_num
if status is not None:
data["status"] = status
return conn.put(cls.element_path(cluster_id_label) + "/snapshot_schedule", data)
|
[
"def",
"update_snapshot_schedule",
"(",
"cls",
",",
"cluster_id_label",
",",
"s3_location",
"=",
"None",
",",
"frequency_unit",
"=",
"None",
",",
"frequency_num",
"=",
"None",
",",
"status",
"=",
"None",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
"version",
"=",
"Cluster",
".",
"api_version",
")",
"data",
"=",
"{",
"}",
"if",
"s3_location",
"is",
"not",
"None",
":",
"data",
"[",
"\"s3_location\"",
"]",
"=",
"s3_location",
"if",
"frequency_unit",
"is",
"not",
"None",
":",
"data",
"[",
"\"frequency_unit\"",
"]",
"=",
"frequency_unit",
"if",
"frequency_num",
"is",
"not",
"None",
":",
"data",
"[",
"\"frequency_num\"",
"]",
"=",
"frequency_num",
"if",
"status",
"is",
"not",
"None",
":",
"data",
"[",
"\"status\"",
"]",
"=",
"status",
"return",
"conn",
".",
"put",
"(",
"cls",
".",
"element_path",
"(",
"cluster_id_label",
")",
"+",
"\"/snapshot_schedule\"",
",",
"data",
")"
] |
Update the snapshot schedule
|
[
"Update",
"for",
"snapshot",
"schedule"
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/cluster.py#L704-L719
|
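
A call sketch for update_snapshot_schedule above; only the non-None keyword arguments end up in the PUT body. The cluster label and frequency values are placeholders, and the set of accepted frequency_unit values is not specified in this record:

from qds_sdk.qubole import Qubole
from qds_sdk.cluster import Cluster

Qubole.configure(api_token='YOUR_API_TOKEN')  # placeholder
Cluster.update_snapshot_schedule('hbase-prod',
                                 frequency_unit='hours',
                                 frequency_num=6)
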
18,244
|
qubole/qds-sdk-py
|
qds_sdk/cluster.py
|
ClusterInfo.set_spot_instance_settings
|
def set_spot_instance_settings(self, maximum_bid_price_percentage=None,
timeout_for_request=None,
maximum_spot_instance_percentage=None):
"""
Purchase options for spot instances. Valid only when
`slave_request_type` is hybrid or spot.
`maximum_bid_price_percentage`: Maximum value to bid for spot
instances, expressed as a percentage of the base price for the
slave node instance type.
`timeout_for_request`: Timeout for a spot instance request (Unit:
minutes)
`maximum_spot_instance_percentage`: Maximum percentage of instances
that may be purchased from the AWS Spot market. Valid only when
slave_request_type is "hybrid".
"""
self.hadoop_settings['spot_instance_settings'] = {
'maximum_bid_price_percentage': maximum_bid_price_percentage,
'timeout_for_request': timeout_for_request,
'maximum_spot_instance_percentage': maximum_spot_instance_percentage}
|
python
|
def set_spot_instance_settings(self, maximum_bid_price_percentage=None,
timeout_for_request=None,
maximum_spot_instance_percentage=None):
"""
Purchase options for spot instances. Valid only when
`slave_request_type` is hybrid or spot.
`maximum_bid_price_percentage`: Maximum value to bid for spot
instances, expressed as a percentage of the base price for the
slave node instance type.
`timeout_for_request`: Timeout for a spot instance request (Unit:
minutes)
`maximum_spot_instance_percentage`: Maximum percentage of instances
that may be purchased from the AWS Spot market. Valid only when
slave_request_type is "hybrid".
"""
self.hadoop_settings['spot_instance_settings'] = {
'maximum_bid_price_percentage': maximum_bid_price_percentage,
'timeout_for_request': timeout_for_request,
'maximum_spot_instance_percentage': maximum_spot_instance_percentage}
|
[
"def",
"set_spot_instance_settings",
"(",
"self",
",",
"maximum_bid_price_percentage",
"=",
"None",
",",
"timeout_for_request",
"=",
"None",
",",
"maximum_spot_instance_percentage",
"=",
"None",
")",
":",
"self",
".",
"hadoop_settings",
"[",
"'spot_instance_settings'",
"]",
"=",
"{",
"'maximum_bid_price_percentage'",
":",
"maximum_bid_price_percentage",
",",
"'timeout_for_request'",
":",
"timeout_for_request",
",",
"'maximum_spot_instance_percentage'",
":",
"maximum_spot_instance_percentage",
"}"
] |
Purchase options for spot instances. Valid only when
`slave_request_type` is hybrid or spot.
`maximum_bid_price_percentage`: Maximum value to bid for spot
instances, expressed as a percentage of the base price for the
slave node instance type.
`timeout_for_request`: Timeout for a spot instance request (Unit:
minutes)
`maximum_spot_instance_percentage`: Maximum percentage of instances
that may be purchased from the AWS Spot market. Valid only when
slave_request_type is "hybrid".
|
[
"Purchase",
"options",
"for",
"spot",
"instances",
".",
"Valid",
"only",
"when",
"slave_request_type",
"is",
"hybrid",
"or",
"spot",
"."
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/cluster.py#L883-L904
|
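
The setter above only records its three arguments under hadoop_settings. A self-contained sketch (Python 3) using a stand-in object, since ClusterInfo's constructor arguments are not shown in this record:

from types import SimpleNamespace

from qds_sdk.cluster import ClusterInfo

# Stand-in carrying only the attribute the setter touches; in Python 3
# an unbound method is a plain function, so any object works here.
stub = SimpleNamespace(hadoop_settings={})
ClusterInfo.set_spot_instance_settings(
    stub,
    maximum_bid_price_percentage=80,    # illustrative values
    timeout_for_request=10,
    maximum_spot_instance_percentage=50)
print(stub.hadoop_settings['spot_instance_settings'])
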
18,245
|
qubole/qds-sdk-py
|
qds_sdk/cluster.py
|
ClusterInfo.set_stable_spot_instance_settings
|
def set_stable_spot_instance_settings(self, maximum_bid_price_percentage=None,
timeout_for_request=None,
allow_fallback=True):
"""
Purchase options for stable spot instances.
`maximum_bid_price_percentage`: Maximum value to bid for stable node spot
instances, expressed as a percentage of the base price
(applies to both master and slave nodes).
`timeout_for_request`: Timeout for a stable node spot instance request (Unit:
minutes)
    `allow_fallback`: Whether to fall back to on-demand instances for
stable nodes if spot instances are not available
"""
self.hadoop_settings['stable_spot_instance_settings'] = {
'maximum_bid_price_percentage': maximum_bid_price_percentage,
'timeout_for_request': timeout_for_request,
'allow_fallback': allow_fallback}
|
python
|
def set_stable_spot_instance_settings(self, maximum_bid_price_percentage=None,
timeout_for_request=None,
allow_fallback=True):
"""
Purchase options for stable spot instances.
`maximum_bid_price_percentage`: Maximum value to bid for stable node spot
instances, expressed as a percentage of the base price
(applies to both master and slave nodes).
`timeout_for_request`: Timeout for a stable node spot instance request (Unit:
minutes)
    `allow_fallback`: Whether to fall back to on-demand instances for
stable nodes if spot instances are not available
"""
self.hadoop_settings['stable_spot_instance_settings'] = {
'maximum_bid_price_percentage': maximum_bid_price_percentage,
'timeout_for_request': timeout_for_request,
'allow_fallback': allow_fallback}
|
[
"def",
"set_stable_spot_instance_settings",
"(",
"self",
",",
"maximum_bid_price_percentage",
"=",
"None",
",",
"timeout_for_request",
"=",
"None",
",",
"allow_fallback",
"=",
"True",
")",
":",
"self",
".",
"hadoop_settings",
"[",
"'stable_spot_instance_settings'",
"]",
"=",
"{",
"'maximum_bid_price_percentage'",
":",
"maximum_bid_price_percentage",
",",
"'timeout_for_request'",
":",
"timeout_for_request",
",",
"'allow_fallback'",
":",
"allow_fallback",
"}"
] |
Purchase options for stable spot instances.
`maximum_bid_price_percentage`: Maximum value to bid for stable node spot
instances, expressed as a percentage of the base price
(applies to both master and slave nodes).
`timeout_for_request`: Timeout for a stable node spot instance request (Unit:
minutes)
`allow_fallback`: Whether to fall back to on-demand instances for
stable nodes if spot instances are not available
|
[
"Purchase",
"options",
"for",
"stable",
"spot",
"instances",
"."
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/cluster.py#L907-L926
|
18,246
|
qubole/qds-sdk-py
|
qds_sdk/cluster.py
|
ClusterInfoV13.minimal_payload
|
def minimal_payload(self):
"""
This method can be used to create the payload which is sent while
creating or updating a cluster.
"""
payload_dict = self.__dict__
payload_dict.pop("api_version", None)
return util._make_minimal(payload_dict)
|
python
|
def minimal_payload(self):
"""
This method can be used to create the payload which is sent while
creating or updating a cluster.
"""
payload_dict = self.__dict__
payload_dict.pop("api_version", None)
return util._make_minimal(payload_dict)
|
[
"def",
"minimal_payload",
"(",
"self",
")",
":",
"payload_dict",
"=",
"self",
".",
"__dict__",
"payload_dict",
".",
"pop",
"(",
"\"api_version\"",
",",
"None",
")",
"return",
"util",
".",
"_make_minimal",
"(",
"payload_dict",
")"
] |
This method can be used to create the payload which is sent while
creating or updating a cluster.
|
[
"This",
"method",
"can",
"be",
"used",
"to",
"create",
"the",
"payload",
"which",
"is",
"sent",
"while",
"creating",
"or",
"updating",
"a",
"cluster",
"."
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/cluster.py#L1307-L1314
|
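
minimal_payload above delegates to util._make_minimal, which prunes unset (None) fields so only explicitly configured values are sent. A rough illustration; the exact pruning rules are an assumption based on how the helper is used in this module:

from qds_sdk import util

payload = {'label': ['dev'], 'presto_version': None}   # illustrative
print(util._make_minimal(payload))   # expected: {'label': ['dev']}
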
18,247
|
qubole/qds-sdk-py
|
qds_sdk/connection.py
|
Connection._handle_error
|
def _handle_error(response):
"""Raise exceptions in response to any http errors
Args:
response: A Response object
Raises:
BadRequest: if HTTP error code 400 returned.
UnauthorizedAccess: if HTTP error code 401 returned.
ForbiddenAccess: if HTTP error code 403 returned.
ResourceNotFound: if HTTP error code 404 is returned.
MethodNotAllowed: if HTTP error code 405 is returned.
ResourceConflict: if HTTP error code 409 is returned.
ResourceInvalid: if HTTP error code 422 is returned.
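        RetryWithDelay: if HTTP error code is 449, 502, 503 or 504.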
ClientError: if HTTP error code falls in 401 - 499.
ServerError: if HTTP error code falls in 500 - 599.
ConnectionError: if unknown HTTP error code returned.
"""
code = response.status_code
if 200 <= code < 400:
return
if code == 400:
sys.stderr.write(response.text + "\n")
raise BadRequest(response)
elif code == 401:
sys.stderr.write(response.text + "\n")
raise UnauthorizedAccess(response)
elif code == 403:
sys.stderr.write(response.text + "\n")
raise ForbiddenAccess(response)
elif code == 404:
sys.stderr.write(response.text + "\n")
raise ResourceNotFound(response)
elif code == 405:
sys.stderr.write(response.text + "\n")
raise MethodNotAllowed(response)
elif code == 409:
sys.stderr.write(response.text + "\n")
raise ResourceConflict(response)
elif code == 422:
sys.stderr.write(response.text + "\n")
raise ResourceInvalid(response)
elif code in (449, 502, 503, 504):
sys.stderr.write(response.text + "\n")
raise RetryWithDelay(response)
elif 401 <= code < 500:
sys.stderr.write(response.text + "\n")
raise ClientError(response)
elif 500 <= code < 600:
sys.stderr.write(response.text + "\n")
raise ServerError(response)
else:
raise ConnectionError(response)
|
python
|
def _handle_error(response):
"""Raise exceptions in response to any http errors
Args:
response: A Response object
Raises:
BadRequest: if HTTP error code 400 returned.
UnauthorizedAccess: if HTTP error code 401 returned.
ForbiddenAccess: if HTTP error code 403 returned.
ResourceNotFound: if HTTP error code 404 is returned.
MethodNotAllowed: if HTTP error code 405 is returned.
ResourceConflict: if HTTP error code 409 is returned.
ResourceInvalid: if HTTP error code 422 is returned.
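        RetryWithDelay: if HTTP error code is 449, 502, 503 or 504.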
ClientError: if HTTP error code falls in 401 - 499.
ServerError: if HTTP error code falls in 500 - 599.
ConnectionError: if unknown HTTP error code returned.
"""
code = response.status_code
if 200 <= code < 400:
return
if code == 400:
sys.stderr.write(response.text + "\n")
raise BadRequest(response)
elif code == 401:
sys.stderr.write(response.text + "\n")
raise UnauthorizedAccess(response)
elif code == 403:
sys.stderr.write(response.text + "\n")
raise ForbiddenAccess(response)
elif code == 404:
sys.stderr.write(response.text + "\n")
raise ResourceNotFound(response)
elif code == 405:
sys.stderr.write(response.text + "\n")
raise MethodNotAllowed(response)
elif code == 409:
sys.stderr.write(response.text + "\n")
raise ResourceConflict(response)
elif code == 422:
sys.stderr.write(response.text + "\n")
raise ResourceInvalid(response)
elif code in (449, 502, 503, 504):
sys.stderr.write(response.text + "\n")
raise RetryWithDelay(response)
elif 401 <= code < 500:
sys.stderr.write(response.text + "\n")
raise ClientError(response)
elif 500 <= code < 600:
sys.stderr.write(response.text + "\n")
raise ServerError(response)
else:
raise ConnectionError(response)
|
[
"def",
"_handle_error",
"(",
"response",
")",
":",
"code",
"=",
"response",
".",
"status_code",
"if",
"200",
"<=",
"code",
"<",
"400",
":",
"return",
"if",
"code",
"==",
"400",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"response",
".",
"text",
"+",
"\"\\n\"",
")",
"raise",
"BadRequest",
"(",
"response",
")",
"elif",
"code",
"==",
"401",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"response",
".",
"text",
"+",
"\"\\n\"",
")",
"raise",
"UnauthorizedAccess",
"(",
"response",
")",
"elif",
"code",
"==",
"403",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"response",
".",
"text",
"+",
"\"\\n\"",
")",
"raise",
"ForbiddenAccess",
"(",
"response",
")",
"elif",
"code",
"==",
"404",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"response",
".",
"text",
"+",
"\"\\n\"",
")",
"raise",
"ResourceNotFound",
"(",
"response",
")",
"elif",
"code",
"==",
"405",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"response",
".",
"text",
"+",
"\"\\n\"",
")",
"raise",
"MethodNotAllowed",
"(",
"response",
")",
"elif",
"code",
"==",
"409",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"response",
".",
"text",
"+",
"\"\\n\"",
")",
"raise",
"ResourceConflict",
"(",
"response",
")",
"elif",
"code",
"==",
"422",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"response",
".",
"text",
"+",
"\"\\n\"",
")",
"raise",
"ResourceInvalid",
"(",
"response",
")",
"elif",
"code",
"in",
"(",
"449",
",",
"502",
",",
"503",
",",
"504",
")",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"response",
".",
"text",
"+",
"\"\\n\"",
")",
"raise",
"RetryWithDelay",
"(",
"response",
")",
"elif",
"401",
"<=",
"code",
"<",
"500",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"response",
".",
"text",
"+",
"\"\\n\"",
")",
"raise",
"ClientError",
"(",
"response",
")",
"elif",
"500",
"<=",
"code",
"<",
"600",
":",
"sys",
".",
"stderr",
".",
"write",
"(",
"response",
".",
"text",
"+",
"\"\\n\"",
")",
"raise",
"ServerError",
"(",
"response",
")",
"else",
":",
"raise",
"ConnectionError",
"(",
"response",
")"
] |
Raise exceptions in response to any http errors
Args:
response: A Response object
Raises:
BadRequest: if HTTP error code 400 returned.
UnauthorizedAccess: if HTTP error code 401 returned.
ForbiddenAccess: if HTTP error code 403 returned.
ResourceNotFound: if HTTP error code 404 is returned.
MethodNotAllowed: if HTTP error code 405 is returned.
ResourceConflict: if HTTP error code 409 is returned.
ResourceInvalid: if HTTP error code 422 is returned.
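RetryWithDelay: if HTTP error code is 449, 502, 503 or 504.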
ClientError: if HTTP error code falls in 401 - 499.
ServerError: if HTTP error code falls in 500 - 599.
ConnectionError: if unknown HTTP error code returned.
|
[
"Raise",
"exceptions",
"in",
"response",
"to",
"any",
"http",
"errors"
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/connection.py#L111-L165
|
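
A self-contained sketch of the mapping above, using a stub in place of a requests Response and assuming the exception classes are importable from qds_sdk.exception (which Connection's usage suggests):

from qds_sdk.connection import Connection
from qds_sdk.exception import ResourceNotFound

class FakeResponse:
    # Only the two attributes _handle_error reads.
    status_code = 404
    text = 'not found'

try:
    Connection._handle_error(FakeResponse())
except ResourceNotFound:
    print('404 mapped to ResourceNotFound, as documented')
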
18,248
|
qubole/qds-sdk-py
|
qds_sdk/template.py
|
Template.createTemplate
|
def createTemplate(data):
"""
Create a new template.
Args:
`data`: json data required for creating a template
Returns:
Dictionary containing the details of the template with its ID.
"""
conn = Qubole.agent()
return conn.post(Template.rest_entity_path, data)
|
python
|
def createTemplate(data):
"""
Create a new template.
Args:
`data`: json data required for creating a template
Returns:
Dictionary containing the details of the template with its ID.
"""
conn = Qubole.agent()
return conn.post(Template.rest_entity_path, data)
|
[
"def",
"createTemplate",
"(",
"data",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"return",
"conn",
".",
"post",
"(",
"Template",
".",
"rest_entity_path",
",",
"data",
")"
] |
Create a new template.
Args:
`data`: json data required for creating a template
Returns:
Dictionary containing the details of the template with its ID.
|
[
"Create",
"a",
"new",
"template",
"."
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/template.py#L162-L172
|
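
A call sketch for createTemplate above, assuming a configured SDK; the payload keys are illustrative assumptions, since this record does not spell out the template schema:

from qds_sdk.qubole import Qubole
from qds_sdk.template import Template

Qubole.configure(api_token='YOUR_API_TOKEN')  # placeholder
new_template = Template.createTemplate({
    'name': 'daily-report',            # illustrative
    'command_type': 'HiveCommand',     # illustrative
    'command': {'query': 'SELECT 1'},  # illustrative
})
print(new_template['id'])
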
18,249
|
qubole/qds-sdk-py
|
qds_sdk/template.py
|
Template.editTemplate
|
def editTemplate(id, data):
"""
Edit an existing template.
Args:
`id`: ID of the template to edit
`data`: json data to be updated
Returns:
Dictionary containing the updated details of the template.
"""
conn = Qubole.agent()
return conn.put(Template.element_path(id), data)
|
python
|
def editTemplate(id, data):
"""
Edit an existing template.
Args:
`id`: ID of the template to edit
`data`: json data to be updated
Returns:
Dictionary containing the updated details of the template.
"""
conn = Qubole.agent()
return conn.put(Template.element_path(id), data)
|
[
"def",
"editTemplate",
"(",
"id",
",",
"data",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"return",
"conn",
".",
"put",
"(",
"Template",
".",
"element_path",
"(",
"id",
")",
",",
"data",
")"
] |
Edit an existing template.
Args:
`id`: ID of the template to edit
`data`: json data to be updated
Returns:
Dictionary containing the updated details of the template.
|
[
"Edit",
"an",
"existing",
"template",
"."
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/template.py#L175-L186
|
18,250
|
qubole/qds-sdk-py
|
qds_sdk/template.py
|
Template.viewTemplate
|
def viewTemplate(id):
"""
    View the details of an existing Template.
Args:
`id`: ID of the template to fetch
Returns:
Dictionary containing the details of the template.
"""
conn = Qubole.agent()
return conn.get(Template.element_path(id))
|
python
|
def viewTemplate(id):
"""
    View the details of an existing Template.
Args:
`id`: ID of the template to fetch
Returns:
Dictionary containing the details of the template.
"""
conn = Qubole.agent()
return conn.get(Template.element_path(id))
|
[
"def",
"viewTemplate",
"(",
"id",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"return",
"conn",
".",
"get",
"(",
"Template",
".",
"element_path",
"(",
"id",
")",
")"
] |
View the details of an existing Template.
Args:
`id`: ID of the template to fetch
Returns:
Dictionary containing the details of the template.
|
[
"View",
"an",
"existing",
"Template",
"details",
"."
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/template.py#L204-L215
|
18,251
|
qubole/qds-sdk-py
|
qds_sdk/template.py
|
Template.submitTemplate
|
def submitTemplate(id, data={}):
"""
Submit an existing Template.
Args:
`id`: ID of the template to submit
`data`: json data containing the input_vars
Returns:
Dictionary containing Command Object details.
"""
conn = Qubole.agent()
path = str(id) + "/run"
return conn.post(Template.element_path(path), data)
|
python
|
def submitTemplate(id, data={}):
"""
Submit an existing Template.
Args:
`id`: ID of the template to submit
`data`: json data containing the input_vars
Returns:
Dictionary containing Command Object details.
"""
conn = Qubole.agent()
path = str(id) + "/run"
return conn.post(Template.element_path(path), data)
|
[
"def",
"submitTemplate",
"(",
"id",
",",
"data",
"=",
"{",
"}",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"path",
"=",
"str",
"(",
"id",
")",
"+",
"\"/run\"",
"return",
"conn",
".",
"post",
"(",
"Template",
".",
"element_path",
"(",
"path",
")",
",",
"data",
")"
] |
Submit an existing Template.
Args:
`id`: ID of the template to submit
`data`: json data containing the input_vars
Returns:
Dictionary containing Command Object details.
|
[
"Submit",
"an",
"existing",
"Template",
"."
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/template.py#L218-L230
|
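
A call sketch for submitTemplate above; the template id is a placeholder, and the input_vars shape is an assumption based only on the docstring:

from qds_sdk.qubole import Qubole
from qds_sdk.template import Template

Qubole.configure(api_token='YOUR_API_TOKEN')  # placeholder
cmd = Template.submitTemplate(123, {'input_vars': [{'limit': '10'}]})
print(cmd['id'], cmd['command_type'])
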
18,252
|
qubole/qds-sdk-py
|
qds_sdk/template.py
|
Template.runTemplate
|
def runTemplate(id, data={}):
"""
    Run an existing Template and wait for the result.
Prints result to stdout.
Args:
`id`: ID of the template to run
`data`: json data containing the input_vars
Returns:
An integer as status (0: success, 1: failure)
"""
conn = Qubole.agent()
path = str(id) + "/run"
res = conn.post(Template.element_path(path), data)
cmdType = res['command_type']
cmdId = res['id']
cmdClass = eval(cmdType)
cmd = cmdClass.find(cmdId)
while not Command.is_done(cmd.status):
time.sleep(Qubole.poll_interval)
cmd = cmdClass.find(cmd.id)
return Template.getResult(cmdClass, cmd)
|
python
|
def runTemplate(id, data={}):
"""
    Run an existing Template and wait for the result.
Prints result to stdout.
Args:
`id`: ID of the template to run
`data`: json data containing the input_vars
Returns:
An integer as status (0: success, 1: failure)
"""
conn = Qubole.agent()
path = str(id) + "/run"
res = conn.post(Template.element_path(path), data)
cmdType = res['command_type']
cmdId = res['id']
cmdClass = eval(cmdType)
cmd = cmdClass.find(cmdId)
while not Command.is_done(cmd.status):
time.sleep(Qubole.poll_interval)
cmd = cmdClass.find(cmd.id)
return Template.getResult(cmdClass, cmd)
|
[
"def",
"runTemplate",
"(",
"id",
",",
"data",
"=",
"{",
"}",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"path",
"=",
"str",
"(",
"id",
")",
"+",
"\"/run\"",
"res",
"=",
"conn",
".",
"post",
"(",
"Template",
".",
"element_path",
"(",
"path",
")",
",",
"data",
")",
"cmdType",
"=",
"res",
"[",
"'command_type'",
"]",
"cmdId",
"=",
"res",
"[",
"'id'",
"]",
"cmdClass",
"=",
"eval",
"(",
"cmdType",
")",
"cmd",
"=",
"cmdClass",
".",
"find",
"(",
"cmdId",
")",
"while",
"not",
"Command",
".",
"is_done",
"(",
"cmd",
".",
"status",
")",
":",
"time",
".",
"sleep",
"(",
"Qubole",
".",
"poll_interval",
")",
"cmd",
"=",
"cmdClass",
".",
"find",
"(",
"cmd",
".",
"id",
")",
"return",
"Template",
".",
"getResult",
"(",
"cmdClass",
",",
"cmd",
")"
] |
Run an existing Template and wait for the result.
Prints result to stdout.
Args:
`id`: ID of the template to run
`data`: json data containing the input_vars
Returns:
An integer as status (0: success, 1: failure)
|
[
"Run",
"an",
"existing",
"Template",
"and",
"waits",
"for",
"the",
"Result",
".",
"Prints",
"result",
"to",
"stdout",
"."
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/template.py#L233-L255
|
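
Unlike submitTemplate, runTemplate above blocks: it polls every Qubole.poll_interval seconds until Command.is_done reports completion, then prints the result and returns 0 or 1. A call sketch with a placeholder id:

from qds_sdk.qubole import Qubole
from qds_sdk.template import Template

Qubole.configure(api_token='YOUR_API_TOKEN')  # placeholder
status = Template.runTemplate(123)   # blocks until the command finishes
print('success' if status == 0 else 'failure')
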
18,253
|
qubole/qds-sdk-py
|
qds_sdk/template.py
|
Template.listTemplates
|
def listTemplates(data={}):
"""
    Fetch details of existing Templates.
Args:
`data`: dictionary containing the value of page number and per-page value
Returns:
Dictionary containing paging_info and command_templates details
"""
conn = Qubole.agent()
url_path = Template.rest_entity_path
page_attr = []
if "page" in data and data["page"] is not None:
page_attr.append("page=%s" % data["page"])
if "per_page" in data and data["per_page"] is not None:
page_attr.append("per_page=%s" % data["per_page"])
if page_attr:
url_path = "%s?%s" % (url_path, "&".join(page_attr))
return conn.get(url_path)
|
python
|
def listTemplates(data={}):
"""
    Fetch details of existing Templates.
Args:
`data`: dictionary containing the value of page number and per-page value
Returns:
Dictionary containing paging_info and command_templates details
"""
conn = Qubole.agent()
url_path = Template.rest_entity_path
page_attr = []
if "page" in data and data["page"] is not None:
page_attr.append("page=%s" % data["page"])
if "per_page" in data and data["per_page"] is not None:
page_attr.append("per_page=%s" % data["per_page"])
if page_attr:
url_path = "%s?%s" % (url_path, "&".join(page_attr))
return conn.get(url_path)
|
[
"def",
"listTemplates",
"(",
"data",
"=",
"{",
"}",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"url_path",
"=",
"Template",
".",
"rest_entity_path",
"page_attr",
"=",
"[",
"]",
"if",
"\"page\"",
"in",
"data",
"and",
"data",
"[",
"\"page\"",
"]",
"is",
"not",
"None",
":",
"page_attr",
".",
"append",
"(",
"\"page=%s\"",
"%",
"data",
"[",
"\"page\"",
"]",
")",
"if",
"\"per_page\"",
"in",
"data",
"and",
"data",
"[",
"\"per_page\"",
"]",
"is",
"not",
"None",
":",
"page_attr",
".",
"append",
"(",
"\"per_page=%s\"",
"%",
"data",
"[",
"\"per_page\"",
"]",
")",
"if",
"page_attr",
":",
"url_path",
"=",
"\"%s?%s\"",
"%",
"(",
"url_path",
",",
"\"&\"",
".",
"join",
"(",
"page_attr",
")",
")",
"return",
"conn",
".",
"get",
"(",
"url_path",
")"
] |
Fetch details of existing Templates.
Args:
`data`: dictionary containing the value of page number and per-page value
Returns:
Dictionary containing paging_info and command_templates details
|
[
"Fetch",
"existing",
"Templates",
"details",
"."
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/template.py#L269-L288
|
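
A call sketch for listTemplates above; the paging values are illustrative. The method appends page/per_page to Template.rest_entity_path only when they are present and non-None:

from qds_sdk.qubole import Qubole
from qds_sdk.template import Template

Qubole.configure(api_token='YOUR_API_TOKEN')  # placeholder
resp = Template.listTemplates({'page': 1, 'per_page': 10})
print(resp.get('paging_info'))
for template in resp.get('command_templates', []):
    print(template)
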
18,254
|
qubole/qds-sdk-py
|
qds_sdk/dbtaps.py
|
DbTapCmdLine.edit
|
def edit(args):
tap = DbTap.find(args.id)
""" Carefully setup a dict """
options = {}
if not args.name is None:
options["db_name"]=args.name
if args.host is not None:
options["db_host"]=args.host
if args.user is not None:
options["db_user"]=args.user
if args.password is not None:
options["db_passwd"] = args.password
if args.type is not None:
options["db_type"] = args.type
if args.location is not None:
options["db_location"] = args.location
if args.port is not None:
options["port"] = args.port
tap = tap.edit(**options)
return json.dumps(tap.attributes, sort_keys=True, indent=4)
|
python
|
def edit(args):
tap = DbTap.find(args.id)
""" Carefully setup a dict """
options = {}
if not args.name is None:
options["db_name"]=args.name
if args.host is not None:
options["db_host"]=args.host
if args.user is not None:
options["db_user"]=args.user
if args.password is not None:
options["db_passwd"] = args.password
if args.type is not None:
options["db_type"] = args.type
if args.location is not None:
options["db_location"] = args.location
if args.port is not None:
options["port"] = args.port
tap = tap.edit(**options)
return json.dumps(tap.attributes, sort_keys=True, indent=4)
|
[
"def",
"edit",
"(",
"args",
")",
":",
"tap",
"=",
"DbTap",
".",
"find",
"(",
"args",
".",
"id",
")",
"options",
"=",
"{",
"}",
"if",
"not",
"args",
".",
"name",
"is",
"None",
":",
"options",
"[",
"\"db_name\"",
"]",
"=",
"args",
".",
"name",
"if",
"args",
".",
"host",
"is",
"not",
"None",
":",
"options",
"[",
"\"db_host\"",
"]",
"=",
"args",
".",
"host",
"if",
"args",
".",
"user",
"is",
"not",
"None",
":",
"options",
"[",
"\"db_user\"",
"]",
"=",
"args",
".",
"user",
"if",
"args",
".",
"password",
"is",
"not",
"None",
":",
"options",
"[",
"\"db_passwd\"",
"]",
"=",
"args",
".",
"password",
"if",
"args",
".",
"type",
"is",
"not",
"None",
":",
"options",
"[",
"\"db_type\"",
"]",
"=",
"args",
".",
"type",
"if",
"args",
".",
"location",
"is",
"not",
"None",
":",
"options",
"[",
"\"db_location\"",
"]",
"=",
"args",
".",
"location",
"if",
"args",
".",
"port",
"is",
"not",
"None",
":",
"options",
"[",
"\"port\"",
"]",
"=",
"args",
".",
"port",
"tap",
"=",
"tap",
".",
"edit",
"(",
"*",
"*",
"options",
")",
"return",
"json",
".",
"dumps",
"(",
"tap",
".",
"attributes",
",",
"sort_keys",
"=",
"True",
",",
"indent",
"=",
"4",
")"
] |
Carefully set up a dict
|
[
"Carefully",
"setup",
"a",
"dict"
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/dbtaps.py#L135-L154
|
18,255
|
qubole/qds-sdk-py
|
qds_sdk/app.py
|
App.create
|
def create(cls, name, config=None, kind="spark"):
"""
Create a new app.
Args:
`name`: the name of the app
`config`: a dictionary of key-value pairs
`kind`: kind of the app (default=spark)
"""
conn = Qubole.agent()
return conn.post(cls.rest_entity_path,
data={'name': name, 'config': config, 'kind': kind})
|
python
|
def create(cls, name, config=None, kind="spark"):
"""
Create a new app.
Args:
`name`: the name of the app
`config`: a dictionary of key-value pairs
`kind`: kind of the app (default=spark)
"""
conn = Qubole.agent()
return conn.post(cls.rest_entity_path,
data={'name': name, 'config': config, 'kind': kind})
|
[
"def",
"create",
"(",
"cls",
",",
"name",
",",
"config",
"=",
"None",
",",
"kind",
"=",
"\"spark\"",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"return",
"conn",
".",
"post",
"(",
"cls",
".",
"rest_entity_path",
",",
"data",
"=",
"{",
"'name'",
":",
"name",
",",
"'config'",
":",
"config",
",",
"'kind'",
":",
"kind",
"}",
")"
] |
Create a new app.
Args:
`name`: the name of the app
`config`: a dictionary of key-value pairs
`kind`: kind of the app (default=spark)
|
[
"Create",
"a",
"new",
"app",
"."
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/app.py#L127-L140
|
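
A call sketch for App.create above, assuming a configured SDK; the app name and config keys are placeholders:

from qds_sdk.qubole import Qubole
from qds_sdk.app import App

Qubole.configure(api_token='YOUR_API_TOKEN')  # placeholder
App.create('my-spark-app',                          # illustrative
           config={'spark.executor.memory': '2g'},  # illustrative
           kind='spark')
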
18,256
|
qubole/qds-sdk-py
|
qds_sdk/qubole.py
|
Qubole.configure
|
def configure(cls, api_token,
api_url="https://api.qubole.com/api/", version="v1.2",
poll_interval=5, skip_ssl_cert_check=False, cloud_name="AWS"):
"""
Set parameters governing interaction with QDS
Args:
`api_token`: authorization token for QDS. required
`api_url`: the base URL for QDS API. configurable for testing only
`version`: QDS REST api version. Will be used throughout unless overridden in Qubole.agent(..)
`poll_interval`: interval in secs when polling QDS for events
"""
cls._auth = QuboleAuth(api_token)
cls.api_token = api_token
cls.version = version
cls.baseurl = api_url
if poll_interval < Qubole.MIN_POLL_INTERVAL:
log.warn("Poll interval cannot be less than %s seconds. Setting it to %s seconds.\n" % (Qubole.MIN_POLL_INTERVAL, Qubole.MIN_POLL_INTERVAL))
cls.poll_interval = Qubole.MIN_POLL_INTERVAL
else:
cls.poll_interval = poll_interval
cls.skip_ssl_cert_check = skip_ssl_cert_check
cls.cloud_name = cloud_name.lower()
cls.cached_agent = None
|
python
|
def configure(cls, api_token,
api_url="https://api.qubole.com/api/", version="v1.2",
poll_interval=5, skip_ssl_cert_check=False, cloud_name="AWS"):
"""
Set parameters governing interaction with QDS
Args:
`api_token`: authorization token for QDS. required
`api_url`: the base URL for QDS API. configurable for testing only
`version`: QDS REST api version. Will be used throughout unless overridden in Qubole.agent(..)
`poll_interval`: interval in secs when polling QDS for events
"""
cls._auth = QuboleAuth(api_token)
cls.api_token = api_token
cls.version = version
cls.baseurl = api_url
if poll_interval < Qubole.MIN_POLL_INTERVAL:
log.warn("Poll interval cannot be less than %s seconds. Setting it to %s seconds.\n" % (Qubole.MIN_POLL_INTERVAL, Qubole.MIN_POLL_INTERVAL))
cls.poll_interval = Qubole.MIN_POLL_INTERVAL
else:
cls.poll_interval = poll_interval
cls.skip_ssl_cert_check = skip_ssl_cert_check
cls.cloud_name = cloud_name.lower()
cls.cached_agent = None
|
[
"def",
"configure",
"(",
"cls",
",",
"api_token",
",",
"api_url",
"=",
"\"https://api.qubole.com/api/\"",
",",
"version",
"=",
"\"v1.2\"",
",",
"poll_interval",
"=",
"5",
",",
"skip_ssl_cert_check",
"=",
"False",
",",
"cloud_name",
"=",
"\"AWS\"",
")",
":",
"cls",
".",
"_auth",
"=",
"QuboleAuth",
"(",
"api_token",
")",
"cls",
".",
"api_token",
"=",
"api_token",
"cls",
".",
"version",
"=",
"version",
"cls",
".",
"baseurl",
"=",
"api_url",
"if",
"poll_interval",
"<",
"Qubole",
".",
"MIN_POLL_INTERVAL",
":",
"log",
".",
"warn",
"(",
"\"Poll interval cannot be less than %s seconds. Setting it to %s seconds.\\n\"",
"%",
"(",
"Qubole",
".",
"MIN_POLL_INTERVAL",
",",
"Qubole",
".",
"MIN_POLL_INTERVAL",
")",
")",
"cls",
".",
"poll_interval",
"=",
"Qubole",
".",
"MIN_POLL_INTERVAL",
"else",
":",
"cls",
".",
"poll_interval",
"=",
"poll_interval",
"cls",
".",
"skip_ssl_cert_check",
"=",
"skip_ssl_cert_check",
"cls",
".",
"cloud_name",
"=",
"cloud_name",
".",
"lower",
"(",
")",
"cls",
".",
"cached_agent",
"=",
"None"
] |
Set parameters governing interaction with QDS
Args:
`api_token`: authorization token for QDS. required
`api_url`: the base URL for QDS API. configurable for testing only
`version`: QDS REST api version. Will be used throughout unless overridden in Qubole.agent(..)
`poll_interval`: interval in secs when polling QDS for events
|
[
"Set",
"parameters",
"governing",
"interaction",
"with",
"QDS"
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/qubole.py#L36-L63
|
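
A sketch of the poll-interval clamping above; the token is a placeholder:

from qds_sdk.qubole import Qubole

Qubole.configure(api_token='YOUR_API_TOKEN', poll_interval=1)
# configure() logs a warning and clamps values below
# Qubole.MIN_POLL_INTERVAL, so this prints at least MIN_POLL_INTERVAL.
print(Qubole.poll_interval)
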
18,257
|
qubole/qds-sdk-py
|
qds_sdk/clusterv2.py
|
ClusterCmdLine.get_cluster_request_parameters
|
def get_cluster_request_parameters(cluster_info, cloud_config, engine_config):
'''
Use this to return final minimal request from cluster_info, cloud_config or engine_config objects
Alternatively call util._make_minimal if only one object needs to be implemented
'''
cluster_request = {}
cloud_config = util._make_minimal(cloud_config.__dict__)
if bool(cloud_config): cluster_request['cloud_config'] = cloud_config
engine_config = util._make_minimal(engine_config.__dict__)
if bool(engine_config): cluster_request['engine_config'] = engine_config
cluster_request.update(util._make_minimal(cluster_info.__dict__))
return cluster_request
|
python
|
def get_cluster_request_parameters(cluster_info, cloud_config, engine_config):
'''
Use this to return final minimal request from cluster_info, cloud_config or engine_config objects
Alternatively call util._make_minimal if only one object needs to be implemented
'''
cluster_request = {}
cloud_config = util._make_minimal(cloud_config.__dict__)
if bool(cloud_config): cluster_request['cloud_config'] = cloud_config
engine_config = util._make_minimal(engine_config.__dict__)
if bool(engine_config): cluster_request['engine_config'] = engine_config
cluster_request.update(util._make_minimal(cluster_info.__dict__))
return cluster_request
|
[
"def",
"get_cluster_request_parameters",
"(",
"cluster_info",
",",
"cloud_config",
",",
"engine_config",
")",
":",
"cluster_request",
"=",
"{",
"}",
"cloud_config",
"=",
"util",
".",
"_make_minimal",
"(",
"cloud_config",
".",
"__dict__",
")",
"if",
"bool",
"(",
"cloud_config",
")",
":",
"cluster_request",
"[",
"'cloud_config'",
"]",
"=",
"cloud_config",
"engine_config",
"=",
"util",
".",
"_make_minimal",
"(",
"engine_config",
".",
"__dict__",
")",
"if",
"bool",
"(",
"engine_config",
")",
":",
"cluster_request",
"[",
"'engine_config'",
"]",
"=",
"engine_config",
"cluster_request",
".",
"update",
"(",
"util",
".",
"_make_minimal",
"(",
"cluster_info",
".",
"__dict__",
")",
")",
"return",
"cluster_request"
] |
Use this to return final minimal request from cluster_info, cloud_config or engine_config objects
Alternatively call util._make_minimal if only one object needs to be included
|
[
"Use",
"this",
"to",
"return",
"final",
"minimal",
"request",
"from",
"cluster_info",
"cloud_config",
"or",
"engine_config",
"objects",
"Alternatively",
"call",
"util",
".",
"_make_minimal",
"if",
"only",
"one",
"object",
"needs",
"to",
"be",
"implemented"
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/clusterv2.py#L131-L145
|
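
A self-contained sketch (Python 3) of the merging above, with SimpleNamespace stand-ins because the real config classes' constructors are not part of this record; the expected output assumes util._make_minimal drops the all-None cloud config:

from types import SimpleNamespace

from qds_sdk.clusterv2 import ClusterCmdLine

cloud = SimpleNamespace(compute_access_key=None)   # all None: expected to be dropped
engine = SimpleNamespace(flavour='spark')          # illustrative
info = SimpleNamespace(label=['dev'])              # illustrative
print(ClusterCmdLine.get_cluster_request_parameters(info, cloud, engine))
# expected: {'engine_config': {'flavour': 'spark'}, 'label': ['dev']}
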
18,258
|
qubole/qds-sdk-py
|
qds_sdk/clusterv2.py
|
ClusterV2.create
|
def create(cls, cluster_info):
"""
Create a new cluster using information provided in `cluster_info`.
"""
conn = Qubole.agent(version="v2")
return conn.post(cls.rest_entity_path, data=cluster_info)
|
python
|
def create(cls, cluster_info):
"""
Create a new cluster using information provided in `cluster_info`.
"""
conn = Qubole.agent(version="v2")
return conn.post(cls.rest_entity_path, data=cluster_info)
|
[
"def",
"create",
"(",
"cls",
",",
"cluster_info",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
"version",
"=",
"\"v2\"",
")",
"return",
"conn",
".",
"post",
"(",
"cls",
".",
"rest_entity_path",
",",
"data",
"=",
"cluster_info",
")"
] |
Create a new cluster using information provided in `cluster_info`.
|
[
"Create",
"a",
"new",
"cluster",
"using",
"information",
"provided",
"in",
"cluster_info",
"."
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/clusterv2.py#L713-L718
|
18,259
|
qubole/qds-sdk-py
|
qds_sdk/commands.py
|
_download_to_local
|
def _download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=None):
'''
Downloads the contents of all objects in s3_path into fp
Args:
`boto_conn`: S3 connection object
`s3_path`: S3 path to be downloaded
`fp`: The file object where data is to be downloaded
'''
#Progress bar to display download progress
def _callback(downloaded, total):
'''
        Callback function for download.
`downloaded`: File size already downloaded (int)
`total`: Total file size to be downloaded (int)
'''
        if (total == 0) or (downloaded == total):
return
        progress = downloaded*100//total
sys.stderr.write('\r[{0}] {1}%'.format('#'*progress, progress))
sys.stderr.flush()
m = _URI_RE.match(s3_path)
bucket_name = m.group(1)
bucket = boto_conn.get_bucket(bucket_name)
retries = 6
if s3_path.endswith('/') is False:
#It is a file
key_name = m.group(2)
key_instance = bucket.get_key(key_name)
while key_instance is None and retries > 0:
retries = retries - 1
log.info("Results file is not available on s3. Retry: " + str(6-retries))
time.sleep(10)
key_instance = bucket.get_key(key_name)
if key_instance is None:
raise Exception("Results file not available on s3 yet. This can be because of s3 eventual consistency issues.")
log.info("Downloading file from %s" % s3_path)
if delim is None:
try:
key_instance.get_contents_to_file(fp) # cb=_callback
except boto.exception.S3ResponseError as e:
if (e.status == 403):
# SDK-191, boto gives an error while fetching the objects using versions which happens by default
# in the get_contents_to_file() api. So attempt one without specifying version.
log.warn("Access denied while fetching the s3 object. Retrying without specifying the version....")
key_instance.open()
fp.write(key_instance.read())
key_instance.close()
else:
raise
else:
# Get contents as string. Replace parameters and write to file.
_read_iteratively(key_instance, fp, delim=delim)
else:
#It is a folder
key_prefix = m.group(2)
bucket_paths = bucket.list(key_prefix)
for one_path in bucket_paths:
name = one_path.name
            # Eliminate _tmp_ files which end with $folder$
if name.endswith('$folder$'):
continue
log.info("Downloading file from %s" % name)
if delim is None:
one_path.get_contents_to_file(fp) # cb=_callback
else:
_read_iteratively(one_path, fp, delim=delim)
|
python
|
def _download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=None):
'''
Downloads the contents of all objects in s3_path into fp
Args:
`boto_conn`: S3 connection object
`s3_path`: S3 path to be downloaded
`fp`: The file object where data is to be downloaded
'''
#Progress bar to display download progress
def _callback(downloaded, total):
'''
        Callback function for download.
`downloaded`: File size already downloaded (int)
`total`: Total file size to be downloaded (int)
'''
        if (total == 0) or (downloaded == total):
return
        progress = downloaded*100//total
sys.stderr.write('\r[{0}] {1}%'.format('#'*progress, progress))
sys.stderr.flush()
m = _URI_RE.match(s3_path)
bucket_name = m.group(1)
bucket = boto_conn.get_bucket(bucket_name)
retries = 6
if s3_path.endswith('/') is False:
#It is a file
key_name = m.group(2)
key_instance = bucket.get_key(key_name)
while key_instance is None and retries > 0:
retries = retries - 1
log.info("Results file is not available on s3. Retry: " + str(6-retries))
time.sleep(10)
key_instance = bucket.get_key(key_name)
if key_instance is None:
raise Exception("Results file not available on s3 yet. This can be because of s3 eventual consistency issues.")
log.info("Downloading file from %s" % s3_path)
if delim is None:
try:
key_instance.get_contents_to_file(fp) # cb=_callback
except boto.exception.S3ResponseError as e:
if (e.status == 403):
# SDK-191, boto gives an error while fetching the objects using versions which happens by default
# in the get_contents_to_file() api. So attempt one without specifying version.
log.warn("Access denied while fetching the s3 object. Retrying without specifying the version....")
key_instance.open()
fp.write(key_instance.read())
key_instance.close()
else:
raise
else:
# Get contents as string. Replace parameters and write to file.
_read_iteratively(key_instance, fp, delim=delim)
else:
#It is a folder
key_prefix = m.group(2)
bucket_paths = bucket.list(key_prefix)
for one_path in bucket_paths:
name = one_path.name
            # Eliminate _tmp_ files which end with $folder$
if name.endswith('$folder$'):
continue
log.info("Downloading file from %s" % name)
if delim is None:
one_path.get_contents_to_file(fp) # cb=_callback
else:
_read_iteratively(one_path, fp, delim=delim)
|
[
"def",
"_download_to_local",
"(",
"boto_conn",
",",
"s3_path",
",",
"fp",
",",
"num_result_dir",
",",
"delim",
"=",
"None",
")",
":",
"#Progress bar to display download progress",
"def",
"_callback",
"(",
"downloaded",
",",
"total",
")",
":",
"'''\n Call function for upload.\n\n `downloaded`: File size already downloaded (int)\n\n `total`: Total file size to be downloaded (int)\n '''",
"if",
"(",
"total",
"is",
"0",
")",
"or",
"(",
"downloaded",
"==",
"total",
")",
":",
"return",
"progress",
"=",
"downloaded",
"*",
"100",
"/",
"total",
"sys",
".",
"stderr",
".",
"write",
"(",
"'\\r[{0}] {1}%'",
".",
"format",
"(",
"'#'",
"*",
"progress",
",",
"progress",
")",
")",
"sys",
".",
"stderr",
".",
"flush",
"(",
")",
"m",
"=",
"_URI_RE",
".",
"match",
"(",
"s3_path",
")",
"bucket_name",
"=",
"m",
".",
"group",
"(",
"1",
")",
"bucket",
"=",
"boto_conn",
".",
"get_bucket",
"(",
"bucket_name",
")",
"retries",
"=",
"6",
"if",
"s3_path",
".",
"endswith",
"(",
"'/'",
")",
"is",
"False",
":",
"#It is a file",
"key_name",
"=",
"m",
".",
"group",
"(",
"2",
")",
"key_instance",
"=",
"bucket",
".",
"get_key",
"(",
"key_name",
")",
"while",
"key_instance",
"is",
"None",
"and",
"retries",
">",
"0",
":",
"retries",
"=",
"retries",
"-",
"1",
"log",
".",
"info",
"(",
"\"Results file is not available on s3. Retry: \"",
"+",
"str",
"(",
"6",
"-",
"retries",
")",
")",
"time",
".",
"sleep",
"(",
"10",
")",
"key_instance",
"=",
"bucket",
".",
"get_key",
"(",
"key_name",
")",
"if",
"key_instance",
"is",
"None",
":",
"raise",
"Exception",
"(",
"\"Results file not available on s3 yet. This can be because of s3 eventual consistency issues.\"",
")",
"log",
".",
"info",
"(",
"\"Downloading file from %s\"",
"%",
"s3_path",
")",
"if",
"delim",
"is",
"None",
":",
"try",
":",
"key_instance",
".",
"get_contents_to_file",
"(",
"fp",
")",
"# cb=_callback",
"except",
"boto",
".",
"exception",
".",
"S3ResponseError",
"as",
"e",
":",
"if",
"(",
"e",
".",
"status",
"==",
"403",
")",
":",
"# SDK-191, boto gives an error while fetching the objects using versions which happens by default",
"# in the get_contents_to_file() api. So attempt one without specifying version.",
"log",
".",
"warn",
"(",
"\"Access denied while fetching the s3 object. Retrying without specifying the version....\"",
")",
"key_instance",
".",
"open",
"(",
")",
"fp",
".",
"write",
"(",
"key_instance",
".",
"read",
"(",
")",
")",
"key_instance",
".",
"close",
"(",
")",
"else",
":",
"raise",
"else",
":",
"# Get contents as string. Replace parameters and write to file.",
"_read_iteratively",
"(",
"key_instance",
",",
"fp",
",",
"delim",
"=",
"delim",
")",
"else",
":",
"#It is a folder",
"key_prefix",
"=",
"m",
".",
"group",
"(",
"2",
")",
"bucket_paths",
"=",
"bucket",
".",
"list",
"(",
"key_prefix",
")",
"for",
"one_path",
"in",
"bucket_paths",
":",
"name",
"=",
"one_path",
".",
"name",
"# Eliminate _tmp_ files which ends with $folder$",
"if",
"name",
".",
"endswith",
"(",
"'$folder$'",
")",
":",
"continue",
"log",
".",
"info",
"(",
"\"Downloading file from %s\"",
"%",
"name",
")",
"if",
"delim",
"is",
"None",
":",
"one_path",
".",
"get_contents_to_file",
"(",
"fp",
")",
"# cb=_callback",
"else",
":",
"_read_iteratively",
"(",
"one_path",
",",
"fp",
",",
"delim",
"=",
"delim",
")"
] |
Downloads the contents of all objects in s3_path into fp
Args:
`boto_conn`: S3 connection object
`s3_path`: S3 path to be downloaded
`fp`: The file object where data is to be downloaded
|
[
"Downloads",
"the",
"contents",
"of",
"all",
"objects",
"in",
"s3_path",
"into",
"fp"
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/commands.py#L1415-L1489
|
18,260
|
qubole/qds-sdk-py
|
qds_sdk/commands.py
|
Command.cancel_id
|
def cancel_id(cls, id):
"""
Cancels command denoted by this id
Args:
`id`: command id
"""
conn = Qubole.agent()
data = {"status": "kill"}
return conn.put(cls.element_path(id), data)
|
python
|
def cancel_id(cls, id):
"""
Cancels command denoted by this id
Args:
`id`: command id
"""
conn = Qubole.agent()
data = {"status": "kill"}
return conn.put(cls.element_path(id), data)
|
[
"def",
"cancel_id",
"(",
"cls",
",",
"id",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"data",
"=",
"{",
"\"status\"",
":",
"\"kill\"",
"}",
"return",
"conn",
".",
"put",
"(",
"cls",
".",
"element_path",
"(",
"id",
")",
",",
"data",
")"
] |
Cancels command denoted by this id
Args:
`id`: command id
|
[
"Cancels",
"command",
"denoted",
"by",
"this",
"id"
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/commands.py#L181-L190
|
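
A call sketch for cancel_id above; concrete subclasses in the same module, such as HiveCommand, inherit it from Command. The command id is a placeholder:

from qds_sdk.qubole import Qubole
from qds_sdk.commands import HiveCommand

Qubole.configure(api_token='YOUR_API_TOKEN')  # placeholder
# Sends {"status": "kill"} via PUT to the command's element path.
HiveCommand.cancel_id(98765)
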
18,261
|
qubole/qds-sdk-py
|
qds_sdk/commands.py
|
Command.get_log_id
|
def get_log_id(cls, id):
"""
Fetches log for the command represented by this id
Args:
`id`: command id
"""
conn = Qubole.agent()
r = conn.get_raw(cls.element_path(id) + "/logs")
return r.text
|
python
|
def get_log_id(cls, id):
"""
Fetches log for the command represented by this id
Args:
`id`: command id
"""
conn = Qubole.agent()
r = conn.get_raw(cls.element_path(id) + "/logs")
return r.text
|
[
"def",
"get_log_id",
"(",
"cls",
",",
"id",
")",
":",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"r",
"=",
"conn",
".",
"get_raw",
"(",
"cls",
".",
"element_path",
"(",
"id",
")",
"+",
"\"/logs\"",
")",
"return",
"r",
".",
"text"
] |
Fetches log for the command represented by this id
Args:
`id`: command id
|
[
"Fetches",
"log",
"for",
"the",
"command",
"represented",
"by",
"this",
"id"
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/commands.py#L199-L208
|
18,262
|
qubole/qds-sdk-py
|
qds_sdk/commands.py
|
Command.get_log
|
def get_log(self):
"""
Fetches log for the command represented by this object
Returns:
The log as a string
"""
log_path = self.meta_data['logs_resource']
conn = Qubole.agent()
r = conn.get_raw(log_path)
return r.text
|
python
|
def get_log(self):
"""
Fetches log for the command represented by this object
Returns:
The log as a string
"""
log_path = self.meta_data['logs_resource']
conn = Qubole.agent()
r = conn.get_raw(log_path)
return r.text
|
[
"def",
"get_log",
"(",
"self",
")",
":",
"log_path",
"=",
"self",
".",
"meta_data",
"[",
"'logs_resource'",
"]",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"r",
"=",
"conn",
".",
"get_raw",
"(",
"log_path",
")",
"return",
"r",
".",
"text"
] |
Fetches log for the command represented by this object
Returns:
The log as a string
|
[
"Fetches",
"log",
"for",
"the",
"command",
"represented",
"by",
"this",
"object"
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/commands.py#L210-L220
|
18,263
|
qubole/qds-sdk-py
|
qds_sdk/commands.py
|
Command.get_results
|
def get_results(self, fp=sys.stdout, inline=True, delim=None, fetch=True, qlog=None, arguments=[]):
"""
Fetches the result for the command represented by this object
get_results will retrieve results of the command and write to stdout by default.
Optionally one can write to a filestream specified in `fp`. The `inline` argument
decides whether the result can be returned as a CRLF separated string. In cases where
the results are greater than 20MB, get_results will attempt to read from s3 and write
to fp. The retrieval of results from s3 can be turned off by the `fetch` argument
Args:
`fp`: a file object to write the results to directly
`inline`: whether or not results are returned inline as CRLF separated string
`fetch`: True to fetch the result even if it is greater than 20MB, False to
only get the result location on s3
"""
result_path = self.meta_data['results_resource']
conn = Qubole.agent()
include_header = "false"
if len(arguments) == 1:
include_header = arguments.pop(0)
if include_header not in ('true', 'false'):
raise ParseError("incude_header can be either true or false")
r = conn.get(result_path, {'inline': inline, 'include_headers': include_header})
if r.get('inline'):
raw_results = r['results']
encoded_results = raw_results.encode('utf8')
if sys.version_info < (3, 0, 0):
fp.write(encoded_results)
else:
import io
if isinstance(fp, io.TextIOBase):
if hasattr(fp, 'buffer'):
fp.buffer.write(encoded_results)
else:
fp.write(raw_results)
elif isinstance(fp, io.BufferedIOBase) or isinstance(fp, io.RawIOBase):
fp.write(encoded_results)
else:
# Can this happen? Don't know what's the right thing to do in this case.
pass
else:
if fetch:
storage_credentials = conn.get(Account.credentials_rest_entity_path)
if storage_credentials['region_endpoint'] is not None:
boto_conn = boto.connect_s3(aws_access_key_id=storage_credentials['storage_access_key'],
aws_secret_access_key=storage_credentials['storage_secret_key'],
security_token = storage_credentials['session_token'],
host = storage_credentials['region_endpoint'])
else:
boto_conn = boto.connect_s3(aws_access_key_id=storage_credentials['storage_access_key'],
aws_secret_access_key=storage_credentials['storage_secret_key'],
security_token=storage_credentials['session_token'])
log.info("Starting download from result locations: [%s]" % ",".join(r['result_location']))
#fetch latest value of num_result_dir
num_result_dir = Command.find(self.id).num_result_dir
                # If column/header names cannot be fetched, then use include_header as true
if include_header.lower() == "true" and qlog is not None:
write_headers(qlog, fp)
for s3_path in r['result_location']:
# In Python 3,
# If the delim is None, fp should be in binary mode because
# boto expects it to be.
# If the delim is not None, then both text and binary modes
# work.
_download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=delim)
else:
fp.write(",".join(r['result_location']))
|
python
|
def get_results(self, fp=sys.stdout, inline=True, delim=None, fetch=True, qlog=None, arguments=[]):
"""
Fetches the result for the command represented by this object
get_results will retrieve results of the command and write to stdout by default.
Optionally one can write to a filestream specified in `fp`. The `inline` argument
decides whether the result can be returned as a CRLF separated string. In cases where
the results are greater than 20MB, get_results will attempt to read from s3 and write
to fp. The retrieval of results from s3 can be turned off by the `fetch` argument
Args:
`fp`: a file object to write the results to directly
`inline`: whether or not results are returned inline as CRLF separated string
`fetch`: True to fetch the result even if it is greater than 20MB, False to
only get the result location on s3
"""
result_path = self.meta_data['results_resource']
conn = Qubole.agent()
include_header = "false"
if len(arguments) == 1:
include_header = arguments.pop(0)
if include_header not in ('true', 'false'):
raise ParseError("incude_header can be either true or false")
r = conn.get(result_path, {'inline': inline, 'include_headers': include_header})
if r.get('inline'):
raw_results = r['results']
encoded_results = raw_results.encode('utf8')
if sys.version_info < (3, 0, 0):
fp.write(encoded_results)
else:
import io
if isinstance(fp, io.TextIOBase):
if hasattr(fp, 'buffer'):
fp.buffer.write(encoded_results)
else:
fp.write(raw_results)
elif isinstance(fp, io.BufferedIOBase) or isinstance(fp, io.RawIOBase):
fp.write(encoded_results)
else:
# Can this happen? Don't know what's the right thing to do in this case.
pass
else:
if fetch:
storage_credentials = conn.get(Account.credentials_rest_entity_path)
if storage_credentials['region_endpoint'] is not None:
boto_conn = boto.connect_s3(aws_access_key_id=storage_credentials['storage_access_key'],
aws_secret_access_key=storage_credentials['storage_secret_key'],
security_token = storage_credentials['session_token'],
host = storage_credentials['region_endpoint'])
else:
boto_conn = boto.connect_s3(aws_access_key_id=storage_credentials['storage_access_key'],
aws_secret_access_key=storage_credentials['storage_secret_key'],
security_token=storage_credentials['session_token'])
log.info("Starting download from result locations: [%s]" % ",".join(r['result_location']))
#fetch latest value of num_result_dir
num_result_dir = Command.find(self.id).num_result_dir
# If column/header names are not able to fetch then use include header as true
if include_header.lower() == "true" and qlog is not None:
write_headers(qlog, fp)
for s3_path in r['result_location']:
# In Python 3,
# If the delim is None, fp should be in binary mode because
# boto expects it to be.
# If the delim is not None, then both text and binary modes
# work.
_download_to_local(boto_conn, s3_path, fp, num_result_dir, delim=delim)
else:
fp.write(",".join(r['result_location']))
|
[
"def",
"get_results",
"(",
"self",
",",
"fp",
"=",
"sys",
".",
"stdout",
",",
"inline",
"=",
"True",
",",
"delim",
"=",
"None",
",",
"fetch",
"=",
"True",
",",
"qlog",
"=",
"None",
",",
"arguments",
"=",
"[",
"]",
")",
":",
"result_path",
"=",
"self",
".",
"meta_data",
"[",
"'results_resource'",
"]",
"conn",
"=",
"Qubole",
".",
"agent",
"(",
")",
"include_header",
"=",
"\"false\"",
"if",
"len",
"(",
"arguments",
")",
"==",
"1",
":",
"include_header",
"=",
"arguments",
".",
"pop",
"(",
"0",
")",
"if",
"include_header",
"not",
"in",
"(",
"'true'",
",",
"'false'",
")",
":",
"raise",
"ParseError",
"(",
"\"incude_header can be either true or false\"",
")",
"r",
"=",
"conn",
".",
"get",
"(",
"result_path",
",",
"{",
"'inline'",
":",
"inline",
",",
"'include_headers'",
":",
"include_header",
"}",
")",
"if",
"r",
".",
"get",
"(",
"'inline'",
")",
":",
"raw_results",
"=",
"r",
"[",
"'results'",
"]",
"encoded_results",
"=",
"raw_results",
".",
"encode",
"(",
"'utf8'",
")",
"if",
"sys",
".",
"version_info",
"<",
"(",
"3",
",",
"0",
",",
"0",
")",
":",
"fp",
".",
"write",
"(",
"encoded_results",
")",
"else",
":",
"import",
"io",
"if",
"isinstance",
"(",
"fp",
",",
"io",
".",
"TextIOBase",
")",
":",
"if",
"hasattr",
"(",
"fp",
",",
"'buffer'",
")",
":",
"fp",
".",
"buffer",
".",
"write",
"(",
"encoded_results",
")",
"else",
":",
"fp",
".",
"write",
"(",
"raw_results",
")",
"elif",
"isinstance",
"(",
"fp",
",",
"io",
".",
"BufferedIOBase",
")",
"or",
"isinstance",
"(",
"fp",
",",
"io",
".",
"RawIOBase",
")",
":",
"fp",
".",
"write",
"(",
"encoded_results",
")",
"else",
":",
"# Can this happen? Don't know what's the right thing to do in this case.",
"pass",
"else",
":",
"if",
"fetch",
":",
"storage_credentials",
"=",
"conn",
".",
"get",
"(",
"Account",
".",
"credentials_rest_entity_path",
")",
"if",
"storage_credentials",
"[",
"'region_endpoint'",
"]",
"is",
"not",
"None",
":",
"boto_conn",
"=",
"boto",
".",
"connect_s3",
"(",
"aws_access_key_id",
"=",
"storage_credentials",
"[",
"'storage_access_key'",
"]",
",",
"aws_secret_access_key",
"=",
"storage_credentials",
"[",
"'storage_secret_key'",
"]",
",",
"security_token",
"=",
"storage_credentials",
"[",
"'session_token'",
"]",
",",
"host",
"=",
"storage_credentials",
"[",
"'region_endpoint'",
"]",
")",
"else",
":",
"boto_conn",
"=",
"boto",
".",
"connect_s3",
"(",
"aws_access_key_id",
"=",
"storage_credentials",
"[",
"'storage_access_key'",
"]",
",",
"aws_secret_access_key",
"=",
"storage_credentials",
"[",
"'storage_secret_key'",
"]",
",",
"security_token",
"=",
"storage_credentials",
"[",
"'session_token'",
"]",
")",
"log",
".",
"info",
"(",
"\"Starting download from result locations: [%s]\"",
"%",
"\",\"",
".",
"join",
"(",
"r",
"[",
"'result_location'",
"]",
")",
")",
"#fetch latest value of num_result_dir",
"num_result_dir",
"=",
"Command",
".",
"find",
"(",
"self",
".",
"id",
")",
".",
"num_result_dir",
"# If column/header names are not able to fetch then use include header as true",
"if",
"include_header",
".",
"lower",
"(",
")",
"==",
"\"true\"",
"and",
"qlog",
"is",
"not",
"None",
":",
"write_headers",
"(",
"qlog",
",",
"fp",
")",
"for",
"s3_path",
"in",
"r",
"[",
"'result_location'",
"]",
":",
"# In Python 3,",
"# If the delim is None, fp should be in binary mode because",
"# boto expects it to be.",
"# If the delim is not None, then both text and binary modes",
"# work.",
"_download_to_local",
"(",
"boto_conn",
",",
"s3_path",
",",
"fp",
",",
"num_result_dir",
",",
"delim",
"=",
"delim",
")",
"else",
":",
"fp",
".",
"write",
"(",
"\",\"",
".",
"join",
"(",
"r",
"[",
"'result_location'",
"]",
")",
")"
] |
Fetches the result for the command represented by this object
get_results will retrieve results of the command and write to stdout by default.
Optionally one can write to a filestream specified in `fp`. The `inline` argument
decides whether the result can be returned as a CRLF separated string. In cases where
the results are greater than 20MB, get_results will attempt to read from s3 and write
to fp. The retrieval of results from s3 can be turned off by the `fetch` argument
Args:
`fp`: a file object to write the results to directly
`inline`: whether or not results are returned inline as CRLF separated string
`fetch`: True to fetch the result even if it is greater than 20MB, False to
only get the result location on s3
|
[
"Fetches",
"the",
"result",
"for",
"the",
"command",
"represented",
"by",
"this",
"object"
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/commands.py#L257-L333
|
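A minimal usage sketch for `get_results` (the `HiveCommand.run` call, API token, and file name are illustrative assumptions, not taken from the entry above):

from qds_sdk.qubole import Qubole
from qds_sdk.commands import HiveCommand

Qubole.configure(api_token="YOUR_API_TOKEN")  # placeholder token

# Run a query, then stream its results to a local file. Binary mode is
# used because boto expects it when delim is None (see the comment above).
cmd = HiveCommand.run(query="SELECT 1")
with open("results.tsv", "wb") as fp:
    cmd.get_results(fp=fp, inline=True, fetch=True)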
18,264
|
qubole/qds-sdk-py
|
qds_sdk/util.py
|
pluralize
|
def pluralize(singular):
"""Convert singular word to its plural form.
Args:
singular: A word in its singular form.
Returns:
The word in its plural form.
"""
if singular in UNCOUNTABLES:
return singular
for i in IRREGULAR:
if i[0] == singular:
return i[1]
for i in PLURALIZE_PATTERNS:
if re.search(i[0], singular):
return re.sub(i[0], i[1], singular)
|
python
|
def pluralize(singular):
"""Convert singular word to its plural form.
Args:
singular: A word in its singular form.
Returns:
The word in its plural form.
"""
if singular in UNCOUNTABLES:
return singular
for i in IRREGULAR:
if i[0] == singular:
return i[1]
for i in PLURALIZE_PATTERNS:
if re.search(i[0], singular):
return re.sub(i[0], i[1], singular)
|
[
"def",
"pluralize",
"(",
"singular",
")",
":",
"if",
"singular",
"in",
"UNCOUNTABLES",
":",
"return",
"singular",
"for",
"i",
"in",
"IRREGULAR",
":",
"if",
"i",
"[",
"0",
"]",
"==",
"singular",
":",
"return",
"i",
"[",
"1",
"]",
"for",
"i",
"in",
"PLURALIZE_PATTERNS",
":",
"if",
"re",
".",
"search",
"(",
"i",
"[",
"0",
"]",
",",
"singular",
")",
":",
"return",
"re",
".",
"sub",
"(",
"i",
"[",
"0",
"]",
",",
"i",
"[",
"1",
"]",
",",
"singular",
")"
] |
Convert singular word to its plural form.
Args:
singular: A word in its singular form.
Returns:
The word in its plural form.
|
[
"Convert",
"singular",
"word",
"to",
"its",
"plural",
"form",
"."
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/util.py#L85-L101
|
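A quick sketch of the expected behavior (the example words assume typical entries in the lookup tables; note that `pluralize`, unlike `singularize` below, has no trailing `return singular`, so a word matching nothing in `PLURALIZE_PATTERNS` yields `None`):

from qds_sdk.util import pluralize

print(pluralize("query"))   # "queries", assuming a y -> ies pattern
print(pluralize("sheep"))   # "sheep" if listed in UNCOUNTABLES
print(pluralize("person"))  # "people" if present in IRREGULAR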
18,265
|
qubole/qds-sdk-py
|
qds_sdk/util.py
|
singularize
|
def singularize(plural):
"""Convert plural word to its singular form.
Args:
plural: A word in its plural form.
Returns:
The word in its singular form.
"""
if plural in UNCOUNTABLES:
return plural
for i in IRREGULAR:
if i[1] == plural:
return i[0]
for i in SINGULARIZE_PATTERNS:
if re.search(i[0], plural):
return re.sub(i[0], i[1], plural)
return plural
|
python
|
def singularize(plural):
"""Convert plural word to its singular form.
Args:
plural: A word in its plural form.
Returns:
The word in its singular form.
"""
if plural in UNCOUNTABLES:
return plural
for i in IRREGULAR:
if i[1] == plural:
return i[0]
for i in SINGULARIZE_PATTERNS:
if re.search(i[0], plural):
return re.sub(i[0], i[1], plural)
return plural
|
[
"def",
"singularize",
"(",
"plural",
")",
":",
"if",
"plural",
"in",
"UNCOUNTABLES",
":",
"return",
"plural",
"for",
"i",
"in",
"IRREGULAR",
":",
"if",
"i",
"[",
"1",
"]",
"==",
"plural",
":",
"return",
"i",
"[",
"0",
"]",
"for",
"i",
"in",
"SINGULARIZE_PATTERNS",
":",
"if",
"re",
".",
"search",
"(",
"i",
"[",
"0",
"]",
",",
"plural",
")",
":",
"return",
"re",
".",
"sub",
"(",
"i",
"[",
"0",
"]",
",",
"i",
"[",
"1",
"]",
",",
"plural",
")",
"return",
"plural"
] |
Convert plural word to its singular form.
Args:
plural: A word in its plural form.
Returns:
The word in its singular form.
|
[
"Convert",
"plural",
"word",
"to",
"its",
"singular",
"form",
"."
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/util.py#L104-L120
|
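The inverse operation, sketched under the same assumptions about the pattern tables; here an unmatched word falls through to the final `return plural`:

from qds_sdk.util import singularize

print(singularize("queries"))  # "query", assuming an ies -> y pattern
print(singularize("people"))   # "person" if present in IRREGULAR
print(singularize("qubole"))   # unmatched words come back unchanged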
18,266
|
qubole/qds-sdk-py
|
qds_sdk/util.py
|
camelize
|
def camelize(word):
"""Convert a word from lower_with_underscores to CamelCase.
Args:
word: The string to convert.
Returns:
The modified string.
"""
return ''.join(w[0].upper() + w[1:]
for w in re.sub('[^A-Z^a-z^0-9^:]+', ' ', word).split(' '))
|
python
|
def camelize(word):
"""Convert a word from lower_with_underscores to CamelCase.
Args:
word: The string to convert.
Returns:
The modified string.
"""
return ''.join(w[0].upper() + w[1:]
for w in re.sub('[^A-Z^a-z^0-9^:]+', ' ', word).split(' '))
|
[
"def",
"camelize",
"(",
"word",
")",
":",
"return",
"''",
".",
"join",
"(",
"w",
"[",
"0",
"]",
".",
"upper",
"(",
")",
"+",
"w",
"[",
"1",
":",
"]",
"for",
"w",
"in",
"re",
".",
"sub",
"(",
"'[^A-Z^a-z^0-9^:]+'",
",",
"' '",
",",
"word",
")",
".",
"split",
"(",
"' '",
")",
")"
] |
Convert a word from lower_with_underscores to CamelCase.
Args:
word: The string to convert.
Returns:
The modified string.
|
[
"Convert",
"a",
"word",
"from",
"lower_with_underscores",
"to",
"CamelCase",
"."
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/util.py#L123-L132
|
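The regular expression collapses any run of characters outside `A-Z`, `a-z`, `0-9`, and `:` into a single space, and each resulting piece gets its first letter upper-cased. For example:

from qds_sdk.util import camelize

print(camelize("hive_command"))    # "HiveCommand"
print(camelize("presto-command"))  # "PrestoCommand"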
18,267
|
qubole/qds-sdk-py
|
qds_sdk/util.py
|
_make_minimal
|
def _make_minimal(dictionary):
"""
This function removes all the keys whose value is either None or an empty
dictionary.
"""
new_dict = {}
for key, value in dictionary.items():
if value is not None:
if isinstance(value, dict):
new_value = _make_minimal(value)
if new_value:
new_dict[key] = new_value
else:
new_dict[key] = value
return new_dict
|
python
|
def _make_minimal(dictionary):
"""
This function removes all the keys whose value is either None or an empty
dictionary.
"""
new_dict = {}
for key, value in dictionary.items():
if value is not None:
if isinstance(value, dict):
new_value = _make_minimal(value)
if new_value:
new_dict[key] = new_value
else:
new_dict[key] = value
return new_dict
|
[
"def",
"_make_minimal",
"(",
"dictionary",
")",
":",
"new_dict",
"=",
"{",
"}",
"for",
"key",
",",
"value",
"in",
"dictionary",
".",
"items",
"(",
")",
":",
"if",
"value",
"is",
"not",
"None",
":",
"if",
"isinstance",
"(",
"value",
",",
"dict",
")",
":",
"new_value",
"=",
"_make_minimal",
"(",
"value",
")",
"if",
"new_value",
":",
"new_dict",
"[",
"key",
"]",
"=",
"new_value",
"else",
":",
"new_dict",
"[",
"key",
"]",
"=",
"value",
"return",
"new_dict"
] |
This function removes all the keys whose value is either None or an empty
dictionary.
|
[
"This",
"function",
"removes",
"all",
"the",
"keys",
"whose",
"value",
"is",
"either",
"None",
"or",
"an",
"empty",
"dictionary",
"."
] |
77210fb64e5a7d567aedeea3b742a1d872fd0e5e
|
https://github.com/qubole/qds-sdk-py/blob/77210fb64e5a7d567aedeea3b742a1d872fd0e5e/qds_sdk/util.py#L146-L160
|
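A short illustration with made-up data; `None` values and dictionaries that become empty after recursion are pruned, while falsy-but-meaningful values such as `0` or `""` survive the `is not None` check:

from qds_sdk.util import _make_minimal

payload = {
    "label": "etl",
    "retries": 0,             # kept: 0 is not None
    "notify": None,           # dropped
    "cluster": {"id": None},  # recurses to {}, which is falsy, so dropped
}
print(_make_minimal(payload))  # {'label': 'etl', 'retries': 0}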
18,268
|
iopipe/iopipe-python
|
iopipe/contrib/profiler/request.py
|
upload_profiler_report
|
def upload_profiler_report(url, filename, config):
"""
Uploads a profiler report to IOpipe
:param url: The signed URL
:param filename: The profiler report file
:param config: The IOpipe config
"""
try:
logger.debug("Uploading profiler report to IOpipe")
with open(filename, "rb") as data:
response = requests.put(url, data=data, timeout=config["network_timeout"])
response.raise_for_status()
except Exception as e:
logger.debug("Error while uploading profiler report: %s", e)
if hasattr(e, "response"):
logger.debug(e.response.content)
else:
logger.debug("Profiler report uploaded successfully")
finally:
if os.path.isfile(filename):
os.remove(filename)
|
python
|
def upload_profiler_report(url, filename, config):
"""
Uploads a profiler report to IOpipe
:param url: The signed URL
:param filename: The profiler report file
:param config: The IOpipe config
"""
try:
logger.debug("Uploading profiler report to IOpipe")
with open(filename, "rb") as data:
response = requests.put(url, data=data, timeout=config["network_timeout"])
response.raise_for_status()
except Exception as e:
logger.debug("Error while uploading profiler report: %s", e)
if hasattr(e, "response"):
logger.debug(e.response.content)
else:
logger.debug("Profiler report uploaded successfully")
finally:
if os.path.isfile(filename):
os.remove(filename)
|
[
"def",
"upload_profiler_report",
"(",
"url",
",",
"filename",
",",
"config",
")",
":",
"try",
":",
"logger",
".",
"debug",
"(",
"\"Uploading profiler report to IOpipe\"",
")",
"with",
"open",
"(",
"filename",
",",
"\"rb\"",
")",
"as",
"data",
":",
"response",
"=",
"requests",
".",
"put",
"(",
"url",
",",
"data",
"=",
"data",
",",
"timeout",
"=",
"config",
"[",
"\"network_timeout\"",
"]",
")",
"response",
".",
"raise_for_status",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Error while uploading profiler report: %s\"",
",",
"e",
")",
"if",
"hasattr",
"(",
"e",
",",
"\"response\"",
")",
":",
"logger",
".",
"debug",
"(",
"e",
".",
"response",
".",
"content",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Profiler report uploaded successfully\"",
")",
"finally",
":",
"if",
"os",
".",
"path",
".",
"isfile",
"(",
"filename",
")",
":",
"os",
".",
"remove",
"(",
"filename",
")"
] |
Uploads a profiler report to IOpipe
:param url: The signed URL
:param filename: The profiler report file
:param config: The IOpipe config
|
[
"Uploads",
"a",
"profiler",
"report",
"to",
"IOpipe"
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/contrib/profiler/request.py#L12-L33
|
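A hedged sketch of a call site; the signed URL and report path are placeholders, and the `config` dict only needs the `network_timeout` key this function actually reads:

config = {"network_timeout": 5}  # seconds, passed straight through to requests.put

upload_profiler_report(
    "https://example.com/signed-upload-url",  # placeholder signed URL
    "/tmp/profile_report.json",               # placeholder file; removed in the finally block
    config,
)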
18,269
|
iopipe/iopipe-python
|
iopipe/mock_system.py
|
read_pid_stat
|
def read_pid_stat(pid):
"""
Mocks read_pid_stat as this is a Linux-specific operation.
"""
return {
"utime": random.randint(0, 999999999),
"stime": random.randint(0, 999999999),
"cutime": random.randint(0, 999999999),
"cstime": random.randint(0, 999999999),
}
|
python
|
def read_pid_stat(pid):
"""
Mocks read_pid_stat as this is a Linux-specific operation.
"""
return {
"utime": random.randint(0, 999999999),
"stime": random.randint(0, 999999999),
"cutime": random.randint(0, 999999999),
"cstime": random.randint(0, 999999999),
}
|
[
"def",
"read_pid_stat",
"(",
"pid",
")",
":",
"return",
"{",
"\"utime\"",
":",
"random",
".",
"randint",
"(",
"0",
",",
"999999999",
")",
",",
"\"stime\"",
":",
"random",
".",
"randint",
"(",
"0",
",",
"999999999",
")",
",",
"\"cutime\"",
":",
"random",
".",
"randint",
"(",
"0",
",",
"999999999",
")",
",",
"\"cstime\"",
":",
"random",
".",
"randint",
"(",
"0",
",",
"999999999",
")",
",",
"}"
] |
Mocks read_pid_stat as this is a Linux-specific operation.
|
[
"Mocks",
"read_pid_stat",
"as",
"this",
"is",
"a",
"Linux",
"-",
"specific",
"operation",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/mock_system.py#L40-L49
|
18,270
|
iopipe/iopipe-python
|
iopipe/mock_system.py
|
read_stat
|
def read_stat():
"""
Mocks read_stat as this is a Linux-specific operation.
"""
return [
{
"times": {
"user": random.randint(0, 999999999),
"nice": random.randint(0, 999999999),
"sys": random.randint(0, 999999999),
"idle": random.randint(0, 999999999),
"irq": random.randint(0, 999999999),
}
}
]
|
python
|
def read_stat():
"""
Mocks read_stat as this is a Linux-specific operation.
"""
return [
{
"times": {
"user": random.randint(0, 999999999),
"nice": random.randint(0, 999999999),
"sys": random.randint(0, 999999999),
"idle": random.randint(0, 999999999),
"irq": random.randint(0, 999999999),
}
}
]
|
[
"def",
"read_stat",
"(",
")",
":",
"return",
"[",
"{",
"\"times\"",
":",
"{",
"\"user\"",
":",
"random",
".",
"randint",
"(",
"0",
",",
"999999999",
")",
",",
"\"nice\"",
":",
"random",
".",
"randint",
"(",
"0",
",",
"999999999",
")",
",",
"\"sys\"",
":",
"random",
".",
"randint",
"(",
"0",
",",
"999999999",
")",
",",
"\"idle\"",
":",
"random",
".",
"randint",
"(",
"0",
",",
"999999999",
")",
",",
"\"irq\"",
":",
"random",
".",
"randint",
"(",
"0",
",",
"999999999",
")",
",",
"}",
"}",
"]"
] |
Mocks read_stat as this is a Linux-specific operation.
|
[
"Mocks",
"read_stat",
"as",
"this",
"is",
"a",
"Linux",
"-",
"specific",
"operation",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/mock_system.py#L63-L77
|
18,271
|
iopipe/iopipe-python
|
iopipe/agent.py
|
IOpipeCore.load_plugins
|
def load_plugins(self, plugins):
"""
Loads plugins that match the `Plugin` interface and are instantiated.
:param plugins: A list of plugin instances.
"""
def instantiate(plugin):
return plugin() if inspect.isclass(plugin) else plugin
loaded_plugins = []
plugins_seen = []
# Iterate over plugins in reverse to permit users to override default
# plugin config
for plugin in reversed(plugins):
if not is_plugin(plugin) or plugin.name in plugins_seen:
continue
# Build the plugins list in reverse to restore original order
loaded_plugins.insert(0, instantiate(plugin))
plugins_seen.append(plugin.name)
return loaded_plugins
|
python
|
def load_plugins(self, plugins):
"""
Loads plugins that match the `Plugin` interface and are instantiated.
:param plugins: A list of plugin instances.
"""
def instantiate(plugin):
return plugin() if inspect.isclass(plugin) else plugin
loaded_plugins = []
plugins_seen = []
# Iterate over plugins in reverse to permit users to override default
# plugin config
for plugin in reversed(plugins):
if not is_plugin(plugin) or plugin.name in plugins_seen:
continue
# Build the plugins list in reverse to restore original order
loaded_plugins.insert(0, instantiate(plugin))
plugins_seen.append(plugin.name)
return loaded_plugins
|
[
"def",
"load_plugins",
"(",
"self",
",",
"plugins",
")",
":",
"def",
"instantiate",
"(",
"plugin",
")",
":",
"return",
"plugin",
"(",
")",
"if",
"inspect",
".",
"isclass",
"(",
"plugin",
")",
"else",
"plugin",
"loaded_plugins",
"=",
"[",
"]",
"plugins_seen",
"=",
"[",
"]",
"# Iterate over plugins in reverse to permit users to override default",
"# plugin config",
"for",
"plugin",
"in",
"reversed",
"(",
"plugins",
")",
":",
"if",
"not",
"is_plugin",
"(",
"plugin",
")",
"or",
"plugin",
".",
"name",
"in",
"plugins_seen",
":",
"continue",
"# Build the plugins list in reverse to restore original order",
"loaded_plugins",
".",
"insert",
"(",
"0",
",",
"instantiate",
"(",
"plugin",
")",
")",
"plugins_seen",
".",
"append",
"(",
"plugin",
".",
"name",
")",
"return",
"loaded_plugins"
] |
Loads plugins that match the `Plugin` interface and are instantiated.
:param plugins: A list of plugin instances.
|
[
"Loads",
"plugins",
"that",
"match",
"the",
"Plugin",
"interface",
"and",
"are",
"instantiated",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/agent.py#L198-L220
|
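Because the list is walked in reverse and duplicate names are skipped, a plugin placed later in the list overrides an earlier one with the same `name`, while the original order of distinct plugins is preserved. A sketch assuming the contrib `TracePlugin` and `LoggerPlugin` and an instantiated agent:

from iopipe.contrib.trace import TracePlugin
from iopipe.contrib.logger import LoggerPlugin

# iopipe: an IOpipeCore instance (assumption). The second TracePlugin wins
# because it is seen first in the reversed walk.
loaded = iopipe.load_plugins(
    [TracePlugin(), LoggerPlugin(), TracePlugin(auto_http=False)]
)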
18,272
|
iopipe/iopipe-python
|
iopipe/agent.py
|
IOpipeCore.run_hooks
|
def run_hooks(self, name, event=None, context=None):
"""
Runs plugin hooks for each registered plugin.
"""
hooks = {
"pre:setup": lambda p: p.pre_setup(self),
"post:setup": lambda p: p.post_setup(self),
"pre:invoke": lambda p: p.pre_invoke(event, context),
"post:invoke": lambda p: p.post_invoke(event, context),
"pre:report": lambda p: p.pre_report(self.report),
"post:report": lambda p: p.post_report(self.report),
}
if name in hooks:
for p in self.plugins:
if p.enabled:
try:
hooks[name](p)
except Exception as e:
logger.error(
"IOpipe plugin %s hook raised error" % (name, str(e))
)
logger.exception(e)
|
python
|
def run_hooks(self, name, event=None, context=None):
"""
Runs plugin hooks for each registered plugin.
"""
hooks = {
"pre:setup": lambda p: p.pre_setup(self),
"post:setup": lambda p: p.post_setup(self),
"pre:invoke": lambda p: p.pre_invoke(event, context),
"post:invoke": lambda p: p.post_invoke(event, context),
"pre:report": lambda p: p.pre_report(self.report),
"post:report": lambda p: p.post_report(self.report),
}
if name in hooks:
for p in self.plugins:
if p.enabled:
try:
hooks[name](p)
except Exception as e:
logger.error(
"IOpipe plugin %s hook raised error" % (name, str(e))
)
logger.exception(e)
|
[
"def",
"run_hooks",
"(",
"self",
",",
"name",
",",
"event",
"=",
"None",
",",
"context",
"=",
"None",
")",
":",
"hooks",
"=",
"{",
"\"pre:setup\"",
":",
"lambda",
"p",
":",
"p",
".",
"pre_setup",
"(",
"self",
")",
",",
"\"post:setup\"",
":",
"lambda",
"p",
":",
"p",
".",
"post_setup",
"(",
"self",
")",
",",
"\"pre:invoke\"",
":",
"lambda",
"p",
":",
"p",
".",
"pre_invoke",
"(",
"event",
",",
"context",
")",
",",
"\"post:invoke\"",
":",
"lambda",
"p",
":",
"p",
".",
"post_invoke",
"(",
"event",
",",
"context",
")",
",",
"\"pre:report\"",
":",
"lambda",
"p",
":",
"p",
".",
"pre_report",
"(",
"self",
".",
"report",
")",
",",
"\"post:report\"",
":",
"lambda",
"p",
":",
"p",
".",
"post_report",
"(",
"self",
".",
"report",
")",
",",
"}",
"if",
"name",
"in",
"hooks",
":",
"for",
"p",
"in",
"self",
".",
"plugins",
":",
"if",
"p",
".",
"enabled",
":",
"try",
":",
"hooks",
"[",
"name",
"]",
"(",
"p",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"error",
"(",
"\"IOpipe plugin %s hook raised error\"",
"%",
"(",
"name",
",",
"str",
"(",
"e",
")",
")",
")",
"logger",
".",
"exception",
"(",
"e",
")"
] |
Runs plugin hooks for each registered plugin.
|
[
"Runs",
"plugin",
"hooks",
"for",
"each",
"registered",
"plugin",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/agent.py#L222-L244
|
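Unknown hook names are silently ignored, so callers can invoke hooks unconditionally. A sketch of how the agent might drive the invoke lifecycle (`iopipe`, `handler`, `event`, and `context` are placeholders):

iopipe.run_hooks("pre:invoke", event=event, context=context)
try:
    result = handler(event, context)
finally:
    iopipe.run_hooks("post:invoke", event=event, context=context)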
18,273
|
iopipe/iopipe-python
|
iopipe/agent.py
|
IOpipeCore.wait_for_futures
|
def wait_for_futures(self):
"""
Wait for all futures to complete. This should be done at the end of an
        invocation.
"""
[future for future in futures.as_completed(self.futures)]
self.futures = []
|
python
|
def wait_for_futures(self):
"""
Wait for all futures to complete. This should be done at the end of an
        invocation.
"""
[future for future in futures.as_completed(self.futures)]
self.futures = []
|
[
"def",
"wait_for_futures",
"(",
"self",
")",
":",
"[",
"future",
"for",
"future",
"in",
"futures",
".",
"as_completed",
"(",
"self",
".",
"futures",
")",
"]",
"self",
".",
"futures",
"=",
"[",
"]"
] |
Wait for all futures to complete. This should be done at the end of an
invocation.
|
[
"Wait",
"for",
"all",
"futures",
"to",
"complete",
".",
"This",
"should",
"be",
"done",
"at",
"the",
"end",
"of",
"an",
"an",
"invocation",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/agent.py#L260-L266
|
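The list comprehension exists purely for its side effect: iterating `futures.as_completed` blocks until every future has finished. The same pattern in isolation, using `concurrent.futures` directly:

from concurrent.futures import ThreadPoolExecutor, as_completed

pool = ThreadPoolExecutor(max_workers=2)
pending = [pool.submit(pow, 2, n) for n in range(4)]

[f for f in as_completed(pending)]  # block until all four calls complete
pending = []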
18,274
|
iopipe/iopipe-python
|
iopipe/agent.py
|
IOpipeCore.validate_context
|
def validate_context(self, context):
"""
Checks to see if we're working with a valid lambda context object.
:returns: True if valid, False if not
:rtype: bool
"""
return all(
[
hasattr(context, attr)
for attr in [
"aws_request_id",
"function_name",
"function_version",
"get_remaining_time_in_millis",
"invoked_function_arn",
"log_group_name",
"log_stream_name",
"memory_limit_in_mb",
]
]
) and callable(context.get_remaining_time_in_millis)
|
python
|
def validate_context(self, context):
"""
Checks to see if we're working with a valid lambda context object.
:returns: True if valid, False if not
:rtype: bool
"""
return all(
[
hasattr(context, attr)
for attr in [
"aws_request_id",
"function_name",
"function_version",
"get_remaining_time_in_millis",
"invoked_function_arn",
"log_group_name",
"log_stream_name",
"memory_limit_in_mb",
]
]
) and callable(context.get_remaining_time_in_millis)
|
[
"def",
"validate_context",
"(",
"self",
",",
"context",
")",
":",
"return",
"all",
"(",
"[",
"hasattr",
"(",
"context",
",",
"attr",
")",
"for",
"attr",
"in",
"[",
"\"aws_request_id\"",
",",
"\"function_name\"",
",",
"\"function_version\"",
",",
"\"get_remaining_time_in_millis\"",
",",
"\"invoked_function_arn\"",
",",
"\"log_group_name\"",
",",
"\"log_stream_name\"",
",",
"\"memory_limit_in_mb\"",
",",
"]",
"]",
")",
"and",
"callable",
"(",
"context",
".",
"get_remaining_time_in_millis",
")"
] |
Checks to see if we're working with a valid lambda context object.
:returns: True if valid, False if not
:rtype: bool
|
[
"Checks",
"to",
"see",
"if",
"we",
"re",
"working",
"with",
"a",
"valid",
"lambda",
"context",
"object",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/agent.py#L268-L289
|
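A quick check with a stand-in context object; every attribute in the list must be present and `get_remaining_time_in_millis` must be callable for the result to be `True`:

class FakeContext(object):  # hypothetical stand-in for a real Lambda context
    aws_request_id = "req-1"
    function_name = "my-fn"
    function_version = "$LATEST"
    invoked_function_arn = "arn:aws:lambda:us-east-1:0:function:my-fn"
    log_group_name = "lg"
    log_stream_name = "ls"
    memory_limit_in_mb = 128

    def get_remaining_time_in_millis(self):
        return 300000

assert iopipe.validate_context(FakeContext())  # iopipe: an IOpipeCore instance (assumption)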
18,275
|
iopipe/iopipe-python
|
iopipe/contrib/trace/auto_http.py
|
patch_session_send
|
def patch_session_send(context, http_filter):
"""
Monkey patches requests' Session class, if available. Overloads the
send method to add tracing and metrics collection.
"""
if Session is None:
return
def send(self, *args, **kwargs):
id = ensure_utf8(str(uuid.uuid4()))
with context.iopipe.mark(id):
response = original_session_send(self, *args, **kwargs)
trace = context.iopipe.mark.measure(id)
context.iopipe.mark.delete(id)
collect_metrics_for_response(response, context, trace, http_filter)
return response
Session.send = send
|
python
|
def patch_session_send(context, http_filter):
"""
Monkey patches requests' Session class, if available. Overloads the
send method to add tracing and metrics collection.
"""
if Session is None:
return
def send(self, *args, **kwargs):
id = ensure_utf8(str(uuid.uuid4()))
with context.iopipe.mark(id):
response = original_session_send(self, *args, **kwargs)
trace = context.iopipe.mark.measure(id)
context.iopipe.mark.delete(id)
collect_metrics_for_response(response, context, trace, http_filter)
return response
Session.send = send
|
[
"def",
"patch_session_send",
"(",
"context",
",",
"http_filter",
")",
":",
"if",
"Session",
"is",
"None",
":",
"return",
"def",
"send",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"id",
"=",
"ensure_utf8",
"(",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
")",
"with",
"context",
".",
"iopipe",
".",
"mark",
"(",
"id",
")",
":",
"response",
"=",
"original_session_send",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"trace",
"=",
"context",
".",
"iopipe",
".",
"mark",
".",
"measure",
"(",
"id",
")",
"context",
".",
"iopipe",
".",
"mark",
".",
"delete",
"(",
"id",
")",
"collect_metrics_for_response",
"(",
"response",
",",
"context",
",",
"trace",
",",
"http_filter",
")",
"return",
"response",
"Session",
".",
"send",
"=",
"send"
] |
Monkey patches requests' Session class, if available. Overloads the
send method to add tracing and metrics collection.
|
[
"Monkey",
"patches",
"requests",
"Session",
"class",
"if",
"available",
".",
"Overloads",
"the",
"send",
"method",
"to",
"add",
"tracing",
"and",
"metrics",
"collection",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/contrib/trace/auto_http.py#L63-L80
|
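Once the patch is applied (the trace plugin normally does this in its `pre_invoke` hook), every `requests` call in the handler is wrapped in a mark, measured, and reported. A sketch of the effect, with `context` standing in for the wrapped Lambda context:

patch_session_send(context, http_filter=None)

import requests
requests.get("https://example.com")  # now traced via context.iopipe.mark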
18,276
|
iopipe/iopipe-python
|
iopipe/contrib/trace/auto_http.py
|
patch_botocore_session_send
|
def patch_botocore_session_send(context, http_filter):
"""
Monkey patches botocore's vendored requests, if available. Overloads the
Session class' send method to add tracing and metric collection.
"""
if BotocoreSession is None:
return
def send(self, *args, **kwargs):
id = str(uuid.uuid4())
with context.iopipe.mark(id):
response = original_botocore_session_send(self, *args, **kwargs)
trace = context.iopipe.mark.measure(id)
context.iopipe.mark.delete(id)
collect_metrics_for_response(response, context, trace, http_filter)
return response
BotocoreSession.send = send
|
python
|
def patch_botocore_session_send(context, http_filter):
"""
Monkey patches botocore's vendored requests, if available. Overloads the
Session class' send method to add tracing and metric collection.
"""
if BotocoreSession is None:
return
def send(self, *args, **kwargs):
id = str(uuid.uuid4())
with context.iopipe.mark(id):
response = original_botocore_session_send(self, *args, **kwargs)
trace = context.iopipe.mark.measure(id)
context.iopipe.mark.delete(id)
collect_metrics_for_response(response, context, trace, http_filter)
return response
BotocoreSession.send = send
|
[
"def",
"patch_botocore_session_send",
"(",
"context",
",",
"http_filter",
")",
":",
"if",
"BotocoreSession",
"is",
"None",
":",
"return",
"def",
"send",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
":",
"id",
"=",
"str",
"(",
"uuid",
".",
"uuid4",
"(",
")",
")",
"with",
"context",
".",
"iopipe",
".",
"mark",
"(",
"id",
")",
":",
"response",
"=",
"original_botocore_session_send",
"(",
"self",
",",
"*",
"args",
",",
"*",
"*",
"kwargs",
")",
"trace",
"=",
"context",
".",
"iopipe",
".",
"mark",
".",
"measure",
"(",
"id",
")",
"context",
".",
"iopipe",
".",
"mark",
".",
"delete",
"(",
"id",
")",
"collect_metrics_for_response",
"(",
"response",
",",
"context",
",",
"trace",
",",
"http_filter",
")",
"return",
"response",
"BotocoreSession",
".",
"send",
"=",
"send"
] |
Monkey patches botocore's vendored requests, if available. Overloads the
Session class' send method to add tracing and metric collection.
|
[
"Monkey",
"patches",
"botocore",
"s",
"vendored",
"requests",
"if",
"available",
".",
"Overloads",
"the",
"Session",
"class",
"send",
"method",
"to",
"add",
"tracing",
"and",
"metric",
"collection",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/contrib/trace/auto_http.py#L83-L100
|
18,277
|
iopipe/iopipe-python
|
iopipe/contrib/trace/auto_http.py
|
collect_metrics_for_response
|
def collect_metrics_for_response(http_response, context, trace, http_filter):
"""
Collects relevant metrics from a requests Response object and adds them to
the IOpipe context.
"""
http_response = copy.deepcopy(http_response)
if http_filter is not None and callable(http_filter):
http_response = http_filter(http_response)
if http_response is False:
return
request = None
if hasattr(http_response, "request"):
parsed_url = None
if hasattr(http_response.request, "url"):
parsed_url = urlparse(http_response.request.url)
request_headers = []
if hasattr(http_response.request, "headers"):
request_headers = [
{"key": ensure_utf8(k), "string": ensure_utf8(v)}
for k, v in http_response.request.headers.items()
if k.lower() in INCLUDE_HEADERS
]
request = Request(
hash=ensure_utf8(getattr(parsed_url, "fragment", None)),
headers=request_headers,
hostname=ensure_utf8(getattr(parsed_url, "hostname", None)),
method=ensure_utf8(getattr(http_response.request, "method", None)),
path=ensure_utf8(getattr(parsed_url, "path", None)),
# TODO: Determine if this is redundant
pathname=ensure_utf8(getattr(parsed_url, "path", None)),
port=ensure_utf8(getattr(parsed_url, "port", None)),
protocol=ensure_utf8(getattr(parsed_url, "scheme", None)),
query=ensure_utf8(getattr(parsed_url, "query", None)),
url=ensure_utf8(getattr(http_response.request, "url", None)),
)
response_headers = []
if hasattr(http_response, "headers"):
response_headers = [
{"key": ensure_utf8(k), "string": ensure_utf8(v)}
for k, v in http_response.headers.items()
if k.lower() in INCLUDE_HEADERS
]
response = Response(
headers=response_headers,
statusCode=ensure_utf8(getattr(http_response, "status_code", None)),
statusMessage=None,
)
context.iopipe.mark.http_trace(trace, request, response)
|
python
|
def collect_metrics_for_response(http_response, context, trace, http_filter):
"""
Collects relevant metrics from a requests Response object and adds them to
the IOpipe context.
"""
http_response = copy.deepcopy(http_response)
if http_filter is not None and callable(http_filter):
http_response = http_filter(http_response)
if http_response is False:
return
request = None
if hasattr(http_response, "request"):
parsed_url = None
if hasattr(http_response.request, "url"):
parsed_url = urlparse(http_response.request.url)
request_headers = []
if hasattr(http_response.request, "headers"):
request_headers = [
{"key": ensure_utf8(k), "string": ensure_utf8(v)}
for k, v in http_response.request.headers.items()
if k.lower() in INCLUDE_HEADERS
]
request = Request(
hash=ensure_utf8(getattr(parsed_url, "fragment", None)),
headers=request_headers,
hostname=ensure_utf8(getattr(parsed_url, "hostname", None)),
method=ensure_utf8(getattr(http_response.request, "method", None)),
path=ensure_utf8(getattr(parsed_url, "path", None)),
# TODO: Determine if this is redundant
pathname=ensure_utf8(getattr(parsed_url, "path", None)),
port=ensure_utf8(getattr(parsed_url, "port", None)),
protocol=ensure_utf8(getattr(parsed_url, "scheme", None)),
query=ensure_utf8(getattr(parsed_url, "query", None)),
url=ensure_utf8(getattr(http_response.request, "url", None)),
)
response_headers = []
if hasattr(http_response, "headers"):
response_headers = [
{"key": ensure_utf8(k), "string": ensure_utf8(v)}
for k, v in http_response.headers.items()
if k.lower() in INCLUDE_HEADERS
]
response = Response(
headers=response_headers,
statusCode=ensure_utf8(getattr(http_response, "status_code", None)),
statusMessage=None,
)
context.iopipe.mark.http_trace(trace, request, response)
|
[
"def",
"collect_metrics_for_response",
"(",
"http_response",
",",
"context",
",",
"trace",
",",
"http_filter",
")",
":",
"http_response",
"=",
"copy",
".",
"deepcopy",
"(",
"http_response",
")",
"if",
"http_filter",
"is",
"not",
"None",
"and",
"callable",
"(",
"http_filter",
")",
":",
"http_response",
"=",
"http_filter",
"(",
"http_response",
")",
"if",
"http_response",
"is",
"False",
":",
"return",
"request",
"=",
"None",
"if",
"hasattr",
"(",
"http_response",
",",
"\"request\"",
")",
":",
"parsed_url",
"=",
"None",
"if",
"hasattr",
"(",
"http_response",
".",
"request",
",",
"\"url\"",
")",
":",
"parsed_url",
"=",
"urlparse",
"(",
"http_response",
".",
"request",
".",
"url",
")",
"request_headers",
"=",
"[",
"]",
"if",
"hasattr",
"(",
"http_response",
".",
"request",
",",
"\"headers\"",
")",
":",
"request_headers",
"=",
"[",
"{",
"\"key\"",
":",
"ensure_utf8",
"(",
"k",
")",
",",
"\"string\"",
":",
"ensure_utf8",
"(",
"v",
")",
"}",
"for",
"k",
",",
"v",
"in",
"http_response",
".",
"request",
".",
"headers",
".",
"items",
"(",
")",
"if",
"k",
".",
"lower",
"(",
")",
"in",
"INCLUDE_HEADERS",
"]",
"request",
"=",
"Request",
"(",
"hash",
"=",
"ensure_utf8",
"(",
"getattr",
"(",
"parsed_url",
",",
"\"fragment\"",
",",
"None",
")",
")",
",",
"headers",
"=",
"request_headers",
",",
"hostname",
"=",
"ensure_utf8",
"(",
"getattr",
"(",
"parsed_url",
",",
"\"hostname\"",
",",
"None",
")",
")",
",",
"method",
"=",
"ensure_utf8",
"(",
"getattr",
"(",
"http_response",
".",
"request",
",",
"\"method\"",
",",
"None",
")",
")",
",",
"path",
"=",
"ensure_utf8",
"(",
"getattr",
"(",
"parsed_url",
",",
"\"path\"",
",",
"None",
")",
")",
",",
"# TODO: Determine if this is redundant",
"pathname",
"=",
"ensure_utf8",
"(",
"getattr",
"(",
"parsed_url",
",",
"\"path\"",
",",
"None",
")",
")",
",",
"port",
"=",
"ensure_utf8",
"(",
"getattr",
"(",
"parsed_url",
",",
"\"port\"",
",",
"None",
")",
")",
",",
"protocol",
"=",
"ensure_utf8",
"(",
"getattr",
"(",
"parsed_url",
",",
"\"scheme\"",
",",
"None",
")",
")",
",",
"query",
"=",
"ensure_utf8",
"(",
"getattr",
"(",
"parsed_url",
",",
"\"query\"",
",",
"None",
")",
")",
",",
"url",
"=",
"ensure_utf8",
"(",
"getattr",
"(",
"http_response",
".",
"request",
",",
"\"url\"",
",",
"None",
")",
")",
",",
")",
"response_headers",
"=",
"[",
"]",
"if",
"hasattr",
"(",
"http_response",
",",
"\"headers\"",
")",
":",
"response_headers",
"=",
"[",
"{",
"\"key\"",
":",
"ensure_utf8",
"(",
"k",
")",
",",
"\"string\"",
":",
"ensure_utf8",
"(",
"v",
")",
"}",
"for",
"k",
",",
"v",
"in",
"http_response",
".",
"headers",
".",
"items",
"(",
")",
"if",
"k",
".",
"lower",
"(",
")",
"in",
"INCLUDE_HEADERS",
"]",
"response",
"=",
"Response",
"(",
"headers",
"=",
"response_headers",
",",
"statusCode",
"=",
"ensure_utf8",
"(",
"getattr",
"(",
"http_response",
",",
"\"status_code\"",
",",
"None",
")",
")",
",",
"statusMessage",
"=",
"None",
",",
")",
"context",
".",
"iopipe",
".",
"mark",
".",
"http_trace",
"(",
"trace",
",",
"request",
",",
"response",
")"
] |
Collects relevant metrics from a requests Response object and adds them to
the IOpipe context.
|
[
"Collects",
"relevant",
"metrics",
"from",
"a",
"requests",
"Response",
"object",
"and",
"adds",
"them",
"to",
"the",
"IOpipe",
"context",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/contrib/trace/auto_http.py#L125-L178
|
18,278
|
iopipe/iopipe-python
|
iopipe/plugins.py
|
get_plugin_meta
|
def get_plugin_meta(plugins):
"""
Returns meta data about plugins.
:param plugins: A list of plugins.
:type plugins: list
:returns: A list of dicts containing plugin meta data.
:rtype: list
"""
return [
{
"name": p.name,
"version": p.version,
"homepage": p.homepage,
"enabled": p.enabled,
}
for p in plugins
if is_plugin(p)
]
|
python
|
def get_plugin_meta(plugins):
"""
Returns meta data about plugins.
:param plugins: A list of plugins.
:type plugins: list
:returns: A list of dicts containing plugin meta data.
:rtype: list
"""
return [
{
"name": p.name,
"version": p.version,
"homepage": p.homepage,
"enabled": p.enabled,
}
for p in plugins
if is_plugin(p)
]
|
[
"def",
"get_plugin_meta",
"(",
"plugins",
")",
":",
"return",
"[",
"{",
"\"name\"",
":",
"p",
".",
"name",
",",
"\"version\"",
":",
"p",
".",
"version",
",",
"\"homepage\"",
":",
"p",
".",
"homepage",
",",
"\"enabled\"",
":",
"p",
".",
"enabled",
",",
"}",
"for",
"p",
"in",
"plugins",
"if",
"is_plugin",
"(",
"p",
")",
"]"
] |
Returns meta data about plugins.
:param plugins: A list of plugins.
:type plugins: list
:returns: A list of dicts containing plugin meta data.
:rtype: list
|
[
"Returns",
"meta",
"data",
"about",
"plugins",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/plugins.py#L4-L22
|
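A sketch of the returned shape, assuming a single enabled trace plugin is registered on an agent instance `iopipe`:

meta = get_plugin_meta(iopipe.plugins)
# e.g. [{"name": "trace", "version": "1.0.0", "homepage": "...", "enabled": True}]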
18,279
|
iopipe/iopipe-python
|
iopipe/plugins.py
|
is_plugin
|
def is_plugin(plugin):
"""
Returns true if the plugin implements the `Plugin` interface.
:param plugin: The plugin to check.
:returns: True if plugin, False otherwise.
:rtype: bool
"""
try:
return isinstance(plugin, Plugin) or issubclass(plugin, Plugin)
except TypeError:
return False
|
python
|
def is_plugin(plugin):
"""
Returns true if the plugin implements the `Plugin` interface.
:param plugin: The plugin to check.
:returns: True if plugin, False otherwise.
:rtype: bool
"""
try:
return isinstance(plugin, Plugin) or issubclass(plugin, Plugin)
except TypeError:
return False
|
[
"def",
"is_plugin",
"(",
"plugin",
")",
":",
"try",
":",
"return",
"isinstance",
"(",
"plugin",
",",
"Plugin",
")",
"or",
"issubclass",
"(",
"plugin",
",",
"Plugin",
")",
"except",
"TypeError",
":",
"return",
"False"
] |
Returns true if the plugin implements the `Plugin` interface.
:param plugin: The plugin to check.
:returns: True if plugin, False otherwise.
:rtype: bool
|
[
"Returns",
"true",
"if",
"the",
"plugin",
"implements",
"the",
"Plugin",
"interface",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/plugins.py#L25-L36
|
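The `try`/`except TypeError` lets the check accept both classes and instances while rejecting plain values, because `issubclass` raises `TypeError` when its first argument is not a class. Assuming the contrib `TracePlugin` import from the earlier sketch:

is_plugin(TracePlugin)     # True: a Plugin subclass
is_plugin(TracePlugin())   # True: a Plugin instance
is_plugin("not-a-plugin")  # False: issubclass raises TypeError, which is caught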
18,280
|
iopipe/iopipe-python
|
iopipe/plugins.py
|
with_metaclass
|
def with_metaclass(meta, *bases):
"""Python 2 and 3 compatible way to do meta classes"""
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, "temporary_class", (), {})
|
python
|
def with_metaclass(meta, *bases):
"""Python 2 and 3 compatible way to do meta classes"""
class metaclass(meta):
def __new__(cls, name, this_bases, d):
return meta(name, bases, d)
return type.__new__(metaclass, "temporary_class", (), {})
|
[
"def",
"with_metaclass",
"(",
"meta",
",",
"*",
"bases",
")",
":",
"class",
"metaclass",
"(",
"meta",
")",
":",
"def",
"__new__",
"(",
"cls",
",",
"name",
",",
"this_bases",
",",
"d",
")",
":",
"return",
"meta",
"(",
"name",
",",
"bases",
",",
"d",
")",
"return",
"type",
".",
"__new__",
"(",
"metaclass",
",",
"\"temporary_class\"",
",",
"(",
")",
",",
"{",
"}",
")"
] |
Python 2 and 3 compatible way to do meta classes
|
[
"Python",
"2",
"and",
"3",
"compatible",
"way",
"to",
"do",
"meta",
"classes"
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/plugins.py#L39-L46
|
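This is the classic `six`-style shim: the temporary metaclass's `__new__` re-creates the class with the real metaclass and bases, so one class statement works on both Python 2 and 3. A sketch of how a plugin base class might use it (the abstract method is illustrative):

from abc import ABCMeta, abstractmethod

class Plugin(with_metaclass(ABCMeta, object)):
    @abstractmethod
    def pre_invoke(self, event, context):
        pass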
18,281
|
iopipe/iopipe-python
|
iopipe/report.py
|
Report.extract_context_data
|
def extract_context_data(self):
"""
        Returns the contents of an AWS Lambda context.
:returns: A dict of relevant context data.
:rtype: dict
"""
data = {}
for k, v in {
# camel case names in the report to align with AWS standards
"functionName": "function_name",
"functionVersion": "function_version",
"memoryLimitInMB": "memory_limit_in_mb",
"invokedFunctionArn": "invoked_function_arn",
"awsRequestId": "aws_request_id",
"logGroupName": "log_group_name",
"logStreamName": "log_stream_name",
}.items():
if hasattr(self.context, v):
data[k] = getattr(self.context, v)
if (
hasattr(self.context, "invoked_function_arn")
and "AWS_SAM_LOCAL" in os.environ
):
data["invokedFunctionArn"] = (
"arn:aws:lambda:local:0:function:%s"
% data.get("functionName", "unknown")
)
if hasattr(self.context, "get_remaining_time_in_millis") and callable(
self.context.get_remaining_time_in_millis
):
data[
"getRemainingTimeInMillis"
] = self.context.get_remaining_time_in_millis()
data["traceId"] = os.getenv("_X_AMZN_TRACE_ID", "")
return data
|
python
|
def extract_context_data(self):
"""
        Returns the contents of an AWS Lambda context.
:returns: A dict of relevant context data.
:rtype: dict
"""
data = {}
for k, v in {
# camel case names in the report to align with AWS standards
"functionName": "function_name",
"functionVersion": "function_version",
"memoryLimitInMB": "memory_limit_in_mb",
"invokedFunctionArn": "invoked_function_arn",
"awsRequestId": "aws_request_id",
"logGroupName": "log_group_name",
"logStreamName": "log_stream_name",
}.items():
if hasattr(self.context, v):
data[k] = getattr(self.context, v)
if (
hasattr(self.context, "invoked_function_arn")
and "AWS_SAM_LOCAL" in os.environ
):
data["invokedFunctionArn"] = (
"arn:aws:lambda:local:0:function:%s"
% data.get("functionName", "unknown")
)
if hasattr(self.context, "get_remaining_time_in_millis") and callable(
self.context.get_remaining_time_in_millis
):
data[
"getRemainingTimeInMillis"
] = self.context.get_remaining_time_in_millis()
data["traceId"] = os.getenv("_X_AMZN_TRACE_ID", "")
return data
|
[
"def",
"extract_context_data",
"(",
"self",
")",
":",
"data",
"=",
"{",
"}",
"for",
"k",
",",
"v",
"in",
"{",
"# camel case names in the report to align with AWS standards",
"\"functionName\"",
":",
"\"function_name\"",
",",
"\"functionVersion\"",
":",
"\"function_version\"",
",",
"\"memoryLimitInMB\"",
":",
"\"memory_limit_in_mb\"",
",",
"\"invokedFunctionArn\"",
":",
"\"invoked_function_arn\"",
",",
"\"awsRequestId\"",
":",
"\"aws_request_id\"",
",",
"\"logGroupName\"",
":",
"\"log_group_name\"",
",",
"\"logStreamName\"",
":",
"\"log_stream_name\"",
",",
"}",
".",
"items",
"(",
")",
":",
"if",
"hasattr",
"(",
"self",
".",
"context",
",",
"v",
")",
":",
"data",
"[",
"k",
"]",
"=",
"getattr",
"(",
"self",
".",
"context",
",",
"v",
")",
"if",
"(",
"hasattr",
"(",
"self",
".",
"context",
",",
"\"invoked_function_arn\"",
")",
"and",
"\"AWS_SAM_LOCAL\"",
"in",
"os",
".",
"environ",
")",
":",
"data",
"[",
"\"invokedFunctionArn\"",
"]",
"=",
"(",
"\"arn:aws:lambda:local:0:function:%s\"",
"%",
"data",
".",
"get",
"(",
"\"functionName\"",
",",
"\"unknown\"",
")",
")",
"if",
"hasattr",
"(",
"self",
".",
"context",
",",
"\"get_remaining_time_in_millis\"",
")",
"and",
"callable",
"(",
"self",
".",
"context",
".",
"get_remaining_time_in_millis",
")",
":",
"data",
"[",
"\"getRemainingTimeInMillis\"",
"]",
"=",
"self",
".",
"context",
".",
"get_remaining_time_in_millis",
"(",
")",
"data",
"[",
"\"traceId\"",
"]",
"=",
"os",
".",
"getenv",
"(",
"\"_X_AMZN_TRACE_ID\"",
",",
"\"\"",
")",
"return",
"data"
] |
Returns the contents of an AWS Lambda context.
:returns: A dict of relevant context data.
:rtype: dict
|
[
"Returns",
"the",
"contents",
"of",
"a",
"AWS",
"Lambda",
"context",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/report.py#L82-L117
|
18,282
|
iopipe/iopipe-python
|
iopipe/report.py
|
Report.retain_error
|
def retain_error(self, error, frame=None):
"""
Adds details of an error to the report.
:param error: The error exception to add to the report.
"""
if frame is None:
stack = traceback.format_exc()
self.labels.add("@iopipe/error")
else:
stack = "\n".join(traceback.format_stack(frame))
self.labels.add("@iopipe/timeout")
details = {
"name": type(error).__name__,
"message": "{}".format(error),
"stack": stack,
}
self.report["errors"] = details
|
python
|
def retain_error(self, error, frame=None):
"""
Adds details of an error to the report.
:param error: The error exception to add to the report.
"""
if frame is None:
stack = traceback.format_exc()
self.labels.add("@iopipe/error")
else:
stack = "\n".join(traceback.format_stack(frame))
self.labels.add("@iopipe/timeout")
details = {
"name": type(error).__name__,
"message": "{}".format(error),
"stack": stack,
}
self.report["errors"] = details
|
[
"def",
"retain_error",
"(",
"self",
",",
"error",
",",
"frame",
"=",
"None",
")",
":",
"if",
"frame",
"is",
"None",
":",
"stack",
"=",
"traceback",
".",
"format_exc",
"(",
")",
"self",
".",
"labels",
".",
"add",
"(",
"\"@iopipe/error\"",
")",
"else",
":",
"stack",
"=",
"\"\\n\"",
".",
"join",
"(",
"traceback",
".",
"format_stack",
"(",
"frame",
")",
")",
"self",
".",
"labels",
".",
"add",
"(",
"\"@iopipe/timeout\"",
")",
"details",
"=",
"{",
"\"name\"",
":",
"type",
"(",
"error",
")",
".",
"__name__",
",",
"\"message\"",
":",
"\"{}\"",
".",
"format",
"(",
"error",
")",
",",
"\"stack\"",
":",
"stack",
",",
"}",
"self",
".",
"report",
"[",
"\"errors\"",
"]",
"=",
"details"
] |
Adds details of an error to the report.
:param error: The error exception to add to the report.
|
[
"Adds",
"details",
"of",
"an",
"error",
"to",
"the",
"report",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/report.py#L119-L136
|
18,283
|
iopipe/iopipe-python
|
iopipe/report.py
|
Report.prepare
|
def prepare(self, error=None, frame=None):
"""
Prepare the report to be sent to IOpipe.
:param error: An optional error to add to report.
:param frame: A stack frame to add to report in the event of a timeout.
"""
if error:
self.retain_error(error, frame)
self.report["environment"]["host"]["boot_id"] = system.read_bootid()
# convert labels to list for sending
self.report["labels"] = list(self.labels)
meminfo = system.read_meminfo()
self.report.update(
{
"aws": self.extract_context_data(),
"timestampEnd": int(time.time() * 1000),
}
)
self.report["environment"]["os"].update(
{
"cpus": system.read_stat(),
"freemem": meminfo["MemFree"],
"hostname": system.read_hostname(),
"totalmem": meminfo["MemTotal"],
"usedmem": meminfo["MemTotal"] - meminfo["MemFree"],
}
)
self.report["environment"]["os"]["linux"]["pid"] = {
"self": {
"stat": system.read_pid_stat("self"),
"stat_start": self.stat_start,
"status": system.read_pid_status("self"),
}
}
self.report["disk"] = system.read_disk()
self.report["duration"] = int((monotonic() - self.start_time) * 1e9)
|
python
|
def prepare(self, error=None, frame=None):
"""
Prepare the report to be sent to IOpipe.
:param error: An optional error to add to report.
:param frame: A stack frame to add to report in the event of a timeout.
"""
if error:
self.retain_error(error, frame)
self.report["environment"]["host"]["boot_id"] = system.read_bootid()
# convert labels to list for sending
self.report["labels"] = list(self.labels)
meminfo = system.read_meminfo()
self.report.update(
{
"aws": self.extract_context_data(),
"timestampEnd": int(time.time() * 1000),
}
)
self.report["environment"]["os"].update(
{
"cpus": system.read_stat(),
"freemem": meminfo["MemFree"],
"hostname": system.read_hostname(),
"totalmem": meminfo["MemTotal"],
"usedmem": meminfo["MemTotal"] - meminfo["MemFree"],
}
)
self.report["environment"]["os"]["linux"]["pid"] = {
"self": {
"stat": system.read_pid_stat("self"),
"stat_start": self.stat_start,
"status": system.read_pid_status("self"),
}
}
self.report["disk"] = system.read_disk()
self.report["duration"] = int((monotonic() - self.start_time) * 1e9)
|
[
"def",
"prepare",
"(",
"self",
",",
"error",
"=",
"None",
",",
"frame",
"=",
"None",
")",
":",
"if",
"error",
":",
"self",
".",
"retain_error",
"(",
"error",
",",
"frame",
")",
"self",
".",
"report",
"[",
"\"environment\"",
"]",
"[",
"\"host\"",
"]",
"[",
"\"boot_id\"",
"]",
"=",
"system",
".",
"read_bootid",
"(",
")",
"# convert labels to list for sending",
"self",
".",
"report",
"[",
"\"labels\"",
"]",
"=",
"list",
"(",
"self",
".",
"labels",
")",
"meminfo",
"=",
"system",
".",
"read_meminfo",
"(",
")",
"self",
".",
"report",
".",
"update",
"(",
"{",
"\"aws\"",
":",
"self",
".",
"extract_context_data",
"(",
")",
",",
"\"timestampEnd\"",
":",
"int",
"(",
"time",
".",
"time",
"(",
")",
"*",
"1000",
")",
",",
"}",
")",
"self",
".",
"report",
"[",
"\"environment\"",
"]",
"[",
"\"os\"",
"]",
".",
"update",
"(",
"{",
"\"cpus\"",
":",
"system",
".",
"read_stat",
"(",
")",
",",
"\"freemem\"",
":",
"meminfo",
"[",
"\"MemFree\"",
"]",
",",
"\"hostname\"",
":",
"system",
".",
"read_hostname",
"(",
")",
",",
"\"totalmem\"",
":",
"meminfo",
"[",
"\"MemTotal\"",
"]",
",",
"\"usedmem\"",
":",
"meminfo",
"[",
"\"MemTotal\"",
"]",
"-",
"meminfo",
"[",
"\"MemFree\"",
"]",
",",
"}",
")",
"self",
".",
"report",
"[",
"\"environment\"",
"]",
"[",
"\"os\"",
"]",
"[",
"\"linux\"",
"]",
"[",
"\"pid\"",
"]",
"=",
"{",
"\"self\"",
":",
"{",
"\"stat\"",
":",
"system",
".",
"read_pid_stat",
"(",
"\"self\"",
")",
",",
"\"stat_start\"",
":",
"self",
".",
"stat_start",
",",
"\"status\"",
":",
"system",
".",
"read_pid_status",
"(",
"\"self\"",
")",
",",
"}",
"}",
"self",
".",
"report",
"[",
"\"disk\"",
"]",
"=",
"system",
".",
"read_disk",
"(",
")",
"self",
".",
"report",
"[",
"\"duration\"",
"]",
"=",
"int",
"(",
"(",
"monotonic",
"(",
")",
"-",
"self",
".",
"start_time",
")",
"*",
"1e9",
")"
] |
Prepare the report to be sent to IOpipe.
:param error: An optional error to add to report.
:param frame: A stack frame to add to report in the event of a timeout.
|
[
"Prepare",
"the",
"report",
"to",
"be",
"sent",
"to",
"IOpipe",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/report.py#L138-L182
|
18,284
|
iopipe/iopipe-python
|
iopipe/report.py
|
Report.send
|
def send(self):
"""
Sends the report to IOpipe.
"""
if self.sent is True:
return
self.sent = True
logger.debug("Sending report to IOpipe:")
logger.debug(json.dumps(self.report, indent=2, sort_keys=True))
self.client.submit_future(send_report, copy.deepcopy(self.report), self.config)
|
python
|
def send(self):
"""
Sends the report to IOpipe.
"""
if self.sent is True:
return
self.sent = True
logger.debug("Sending report to IOpipe:")
logger.debug(json.dumps(self.report, indent=2, sort_keys=True))
self.client.submit_future(send_report, copy.deepcopy(self.report), self.config)
|
[
"def",
"send",
"(",
"self",
")",
":",
"if",
"self",
".",
"sent",
"is",
"True",
":",
"return",
"self",
".",
"sent",
"=",
"True",
"logger",
".",
"debug",
"(",
"\"Sending report to IOpipe:\"",
")",
"logger",
".",
"debug",
"(",
"json",
".",
"dumps",
"(",
"self",
".",
"report",
",",
"indent",
"=",
"2",
",",
"sort_keys",
"=",
"True",
")",
")",
"self",
".",
"client",
".",
"submit_future",
"(",
"send_report",
",",
"copy",
".",
"deepcopy",
"(",
"self",
".",
"report",
")",
",",
"self",
".",
"config",
")"
] |
Sends the report to IOpipe.
|
[
"Sends",
"the",
"report",
"to",
"IOpipe",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/report.py#L184-L195
|
18,285
|
iopipe/iopipe-python
|
iopipe/send_report.py
|
send_report
|
def send_report(report, config):
"""
Sends the report to IOpipe's collector.
:param report: The report to be sent.
:param config: The IOpipe agent configuration.
"""
headers = {"Authorization": "Bearer {}".format(config["token"])}
url = "https://{host}{path}".format(**config)
try:
response = session.post(
url, json=report, headers=headers, timeout=config["network_timeout"]
)
response.raise_for_status()
except Exception as e:
logger.debug("Error sending report to IOpipe: %s" % e)
else:
logger.debug("Report sent to IOpipe successfully")
|
python
|
def send_report(report, config):
"""
Sends the report to IOpipe's collector.
:param report: The report to be sent.
:param config: The IOpipe agent configuration.
"""
headers = {"Authorization": "Bearer {}".format(config["token"])}
url = "https://{host}{path}".format(**config)
try:
response = session.post(
url, json=report, headers=headers, timeout=config["network_timeout"]
)
response.raise_for_status()
except Exception as e:
logger.debug("Error sending report to IOpipe: %s" % e)
else:
logger.debug("Report sent to IOpipe successfully")
|
[
"def",
"send_report",
"(",
"report",
",",
"config",
")",
":",
"headers",
"=",
"{",
"\"Authorization\"",
":",
"\"Bearer {}\"",
".",
"format",
"(",
"config",
"[",
"\"token\"",
"]",
")",
"}",
"url",
"=",
"\"https://{host}{path}\"",
".",
"format",
"(",
"*",
"*",
"config",
")",
"try",
":",
"response",
"=",
"session",
".",
"post",
"(",
"url",
",",
"json",
"=",
"report",
",",
"headers",
"=",
"headers",
",",
"timeout",
"=",
"config",
"[",
"\"network_timeout\"",
"]",
")",
"response",
".",
"raise_for_status",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Error sending report to IOpipe: %s\"",
"%",
"e",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Report sent to IOpipe successfully\"",
")"
] |
Sends the report to IOpipe's collector.
:param report: The report to be sent.
:param config: The IOpipe agent configuration.
|
[
"Sends",
"the",
"report",
"to",
"IOpipe",
"s",
"collector",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/send_report.py#L12-L30
|
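A hedged usage sketch for send_report; the config keys mirror what the function reads, but every value below is made up, and the live call stays commented out.

config = {
    "token": "example-token",              # hypothetical credential
    "host": "metrics-api.example.com",     # assumed collector host
    "path": "/v0/event",                   # assumed collector path
    "network_timeout": 5.0,                # seconds, post-normalization
}
url = "https://{host}{path}".format(**config)
assert url == "https://metrics-api.example.com/v0/event"
# send_report({"duration": 42}, config)   # real call; needs the module's session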
18,286
|
iopipe/iopipe-python
|
iopipe/contrib/logger/request.py
|
upload_log_data
|
def upload_log_data(url, stream_or_file, config):
"""
Uploads log data to IOpipe.
:param url: The signed URL
:param stream_or_file: The log data stream or file
:param config: The IOpipe config
"""
try:
logger.debug("Uploading log data to IOpipe")
if isinstance(stream_or_file, StringIO):
stream_or_file.seek(0)
response = requests.put(
url, data=stream_or_file, timeout=config["network_timeout"]
)
else:
with open(stream_or_file, "rb") as data:
response = requests.put(
url, data=data, timeout=config["network_timeout"]
)
response.raise_for_status()
except Exception as e:
logger.debug("Error while uploading log data: %s", e)
logger.exception(e)
if hasattr(e, "response") and hasattr(e.response, "content"):
logger.debug(e.response.content)
else:
logger.debug("Log data uploaded successfully")
finally:
if isinstance(stream_or_file, str) and os.path.exists(stream_or_file):
os.remove(stream_or_file)
|
python
|
def upload_log_data(url, stream_or_file, config):
"""
Uploads log data to IOpipe.
:param url: The signed URL
:param stream_or_file: The log data stream or file
:param config: The IOpipe config
"""
try:
logger.debug("Uploading log data to IOpipe")
if isinstance(stream_or_file, StringIO):
stream_or_file.seek(0)
response = requests.put(
url, data=stream_or_file, timeout=config["network_timeout"]
)
else:
with open(stream_or_file, "rb") as data:
response = requests.put(
url, data=data, timeout=config["network_timeout"]
)
response.raise_for_status()
except Exception as e:
logger.debug("Error while uploading log data: %s", e)
logger.exception(e)
if hasattr(e, "response") and hasattr(e.response, "content"):
logger.debug(e.response.content)
else:
logger.debug("Log data uploaded successfully")
finally:
if isinstance(stream_or_file, str) and os.path.exists(stream_or_file):
os.remove(stream_or_file)
|
[
"def",
"upload_log_data",
"(",
"url",
",",
"stream_or_file",
",",
"config",
")",
":",
"try",
":",
"logger",
".",
"debug",
"(",
"\"Uploading log data to IOpipe\"",
")",
"if",
"isinstance",
"(",
"stream_or_file",
",",
"StringIO",
")",
":",
"stream_or_file",
".",
"seek",
"(",
"0",
")",
"response",
"=",
"requests",
".",
"put",
"(",
"url",
",",
"data",
"=",
"stream_or_file",
",",
"timeout",
"=",
"config",
"[",
"\"network_timeout\"",
"]",
")",
"else",
":",
"with",
"open",
"(",
"stream_or_file",
",",
"\"rb\"",
")",
"as",
"data",
":",
"response",
"=",
"requests",
".",
"put",
"(",
"url",
",",
"data",
"=",
"data",
",",
"timeout",
"=",
"config",
"[",
"\"network_timeout\"",
"]",
")",
"response",
".",
"raise_for_status",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Error while uploading log data: %s\"",
",",
"e",
")",
"logger",
".",
"exception",
"(",
"e",
")",
"if",
"hasattr",
"(",
"e",
",",
"\"response\"",
")",
"and",
"hasattr",
"(",
"e",
".",
"response",
",",
"\"content\"",
")",
":",
"logger",
".",
"debug",
"(",
"e",
".",
"response",
".",
"content",
")",
"else",
":",
"logger",
".",
"debug",
"(",
"\"Log data uploaded successfully\"",
")",
"finally",
":",
"if",
"isinstance",
"(",
"stream_or_file",
",",
"str",
")",
"and",
"os",
".",
"path",
".",
"exists",
"(",
"stream_or_file",
")",
":",
"os",
".",
"remove",
"(",
"stream_or_file",
")"
] |
Uploads log data to IOpipe.
:param url: The signed URL
:param stream_or_file: The log data stream or file
:param config: The IOpipe config
|
[
"Uploads",
"log",
"data",
"to",
"IOpipe",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/contrib/logger/request.py#L14-L44
|
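upload_log_data accepts either an in-memory StringIO or a filesystem path; a hedged sketch of both shapes, with a placeholder URL and path, and the network calls commented out.

from io import StringIO

cfg = {"network_timeout": 5.0}
buf = StringIO("log line 1\nlog line 2\n")
# upload_log_data("https://signed.example/put", buf, cfg)            # stream: rewound, then PUT
# upload_log_data("https://signed.example/put", "/tmp/fn.log", cfg)  # path: opened "rb", removed in the finally block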
18,287
|
iopipe/iopipe-python
|
iopipe/signer.py
|
get_signer_hostname
|
def get_signer_hostname():
"""
Returns the IOpipe signer hostname for a region
:returns: The signer hostname
    :rtype: str
"""
region = os.getenv("AWS_REGION", "")
region = region if region and region in SUPPORTED_REGIONS else "us-west-2"
return "signer.{region}.iopipe.com".format(region=region)
|
python
|
def get_signer_hostname():
"""
Returns the IOpipe signer hostname for a region
:returns: The signer hostname
    :rtype: str
"""
region = os.getenv("AWS_REGION", "")
region = region if region and region in SUPPORTED_REGIONS else "us-west-2"
return "signer.{region}.iopipe.com".format(region=region)
|
[
"def",
"get_signer_hostname",
"(",
")",
":",
"region",
"=",
"os",
".",
"getenv",
"(",
"\"AWS_REGION\"",
",",
"\"\"",
")",
"region",
"=",
"region",
"if",
"region",
"and",
"region",
"in",
"SUPPORTED_REGIONS",
"else",
"\"us-west-2\"",
"return",
"\"signer.{region}.iopipe.com\"",
".",
"format",
"(",
"region",
"=",
"region",
")"
] |
Returns the IOpipe signer hostname for a region
:returns: The signer hostname
:rtype: str
|
[
"Returns",
"the",
"IOpipe",
"signer",
"hostname",
"for",
"a",
"region"
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/signer.py#L15-L24
|
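A self-contained restatement of the fallback logic, with an assumed SUPPORTED_REGIONS set (the real set lives in the module and may differ).

SUPPORTED_REGIONS = {"ap-southeast-2", "eu-west-1", "us-east-2", "us-west-1", "us-west-2"}  # assumed

def signer_hostname(region_env):
    region = region_env if region_env and region_env in SUPPORTED_REGIONS else "us-west-2"
    return "signer.{region}.iopipe.com".format(region=region)

assert signer_hostname("") == "signer.us-west-2.iopipe.com"           # unset -> default
assert signer_hostname("eu-west-1") == "signer.eu-west-1.iopipe.com"  # supported -> kept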
18,288
|
iopipe/iopipe-python
|
iopipe/signer.py
|
get_signed_request
|
def get_signed_request(config, context, extension):
"""
Returns a signed request URL from IOpipe
:param config: The IOpipe config
:param context: The AWS context to request a signed URL
:param extension: The extension of the file to sign
:returns: A signed request URL
:rtype: str
"""
url = "https://{hostname}/".format(hostname=get_signer_hostname())
try:
logger.debug("Requesting signed request URL from %s", url)
response = requests.post(
url,
json={
"arn": context.invoked_function_arn,
"requestId": context.aws_request_id,
"timestamp": int(time.time() * 1000),
"extension": extension,
},
headers={"Authorization": config["token"]},
timeout=config["network_timeout"],
)
response.raise_for_status()
except Exception as e:
logger.debug("Error requesting signed request URL: %s", e)
if hasattr(e, "response"):
logger.debug(e.response.content)
else:
response = response.json()
logger.debug("Signed request URL received for %s", response["url"])
return response
|
python
|
def get_signed_request(config, context, extension):
"""
Returns a signed request URL from IOpipe
:param config: The IOpipe config
:param context: The AWS context to request a signed URL
:param extension: The extension of the file to sign
:returns: A signed request URL
:rtype: str
"""
url = "https://{hostname}/".format(hostname=get_signer_hostname())
try:
logger.debug("Requesting signed request URL from %s", url)
response = requests.post(
url,
json={
"arn": context.invoked_function_arn,
"requestId": context.aws_request_id,
"timestamp": int(time.time() * 1000),
"extension": extension,
},
headers={"Authorization": config["token"]},
timeout=config["network_timeout"],
)
response.raise_for_status()
except Exception as e:
logger.debug("Error requesting signed request URL: %s", e)
if hasattr(e, "response"):
logger.debug(e.response.content)
else:
response = response.json()
logger.debug("Signed request URL received for %s", response["url"])
return response
|
[
"def",
"get_signed_request",
"(",
"config",
",",
"context",
",",
"extension",
")",
":",
"url",
"=",
"\"https://{hostname}/\"",
".",
"format",
"(",
"hostname",
"=",
"get_signer_hostname",
"(",
")",
")",
"try",
":",
"logger",
".",
"debug",
"(",
"\"Requesting signed request URL from %s\"",
",",
"url",
")",
"response",
"=",
"requests",
".",
"post",
"(",
"url",
",",
"json",
"=",
"{",
"\"arn\"",
":",
"context",
".",
"invoked_function_arn",
",",
"\"requestId\"",
":",
"context",
".",
"aws_request_id",
",",
"\"timestamp\"",
":",
"int",
"(",
"time",
".",
"time",
"(",
")",
"*",
"1000",
")",
",",
"\"extension\"",
":",
"extension",
",",
"}",
",",
"headers",
"=",
"{",
"\"Authorization\"",
":",
"config",
"[",
"\"token\"",
"]",
"}",
",",
"timeout",
"=",
"config",
"[",
"\"network_timeout\"",
"]",
",",
")",
"response",
".",
"raise_for_status",
"(",
")",
"except",
"Exception",
"as",
"e",
":",
"logger",
".",
"debug",
"(",
"\"Error requesting signed request URL: %s\"",
",",
"e",
")",
"if",
"hasattr",
"(",
"e",
",",
"\"response\"",
")",
":",
"logger",
".",
"debug",
"(",
"e",
".",
"response",
".",
"content",
")",
"else",
":",
"response",
"=",
"response",
".",
"json",
"(",
")",
"logger",
".",
"debug",
"(",
"\"Signed request URL received for %s\"",
",",
"response",
"[",
"\"url\"",
"]",
")",
"return",
"response"
] |
Returns a signed request URL from IOpipe
:param config: The IOpipe config
:param context: The AWS context to request a signed URL
:param extension: The extension of the file to sign
:returns: A signed request URL
:rtype: str
|
[
"Returns",
"a",
"signed",
"request",
"URL",
"from",
"IOpipe"
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/signer.py#L27-L60
|
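One caller-side caveat worth noting: on success the function returns the parsed JSON dict, but in the failure path the except branch falls through to `return response`, which can be a raw error Response, or an unbound name if the POST itself never completed. A hedged defensive wrapper:

def use_signed(signed):
    # Treat anything that is not a dict carrying a "url" key as a failure.
    if isinstance(signed, dict) and "url" in signed:
        return signed["url"]
    return None

assert use_signed({"url": "https://signer.example/abc"}) == "https://signer.example/abc"
assert use_signed(None) is None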
18,289
|
iopipe/iopipe-python
|
acceptance/serverless-layers/handler.py
|
handler
|
def handler(event, context):
"""Sample pure Lambda function
Parameters
----------
event: dict, required
API Gateway Lambda Proxy Input Format
{
"resource": "Resource path",
"path": "Path parameter",
"httpMethod": "Incoming request's method name"
"headers": {Incoming request headers}
"queryStringParameters": {query string parameters }
"pathParameters": {path parameters}
"stageVariables": {Applicable stage variables}
"requestContext": {Request context, including authorizer-returned key-value pairs}
"body": "A JSON string of the request payload."
"isBase64Encoded": "A boolean flag to indicate if the applicable request payload is Base64-encode"
}
https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format
context: object, required
Lambda Context runtime methods and attributes
Attributes
----------
context.aws_request_id: str
Lambda request ID
context.client_context: object
Additional context when invoked through AWS Mobile SDK
context.function_name: str
Lambda function name
context.function_version: str
Function version identifier
context.get_remaining_time_in_millis: function
Time in milliseconds before function times out
context.identity:
Cognito identity provider context when invoked through AWS Mobile SDK
context.invoked_function_arn: str
Function ARN
context.log_group_name: str
Cloudwatch Log group name
context.log_stream_name: str
Cloudwatch Log stream name
context.memory_limit_in_mb: int
Function memory
https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
Returns
------
API Gateway Lambda Proxy Output Format: dict
'statusCode' and 'body' are required
{
"isBase64Encoded": true | false,
"statusCode": httpStatusCode,
"headers": {"headerName": "headerValue", ...},
"body": "..."
}
    https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format
"""
try:
ip = requests.get("http://checkip.amazonaws.com/")
except requests.RequestException as e:
# Send some context about this error to Lambda Logs
print(e)
raise e
return {
"statusCode": 200,
"body": json.dumps(
{"message": "hello world", "location": ip.text.replace("\n", "")}
),
}
|
python
|
def handler(event, context):
"""Sample pure Lambda function
Parameters
----------
event: dict, required
API Gateway Lambda Proxy Input Format
{
"resource": "Resource path",
"path": "Path parameter",
"httpMethod": "Incoming request's method name"
"headers": {Incoming request headers}
"queryStringParameters": {query string parameters }
"pathParameters": {path parameters}
"stageVariables": {Applicable stage variables}
"requestContext": {Request context, including authorizer-returned key-value pairs}
"body": "A JSON string of the request payload."
"isBase64Encoded": "A boolean flag to indicate if the applicable request payload is Base64-encode"
}
https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format
context: object, required
Lambda Context runtime methods and attributes
Attributes
----------
context.aws_request_id: str
Lambda request ID
context.client_context: object
Additional context when invoked through AWS Mobile SDK
context.function_name: str
Lambda function name
context.function_version: str
Function version identifier
context.get_remaining_time_in_millis: function
Time in milliseconds before function times out
context.identity:
Cognito identity provider context when invoked through AWS Mobile SDK
context.invoked_function_arn: str
Function ARN
context.log_group_name: str
Cloudwatch Log group name
context.log_stream_name: str
Cloudwatch Log stream name
context.memory_limit_in_mb: int
Function memory
https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
Returns
------
API Gateway Lambda Proxy Output Format: dict
'statusCode' and 'body' are required
{
"isBase64Encoded": true | false,
"statusCode": httpStatusCode,
"headers": {"headerName": "headerValue", ...},
"body": "..."
}
    https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format
"""
try:
ip = requests.get("http://checkip.amazonaws.com/")
except requests.RequestException as e:
# Send some context about this error to Lambda Logs
print(e)
raise e
return {
"statusCode": 200,
"body": json.dumps(
{"message": "hello world", "location": ip.text.replace("\n", "")}
),
}
|
[
"def",
"handler",
"(",
"event",
",",
"context",
")",
":",
"try",
":",
"ip",
"=",
"requests",
".",
"get",
"(",
"\"http://checkip.amazonaws.com/\"",
")",
"except",
"requests",
".",
"RequestException",
"as",
"e",
":",
"# Send some context about this error to Lambda Logs",
"print",
"(",
"e",
")",
"raise",
"e",
"return",
"{",
"\"statusCode\"",
":",
"200",
",",
"\"body\"",
":",
"json",
".",
"dumps",
"(",
"{",
"\"message\"",
":",
"\"hello world\"",
",",
"\"location\"",
":",
"ip",
".",
"text",
".",
"replace",
"(",
"\"\\n\"",
",",
"\"\"",
")",
"}",
")",
",",
"}"
] |
Sample pure Lambda function
Parameters
----------
event: dict, required
API Gateway Lambda Proxy Input Format
{
"resource": "Resource path",
"path": "Path parameter",
"httpMethod": "Incoming request's method name"
"headers": {Incoming request headers}
"queryStringParameters": {query string parameters }
"pathParameters": {path parameters}
"stageVariables": {Applicable stage variables}
"requestContext": {Request context, including authorizer-returned key-value pairs}
"body": "A JSON string of the request payload."
"isBase64Encoded": "A boolean flag to indicate if the applicable request payload is Base64-encode"
}
https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-input-format
context: object, required
Lambda Context runtime methods and attributes
Attributes
----------
context.aws_request_id: str
Lambda request ID
context.client_context: object
Additional context when invoked through AWS Mobile SDK
context.function_name: str
Lambda function name
context.function_version: str
Function version identifier
context.get_remaining_time_in_millis: function
Time in milliseconds before function times out
context.identity:
Cognito identity provider context when invoked through AWS Mobile SDK
context.invoked_function_arn: str
Function ARN
context.log_group_name: str
Cloudwatch Log group name
context.log_stream_name: str
Cloudwatch Log stream name
context.memory_limit_in_mb: int
Function memory
https://docs.aws.amazon.com/lambda/latest/dg/python-context-object.html
Returns
------
API Gateway Lambda Proxy Output Format: dict
'statusCode' and 'body' are required
{
"isBase64Encoded": true | false,
"statusCode": httpStatusCode,
"headers": {"headerName": "headerValue", ...},
"body": "..."
}
    https://docs.aws.amazon.com/apigateway/latest/developerguide/set-up-lambda-proxy-integrations.html#api-gateway-simple-proxy-for-lambda-output-format
|
[
"Sample",
"pure",
"Lambda",
"function"
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/acceptance/serverless-layers/handler.py#L6-L87
|
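A hedged local-invocation sketch; FakeContext carries only attributes the docstring lists, and the call stays commented out because the handler performs a live HTTP GET.

import json

fake_event = {"httpMethod": "GET", "path": "/hello"}

class FakeContext:
    aws_request_id = "req-123"
    function_name = "hello-world"

# result = handler(fake_event, FakeContext())
# body = json.loads(result["body"])
# assert result["statusCode"] == 200 and body["message"] == "hello world"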
18,290
|
iopipe/iopipe-python
|
iopipe/system.py
|
read_meminfo
|
def read_meminfo():
"""
Returns system memory usage information.
:returns: The system memory usage.
:rtype: dict
"""
data = {}
with open("/proc/meminfo", "rb") as meminfo_file:
for row in meminfo_file:
fields = row.split()
# Example content:
# MemTotal: 3801016 kB
# MemFree: 1840972 kB
# MemAvailable: 3287752 kB
# HugePages_Total: 0
data[fields[0].decode("ascii")[:-1]] = int(fields[1]) * 1024
return data
|
python
|
def read_meminfo():
"""
Returns system memory usage information.
:returns: The system memory usage.
:rtype: dict
"""
data = {}
with open("/proc/meminfo", "rb") as meminfo_file:
for row in meminfo_file:
fields = row.split()
# Example content:
# MemTotal: 3801016 kB
# MemFree: 1840972 kB
# MemAvailable: 3287752 kB
# HugePages_Total: 0
data[fields[0].decode("ascii")[:-1]] = int(fields[1]) * 1024
return data
|
[
"def",
"read_meminfo",
"(",
")",
":",
"data",
"=",
"{",
"}",
"with",
"open",
"(",
"\"/proc/meminfo\"",
",",
"\"rb\"",
")",
"as",
"meminfo_file",
":",
"for",
"row",
"in",
"meminfo_file",
":",
"fields",
"=",
"row",
".",
"split",
"(",
")",
"# Example content:",
"# MemTotal: 3801016 kB",
"# MemFree: 1840972 kB",
"# MemAvailable: 3287752 kB",
"# HugePages_Total: 0",
"data",
"[",
"fields",
"[",
"0",
"]",
".",
"decode",
"(",
"\"ascii\"",
")",
"[",
":",
"-",
"1",
"]",
"]",
"=",
"int",
"(",
"fields",
"[",
"1",
"]",
")",
"*",
"1024",
"return",
"data"
] |
Returns system memory usage information.
:returns: The system memory usage.
:rtype: dict
|
[
"Returns",
"system",
"memory",
"usage",
"information",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/system.py#L50-L67
|
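The same parse re-run over canned /proc/meminfo content. Note the blanket `* 1024`: unit-less rows such as HugePages_Total get scaled too, which appears to be a quirk of the original code.

sample = b"MemTotal:        3801016 kB\nHugePages_Total:       0\n"
data = {}
for row in sample.splitlines():
    fields = row.split()
    data[fields[0].decode("ascii")[:-1]] = int(fields[1]) * 1024
assert data["MemTotal"] == 3801016 * 1024   # kB -> bytes
assert data["HugePages_Total"] == 0         # unit-less, scaled anyway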
18,291
|
iopipe/iopipe-python
|
iopipe/system.py
|
read_pid_stat
|
def read_pid_stat(pid="self"):
"""
Returns system process stat information.
:param pid: The process ID.
:returns: The system stat information.
:rtype: dict
"""
with open("/proc/%s/stat" % (pid,), "rb") as f:
stat = f.readline().split()
return {
"utime": int(stat[13]),
"stime": int(stat[14]),
"cutime": int(stat[15]),
"cstime": int(stat[16]),
}
|
python
|
def read_pid_stat(pid="self"):
"""
Returns system process stat information.
:param pid: The process ID.
:returns: The system stat information.
:rtype: dict
"""
with open("/proc/%s/stat" % (pid,), "rb") as f:
stat = f.readline().split()
return {
"utime": int(stat[13]),
"stime": int(stat[14]),
"cutime": int(stat[15]),
"cstime": int(stat[16]),
}
|
[
"def",
"read_pid_stat",
"(",
"pid",
"=",
"\"self\"",
")",
":",
"with",
"open",
"(",
"\"/proc/%s/stat\"",
"%",
"(",
"pid",
",",
")",
",",
"\"rb\"",
")",
"as",
"f",
":",
"stat",
"=",
"f",
".",
"readline",
"(",
")",
".",
"split",
"(",
")",
"return",
"{",
"\"utime\"",
":",
"int",
"(",
"stat",
"[",
"13",
"]",
")",
",",
"\"stime\"",
":",
"int",
"(",
"stat",
"[",
"14",
"]",
")",
",",
"\"cutime\"",
":",
"int",
"(",
"stat",
"[",
"15",
"]",
")",
",",
"\"cstime\"",
":",
"int",
"(",
"stat",
"[",
"16",
"]",
")",
",",
"}"
] |
Returns system process stat information.
:param pid: The process ID.
:returns: The system stat information.
:rtype: dict
|
[
"Returns",
"system",
"process",
"stat",
"information",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/system.py#L70-L85
|
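Usage note, hedged: the four values are CPU times in clock ticks, so dividing by the kernel tick rate gives seconds. Linux-only, hence the live call is commented out.

import os

ticks_per_sec = os.sysconf("SC_CLK_TCK")    # POSIX; commonly 100
# stat = read_pid_stat()                    # reads /proc/self/stat (Linux)
# cpu_seconds = (stat["utime"] + stat["stime"]) / float(ticks_per_sec)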
18,292
|
iopipe/iopipe-python
|
iopipe/system.py
|
read_pid_status
|
def read_pid_status(pid="self"):
"""
    Returns the system process status.
:param pid: The process ID.
:returns: The system process status.
:rtype: dict
"""
data = {}
with open("/proc/%s/status" % (pid,), "rb") as status_file:
for row in status_file:
fields = row.split()
if fields and fields[0] in [b"VmRSS:", b"Threads:", b"FDSize:"]:
try:
data[fields[0].decode("ascii")[:-1]] = int(fields[1])
except ValueError:
data[fields[0].decode("ascii")[:-1]] = fields[1].decode("ascii")
return data
|
python
|
def read_pid_status(pid="self"):
"""
    Returns the system process status.
:param pid: The process ID.
:returns: The system process status.
:rtype: dict
"""
data = {}
with open("/proc/%s/status" % (pid,), "rb") as status_file:
for row in status_file:
fields = row.split()
if fields and fields[0] in [b"VmRSS:", b"Threads:", b"FDSize:"]:
try:
data[fields[0].decode("ascii")[:-1]] = int(fields[1])
except ValueError:
data[fields[0].decode("ascii")[:-1]] = fields[1].decode("ascii")
return data
|
[
"def",
"read_pid_status",
"(",
"pid",
"=",
"\"self\"",
")",
":",
"data",
"=",
"{",
"}",
"with",
"open",
"(",
"\"/proc/%s/status\"",
"%",
"(",
"pid",
",",
")",
",",
"\"rb\"",
")",
"as",
"status_file",
":",
"for",
"row",
"in",
"status_file",
":",
"fields",
"=",
"row",
".",
"split",
"(",
")",
"if",
"fields",
"and",
"fields",
"[",
"0",
"]",
"in",
"[",
"b\"VmRSS:\"",
",",
"b\"Threads:\"",
",",
"b\"FDSize:\"",
"]",
":",
"try",
":",
"data",
"[",
"fields",
"[",
"0",
"]",
".",
"decode",
"(",
"\"ascii\"",
")",
"[",
":",
"-",
"1",
"]",
"]",
"=",
"int",
"(",
"fields",
"[",
"1",
"]",
")",
"except",
"ValueError",
":",
"data",
"[",
"fields",
"[",
"0",
"]",
".",
"decode",
"(",
"\"ascii\"",
")",
"[",
":",
"-",
"1",
"]",
"]",
"=",
"fields",
"[",
"1",
"]",
".",
"decode",
"(",
"\"ascii\"",
")",
"return",
"data"
] |
Returns the system process status.
:param pid: The process ID.
:returns: The system process status.
:rtype: dict
|
[
"Returns",
"the",
"system",
"process",
"sstatus",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/system.py#L88-L105
|
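The same filter re-run over canned /proc/<pid>/status content; unlike read_meminfo above, kB values are kept as-is here (no byte scaling).

wanted = [b"VmRSS:", b"Threads:", b"FDSize:"]
sample = b"Name:\tpython\nFDSize:\t256\nVmRSS:\t   51024 kB\nThreads:\t1\n"
data = {}
for row in sample.splitlines():
    fields = row.split()
    if fields and fields[0] in wanted:
        data[fields[0].decode("ascii")[:-1]] = int(fields[1])
assert data == {"FDSize": 256, "VmRSS": 51024, "Threads": 1}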
18,293
|
iopipe/iopipe-python
|
iopipe/system.py
|
read_stat
|
def read_stat():
"""
Returns the system stat information.
:returns: The system stat information.
:rtype: list
"""
data = []
with open("/proc/stat", "rb") as stat_file:
for line in stat_file:
cpu_stat = line.split()
if cpu_stat[0][:3] != b"cpu":
break
# First cpu line is aggregation of following lines, skip it
if len(cpu_stat[0]) == 3:
continue
data.append(
{
"times": {
"user": int(cpu_stat[1]),
"nice": int(cpu_stat[2]),
"sys": int(cpu_stat[3]),
"idle": int(cpu_stat[4]),
"irq": int(cpu_stat[6]),
}
}
)
return data
|
python
|
def read_stat():
"""
Returns the system stat information.
:returns: The system stat information.
:rtype: list
"""
data = []
with open("/proc/stat", "rb") as stat_file:
for line in stat_file:
cpu_stat = line.split()
if cpu_stat[0][:3] != b"cpu":
break
# First cpu line is aggregation of following lines, skip it
if len(cpu_stat[0]) == 3:
continue
data.append(
{
"times": {
"user": int(cpu_stat[1]),
"nice": int(cpu_stat[2]),
"sys": int(cpu_stat[3]),
"idle": int(cpu_stat[4]),
"irq": int(cpu_stat[6]),
}
}
)
return data
|
[
"def",
"read_stat",
"(",
")",
":",
"data",
"=",
"[",
"]",
"with",
"open",
"(",
"\"/proc/stat\"",
",",
"\"rb\"",
")",
"as",
"stat_file",
":",
"for",
"line",
"in",
"stat_file",
":",
"cpu_stat",
"=",
"line",
".",
"split",
"(",
")",
"if",
"cpu_stat",
"[",
"0",
"]",
"[",
":",
"3",
"]",
"!=",
"b\"cpu\"",
":",
"break",
"# First cpu line is aggregation of following lines, skip it",
"if",
"len",
"(",
"cpu_stat",
"[",
"0",
"]",
")",
"==",
"3",
":",
"continue",
"data",
".",
"append",
"(",
"{",
"\"times\"",
":",
"{",
"\"user\"",
":",
"int",
"(",
"cpu_stat",
"[",
"1",
"]",
")",
",",
"\"nice\"",
":",
"int",
"(",
"cpu_stat",
"[",
"2",
"]",
")",
",",
"\"sys\"",
":",
"int",
"(",
"cpu_stat",
"[",
"3",
"]",
")",
",",
"\"idle\"",
":",
"int",
"(",
"cpu_stat",
"[",
"4",
"]",
")",
",",
"\"irq\"",
":",
"int",
"(",
"cpu_stat",
"[",
"6",
"]",
")",
",",
"}",
"}",
")",
"return",
"data"
] |
Returns the system stat information.
:returns: The system stat information.
:rtype: list
|
[
"Returns",
"the",
"system",
"stat",
"information",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/system.py#L108-L135
|
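A hedged sketch of what two read_stat()-shaped samples are good for: a busy fraction per CPU. (The function skips field 5, iowait, and drops the aggregate "cpu" row.)

def busy_fraction(before, after):
    b, a = before["times"], after["times"]
    busy = sum(a[k] - b[k] for k in ("user", "nice", "sys", "irq"))
    total = busy + (a["idle"] - b["idle"])
    return busy / total if total else 0.0

s0 = {"times": {"user": 100, "nice": 0, "sys": 50, "idle": 850, "irq": 0}}
s1 = {"times": {"user": 130, "nice": 0, "sys": 60, "idle": 910, "irq": 0}}
assert round(busy_fraction(s0, s1), 2) == 0.40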
18,294
|
iopipe/iopipe-python
|
iopipe/config.py
|
set_config
|
def set_config(**config):
"""
Returns IOpipe configuration options, setting defaults as necessary.
"""
config.setdefault("debug", bool(strtobool(os.getenv("IOPIPE_DEBUG", "false"))))
config.setdefault("enabled", bool(strtobool(os.getenv("IOPIPE_ENABLED", "true"))))
config.setdefault("host", get_hostname())
config.setdefault("install_method", os.getenv("IOPIPE_INSTALL_METHOD", "manual"))
config.setdefault("network_timeout", os.getenv("IOPIPE_NETWORK_TIMEOUT", 5000))
config.setdefault("path", get_collector_path())
config.setdefault("plugins", [])
config.setdefault("sync_http", False)
config.setdefault("timeout_window", os.getenv("IOPIPE_TIMEOUT_WINDOW", 500))
config.setdefault(
"token", os.getenv("IOPIPE_TOKEN") or os.getenv("IOPIPE_CLIENTID") or ""
)
if "client_id" in config:
config["token"] = config.pop("client_id")
if "url" in config:
url = config.pop("url")
config["host"] = get_hostname(url)
config["path"] = get_collector_path(url)
if "." in str(config["network_timeout"]):
warnings.warn(
"IOpipe's 'network_timeout' is now in milliseconds, expressed as an integer"
)
try:
config["debug"] = bool(config["debug"])
except ValueError:
config["debug"] = False
try:
config["network_timeout"] = int(config["network_timeout"]) / 1000.0
except ValueError:
config["network_timeout"] = 5.0
if "." in str(config["timeout_window"]):
warnings.warn(
"IOpipe's 'timeout_window' is now in milliseconds, expressed as an integer"
)
try:
config["timeout_window"] = int(config["timeout_window"]) / 1000.0
except ValueError:
config["timeout_window"] = 0.5
return config
|
python
|
def set_config(**config):
"""
Returns IOpipe configuration options, setting defaults as necessary.
"""
config.setdefault("debug", bool(strtobool(os.getenv("IOPIPE_DEBUG", "false"))))
config.setdefault("enabled", bool(strtobool(os.getenv("IOPIPE_ENABLED", "true"))))
config.setdefault("host", get_hostname())
config.setdefault("install_method", os.getenv("IOPIPE_INSTALL_METHOD", "manual"))
config.setdefault("network_timeout", os.getenv("IOPIPE_NETWORK_TIMEOUT", 5000))
config.setdefault("path", get_collector_path())
config.setdefault("plugins", [])
config.setdefault("sync_http", False)
config.setdefault("timeout_window", os.getenv("IOPIPE_TIMEOUT_WINDOW", 500))
config.setdefault(
"token", os.getenv("IOPIPE_TOKEN") or os.getenv("IOPIPE_CLIENTID") or ""
)
if "client_id" in config:
config["token"] = config.pop("client_id")
if "url" in config:
url = config.pop("url")
config["host"] = get_hostname(url)
config["path"] = get_collector_path(url)
if "." in str(config["network_timeout"]):
warnings.warn(
"IOpipe's 'network_timeout' is now in milliseconds, expressed as an integer"
)
try:
config["debug"] = bool(config["debug"])
except ValueError:
config["debug"] = False
try:
config["network_timeout"] = int(config["network_timeout"]) / 1000.0
except ValueError:
config["network_timeout"] = 5.0
if "." in str(config["timeout_window"]):
warnings.warn(
"IOpipe's 'timeout_window' is now in milliseconds, expressed as an integer"
)
try:
config["timeout_window"] = int(config["timeout_window"]) / 1000.0
except ValueError:
config["timeout_window"] = 0.5
return config
|
[
"def",
"set_config",
"(",
"*",
"*",
"config",
")",
":",
"config",
".",
"setdefault",
"(",
"\"debug\"",
",",
"bool",
"(",
"strtobool",
"(",
"os",
".",
"getenv",
"(",
"\"IOPIPE_DEBUG\"",
",",
"\"false\"",
")",
")",
")",
")",
"config",
".",
"setdefault",
"(",
"\"enabled\"",
",",
"bool",
"(",
"strtobool",
"(",
"os",
".",
"getenv",
"(",
"\"IOPIPE_ENABLED\"",
",",
"\"true\"",
")",
")",
")",
")",
"config",
".",
"setdefault",
"(",
"\"host\"",
",",
"get_hostname",
"(",
")",
")",
"config",
".",
"setdefault",
"(",
"\"install_method\"",
",",
"os",
".",
"getenv",
"(",
"\"IOPIPE_INSTALL_METHOD\"",
",",
"\"manual\"",
")",
")",
"config",
".",
"setdefault",
"(",
"\"network_timeout\"",
",",
"os",
".",
"getenv",
"(",
"\"IOPIPE_NETWORK_TIMEOUT\"",
",",
"5000",
")",
")",
"config",
".",
"setdefault",
"(",
"\"path\"",
",",
"get_collector_path",
"(",
")",
")",
"config",
".",
"setdefault",
"(",
"\"plugins\"",
",",
"[",
"]",
")",
"config",
".",
"setdefault",
"(",
"\"sync_http\"",
",",
"False",
")",
"config",
".",
"setdefault",
"(",
"\"timeout_window\"",
",",
"os",
".",
"getenv",
"(",
"\"IOPIPE_TIMEOUT_WINDOW\"",
",",
"500",
")",
")",
"config",
".",
"setdefault",
"(",
"\"token\"",
",",
"os",
".",
"getenv",
"(",
"\"IOPIPE_TOKEN\"",
")",
"or",
"os",
".",
"getenv",
"(",
"\"IOPIPE_CLIENTID\"",
")",
"or",
"\"\"",
")",
"if",
"\"client_id\"",
"in",
"config",
":",
"config",
"[",
"\"token\"",
"]",
"=",
"config",
".",
"pop",
"(",
"\"client_id\"",
")",
"if",
"\"url\"",
"in",
"config",
":",
"url",
"=",
"config",
".",
"pop",
"(",
"\"url\"",
")",
"config",
"[",
"\"host\"",
"]",
"=",
"get_hostname",
"(",
"url",
")",
"config",
"[",
"\"path\"",
"]",
"=",
"get_collector_path",
"(",
"url",
")",
"if",
"\".\"",
"in",
"str",
"(",
"config",
"[",
"\"network_timeout\"",
"]",
")",
":",
"warnings",
".",
"warn",
"(",
"\"IOpipe's 'network_timeout' is now in milliseconds, expressed as an integer\"",
")",
"try",
":",
"config",
"[",
"\"debug\"",
"]",
"=",
"bool",
"(",
"config",
"[",
"\"debug\"",
"]",
")",
"except",
"ValueError",
":",
"config",
"[",
"\"debug\"",
"]",
"=",
"False",
"try",
":",
"config",
"[",
"\"network_timeout\"",
"]",
"=",
"int",
"(",
"config",
"[",
"\"network_timeout\"",
"]",
")",
"/",
"1000.0",
"except",
"ValueError",
":",
"config",
"[",
"\"network_timeout\"",
"]",
"=",
"5.0",
"if",
"\".\"",
"in",
"str",
"(",
"config",
"[",
"\"timeout_window\"",
"]",
")",
":",
"warnings",
".",
"warn",
"(",
"\"IOpipe's 'timeout_window' is now in milliseconds, expressed as an integer\"",
")",
"try",
":",
"config",
"[",
"\"timeout_window\"",
"]",
"=",
"int",
"(",
"config",
"[",
"\"timeout_window\"",
"]",
")",
"/",
"1000.0",
"except",
"ValueError",
":",
"config",
"[",
"\"timeout_window\"",
"]",
"=",
"0.5",
"return",
"config"
] |
Returns IOpipe configuration options, setting defaults as necessary.
|
[
"Returns",
"IOpipe",
"configuration",
"options",
"setting",
"defaults",
"as",
"necessary",
"."
] |
4eb653977341bc67f8b1b87aedb3aaaefc25af61
|
https://github.com/iopipe/iopipe-python/blob/4eb653977341bc67f8b1b87aedb3aaaefc25af61/iopipe/config.py#L8-L59
|
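A restatement of just the millisecond normalization set_config applies to network_timeout and timeout_window; a float string such as "5.5" raises ValueError in int(), which is why the function warns when it sees a dot.

def normalize_ms(value, fallback_seconds):
    try:
        return int(value) / 1000.0
    except ValueError:
        return fallback_seconds

assert normalize_ms(5000, 5.0) == 5.0     # ms -> s
assert normalize_ms("500", 0.5) == 0.5    # env vars arrive as strings
assert normalize_ms("5.5", 5.0) == 5.0    # dot -> ValueError -> fallback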
18,295
|
pief/python-netsnmpagent
|
netsnmpapi.py
|
b
|
def b(s):
""" Encodes Unicode strings to byte strings, if necessary. """
return s if isinstance(s, bytes) else s.encode(locale.getpreferredencoding())
|
python
|
def b(s):
""" Encodes Unicode strings to byte strings, if necessary. """
return s if isinstance(s, bytes) else s.encode(locale.getpreferredencoding())
|
[
"def",
"b",
"(",
"s",
")",
":",
"return",
"s",
"if",
"isinstance",
"(",
"s",
",",
"bytes",
")",
"else",
"s",
".",
"encode",
"(",
"locale",
".",
"getpreferredencoding",
"(",
")",
")"
] |
Encodes Unicode strings to byte strings, if necessary.
|
[
"Encodes",
"Unicode",
"strings",
"to",
"byte",
"strings",
"if",
"necessary",
"."
] |
b1aad1c7f034509c40d9ab17d59be32e809bd31d
|
https://github.com/pief/python-netsnmpagent/blob/b1aad1c7f034509c40d9ab17d59be32e809bd31d/netsnmpapi.py#L16-L19
|
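A self-contained demo of b(), restated so the asserts run standalone; the exact encoding is locale-dependent (often UTF-8).

import locale

def b(s):
    return s if isinstance(s, bytes) else s.encode(locale.getpreferredencoding())

assert b(b"raw") == b"raw"            # bytes pass through untouched
assert isinstance(b("text"), bytes)   # str gets encoded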
18,296
|
pief/python-netsnmpagent
|
examples/threading_agent.py
|
LogMsg
|
def LogMsg(msg):
""" Writes a formatted log message with a timestamp to stdout. """
global headerlogged
if headerlogged == 0:
print("{0:<8} {1:<90} {2}".format(
"Time",
"MainThread",
"UpdateSNMPObjsThread"
))
print("{0:-^120}".format("-"))
headerlogged = 1
threadname = threading.currentThread().name
funcname = sys._getframe(1).f_code.co_name
if funcname == "<module>":
funcname = "Main code path"
elif funcname == "LogNetSnmpMsg":
funcname = "net-snmp code"
else:
funcname = "{0}()".format(funcname)
if threadname == "MainThread":
logmsg = "{0} {1:<112.112}".format(
time.strftime("%T", time.localtime(time.time())),
"{0}: {1}".format(funcname, msg)
)
else:
logmsg = "{0} {1:>112.112}".format(
time.strftime("%T", time.localtime(time.time())),
"{0}: {1}".format(funcname, msg)
)
print(logmsg)
|
python
|
def LogMsg(msg):
""" Writes a formatted log message with a timestamp to stdout. """
global headerlogged
if headerlogged == 0:
print("{0:<8} {1:<90} {2}".format(
"Time",
"MainThread",
"UpdateSNMPObjsThread"
))
print("{0:-^120}".format("-"))
headerlogged = 1
threadname = threading.currentThread().name
funcname = sys._getframe(1).f_code.co_name
if funcname == "<module>":
funcname = "Main code path"
elif funcname == "LogNetSnmpMsg":
funcname = "net-snmp code"
else:
funcname = "{0}()".format(funcname)
if threadname == "MainThread":
logmsg = "{0} {1:<112.112}".format(
time.strftime("%T", time.localtime(time.time())),
"{0}: {1}".format(funcname, msg)
)
else:
logmsg = "{0} {1:>112.112}".format(
time.strftime("%T", time.localtime(time.time())),
"{0}: {1}".format(funcname, msg)
)
print(logmsg)
|
[
"def",
"LogMsg",
"(",
"msg",
")",
":",
"global",
"headerlogged",
"if",
"headerlogged",
"==",
"0",
":",
"print",
"(",
"\"{0:<8} {1:<90} {2}\"",
".",
"format",
"(",
"\"Time\"",
",",
"\"MainThread\"",
",",
"\"UpdateSNMPObjsThread\"",
")",
")",
"print",
"(",
"\"{0:-^120}\"",
".",
"format",
"(",
"\"-\"",
")",
")",
"headerlogged",
"=",
"1",
"threadname",
"=",
"threading",
".",
"currentThread",
"(",
")",
".",
"name",
"funcname",
"=",
"sys",
".",
"_getframe",
"(",
"1",
")",
".",
"f_code",
".",
"co_name",
"if",
"funcname",
"==",
"\"<module>\"",
":",
"funcname",
"=",
"\"Main code path\"",
"elif",
"funcname",
"==",
"\"LogNetSnmpMsg\"",
":",
"funcname",
"=",
"\"net-snmp code\"",
"else",
":",
"funcname",
"=",
"\"{0}()\"",
".",
"format",
"(",
"funcname",
")",
"if",
"threadname",
"==",
"\"MainThread\"",
":",
"logmsg",
"=",
"\"{0} {1:<112.112}\"",
".",
"format",
"(",
"time",
".",
"strftime",
"(",
"\"%T\"",
",",
"time",
".",
"localtime",
"(",
"time",
".",
"time",
"(",
")",
")",
")",
",",
"\"{0}: {1}\"",
".",
"format",
"(",
"funcname",
",",
"msg",
")",
")",
"else",
":",
"logmsg",
"=",
"\"{0} {1:>112.112}\"",
".",
"format",
"(",
"time",
".",
"strftime",
"(",
"\"%T\"",
",",
"time",
".",
"localtime",
"(",
"time",
".",
"time",
"(",
")",
")",
")",
",",
"\"{0}: {1}\"",
".",
"format",
"(",
"funcname",
",",
"msg",
")",
")",
"print",
"(",
"logmsg",
")"
] |
Writes a formatted log message with a timestamp to stdout.
|
[
"Writes",
"a",
"formatted",
"log",
"message",
"with",
"a",
"timestamp",
"to",
"stdout",
"."
] |
b1aad1c7f034509c40d9ab17d59be32e809bd31d
|
https://github.com/pief/python-netsnmpagent/blob/b1aad1c7f034509c40d9ab17d59be32e809bd31d/examples/threading_agent.py#L70-L104
|
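Portability aside, hedged: sys._getframe is a CPython implementation detail, and threading.currentThread is the legacy spelling of current_thread; the inspect module offers a portable way to recover the caller's name.

import inspect

def caller_name():
    return inspect.stack()[1].function

def demo():
    return caller_name()

assert demo() == "demo"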
18,297
|
pief/python-netsnmpagent
|
examples/threading_agent.py
|
UpdateSNMPObjs
|
def UpdateSNMPObjs():
""" Function that does the actual data update. """
global threadingString
LogMsg("Beginning data update.")
data = ""
# Obtain the data by calling an external command. We don't use
# subprocess.check_output() here for compatibility with Python versions
# older than 2.7.
LogMsg("Calling external command \"sleep 5; date\".")
proc = subprocess.Popen(
"sleep 5; date", shell=True, env={ "LANG": "C" },
stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
output = proc.communicate()[0].splitlines()[0]
rc = proc.poll()
if rc != 0:
LogMsg("An error occured executing the command: {0}".format(output))
return
msg = "Updating \"threadingString\" object with data \"{0}\"."
LogMsg(msg.format(output))
threadingString.update(output)
LogMsg("Data update done, exiting thread.")
|
python
|
def UpdateSNMPObjs():
""" Function that does the actual data update. """
global threadingString
LogMsg("Beginning data update.")
data = ""
# Obtain the data by calling an external command. We don't use
# subprocess.check_output() here for compatibility with Python versions
# older than 2.7.
LogMsg("Calling external command \"sleep 5; date\".")
proc = subprocess.Popen(
"sleep 5; date", shell=True, env={ "LANG": "C" },
stdout=subprocess.PIPE, stderr=subprocess.STDOUT
)
output = proc.communicate()[0].splitlines()[0]
rc = proc.poll()
if rc != 0:
LogMsg("An error occured executing the command: {0}".format(output))
return
msg = "Updating \"threadingString\" object with data \"{0}\"."
LogMsg(msg.format(output))
threadingString.update(output)
LogMsg("Data update done, exiting thread.")
|
[
"def",
"UpdateSNMPObjs",
"(",
")",
":",
"global",
"threadingString",
"LogMsg",
"(",
"\"Beginning data update.\"",
")",
"data",
"=",
"\"\"",
"# Obtain the data by calling an external command. We don't use",
"# subprocess.check_output() here for compatibility with Python versions",
"# older than 2.7.",
"LogMsg",
"(",
"\"Calling external command \\\"sleep 5; date\\\".\"",
")",
"proc",
"=",
"subprocess",
".",
"Popen",
"(",
"\"sleep 5; date\"",
",",
"shell",
"=",
"True",
",",
"env",
"=",
"{",
"\"LANG\"",
":",
"\"C\"",
"}",
",",
"stdout",
"=",
"subprocess",
".",
"PIPE",
",",
"stderr",
"=",
"subprocess",
".",
"STDOUT",
")",
"output",
"=",
"proc",
".",
"communicate",
"(",
")",
"[",
"0",
"]",
".",
"splitlines",
"(",
")",
"[",
"0",
"]",
"rc",
"=",
"proc",
".",
"poll",
"(",
")",
"if",
"rc",
"!=",
"0",
":",
"LogMsg",
"(",
"\"An error occured executing the command: {0}\"",
".",
"format",
"(",
"output",
")",
")",
"return",
"msg",
"=",
"\"Updating \\\"threadingString\\\" object with data \\\"{0}\\\".\"",
"LogMsg",
"(",
"msg",
".",
"format",
"(",
"output",
")",
")",
"threadingString",
".",
"update",
"(",
"output",
")",
"LogMsg",
"(",
"\"Data update done, exiting thread.\"",
")"
] |
Function that does the actual data update.
|
[
"Function",
"that",
"does",
"the",
"actual",
"data",
"update",
"."
] |
b1aad1c7f034509c40d9ab17d59be32e809bd31d
|
https://github.com/pief/python-netsnmpagent/blob/b1aad1c7f034509c40d9ab17d59be32e809bd31d/examples/threading_agent.py#L131-L157
|
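As the comment in the row above says, the manual Popen/communicate/poll dance exists for pre-2.7 compatibility; on 2.7+ subprocess.check_output is the usual replacement and raises CalledProcessError on a non-zero exit. POSIX-only demo:

import subprocess

output = subprocess.check_output("echo demo", shell=True, env={"LANG": "C"})
assert output.splitlines()[0] == b"demo"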
18,298
|
pief/python-netsnmpagent
|
netsnmpagent.py
|
netsnmpAgent.getRegistered
|
def getRegistered(self, context = ""):
""" Returns a dictionary with the currently registered SNMP objects.
	    Returned is a dictionary of objects for the specified "context",
which defaults to the default context. """
myobjs = {}
try:
# Python 2.x
objs_iterator = self._objs[context].iteritems()
except AttributeError:
# Python 3.x
objs_iterator = self._objs[context].items()
for oidstr, snmpobj in objs_iterator:
myobjs[oidstr] = {
"type": type(snmpobj).__name__,
"value": snmpobj.value()
}
return dict(myobjs)
|
python
|
def getRegistered(self, context = ""):
""" Returns a dictionary with the currently registered SNMP objects.
	    Returned is a dictionary of objects for the specified "context",
which defaults to the default context. """
myobjs = {}
try:
# Python 2.x
objs_iterator = self._objs[context].iteritems()
except AttributeError:
# Python 3.x
objs_iterator = self._objs[context].items()
for oidstr, snmpobj in objs_iterator:
myobjs[oidstr] = {
"type": type(snmpobj).__name__,
"value": snmpobj.value()
}
return dict(myobjs)
|
[
"def",
"getRegistered",
"(",
"self",
",",
"context",
"=",
"\"\"",
")",
":",
"myobjs",
"=",
"{",
"}",
"try",
":",
"# Python 2.x",
"objs_iterator",
"=",
"self",
".",
"_objs",
"[",
"context",
"]",
".",
"iteritems",
"(",
")",
"except",
"AttributeError",
":",
"# Python 3.x",
"objs_iterator",
"=",
"self",
".",
"_objs",
"[",
"context",
"]",
".",
"items",
"(",
")",
"for",
"oidstr",
",",
"snmpobj",
"in",
"objs_iterator",
":",
"myobjs",
"[",
"oidstr",
"]",
"=",
"{",
"\"type\"",
":",
"type",
"(",
"snmpobj",
")",
".",
"__name__",
",",
"\"value\"",
":",
"snmpobj",
".",
"value",
"(",
")",
"}",
"return",
"dict",
"(",
"myobjs",
")"
] |
Returns a dictionary with the currently registered SNMP objects.
Returned is a dictionary of objects for the specified "context",
which defaults to the default context.
|
[
"Returns",
"a",
"dictionary",
"with",
"the",
"currently",
"registered",
"SNMP",
"objects",
"."
] |
b1aad1c7f034509c40d9ab17d59be32e809bd31d
|
https://github.com/pief/python-netsnmpagent/blob/b1aad1c7f034509c40d9ab17d59be32e809bd31d/netsnmpagent.py#L696-L713
|
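A hedged sketch of the return shape only; the OID string and type name below are illustrative, not taken from the library.

expected_shape = {
    "EXAMPLE-MIB::someInteger": {"type": "Integer32", "value": 0},  # hypothetical entry
}
for entry in expected_shape.values():
    assert set(entry) == {"type", "value"}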
18,299
|
pief/python-netsnmpagent
|
netsnmpagent.py
|
netsnmpAgent.start
|
def start(self):
""" Starts the agent. Among other things, this means connecting
to the master agent, if configured that way. """
if self._status != netsnmpAgentStatus.CONNECTED \
and self._status != netsnmpAgentStatus.RECONNECTING:
self._status = netsnmpAgentStatus.FIRSTCONNECT
libnsa.init_snmp(b(self.AgentName))
if self._status == netsnmpAgentStatus.CONNECTFAILED:
msg = "Error connecting to snmpd instance at \"{0}\" -- " \
"incorrect \"MasterSocket\" or snmpd not running?"
msg = msg.format(self.MasterSocket)
raise netsnmpAgentException(msg)
|
python
|
def start(self):
""" Starts the agent. Among other things, this means connecting
to the master agent, if configured that way. """
if self._status != netsnmpAgentStatus.CONNECTED \
and self._status != netsnmpAgentStatus.RECONNECTING:
self._status = netsnmpAgentStatus.FIRSTCONNECT
libnsa.init_snmp(b(self.AgentName))
if self._status == netsnmpAgentStatus.CONNECTFAILED:
msg = "Error connecting to snmpd instance at \"{0}\" -- " \
"incorrect \"MasterSocket\" or snmpd not running?"
msg = msg.format(self.MasterSocket)
raise netsnmpAgentException(msg)
|
[
"def",
"start",
"(",
"self",
")",
":",
"if",
"self",
".",
"_status",
"!=",
"netsnmpAgentStatus",
".",
"CONNECTED",
"and",
"self",
".",
"_status",
"!=",
"netsnmpAgentStatus",
".",
"RECONNECTING",
":",
"self",
".",
"_status",
"=",
"netsnmpAgentStatus",
".",
"FIRSTCONNECT",
"libnsa",
".",
"init_snmp",
"(",
"b",
"(",
"self",
".",
"AgentName",
")",
")",
"if",
"self",
".",
"_status",
"==",
"netsnmpAgentStatus",
".",
"CONNECTFAILED",
":",
"msg",
"=",
"\"Error connecting to snmpd instance at \\\"{0}\\\" -- \"",
"\"incorrect \\\"MasterSocket\\\" or snmpd not running?\"",
"msg",
"=",
"msg",
".",
"format",
"(",
"self",
".",
"MasterSocket",
")",
"raise",
"netsnmpAgentException",
"(",
"msg",
")"
] |
Starts the agent. Among other things, this means connecting
to the master agent, if configured that way.
|
[
"Starts",
"the",
"agent",
".",
"Among",
"other",
"things",
"this",
"means",
"connecting",
"to",
"the",
"master",
"agent",
"if",
"configured",
"that",
"way",
"."
] |
b1aad1c7f034509c40d9ab17d59be32e809bd31d
|
https://github.com/pief/python-netsnmpagent/blob/b1aad1c7f034509c40d9ab17d59be32e809bd31d/netsnmpagent.py#L715-L726
|
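A hedged lifecycle sketch; everything stays commented out because it needs a running snmpd master agent, and the socket path is an assumption.

# import netsnmpagent
# agent = netsnmpagent.netsnmpAgent(
#     AgentName="DemoAgent",
#     MasterSocket="/var/run/agentx/master",  # assumed AgentX socket path
# )
# agent.start()                  # raises netsnmpAgentException if snmpd is unreachable
# while True:
#     agent.check_and_process()  # serve incoming SNMP requests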